From 06ec42b1aef720b88bf390bddc6d10b7c5d8e69d Mon Sep 17 00:00:00 2001
From: Anvesh Reddy Pinnapureddy
Date: Fri, 28 Feb 2025 09:26:28 +0530
Subject: [PATCH] Enable e2e tests with cloud provider emulators for AWS, GCP
 and Azure (#743)

* Make the e2e tests functional
* Add support for running them against the major cloud providers (AWS, GCP,
  Azure) and their emulators (LocalStack, fake-gcs-server, Azurite)
* Upgrade the helm pkg from `k8s.io/helm` (Helm v2) to `helm.sh/helm/v3`
---
 .ci/integration_test                          |  79 +-
 .gitignore                                    |   4 +
 Makefile                                      |  50 +-
 .../templates/etcd-backup-secret.yaml         |   2 +
 .../templates/etcd-statefulset.yaml           |   3 +-
 docs/development/testing_and_dependencies.md  |  17 +-
 docs/development/tests.md                     |  84 +
 go.mod                                        |  84 +-
 go.sum                                        | 299 +-
 hack/ci-e2e-kind.sh                           |  24 +
 hack/config/aws_config.sh                     |  35 +
 hack/config/azure_config.sh                   |  33 +
 hack/config/gcp_config.sh                     |  28 +
 hack/deploy-azurite.sh                        |  14 +
 hack/deploy-fakegcs.sh                        |  14 +
 hack/deploy-localstack.sh                     |  14 +
 .../infrastructure/azurite/azurite.yaml       |  41 +
 .../fake-gcs-server/fake-gcs-server.yaml      |  46 +
 .../e2e-test/infrastructure/kind/cluster.yaml |  29 +
 .../infrastructure/localstack/localstack.yaml |  47 +
 hack/e2e-test/run-e2e-test.sh                 | 451 +
 hack/kind-up.sh                               |  12 +
 hack/tools.mk                                 |  35 +-
 hack/tools/bin/.gitkeep                       |   0
 .../{integrationcluster => }/backup_test.go   | 133 +-
 test/e2e/integrationcluster/utils.go          | 460 -
 .../integrationcluster_suite_test.go          | 124 +-
 test/e2e/utils.go                             | 620 +
 .../integration/cloud_backup_test.go          |   3 +-
 .../integration/integration_suite_test.go     |   0
 test/{e2e => }/integration/utils.go           |   0
vendor/github.com/BurntSushi/toml/error.go | 279 + .../github.com/BurntSushi/toml/internal/tz.go | 36 + vendor/github.com/BurntSushi/toml/lex.go | 846 +- .../toml/{decode_meta.go => meta.go} | 124 +- vendor/github.com/BurntSushi/toml/parse.go | 715 +- vendor/github.com/BurntSushi/toml/session.vim | 1 - .../github.com/BurntSushi/toml/type_fields.go | 4 +- .../toml/{type_check.go => type_toml.go} | 23 +- vendor/github.com/MakeNowJust/heredoc/LICENSE | 21 + .../github.com/MakeNowJust/heredoc/README.md | 52 + .../github.com/MakeNowJust/heredoc/heredoc.go | 105 + .../github.com/Masterminds/semver/.travis.yml | 29 - .../Masterminds/semver/CHANGELOG.md | 109 - vendor/github.com/Masterminds/semver/Makefile | 36 - .../github.com/Masterminds/semver/README.md | 194 - .../Masterminds/semver/appveyor.yml | 44 - .../Masterminds/semver/constraints.go | 423 - vendor/github.com/Masterminds/semver/doc.go | 115 - .../Masterminds/semver/v3/.gitignore | 1 + .../Masterminds/semver/v3/.golangci.yml | 27 + .../Masterminds/semver/v3/CHANGELOG.md | 214 + .../Masterminds/semver/{ => v3}/LICENSE.txt | 0 .../github.com/Masterminds/semver/v3/Makefile | 30 + .../Masterminds/semver/v3/README.md | 258 + .../Masterminds/semver/v3/SECURITY.md | 19 + .../Masterminds/semver/{ => v3}/collection.go | 0 .../Masterminds/semver/v3/constraints.go | 594 + .../github.com/Masterminds/semver/v3/doc.go | 184 + .../Masterminds/semver/{ => v3}/version.go | 288 +- .../Masterminds/semver/version_fuzz.go | 10 - .../github.com/Masterminds/sprig/.travis.yml | 26 - vendor/github.com/Masterminds/sprig/Makefile | 13 - .../github.com/Masterminds/sprig/appveyor.yml | 26 - .../github.com/Masterminds/sprig/glide.yaml | 19 - .../github.com/Masterminds/sprig/numeric.go | 169 - vendor/github.com/Masterminds/sprig/regex.go | 35 - .../Masterminds/sprig/{ => v3}/.gitignore | 0 .../Masterminds/sprig/{ => v3}/CHANGELOG.md | 101 + .../Masterminds/sprig/{ => v3}/LICENSE.txt | 3 +- .../github.com/Masterminds/sprig/v3/Makefile | 9 + .../Masterminds/sprig/{ => v3}/README.md | 26 +- .../Masterminds/sprig/{ => v3}/crypto.go | 261 +- .../Masterminds/sprig/{ => v3}/date.go | 69 + .../Masterminds/sprig/{ => v3}/defaults.go | 82 +- .../Masterminds/sprig/{ => v3}/dict.go | 57 +- .../Masterminds/sprig/{ => v3}/doc.go | 2 +- .../Masterminds/sprig/{ => v3}/functions.go | 184 +- .../Masterminds/sprig/{ => v3}/list.go | 215 +- .../Masterminds/sprig/{ => v3}/network.go | 2 +- .../Masterminds/sprig/v3/numeric.go | 186 + .../Masterminds/sprig/{ => v3}/reflect.go | 0 .../github.com/Masterminds/sprig/v3/regex.go | 83 + .../Masterminds/sprig/{ => v3}/semver.go | 2 +- .../Masterminds/sprig/{ => v3}/strings.go | 13 +- .../Masterminds/sprig/{ => v3}/url.go | 36 +- .../Masterminds/squirrel/.gitignore | 1 + .../Masterminds/squirrel/.travis.yml | 30 + .../github.com/Masterminds/squirrel/LICENSE | 23 + .../github.com/Masterminds/squirrel/README.md | 142 + .../github.com/Masterminds/squirrel/case.go | 128 + .../github.com/Masterminds/squirrel/delete.go | 191 + .../Masterminds/squirrel/delete_ctx.go | 69 + .../github.com/Masterminds/squirrel/expr.go | 419 + .../github.com/Masterminds/squirrel/insert.go | 298 + .../Masterminds/squirrel/insert_ctx.go | 69 + .../github.com/Masterminds/squirrel/part.go | 63 + .../Masterminds/squirrel/placeholder.go | 114 + vendor/github.com/Masterminds/squirrel/row.go | 22 + .../github.com/Masterminds/squirrel/select.go | 403 + .../Masterminds/squirrel/select_ctx.go | 69 + .../Masterminds/squirrel/squirrel.go | 183 + .../Masterminds/squirrel/squirrel_ctx.go | 
93 + .../Masterminds/squirrel/statement.go | 104 + .../Masterminds/squirrel/stmtcacher.go | 121 + .../Masterminds/squirrel/stmtcacher_ctx.go | 86 + .../Masterminds/squirrel/stmtcacher_noctx.go | 21 + .../github.com/Masterminds/squirrel/update.go | 288 + .../Masterminds/squirrel/update_ctx.go | 69 + .../github.com/Masterminds/squirrel/where.go | 30 + vendor/github.com/Microsoft/hcsshim/LICENSE | 21 + .../hcsshim/osversion/osversion_windows.go | 59 + .../osversion/platform_compat_windows.go | 35 + .../hcsshim/osversion/windowsbuilds.go | 84 + .../asaskevich/govalidator/.gitignore | 15 + .../asaskevich/govalidator/.travis.yml | 18 + .../asaskevich/govalidator/CONTRIBUTING.md | 63 + .../github.com/asaskevich/govalidator/LICENSE | 21 + .../asaskevich/govalidator/README.md | 616 + .../asaskevich/govalidator/arrays.go | 58 + .../asaskevich/govalidator/converter.go | 64 + .../github.com/asaskevich/govalidator/doc.go | 3 + .../asaskevich/govalidator/error.go | 47 + .../asaskevich/govalidator/numerics.go | 97 + .../asaskevich/govalidator/patterns.go | 105 + .../asaskevich/govalidator/types.go | 653 + .../asaskevich/govalidator/utils.go | 270 + .../asaskevich/govalidator/validator.go | 1530 + .../asaskevich/govalidator/wercker.yml | 15 + .../chai2010/gettext-go/.travis.yml | 5 + vendor/github.com/chai2010/gettext-go/LICENSE | 27 + .../github.com/chai2010/gettext-go/README.md | 191 + vendor/github.com/chai2010/gettext-go/doc.go | 67 + vendor/github.com/chai2010/gettext-go/fs.go | 84 + .../github.com/chai2010/gettext-go/fs_json.go | 66 + .../github.com/chai2010/gettext-go/fs_os.go | 91 + .../github.com/chai2010/gettext-go/fs_zip.go | 142 + .../github.com/chai2010/gettext-go/gettext.go | 219 + .../github.com/chai2010/gettext-go/locale.go | 205 + .../github.com/chai2010/gettext-go/mo/doc.go | 74 + .../chai2010/gettext-go/mo/encoder.go | 105 + .../github.com/chai2010/gettext-go/mo/file.go | 197 + .../chai2010/gettext-go/mo/header.go | 109 + .../chai2010/gettext-go/mo/message.go | 52 + .../github.com/chai2010/gettext-go/mo/util.go | 110 + .../chai2010/gettext-go/plural/doc.go | 36 + .../chai2010/gettext-go/plural/formula.go | 181 + .../chai2010/gettext-go/plural/table.go | 55 + .../chai2010/gettext-go/po/comment.go | 270 + .../github.com/chai2010/gettext-go/po/doc.go | 24 + .../github.com/chai2010/gettext-go/po/file.go | 81 + .../chai2010/gettext-go/po/header.go | 106 + .../chai2010/gettext-go/po/line_reader.go | 62 + .../chai2010/gettext-go/po/message.go | 193 + .../github.com/chai2010/gettext-go/po/re.go | 58 + .../github.com/chai2010/gettext-go/po/util.go | 114 + vendor/github.com/chai2010/gettext-go/tr.go | 175 + vendor/github.com/chai2010/gettext-go/util.go | 34 + .../github.com/containerd/containerd/LICENSE | 191 + .../github.com/containerd/containerd/NOTICE | 16 + .../archive/compression/compression.go | 323 + .../archive/compression/compression_fuzzer.go | 28 + .../containerd/containerd/content/adaptor.go | 52 + .../containerd/containerd/content/content.go | 201 + .../containerd/containerd/content/helpers.go | 334 + .../content/local/content_local_fuzzer.go | 76 + .../containerd/content/local/locks.go | 62 + .../containerd/content/local/readerat.go | 72 + .../containerd/content/local/store.go | 704 + .../containerd/content/local/store_bsd.go | 33 + .../containerd/content/local/store_openbsd.go | 33 + .../containerd/content/local/store_unix.go | 33 + .../containerd/content/local/store_windows.go | 26 + .../containerd/content/local/test_helper.go | 38 + .../containerd/content/local/writer.go | 208 + 
.../containerd/containerd/errdefs/errors.go | 92 + .../containerd/containerd/errdefs/grpc.go | 147 + .../containerd/containerd/filters/adaptor.go | 33 + .../containerd/containerd/filters/filter.go | 178 + .../containerd/containerd/filters/parser.go | 290 + .../containerd/containerd/filters/quote.go | 252 + .../containerd/containerd/filters/scanner.go | 297 + .../containerd/images/annotations.go | 23 + .../containerd/containerd/images/diffid.go | 81 + .../containerd/containerd/images/handlers.go | 322 + .../containerd/containerd/images/image.go | 440 + .../containerd/images/importexport.go | 37 + .../containerd/containerd/images/labels.go | 21 + .../containerd/images/mediatypes.go | 215 + .../containerd/containerd/labels/labels.go | 29 + .../containerd/containerd/labels/validate.go | 41 + .../containerd/log/context_deprecated.go | 149 + .../containerd/pkg/randutil/randutil.go | 48 + .../containerd/platforms/compare.go | 203 + .../containerd/platforms/cpuinfo.go | 43 + .../containerd/platforms/cpuinfo_linux.go | 161 + .../containerd/platforms/cpuinfo_other.go | 59 + .../containerd/platforms/database.go | 109 + .../containerd/platforms/defaults.go | 27 + .../containerd/platforms/defaults_darwin.go | 44 + .../containerd/platforms/defaults_freebsd.go | 43 + .../containerd/platforms/defaults_unix.go | 40 + .../containerd/platforms/defaults_windows.go | 119 + .../containerd/platforms/platforms.go | 276 + .../containerd/platforms/platforms_other.go | 34 + .../containerd/platforms/platforms_windows.go | 42 + .../containerd/reference/docker/helpers.go | 58 + .../containerd/reference/docker/normalize.go | 196 + .../containerd/reference/docker/reference.go | 453 + .../containerd/reference/docker/regexp.go | 191 + .../containerd/reference/docker/sort.go | 73 + .../containerd/reference/reference.go | 166 + .../containerd/remotes/docker/auth/fetch.go | 225 + .../containerd/remotes/docker/auth/parse.go | 200 + .../containerd/remotes/docker/authorizer.go | 353 + .../containerd/remotes/docker/converter.go | 87 + .../remotes/docker/converter_fuzz.go | 55 + .../containerd/remotes/docker/errcode.go | 283 + .../containerd/remotes/docker/errdesc.go | 154 + .../containerd/remotes/docker/fetcher.go | 313 + .../containerd/remotes/docker/fetcher_fuzz.go | 81 + .../containerd/remotes/docker/handler.go | 149 + .../remotes/docker/httpreadseeker.go | 178 + .../containerd/remotes/docker/pusher.go | 545 + .../containerd/remotes/docker/registry.go | 244 + .../containerd/remotes/docker/resolver.go | 729 + .../remotes/docker/schema1/converter.go | 608 + .../containerd/remotes/docker/scope.go | 101 + .../containerd/remotes/docker/status.go | 101 + .../containerd/remotes/errors/errors.go | 55 + .../containerd/containerd/remotes/handlers.go | 413 + .../containerd/containerd/remotes/resolver.go | 94 + .../containerd/containerd/tracing/helpers.go | 94 + .../containerd/containerd/tracing/log.go | 66 + .../containerd/containerd/tracing/tracing.go | 129 + .../containerd/containerd/version/version.go | 34 + .../github.com/containerd/log/.golangci.yml | 30 + vendor/github.com/containerd/log/LICENSE | 191 + vendor/github.com/containerd/log/README.md | 17 + vendor/github.com/containerd/log/context.go | 182 + vendor/github.com/docker/cli/AUTHORS | 852 + vendor/github.com/docker/cli/LICENSE | 191 + vendor/github.com/docker/cli/NOTICE | 19 + .../docker/cli/cli/config/config.go | 153 + .../docker/cli/cli/config/configfile/file.go | 352 + .../cli/cli/config/configfile/file_unix.go | 36 + .../cli/cli/config/configfile/file_windows.go | 5 + 
.../cli/cli/config/credentials/credentials.go | 17 + .../cli/config/credentials/default_store.go | 21 + .../credentials/default_store_darwin.go | 5 + .../config/credentials/default_store_linux.go | 13 + .../credentials/default_store_unsupported.go | 8 + .../credentials/default_store_windows.go | 5 + .../cli/cli/config/credentials/file_store.go | 81 + .../cli/config/credentials/native_store.go | 143 + .../docker/cli/cli/config/types/authconfig.go | 22 + .../docker/distribution/.dockerignore | 1 + .../github.com/docker/distribution/.gitignore | 38 + .../docker/distribution/.golangci.yml | 27 + .../github.com/docker/distribution/.mailmap | 51 + .../docker/distribution/BUILDING.md | 117 + .../docker/distribution/CONTRIBUTING.md | 148 + .../github.com/docker/distribution/Dockerfile | 60 + vendor/github.com/docker/distribution/LICENSE | 202 + .../docker/distribution/MAINTAINERS | 243 + .../github.com/docker/distribution/Makefile | 102 + .../github.com/docker/distribution/README.md | 130 + .../github.com/docker/distribution/ROADMAP.md | 267 + .../github.com/docker/distribution/blobs.go | 265 + .../docker/distribution/digestset/set.go | 247 + vendor/github.com/docker/distribution/doc.go | 7 + .../docker/distribution/docker-bake.hcl | 56 + .../github.com/docker/distribution/errors.go | 119 + .../docker/distribution/manifests.go | 125 + .../docker/distribution/metrics/prometheus.go | 13 + .../docker/distribution/reference/helpers.go | 42 + .../distribution/reference/normalize.go | 199 + .../distribution/reference/reference.go | 433 + .../docker/distribution/reference/regexp.go | 143 + .../docker/distribution/registry.go | 118 + .../registry/api/errcode/errors.go | 267 + .../registry/api/errcode/handler.go | 40 + .../registry/api/errcode/register.go | 138 + .../registry/api/v2/descriptors.go | 1613 + .../distribution/registry/api/v2/doc.go | 9 + .../distribution/registry/api/v2/errors.go | 145 + .../registry/api/v2/headerparser.go | 161 + .../distribution/registry/api/v2/routes.go | 40 + .../distribution/registry/api/v2/urls.go | 254 + .../registry/client/auth/api_version.go | 58 + .../registry/client/auth/challenge/addr.go | 27 + .../client/auth/challenge/authchallenge.go | 237 + .../registry/client/auth/session.go | 530 + .../registry/client/blob_writer.go | 162 + .../distribution/registry/client/errors.go | 141 + .../registry/client/repository.go | 870 + .../registry/client/transport/http_reader.go | 249 + .../registry/client/transport/transport.go | 147 + .../registry/storage/cache/cache.go | 35 + .../cache/cachedblobdescriptorstore.go | 129 + .../registry/storage/cache/memory/memory.go | 179 + vendor/github.com/docker/distribution/tags.go | 27 + .../docker/distribution/vendor.conf | 51 + .../docker/docker-credential-helpers/LICENSE | 20 + .../client/client.go | 121 + .../client/command.go | 57 + .../credentials/credentials.go | 186 + .../credentials/error.go | 102 + .../credentials/helper.go | 14 + .../credentials/version.go | 16 + vendor/github.com/docker/docker/AUTHORS | 2390 + vendor/github.com/docker/docker/LICENSE | 191 + vendor/github.com/docker/docker/NOTICE | 19 + .../docker/docker/api/types/auth.go | 7 + .../docker/docker/api/types/blkiodev/blkio.go | 23 + .../docker/docker/api/types/client.go | 444 + .../docker/docker/api/types/configs.go | 67 + .../container/change_response_deprecated.go | 6 + .../docker/api/types/container/change_type.go | 15 + .../api/types/container/change_types.go | 23 + .../docker/api/types/container/config.go | 96 + .../api/types/container/container_top.go | 22 
+ .../api/types/container/container_update.go | 16 + .../api/types/container/create_response.go | 19 + .../api/types/container/filesystem_change.go | 19 + .../docker/api/types/container/hostconfig.go | 456 + .../api/types/container/hostconfig_unix.go | 42 + .../api/types/container/hostconfig_windows.go | 40 + .../api/types/container/wait_exit_error.go | 12 + .../api/types/container/wait_response.go | 18 + .../api/types/container/waitcondition.go | 22 + .../docker/docker/api/types/error_response.go | 13 + .../docker/api/types/error_response_ext.go | 6 + .../docker/docker/api/types/filters/errors.go | 37 + .../docker/docker/api/types/filters/parse.go | 346 + .../docker/api/types/graph_driver_data.go | 23 + .../docker/docker/api/types/id_response.go | 13 + .../api/types/image_delete_response_item.go | 15 + .../docker/docker/api/types/image_summary.go | 94 + .../docker/docker/api/types/mount/mount.go | 140 + .../docker/api/types/network/network.go | 126 + .../docker/docker/api/types/plugin.go | 203 + .../docker/docker/api/types/plugin_device.go | 25 + .../docker/docker/api/types/plugin_env.go | 25 + .../docker/api/types/plugin_interface_type.go | 21 + .../docker/docker/api/types/plugin_mount.go | 37 + .../docker/api/types/plugin_responses.go | 71 + .../docker/docker/api/types/port.go | 23 + .../docker/api/types/registry/authconfig.go | 99 + .../docker/api/types/registry/authenticate.go | 21 + .../docker/api/types/registry/registry.go | 120 + .../api/types/service_update_response.go | 12 + .../docker/docker/api/types/stats.go | 181 + .../docker/api/types/strslice/strslice.go | 30 + .../docker/docker/api/types/swarm/common.go | 48 + .../docker/docker/api/types/swarm/config.go | 40 + .../docker/api/types/swarm/container.go | 80 + .../docker/docker/api/types/swarm/network.go | 121 + .../docker/docker/api/types/swarm/node.go | 139 + .../docker/docker/api/types/swarm/runtime.go | 27 + .../docker/api/types/swarm/runtime/gen.go | 3 + .../api/types/swarm/runtime/plugin.pb.go | 754 + .../api/types/swarm/runtime/plugin.proto | 21 + .../docker/docker/api/types/swarm/secret.go | 36 + .../docker/docker/api/types/swarm/service.go | 202 + .../docker/docker/api/types/swarm/swarm.go | 237 + .../docker/docker/api/types/swarm/task.go | 225 + .../docker/docker/api/types/types.go | 811 + .../docker/api/types/versions/README.md | 14 + .../docker/api/types/versions/compare.go | 65 + .../docker/api/types/volume/cluster_volume.go | 420 + .../docker/api/types/volume/create_options.go | 29 + .../docker/api/types/volume/list_response.go | 18 + .../docker/docker/api/types/volume/options.go | 8 + .../docker/docker/api/types/volume/volume.go | 75 + .../docker/api/types/volume/volume_update.go | 7 + .../github.com/docker/docker/errdefs/defs.go | 69 + .../github.com/docker/docker/errdefs/doc.go | 8 + .../docker/docker/errdefs/helpers.go | 279 + .../docker/docker/errdefs/http_helpers.go | 46 + vendor/github.com/docker/docker/errdefs/is.go | 107 + .../docker/pkg/homedir/homedir_linux.go | 105 + .../docker/pkg/homedir/homedir_others.go | 33 + .../docker/docker/pkg/homedir/homedir_unix.go | 39 + .../docker/pkg/homedir/homedir_windows.go | 24 + .../docker/docker/pkg/ioutils/buffer.go | 51 + .../docker/docker/pkg/ioutils/bytespipe.go | 187 + .../docker/docker/pkg/ioutils/fswriters.go | 161 + .../docker/docker/pkg/ioutils/readers.go | 151 + .../docker/pkg/ioutils/tempdir_deprecated.go | 10 + .../docker/docker/pkg/ioutils/writeflusher.go | 92 + .../docker/docker/pkg/ioutils/writers.go | 66 + .../docker/pkg/jsonmessage/jsonmessage.go | 
307 + .../docker/docker/pkg/longpath/longpath.go | 43 + .../github.com/docker/docker/registry/auth.go | 196 + .../docker/docker/registry/config.go | 448 + .../docker/docker/registry/config_unix.go | 17 + .../docker/docker/registry/config_windows.go | 20 + .../docker/docker/registry/endpoint_v1.go | 185 + .../docker/docker/registry/errors.go | 36 + .../docker/docker/registry/registry.go | 180 + .../docker/docker/registry/search.go | 139 + .../docker/docker/registry/service.go | 167 + .../docker/docker/registry/service_v2.go | 80 + .../docker/docker/registry/session.go | 216 + .../docker/docker/registry/types.go | 39 + .../github.com/docker/go-connections/LICENSE | 191 + .../docker/go-connections/nat/nat.go | 242 + .../docker/go-connections/nat/parse.go | 57 + .../docker/go-connections/nat/sort.go | 96 + .../go-connections/tlsconfig/certpool_go17.go | 18 + .../tlsconfig/certpool_other.go | 13 + .../docker/go-connections/tlsconfig/config.go | 254 + .../tlsconfig/config_client_ciphers.go | 17 + .../tlsconfig/config_legacy_client_ciphers.go | 15 + .../docker/go-metrics/CONTRIBUTING.md | 55 + vendor/github.com/docker/go-metrics/LICENSE | 191 + .../github.com/docker/go-metrics/LICENSE.docs | 425 + vendor/github.com/docker/go-metrics/NOTICE | 16 + vendor/github.com/docker/go-metrics/README.md | 91 + .../github.com/docker/go-metrics/counter.go | 52 + vendor/github.com/docker/go-metrics/docs.go | 3 + vendor/github.com/docker/go-metrics/gauge.go | 72 + .../github.com/docker/go-metrics/handler.go | 74 + .../github.com/docker/go-metrics/helpers.go | 10 + .../github.com/docker/go-metrics/namespace.go | 315 + .../github.com/docker/go-metrics/register.go | 15 + vendor/github.com/docker/go-metrics/timer.go | 85 + vendor/github.com/docker/go-metrics/unit.go | 12 + .../docker/go-units/CONTRIBUTING.md | 67 + vendor/github.com/docker/go-units/LICENSE | 191 + vendor/github.com/docker/go-units/MAINTAINERS | 46 + vendor/github.com/docker/go-units/README.md | 16 + vendor/github.com/docker/go-units/circle.yml | 11 + vendor/github.com/docker/go-units/duration.go | 35 + vendor/github.com/docker/go-units/size.go | 154 + vendor/github.com/docker/go-units/ulimit.go | 123 + .../github.com/evanphx/json-patch/README.md | 4 +- vendor/github.com/evanphx/json-patch/patch.go | 4 +- .../exponent-io/jsonpath/.gitignore | 24 + .../exponent-io/jsonpath/.travis.yml | 5 + .../github.com/exponent-io/jsonpath/LICENSE | 21 + .../github.com/exponent-io/jsonpath/README.md | 66 + .../exponent-io/jsonpath/decoder.go | 210 + .../github.com/exponent-io/jsonpath/path.go | 67 + .../exponent-io/jsonpath/pathaction.go | 61 + vendor/github.com/fatih/color/LICENSE.md | 20 + vendor/github.com/fatih/color/README.md | 178 + vendor/github.com/fatih/color/color.go | 618 + vendor/github.com/fatih/color/doc.go | 135 + .../github.com/felixge/httpsnoop/.gitignore | 0 .../github.com/felixge/httpsnoop/.travis.yml | 6 + .../github.com/felixge/httpsnoop/LICENSE.txt | 19 + vendor/github.com/felixge/httpsnoop/Makefile | 10 + vendor/github.com/felixge/httpsnoop/README.md | 95 + .../felixge/httpsnoop/capture_metrics.go | 86 + vendor/github.com/felixge/httpsnoop/docs.go | 10 + .../httpsnoop/wrap_generated_gteq_1.8.go | 436 + .../httpsnoop/wrap_generated_lt_1.8.go | 278 + vendor/github.com/ghodss/yaml/.gitignore | 20 - vendor/github.com/ghodss/yaml/.travis.yml | 7 - vendor/github.com/ghodss/yaml/README.md | 121 - vendor/github.com/ghodss/yaml/fields.go | 501 - vendor/github.com/ghodss/yaml/yaml.go | 277 - .../github.com/go-errors/errors/.travis.yml | 8 + 
.../github.com/go-errors/errors/LICENSE.MIT | 7 + vendor/github.com/go-errors/errors/README.md | 82 + vendor/github.com/go-errors/errors/error.go | 209 + .../github.com/go-errors/errors/error_1_13.go | 31 + .../go-errors/errors/error_backward.go | 57 + .../go-errors/errors/parse_panic.go | 127 + .../github.com/go-errors/errors/stackframe.go | 122 + vendor/github.com/go-gorp/gorp/v3/.gitignore | 11 + vendor/github.com/go-gorp/gorp/v3/.travis.yml | 36 + .../go-gorp/gorp/v3/CONTRIBUTING.md | 34 + vendor/github.com/go-gorp/gorp/v3/LICENSE | 22 + vendor/github.com/go-gorp/gorp/v3/README.md | 809 + vendor/github.com/go-gorp/gorp/v3/column.go | 76 + vendor/github.com/go-gorp/gorp/v3/db.go | 985 + vendor/github.com/go-gorp/gorp/v3/dialect.go | 105 + .../go-gorp/gorp/v3/dialect_mysql.go | 169 + .../go-gorp/gorp/v3/dialect_oracle.go | 139 + .../go-gorp/gorp/v3/dialect_postgres.go | 149 + .../go-gorp/gorp/v3/dialect_snowflake.go | 152 + .../go-gorp/gorp/v3/dialect_sqlite.go | 112 + .../go-gorp/gorp/v3/dialect_sqlserver.go | 145 + vendor/github.com/go-gorp/gorp/v3/doc.go | 11 + vendor/github.com/go-gorp/gorp/v3/errors.go | 31 + vendor/github.com/go-gorp/gorp/v3/gorp.go | 672 + vendor/github.com/go-gorp/gorp/v3/hooks.go | 42 + vendor/github.com/go-gorp/gorp/v3/index.go | 49 + .../github.com/go-gorp/gorp/v3/lockerror.go | 56 + vendor/github.com/go-gorp/gorp/v3/logging.go | 42 + .../github.com/go-gorp/gorp/v3/nulltypes.go | 65 + vendor/github.com/go-gorp/gorp/v3/select.go | 359 + vendor/github.com/go-gorp/gorp/v3/table.go | 258 + .../go-gorp/gorp/v3/table_bindings.go | 305 + vendor/github.com/go-gorp/gorp/v3/test_all.sh | 23 + .../github.com/go-gorp/gorp/v3/transaction.go | 239 + vendor/github.com/go-logr/stdr/LICENSE | 201 + vendor/github.com/go-logr/stdr/README.md | 6 + vendor/github.com/go-logr/stdr/stdr.go | 170 + vendor/github.com/google/shlex/COPYING | 202 + vendor/github.com/google/shlex/README | 2 + vendor/github.com/google/shlex/shlex.go | 416 + vendor/github.com/gorilla/mux/AUTHORS | 8 + vendor/github.com/gorilla/mux/LICENSE | 27 + vendor/github.com/gorilla/mux/README.md | 805 + vendor/github.com/gorilla/mux/doc.go | 306 + vendor/github.com/gorilla/mux/middleware.go | 74 + vendor/github.com/gorilla/mux/mux.go | 606 + vendor/github.com/gorilla/mux/regexp.go | 388 + vendor/github.com/gorilla/mux/route.go | 736 + vendor/github.com/gorilla/mux/test_helpers.go | 19 + vendor/github.com/gosuri/uitable/.travis.yml | 6 + vendor/github.com/gosuri/uitable/LICENSE | 10 + vendor/github.com/gosuri/uitable/Makefile | 4 + vendor/github.com/gosuri/uitable/README.md | 67 + vendor/github.com/gosuri/uitable/table.go | 205 + .../gosuri/uitable/util/strutil/strutil.go | 101 + .../gosuri/uitable/util/wordwrap/LICENSE.md | 21 + .../gosuri/uitable/util/wordwrap/README.md | 39 + .../gosuri/uitable/util/wordwrap/wordwrap.go | 85 + .../gregjones/httpcache/.travis.yml | 19 + .../gregjones/httpcache/LICENSE.txt | 7 + .../github.com/gregjones/httpcache/README.md | 25 + .../gregjones/httpcache/httpcache.go | 551 + vendor/github.com/hashicorp/errwrap/LICENSE | 354 + vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 178 + .../hashicorp/go-multierror/LICENSE | 353 + .../hashicorp/go-multierror/Makefile | 31 + .../hashicorp/go-multierror/README.md | 150 + .../hashicorp/go-multierror/append.go | 43 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 27 + .../hashicorp/go-multierror/group.go | 38 + .../hashicorp/go-multierror/multierror.go | 121 + 
.../hashicorp/go-multierror/prefix.go | 37 + .../hashicorp/go-multierror/sort.go | 16 + vendor/github.com/huandu/xstrings/README.md | 9 +- vendor/github.com/huandu/xstrings/convert.go | 44 +- vendor/github.com/imdario/mergo/.travis.yml | 3 + vendor/github.com/imdario/mergo/README.md | 34 +- vendor/github.com/imdario/mergo/merge.go | 11 +- vendor/github.com/imdario/mergo/mergo.go | 4 +- vendor/github.com/jmoiron/sqlx/.gitignore | 25 + vendor/github.com/jmoiron/sqlx/.travis.yml | 26 + vendor/github.com/jmoiron/sqlx/LICENSE | 23 + vendor/github.com/jmoiron/sqlx/README.md | 213 + vendor/github.com/jmoiron/sqlx/bind.go | 265 + vendor/github.com/jmoiron/sqlx/doc.go | 12 + vendor/github.com/jmoiron/sqlx/named.go | 458 + .../github.com/jmoiron/sqlx/named_context.go | 132 + .../jmoiron/sqlx/reflectx/README.md | 17 + .../jmoiron/sqlx/reflectx/reflect.go | 444 + vendor/github.com/jmoiron/sqlx/sqlx.go | 1051 + .../github.com/jmoiron/sqlx/sqlx_context.go | 414 + vendor/github.com/lann/builder/.gitignore | 2 + vendor/github.com/lann/builder/.travis.yml | 7 + vendor/github.com/lann/builder/LICENSE | 21 + vendor/github.com/lann/builder/README.md | 68 + vendor/github.com/lann/builder/builder.go | 225 + vendor/github.com/lann/builder/reflect.go | 24 + vendor/github.com/lann/builder/registry.go | 59 + vendor/github.com/lann/ps/LICENSE | 7 + vendor/github.com/lann/ps/README.md | 10 + vendor/github.com/lann/ps/list.go | 93 + vendor/github.com/lann/ps/map.go | 311 + vendor/github.com/lann/ps/profile.sh | 3 + vendor/github.com/lib/pq/.gitignore | 6 + vendor/github.com/lib/pq/LICENSE.md | 8 + vendor/github.com/lib/pq/README.md | 36 + vendor/github.com/lib/pq/TESTS.md | 33 + vendor/github.com/lib/pq/array.go | 895 + vendor/github.com/lib/pq/buf.go | 91 + vendor/github.com/lib/pq/conn.go | 2112 + vendor/github.com/lib/pq/conn_go115.go | 8 + vendor/github.com/lib/pq/conn_go18.go | 247 + vendor/github.com/lib/pq/connector.go | 120 + vendor/github.com/lib/pq/copy.go | 348 + vendor/github.com/lib/pq/doc.go | 268 + vendor/github.com/lib/pq/encode.go | 632 + vendor/github.com/lib/pq/error.go | 523 + vendor/github.com/lib/pq/krb.go | 27 + vendor/github.com/lib/pq/notice.go | 72 + vendor/github.com/lib/pq/notify.go | 858 + vendor/github.com/lib/pq/oid/doc.go | 6 + vendor/github.com/lib/pq/oid/types.go | 343 + vendor/github.com/lib/pq/rows.go | 93 + vendor/github.com/lib/pq/scram/scram.go | 264 + vendor/github.com/lib/pq/ssl.go | 204 + vendor/github.com/lib/pq/ssl_permissions.go | 93 + vendor/github.com/lib/pq/ssl_windows.go | 10 + vendor/github.com/lib/pq/url.go | 76 + vendor/github.com/lib/pq/user_other.go | 10 + vendor/github.com/lib/pq/user_posix.go | 25 + vendor/github.com/lib/pq/user_windows.go | 27 + vendor/github.com/lib/pq/uuid.go | 23 + .../github.com/liggitt/tabwriter/.travis.yml | 11 + vendor/github.com/liggitt/tabwriter/LICENSE | 27 + vendor/github.com/liggitt/tabwriter/README.md | 7 + .../github.com/liggitt/tabwriter/tabwriter.go | 637 + vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 38 + .../mattn/go-colorable/colorable_others.go | 38 + .../mattn/go-colorable/colorable_windows.go | 1047 + .../github.com/mattn/go-colorable/go.test.sh | 12 + .../mattn/go-colorable/noncolorable.go | 57 + vendor/github.com/mattn/go-isatty/LICENSE | 9 + vendor/github.com/mattn/go-isatty/README.md | 50 + vendor/github.com/mattn/go-isatty/doc.go | 2 + vendor/github.com/mattn/go-isatty/go.test.sh | 12 + 
.../github.com/mattn/go-isatty/isatty_bsd.go | 19 + .../mattn/go-isatty/isatty_others.go | 16 + .../mattn/go-isatty/isatty_plan9.go | 23 + .../mattn/go-isatty/isatty_solaris.go | 21 + .../mattn/go-isatty/isatty_tcgets.go | 19 + .../mattn/go-isatty/isatty_windows.go | 125 + .../github.com/mattn/go-runewidth/.travis.yml | 16 + vendor/github.com/mattn/go-runewidth/LICENSE | 21 + .../github.com/mattn/go-runewidth/README.md | 27 + .../github.com/mattn/go-runewidth/go.test.sh | 12 + .../mattn/go-runewidth/runewidth.go | 257 + .../mattn/go-runewidth/runewidth_appengine.go | 8 + .../mattn/go-runewidth/runewidth_js.go | 9 + .../mattn/go-runewidth/runewidth_posix.go | 82 + .../mattn/go-runewidth/runewidth_table.go | 437 + .../mattn/go-runewidth/runewidth_windows.go | 28 + .../mitchellh/copystructure/.travis.yml | 12 - .../mitchellh/copystructure/README.md | 42 +- .../mitchellh/copystructure/copystructure.go | 93 +- .../mitchellh/go-wordwrap/LICENSE.md | 21 + .../mitchellh/go-wordwrap/README.md | 39 + .../mitchellh/go-wordwrap/wordwrap.go | 83 + .../mitchellh/reflectwalk/reflectwalk.go | 21 +- vendor/github.com/moby/locker/LICENSE | 190 + vendor/github.com/moby/locker/README.md | 65 + vendor/github.com/moby/locker/locker.go | 112 + vendor/github.com/moby/term/.gitignore | 8 + vendor/github.com/moby/term/LICENSE | 191 + vendor/github.com/moby/term/README.md | 36 + vendor/github.com/moby/term/ascii.go | 66 + vendor/github.com/moby/term/doc.go | 3 + vendor/github.com/moby/term/proxy.go | 88 + vendor/github.com/moby/term/term.go | 85 + vendor/github.com/moby/term/term_unix.go | 98 + vendor/github.com/moby/term/term_windows.go | 176 + vendor/github.com/moby/term/termios_bsd.go | 13 + vendor/github.com/moby/term/termios_nonbsd.go | 13 + vendor/github.com/moby/term/termios_unix.go | 35 + .../github.com/moby/term/termios_windows.go | 37 + .../moby/term/windows/ansi_reader.go | 252 + .../moby/term/windows/ansi_writer.go | 57 + .../github.com/moby/term/windows/console.go | 43 + vendor/github.com/moby/term/windows/doc.go | 5 + .../monochromegane/go-gitignore/.travis.yml | 6 + .../monochromegane/go-gitignore/LICENSE | 21 + .../monochromegane/go-gitignore/README.md | 95 + .../go-gitignore/depth_holder.go | 79 + .../go-gitignore/full_scan_patterns.go | 31 + .../monochromegane/go-gitignore/gitignore.go | 80 + .../go-gitignore/index_scan_patterns.go | 35 + .../go-gitignore/initial_holder.go | 62 + .../monochromegane/go-gitignore/match.go | 24 + .../monochromegane/go-gitignore/pattern.go | 69 + .../monochromegane/go-gitignore/patterns.go | 22 + .../monochromegane/go-gitignore/util.go | 45 + vendor/github.com/morikuni/aec/LICENSE | 21 + vendor/github.com/morikuni/aec/README.md | 178 + vendor/github.com/morikuni/aec/aec.go | 137 + vendor/github.com/morikuni/aec/ansi.go | 59 + vendor/github.com/morikuni/aec/builder.go | 388 + vendor/github.com/morikuni/aec/sample.gif | Bin 0 -> 12548 bytes vendor/github.com/morikuni/aec/sgr.go | 202 + .../opencontainers/go-digest/.mailmap | 4 + .../opencontainers/go-digest/.pullapprove.yml | 28 + .../opencontainers/go-digest/.travis.yml | 5 + .../opencontainers/go-digest/CONTRIBUTING.md | 72 + .../opencontainers/go-digest/LICENSE | 192 + .../opencontainers/go-digest/LICENSE.docs | 425 + .../opencontainers/go-digest/MAINTAINERS | 5 + .../opencontainers/go-digest/README.md | 96 + .../opencontainers/go-digest/algorithm.go | 193 + .../opencontainers/go-digest/digest.go | 157 + .../opencontainers/go-digest/digester.go | 40 + .../opencontainers/go-digest/doc.go | 62 + 
.../opencontainers/go-digest/verifiers.go | 46 + .../opencontainers/image-spec/LICENSE | 191 + .../image-spec/specs-go/v1/annotations.go | 62 + .../image-spec/specs-go/v1/config.go | 111 + .../image-spec/specs-go/v1/descriptor.go | 80 + .../image-spec/specs-go/v1/index.go | 38 + .../image-spec/specs-go/v1/layout.go | 32 + .../image-spec/specs-go/v1/manifest.go | 41 + .../image-spec/specs-go/v1/mediatype.go | 75 + .../image-spec/specs-go/version.go | 32 + .../image-spec/specs-go/versioned.go | 23 + vendor/github.com/peterbourgon/diskv/LICENSE | 19 + .../github.com/peterbourgon/diskv/README.md | 141 + .../peterbourgon/diskv/compression.go | 64 + vendor/github.com/peterbourgon/diskv/diskv.go | 624 + vendor/github.com/peterbourgon/diskv/index.go | 115 + .../rubenv/sql-migrate/.dockerignore | 6 + .../github.com/rubenv/sql-migrate/.gitignore | 9 + .../rubenv/sql-migrate/.golangci.yaml | 98 + .../github.com/rubenv/sql-migrate/Dockerfile | 25 + vendor/github.com/rubenv/sql-migrate/LICENSE | 21 + vendor/github.com/rubenv/sql-migrate/Makefile | 11 + .../github.com/rubenv/sql-migrate/README.md | 416 + vendor/github.com/rubenv/sql-migrate/doc.go | 238 + .../github.com/rubenv/sql-migrate/migrate.go | 877 + .../rubenv/sql-migrate/migrate_go116.go | 23 + .../rubenv/sql-migrate/sqlparse/LICENSE | 22 + .../rubenv/sql-migrate/sqlparse/README.md | 7 + .../rubenv/sql-migrate/sqlparse/sqlparse.go | 226 + .../russross/blackfriday/v2/.gitignore | 8 + .../russross/blackfriday/v2/.travis.yml | 17 + .../russross/blackfriday/v2/LICENSE.txt | 29 + .../russross/blackfriday/v2/README.md | 335 + .../russross/blackfriday/v2/block.go | 1612 + .../github.com/russross/blackfriday/v2/doc.go | 46 + .../russross/blackfriday/v2/entities.go | 2236 + .../github.com/russross/blackfriday/v2/esc.go | 70 + .../russross/blackfriday/v2/html.go | 952 + .../russross/blackfriday/v2/inline.go | 1228 + .../russross/blackfriday/v2/markdown.go | 950 + .../russross/blackfriday/v2/node.go | 360 + .../russross/blackfriday/v2/smartypants.go | 457 + .../github.com/shopspring/decimal/.gitignore | 9 + .../github.com/shopspring/decimal/.travis.yml | 19 + .../shopspring/decimal/CHANGELOG.md | 49 + vendor/github.com/shopspring/decimal/LICENSE | 45 + .../github.com/shopspring/decimal/README.md | 130 + .../shopspring/decimal/decimal-go.go | 415 + .../github.com/shopspring/decimal/decimal.go | 1904 + .../github.com/shopspring/decimal/rounding.go | 160 + vendor/github.com/spf13/cast/.gitignore | 25 + vendor/github.com/spf13/cast/LICENSE | 21 + vendor/github.com/spf13/cast/Makefile | 40 + vendor/github.com/spf13/cast/README.md | 75 + vendor/github.com/spf13/cast/cast.go | 176 + vendor/github.com/spf13/cast/caste.go | 1476 + .../spf13/cast/timeformattype_string.go | 27 + .../gojsonpointer/LICENSE-APACHE-2.0.txt} | 2 +- .../xeipuuv/gojsonpointer/README.md | 41 + .../xeipuuv/gojsonpointer/pointer.go | 211 + .../gojsonreference/LICENSE-APACHE-2.0.txt | 202 + .../xeipuuv/gojsonreference/README.md | 10 + .../xeipuuv/gojsonreference/reference.go | 147 + .../xeipuuv/gojsonschema/.gitignore | 3 + .../xeipuuv/gojsonschema/.travis.yml | 9 + .../gojsonschema/LICENSE-APACHE-2.0.txt | 202 + .../github.com/xeipuuv/gojsonschema/README.md | 466 + .../github.com/xeipuuv/gojsonschema/draft.go | 125 + .../github.com/xeipuuv/gojsonschema/errors.go | 364 + .../xeipuuv/gojsonschema/format_checkers.go | 368 + .../xeipuuv/gojsonschema/glide.yaml | 13 + .../xeipuuv/gojsonschema/internalLog.go | 37 + .../xeipuuv/gojsonschema/jsonContext.go | 73 + 
.../xeipuuv/gojsonschema/jsonLoader.go | 386 + .../xeipuuv/gojsonschema/locales.go | 472 + .../github.com/xeipuuv/gojsonschema/result.go | 220 + .../github.com/xeipuuv/gojsonschema/schema.go | 1087 + .../xeipuuv/gojsonschema/schemaLoader.go | 206 + .../xeipuuv/gojsonschema/schemaPool.go | 215 + .../gojsonschema/schemaReferencePool.go | 68 + .../xeipuuv/gojsonschema/schemaType.go | 83 + .../xeipuuv/gojsonschema/subSchema.go | 149 + .../github.com/xeipuuv/gojsonschema/types.go | 62 + .../github.com/xeipuuv/gojsonschema/utils.go | 197 + .../xeipuuv/gojsonschema/validation.go | 858 + vendor/github.com/xlab/treeprint/.gitignore | 3 + vendor/github.com/xlab/treeprint/LICENSE | 20 + vendor/github.com/xlab/treeprint/README.md | 154 + vendor/github.com/xlab/treeprint/helpers.go | 47 + vendor/github.com/xlab/treeprint/struct.go | 322 + vendor/github.com/xlab/treeprint/treeprint.go | 294 + .../instrumentation/net/http/otelhttp/LICENSE | 201 + .../net/http/otelhttp/client.go | 61 + .../net/http/otelhttp/common.go | 46 + .../net/http/otelhttp/config.go | 208 + .../instrumentation/net/http/otelhttp/doc.go | 18 + .../net/http/otelhttp/handler.go | 275 + .../http/otelhttp/internal/semconvutil/gen.go | 21 + .../otelhttp/internal/semconvutil/httpconv.go | 552 + .../otelhttp/internal/semconvutil/netconv.go | 368 + .../net/http/otelhttp/labeler.go | 65 + .../net/http/otelhttp/transport.go | 193 + .../net/http/otelhttp/version.go | 28 + .../instrumentation/net/http/otelhttp/wrap.go | 99 + .../go.opentelemetry.io/otel/.codespellignore | 5 + vendor/go.opentelemetry.io/otel/.codespellrc | 10 + .../go.opentelemetry.io/otel/.gitattributes | 3 + vendor/go.opentelemetry.io/otel/.gitignore | 25 + vendor/go.opentelemetry.io/otel/.gitmodules | 3 + vendor/go.opentelemetry.io/otel/.golangci.yml | 281 + vendor/go.opentelemetry.io/otel/.lycheeignore | 6 + .../otel/.markdownlint.yaml | 29 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 2737 + vendor/go.opentelemetry.io/otel/CODEOWNERS | 17 + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 620 + vendor/go.opentelemetry.io/otel/LICENSE | 201 + vendor/go.opentelemetry.io/otel/Makefile | 291 + vendor/go.opentelemetry.io/otel/README.md | 111 + vendor/go.opentelemetry.io/otel/RELEASING.md | 139 + vendor/go.opentelemetry.io/otel/VERSIONING.md | 224 + .../go.opentelemetry.io/otel/attribute/doc.go | 16 + .../otel/attribute/encoder.go | 146 + .../otel/attribute/filter.go | 60 + .../otel/attribute/iterator.go | 161 + .../go.opentelemetry.io/otel/attribute/key.go | 134 + .../go.opentelemetry.io/otel/attribute/kv.go | 86 + .../go.opentelemetry.io/otel/attribute/set.go | 429 + .../otel/attribute/type_string.go | 31 + .../otel/attribute/value.go | 270 + .../otel/baggage/baggage.go | 552 + .../otel/baggage/context.go | 39 + .../go.opentelemetry.io/otel/baggage/doc.go | 20 + .../go.opentelemetry.io/otel/codes/codes.go | 116 + vendor/go.opentelemetry.io/otel/codes/doc.go | 21 + vendor/go.opentelemetry.io/otel/doc.go | 34 + .../go.opentelemetry.io/otel/error_handler.go | 38 + .../go.opentelemetry.io/otel/get_main_pkgs.sh | 41 + vendor/go.opentelemetry.io/otel/handler.go | 48 + .../otel/internal/attribute/attribute.go | 111 + .../otel/internal/baggage/baggage.go | 43 + .../otel/internal/baggage/context.go | 92 + .../go.opentelemetry.io/otel/internal/gen.go | 29 + .../otel/internal/global/handler.go | 102 + .../otel/internal/global/instruments.go | 359 + .../otel/internal/global/internal_logging.go | 69 + .../otel/internal/global/meter.go | 354 + .../otel/internal/global/propagator.go | 82 + 
.../otel/internal/global/state.go | 156 + .../otel/internal/global/trace.go | 192 + .../otel/internal/rawhelpers.go | 55 + .../otel/internal_logging.go | 26 + vendor/go.opentelemetry.io/otel/metric.go | 53 + .../go.opentelemetry.io/otel/metric/LICENSE | 201 + .../otel/metric/asyncfloat64.go | 271 + .../otel/metric/asyncint64.go | 269 + .../go.opentelemetry.io/otel/metric/config.go | 92 + vendor/go.opentelemetry.io/otel/metric/doc.go | 170 + .../otel/metric/embedded/embedded.go | 234 + .../otel/metric/instrument.go | 334 + .../go.opentelemetry.io/otel/metric/meter.go | 212 + .../otel/metric/syncfloat64.go | 179 + .../otel/metric/syncint64.go | 179 + .../go.opentelemetry.io/otel/propagation.go | 31 + .../otel/propagation/baggage.go | 58 + .../otel/propagation/doc.go | 24 + .../otel/propagation/propagation.go | 153 + .../otel/propagation/trace_context.go | 159 + .../go.opentelemetry.io/otel/requirements.txt | 1 + .../otel/semconv/v1.17.0/doc.go | 20 + .../otel/semconv/v1.17.0/event.go | 199 + .../otel/semconv/v1.17.0/exception.go | 20 + .../otel/semconv/v1.17.0/http.go | 21 + .../otel/semconv/v1.17.0/resource.go | 2010 + .../otel/semconv/v1.17.0/schema.go | 20 + .../otel/semconv/v1.17.0/trace.go | 3375 ++ .../otel/semconv/v1.21.0/attribute_group.go | 1877 + .../otel/semconv/v1.21.0/doc.go | 20 + .../otel/semconv/v1.21.0/event.go | 199 + .../otel/semconv/v1.21.0/exception.go | 20 + .../otel/semconv/v1.21.0/resource.go | 2310 + .../otel/semconv/v1.21.0/schema.go | 20 + .../otel/semconv/v1.21.0/trace.go | 2495 + vendor/go.opentelemetry.io/otel/trace.go | 47 + vendor/go.opentelemetry.io/otel/trace/LICENSE | 201 + .../go.opentelemetry.io/otel/trace/config.go | 333 + .../go.opentelemetry.io/otel/trace/context.go | 61 + vendor/go.opentelemetry.io/otel/trace/doc.go | 66 + .../otel/trace/nonrecording.go | 27 + vendor/go.opentelemetry.io/otel/trace/noop.go | 89 + .../go.opentelemetry.io/otel/trace/trace.go | 551 + .../otel/trace/tracestate.go | 212 + .../otel/verify_examples.sh | 85 + vendor/go.opentelemetry.io/otel/version.go | 20 + vendor/go.opentelemetry.io/otel/versions.yaml | 55 + vendor/go.starlark.net/LICENSE | 29 + .../internal/compile/compile.go | 1924 + .../internal/compile/serial.go | 395 + .../go.starlark.net/internal/spell/spell.go | 115 + vendor/go.starlark.net/resolve/binding.go | 74 + vendor/go.starlark.net/resolve/resolve.go | 969 + vendor/go.starlark.net/starlark/debug.go | 42 + vendor/go.starlark.net/starlark/empty.s | 3 + vendor/go.starlark.net/starlark/eval.go | 1648 + vendor/go.starlark.net/starlark/hashtable.go | 390 + vendor/go.starlark.net/starlark/int.go | 452 + .../go.starlark.net/starlark/int_generic.go | 34 + .../go.starlark.net/starlark/int_posix64.go | 91 + vendor/go.starlark.net/starlark/interp.go | 705 + vendor/go.starlark.net/starlark/library.go | 2289 + vendor/go.starlark.net/starlark/profile.go | 449 + vendor/go.starlark.net/starlark/unpack.go | 355 + vendor/go.starlark.net/starlark/value.go | 1500 + .../go.starlark.net/starlarkstruct/module.go | 43 + .../go.starlark.net/starlarkstruct/struct.go | 282 + vendor/go.starlark.net/syntax/grammar.txt | 129 + vendor/go.starlark.net/syntax/parse.go | 1028 + vendor/go.starlark.net/syntax/quote.go | 309 + vendor/go.starlark.net/syntax/scan.go | 1123 + vendor/go.starlark.net/syntax/syntax.go | 525 + vendor/go.starlark.net/syntax/walk.go | 161 + vendor/golang.org/x/crypto/cast5/cast5.go | 536 + .../x/crypto/openpgp/armor/armor.go | 233 + .../x/crypto/openpgp/armor/encode.go | 161 + .../x/crypto/openpgp/canonical_text.go | 59 + 
.../x/crypto/openpgp/clearsign/clearsign.go | 424 + .../x/crypto/openpgp/elgamal/elgamal.go | 130 + .../x/crypto/openpgp/errors/errors.go | 78 + vendor/golang.org/x/crypto/openpgp/keys.go | 693 + .../x/crypto/openpgp/packet/compressed.go | 123 + .../x/crypto/openpgp/packet/config.go | 91 + .../x/crypto/openpgp/packet/encrypted_key.go | 208 + .../x/crypto/openpgp/packet/literal.go | 89 + .../x/crypto/openpgp/packet/ocfb.go | 143 + .../openpgp/packet/one_pass_signature.go | 73 + .../x/crypto/openpgp/packet/opaque.go | 161 + .../x/crypto/openpgp/packet/packet.go | 590 + .../x/crypto/openpgp/packet/private_key.go | 384 + .../x/crypto/openpgp/packet/public_key.go | 753 + .../x/crypto/openpgp/packet/public_key_v3.go | 279 + .../x/crypto/openpgp/packet/reader.go | 76 + .../x/crypto/openpgp/packet/signature.go | 731 + .../x/crypto/openpgp/packet/signature_v3.go | 146 + .../openpgp/packet/symmetric_key_encrypted.go | 155 + .../openpgp/packet/symmetrically_encrypted.go | 290 + .../x/crypto/openpgp/packet/userattribute.go | 90 + .../x/crypto/openpgp/packet/userid.go | 159 + vendor/golang.org/x/crypto/openpgp/read.go | 448 + vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 + vendor/golang.org/x/crypto/openpgp/write.go | 418 + vendor/golang.org/x/sync/errgroup/errgroup.go | 135 + vendor/golang.org/x/sync/errgroup/go120.go | 13 + .../golang.org/x/sync/errgroup/pre_go120.go | 14 + vendor/golang.org/x/sys/execabs/execabs.go | 102 + .../golang.org/x/sys/execabs/execabs_go118.go | 17 + .../golang.org/x/sys/execabs/execabs_go119.go | 20 + vendor/helm.sh/helm/v3/LICENSE | 202 + .../helm/v3/internal/fileutil/fileutil.go | 50 + .../helm/v3/internal/resolver/resolver.go | 263 + .../helm/v3/internal}/sympath/walk.go | 11 +- .../helm/v3/internal/third_party/dep/fs/fs.go | 372 + .../v3/internal/third_party/dep/fs/rename.go} | 54 +- .../third_party/dep/fs/rename_windows.go | 69 + .../deployment/util/deploymentutil.go | 178 + .../helm.sh/helm/v3/internal/tlsutil/cfg.go | 58 + .../helm.sh/helm/v3/internal/tlsutil/tls.go | 78 + .../helm/v3/internal/urlutil/urlutil.go | 73 + .../helm/v3/internal/version/version.go | 81 + vendor/helm.sh/helm/v3/pkg/action/action.go | 424 + .../helm.sh/helm/v3/pkg/action/dependency.go | 230 + .../helm/v3/pkg/action}/doc.go | 13 +- vendor/helm.sh/helm/v3/pkg/action/get.go | 47 + .../helm/v3/pkg/action/get_metadata.go | 69 + .../helm.sh/helm/v3/pkg/action/get_values.go | 60 + vendor/helm.sh/helm/v3/pkg/action/history.go | 58 + vendor/helm.sh/helm/v3/pkg/action/hooks.go | 159 + vendor/helm.sh/helm/v3/pkg/action/install.go | 815 + .../helm.sh/helm/v3/pkg/action/lazyclient.go | 197 + vendor/helm.sh/helm/v3/pkg/action/lint.go | 129 + vendor/helm.sh/helm/v3/pkg/action/list.go | 324 + vendor/helm.sh/helm/v3/pkg/action/package.go | 181 + vendor/helm.sh/helm/v3/pkg/action/pull.go | 172 + vendor/helm.sh/helm/v3/pkg/action/push.go | 112 + .../helm/v3/pkg/action/registry_login.go | 88 + .../helm/v3/pkg/action/registry_logout.go} | 28 +- .../helm/v3/pkg/action/release_testing.go | 152 + .../helm/v3/pkg/action/resource_policy.go | 46 + vendor/helm.sh/helm/v3/pkg/action/rollback.go | 265 + vendor/helm.sh/helm/v3/pkg/action/show.go | 165 + vendor/helm.sh/helm/v3/pkg/action/status.go | 95 + .../helm.sh/helm/v3/pkg/action/uninstall.go | 251 + vendor/helm.sh/helm/v3/pkg/action/upgrade.go | 627 + vendor/helm.sh/helm/v3/pkg/action/validate.go | 184 + vendor/helm.sh/helm/v3/pkg/action/verify.go | 59 + vendor/helm.sh/helm/v3/pkg/chart/chart.go | 173 + .../helm.sh/helm/v3/pkg/chart/dependency.go | 82 + 
vendor/helm.sh/helm/v3/pkg/chart/errors.go | 30 + vendor/helm.sh/helm/v3/pkg/chart/file.go | 27 + .../helm/v3/pkg/chart/loader/archive.go | 205 + .../helm/v3/pkg/chart/loader/directory.go | 119 + .../helm.sh/helm/v3/pkg/chart/loader/load.go | 200 + vendor/helm.sh/helm/v3/pkg/chart/metadata.go | 172 + .../helm/v3/pkg/chartutil/capabilities.go | 126 + .../helm/v3}/pkg/chartutil/chartfile.go | 58 +- .../helm.sh/helm/v3/pkg/chartutil/coalesce.go | 293 + .../helm/v3/pkg/chartutil/compatible.go | 34 + .../helm.sh/helm/v3/pkg/chartutil/create.go | 723 + .../helm/v3/pkg/chartutil/dependencies.go | 356 + .../helm/v3}/pkg/chartutil/doc.go | 21 +- .../helm.sh/helm/v3/pkg/chartutil/errors.go | 35 + .../helm/v3}/pkg/chartutil/expand.go | 20 +- .../helm/v3/pkg/chartutil/jsonschema.go | 93 + vendor/helm.sh/helm/v3/pkg/chartutil/save.go | 244 + .../helm/v3/pkg/chartutil/validate_name.go | 112 + .../helm.sh/helm/v3/pkg/chartutil/values.go | 212 + vendor/helm.sh/helm/v3/pkg/cli/environment.go | 258 + .../helm.sh/helm/v3/pkg/cli/roundtripper.go | 80 + .../v3/pkg/downloader/chart_downloader.go | 406 + vendor/helm.sh/helm/v3/pkg/downloader/doc.go | 24 + .../helm.sh/helm/v3/pkg/downloader/manager.go | 905 + .../helm/v3}/pkg/engine/doc.go | 11 +- vendor/helm.sh/helm/v3/pkg/engine/engine.go | 441 + vendor/helm.sh/helm/v3/pkg/engine/files.go | 165 + vendor/helm.sh/helm/v3/pkg/engine/funcs.go | 176 + .../helm.sh/helm/v3/pkg/engine/lookup_func.go | 143 + .../helm/v3/pkg/getter/doc.go} | 20 +- vendor/helm.sh/helm/v3/pkg/getter/getter.go | 212 + .../helm.sh/helm/v3/pkg/getter/httpgetter.go | 157 + .../helm.sh/helm/v3/pkg/getter/ocigetter.go | 155 + .../helm/v3/pkg/getter/plugingetter.go | 110 + vendor/helm.sh/helm/v3/pkg/helmpath/home.go | 44 + .../helm.sh/helm/v3/pkg/helmpath/lazypath.go | 72 + .../helm/v3/pkg/helmpath/lazypath_darwin.go | 34 + .../helm/v3/pkg/helmpath/lazypath_unix.go | 45 + .../helm/v3/pkg/helmpath/lazypath_windows.go | 24 + .../helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go | 34 + vendor/helm.sh/helm/v3/pkg/ignore/doc.go | 68 + .../helm/v3}/pkg/ignore/rules.go | 21 +- vendor/helm.sh/helm/v3/pkg/kube/client.go | 850 + vendor/helm.sh/helm/v3/pkg/kube/config.go | 30 + vendor/helm.sh/helm/v3/pkg/kube/converter.go | 69 + vendor/helm.sh/helm/v3/pkg/kube/factory.go | 51 + vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go | 160 + .../helm.sh/helm/v3/pkg/kube/fake/printer.go | 136 + vendor/helm.sh/helm/v3/pkg/kube/interface.go | 116 + vendor/helm.sh/helm/v3/pkg/kube/ready.go | 463 + vendor/helm.sh/helm/v3/pkg/kube/resource.go | 85 + .../helm/v3/pkg/kube/resource_policy.go | 27 + vendor/helm.sh/helm/v3/pkg/kube/result.go | 28 + vendor/helm.sh/helm/v3/pkg/kube/wait.go | 128 + vendor/helm.sh/helm/v3/pkg/lint/lint.go | 43 + .../helm/v3/pkg/lint/rules/chartfile.go | 209 + .../helm/v3/pkg/lint/rules/dependencies.go | 103 + .../helm/v3/pkg/lint/rules/deprecations.go | 106 + .../helm/v3/pkg/lint/rules/template.go | 351 + .../helm.sh/helm/v3/pkg/lint/rules/values.go | 86 + .../helm/v3/pkg/lint/support/doc.go} | 14 +- .../helm/v3/pkg/lint/support/message.go | 76 + vendor/helm.sh/helm/v3/pkg/plugin/hooks.go | 29 + vendor/helm.sh/helm/v3/pkg/plugin/plugin.go | 284 + vendor/helm.sh/helm/v3/pkg/postrender/exec.go | 109 + .../helm/v3/pkg/postrender/postrender.go | 29 + vendor/helm.sh/helm/v3/pkg/provenance/doc.go | 38 + vendor/helm.sh/helm/v3/pkg/provenance/sign.go | 427 + vendor/helm.sh/helm/v3/pkg/pusher/doc.go | 21 + .../helm.sh/helm/v3/pkg/pusher/ocipusher.go | 152 + vendor/helm.sh/helm/v3/pkg/pusher/pusher.go | 122 + 
vendor/helm.sh/helm/v3/pkg/registry/client.go | 703 + .../helm.sh/helm/v3/pkg/registry/constants.go | 37 + vendor/helm.sh/helm/v3/pkg/registry/util.go | 247 + vendor/helm.sh/helm/v3/pkg/release/hook.go | 106 + vendor/helm.sh/helm/v3/pkg/release/info.go | 40 + vendor/helm.sh/helm/v3/pkg/release/mock.go | 116 + vendor/helm.sh/helm/v3/pkg/release/release.go | 49 + .../helm.sh/helm/v3/pkg/release/responses.go | 24 + vendor/helm.sh/helm/v3/pkg/release/status.go | 49 + .../helm/v3}/pkg/releaseutil/filter.go | 8 +- .../helm/v3/pkg/releaseutil/kind_sorter.go | 160 + .../helm/v3}/pkg/releaseutil/manifest.go | 19 +- .../v3/pkg/releaseutil/manifest_sorter.go | 233 + .../helm.sh/helm/v3/pkg/releaseutil/sorter.go | 78 + vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go | 317 + vendor/helm.sh/helm/v3/pkg/repo/doc.go | 94 + vendor/helm.sh/helm/v3/pkg/repo/index.go | 390 + vendor/helm.sh/helm/v3/pkg/repo/repo.go | 125 + .../helm/v3/pkg/storage/driver/cfgmaps.go | 263 + .../helm/v3/pkg/storage/driver/driver.go | 105 + .../helm/v3/pkg/storage/driver/labels.go | 48 + .../helm/v3/pkg/storage/driver/memory.go | 240 + .../helm/v3/pkg/storage/driver/records.go | 124 + .../helm/v3/pkg/storage/driver/secrets.go | 256 + .../helm.sh/helm/v3/pkg/storage/driver/sql.go | 697 + .../helm/v3/pkg/storage/driver/util.go | 122 + vendor/helm.sh/helm/v3/pkg/storage/storage.go | 266 + vendor/helm.sh/helm/v3/pkg/time/time.go | 91 + .../helm/v3/pkg/uploader/chart_uploader.go | 58 + vendor/helm.sh/helm/v3/pkg/uploader/doc.go | 21 + vendor/k8s.io/api/admission/v1/doc.go | 23 + .../k8s.io/api/admission/v1/generated.pb.go | 1783 + .../k8s.io/api/admission/v1/generated.proto | 167 + vendor/k8s.io/api/admission/v1/register.go | 53 + vendor/k8s.io/api/admission/v1/types.go | 169 + .../v1/types_swagger_doc_generated.go | 78 + .../api/admission/v1/zz_generated.deepcopy.go | 142 + vendor/k8s.io/api/admission/v1beta1/doc.go | 24 + .../api/admission/v1beta1/generated.pb.go | 1783 + .../api/admission/v1beta1/generated.proto | 167 + .../k8s.io/api/admission/v1beta1/register.go | 53 + vendor/k8s.io/api/admission/v1beta1/types.go | 174 + .../v1beta1/types_swagger_doc_generated.go | 78 + .../v1beta1/zz_generated.deepcopy.go | 142 + .../zz_generated.prerelease-lifecycle.go | 50 + vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go | 23 + .../api/imagepolicy/v1alpha1/generated.pb.go | 1375 + .../api/imagepolicy/v1alpha1/generated.proto | 88 + .../api/imagepolicy/v1alpha1/register.go | 51 + .../k8s.io/api/imagepolicy/v1alpha1/types.go | 82 + .../v1alpha1/types_swagger_doc_generated.go | 72 + .../v1alpha1/zz_generated.deepcopy.go | 121 + vendor/k8s.io/apiextensions-apiserver/LICENSE | 202 + .../pkg/apis/apiextensions/deepcopy.go | 302 + .../pkg/apis/apiextensions/doc.go | 21 + .../pkg/apis/apiextensions/helpers.go | 257 + .../pkg/apis/apiextensions/register.go | 51 + .../pkg/apis/apiextensions/types.go | 422 + .../apis/apiextensions/types_jsonschema.go | 319 + .../apiextensions/v1/.import-restrictions | 5 + .../pkg/apis/apiextensions/v1/conversion.go | 215 + .../pkg/apis/apiextensions/v1/deepcopy.go | 262 + .../pkg/apis/apiextensions/v1/defaults.go | 61 + .../pkg/apis/apiextensions/v1/doc.go | 25 + .../pkg/apis/apiextensions/v1/generated.pb.go | 9423 ++++ .../pkg/apis/apiextensions/v1/generated.proto | 792 + .../pkg/apis/apiextensions/v1/marshal.go | 136 + .../pkg/apis/apiextensions/v1/register.go | 62 + .../pkg/apis/apiextensions/v1/types.go | 485 + .../apis/apiextensions/v1/types_jsonschema.go | 412 + .../v1/zz_generated.conversion.go | 1326 + 
.../apiextensions/v1/zz_generated.deepcopy.go | 717 + .../apiextensions/v1/zz_generated.defaults.go | 58 + .../v1beta1/.import-restrictions | 5 + .../apis/apiextensions/v1beta1/conversion.go | 70 + .../apis/apiextensions/v1beta1/deepcopy.go | 278 + .../apis/apiextensions/v1beta1/defaults.go | 82 + .../pkg/apis/apiextensions/v1beta1/doc.go | 26 + .../apiextensions/v1beta1/generated.pb.go | 9463 ++++ .../apiextensions/v1beta1/generated.proto | 829 + .../pkg/apis/apiextensions/v1beta1/marshal.go | 136 + .../apis/apiextensions/v1beta1/register.go | 62 + .../pkg/apis/apiextensions/v1beta1/types.go | 531 + .../apiextensions/v1beta1/types_jsonschema.go | 412 + .../v1beta1/zz_generated.conversion.go | 1374 + .../v1beta1/zz_generated.deepcopy.go | 716 + .../v1beta1/zz_generated.defaults.go | 56 + .../zz_generated.prerelease-lifecycle.go | 98 + .../apiextensions/zz_generated.deepcopy.go | 608 + .../pkg/api/validation/path/name.go | 68 + .../unstructured/unstructuredscheme/scheme.go | 129 + .../apimachinery/pkg/util/cache/expiring.go | 202 + .../pkg/util/cache/lruexpirecache.go | 173 + .../k8s.io/apimachinery/pkg/util/diff/diff.go | 138 + .../pkg/util/duration/duration.go | 93 + .../apimachinery/pkg/util/version/doc.go | 18 + .../apimachinery/pkg/util/version/version.go | 372 + vendor/k8s.io/apiserver/LICENSE | 202 + .../pkg/endpoints/deprecation/deprecation.go | 134 + vendor/k8s.io/cli-runtime/LICENSE | 202 + .../pkg/genericclioptions/builder_flags.go | 231 + .../genericclioptions/builder_flags_fake.go | 54 + .../pkg/genericclioptions/client_config.go | 72 + .../pkg/genericclioptions/command_headers.go | 94 + .../pkg/genericclioptions/config_flags.go | 491 + .../genericclioptions/config_flags_fake.go | 127 + .../cli-runtime/pkg/genericclioptions/doc.go | 19 + .../pkg/genericclioptions/filename_flags.go | 82 + .../pkg/genericclioptions/io_options.go | 54 + .../pkg/genericclioptions/json_yaml_flags.go | 79 + .../pkg/genericclioptions/jsonpath_flags.go | 137 + .../genericclioptions/kube_template_flags.go | 94 + .../pkg/genericclioptions/name_flags.go | 83 + .../pkg/genericclioptions/print_flags.go | 171 + .../pkg/genericclioptions/record_flags.go | 201 + .../pkg/genericclioptions/template_flags.go | 136 + .../pkg/genericiooptions/io_options.go | 56 + .../cli-runtime/pkg/printers/discard.go | 30 + vendor/k8s.io/cli-runtime/pkg/printers/doc.go | 19 + .../cli-runtime/pkg/printers/interface.go | 54 + .../k8s.io/cli-runtime/pkg/printers/json.go | 79 + .../cli-runtime/pkg/printers/jsonpath.go | 147 + .../cli-runtime/pkg/printers/managedfields.go | 59 + .../k8s.io/cli-runtime/pkg/printers/name.go | 130 + .../cli-runtime/pkg/printers/sourcechecker.go | 60 + .../cli-runtime/pkg/printers/tableprinter.go | 589 + .../cli-runtime/pkg/printers/tabwriter.go | 36 + .../cli-runtime/pkg/printers/template.go | 118 + .../cli-runtime/pkg/printers/terminal.go | 75 + .../cli-runtime/pkg/printers/typesetter.go | 95 + .../pkg/printers/warningprinter.go | 55 + .../k8s.io/cli-runtime/pkg/printers/yaml.go | 85 + .../cli-runtime/pkg/resource/builder.go | 1259 + .../k8s.io/cli-runtime/pkg/resource/client.go | 69 + .../cli-runtime/pkg/resource/crd_finder.go | 110 + vendor/k8s.io/cli-runtime/pkg/resource/doc.go | 24 + .../k8s.io/cli-runtime/pkg/resource/fake.go | 40 + .../resource/fallback_query_param_verifier.go | 59 + .../k8s.io/cli-runtime/pkg/resource/helper.go | 321 + .../cli-runtime/pkg/resource/interfaces.go | 103 + .../pkg/resource/kustomizevisitor.go | 54 + .../k8s.io/cli-runtime/pkg/resource/mapper.go | 166 + 
.../pkg/resource/metadata_decoder.go | 56 +
.../pkg/resource/query_param_verifier.go | 176 +
.../pkg/resource/query_param_verifier_v3.go | 145 +
.../k8s.io/cli-runtime/pkg/resource/result.go | 242 +
.../k8s.io/cli-runtime/pkg/resource/scheme.go | 82 +
.../cli-runtime/pkg/resource/selector.go | 92 +
.../cli-runtime/pkg/resource/visitor.go | 770 +
.../discovery/cached/disk/cached_discovery.go | 325 +
.../discovery/cached/disk/round_tripper.go | 120 +
.../discovery/cached/memory/memcache.go | 332 +
.../k8s.io/client-go/openapi/cached/client.go | 54 +
.../client-go/openapi/cached/groupversion.go | 58 +
vendor/k8s.io/client-go/openapi3/root.go | 182 +
vendor/k8s.io/client-go/scale/client.go | 238 +
vendor/k8s.io/client-go/scale/doc.go | 21 +
vendor/k8s.io/client-go/scale/interfaces.go | 47 +
.../client-go/scale/scheme/appsint/doc.go | 22 +
.../scale/scheme/appsint/register.go | 55 +
.../scale/scheme/appsv1beta1/conversion.go | 73 +
.../client-go/scale/scheme/appsv1beta1/doc.go | 20 +
.../scale/scheme/appsv1beta1/register.go | 45 +
.../appsv1beta1/zz_generated.conversion.go | 134 +
.../scale/scheme/appsv1beta2/conversion.go | 73 +
.../client-go/scale/scheme/appsv1beta2/doc.go | 20 +
.../scale/scheme/appsv1beta2/register.go | 45 +
.../appsv1beta2/zz_generated.conversion.go | 134 +
.../scale/scheme/autoscalingv1/conversion.go | 54 +
.../scale/scheme/autoscalingv1/doc.go | 20 +
.../scale/scheme/autoscalingv1/register.go | 45 +
.../autoscalingv1/zz_generated.conversion.go | 133 +
vendor/k8s.io/client-go/scale/scheme/doc.go | 22 +
.../scale/scheme/extensionsint/doc.go | 22 +
.../scale/scheme/extensionsint/register.go | 55 +
.../scheme/extensionsv1beta1/conversion.go | 73 +
.../scale/scheme/extensionsv1beta1/doc.go | 20 +
.../scheme/extensionsv1beta1/register.go | 45 +
.../zz_generated.conversion.go | 134 +
.../k8s.io/client-go/scale/scheme/register.go | 54 +
vendor/k8s.io/client-go/scale/scheme/types.go | 60 +
.../scale/scheme/zz_generated.deepcopy.go | 92 +
vendor/k8s.io/client-go/scale/util.go | 197 +
.../third_party/forked/golang/LICENSE | 27 +
.../third_party/forked/golang/PATENTS | 22 +
.../forked/golang/template/exec.go | 52 +
.../forked/golang/template/funcs.go | 177 +
vendor/k8s.io/client-go/tools/cache/OWNERS | 28 +
.../client-go/tools/cache/controller.go | 515 +
.../client-go/tools/cache/delta_fifo.go | 803 +
vendor/k8s.io/client-go/tools/cache/doc.go | 24 +
.../client-go/tools/cache/expiration_cache.go | 214 +
.../tools/cache/expiration_cache_fakes.go | 57 +
.../tools/cache/fake_custom_store.go | 102 +
vendor/k8s.io/client-go/tools/cache/fifo.go | 382 +
vendor/k8s.io/client-go/tools/cache/heap.go | 322 +
vendor/k8s.io/client-go/tools/cache/index.go | 101 +
.../k8s.io/client-go/tools/cache/listers.go | 169 +
.../k8s.io/client-go/tools/cache/listwatch.go | 112 +
.../client-go/tools/cache/mutation_cache.go | 262 +
.../tools/cache/mutation_detector.go | 166 +
.../client-go/tools/cache/object-names.go | 65 +
.../k8s.io/client-go/tools/cache/reflector.go | 912 +
.../reflector_data_consistency_detector.go | 119 +
.../tools/cache/reflector_metrics.go | 89 +
.../tools/cache/retry_with_deadline.go | 78 +
.../client-go/tools/cache/shared_informer.go | 1012 +
vendor/k8s.io/client-go/tools/cache/store.go | 295 +
.../client-go/tools/cache/synctrack/lazy.go | 83 +
.../tools/cache/synctrack/synctrack.go | 120 +
.../tools/cache/thread_safe_store.go | 363 +
.../client-go/tools/cache/undelta_store.go | 89 +
vendor/k8s.io/client-go/tools/pager/pager.go | 289 +
.../client-go/tools/watch/informerwatcher.go | 150 +
.../client-go/tools/watch/retrywatcher.go | 295 +
vendor/k8s.io/client-go/tools/watch/until.go | 168 +
vendor/k8s.io/client-go/util/jsonpath/doc.go | 20 +
.../client-go/util/jsonpath/jsonpath.go | 582 +
vendor/k8s.io/client-go/util/jsonpath/node.go | 256 +
.../k8s.io/client-go/util/jsonpath/parser.go | 527 +
vendor/k8s.io/component-base/LICENSE | 202 +
vendor/k8s.io/component-base/version/OWNERS | 16 +
vendor/k8s.io/component-base/version/base.go | 63 +
.../k8s.io/component-base/version/dynamic.go | 77 +
.../k8s.io/component-base/version/version.go | 42 +
.../k8s.io/helm/pkg/chartutil/capabilities.go | 73 -
.../capabilities_versions_generated.go | 576 -
vendor/k8s.io/helm/pkg/chartutil/create.go | 509 -
vendor/k8s.io/helm/pkg/chartutil/files.go | 236 -
vendor/k8s.io/helm/pkg/chartutil/load.go | 378 -
.../k8s.io/helm/pkg/chartutil/requirements.go | 480 -
vendor/k8s.io/helm/pkg/chartutil/save.go | 227 -
vendor/k8s.io/helm/pkg/chartutil/values.go | 421 -
vendor/k8s.io/helm/pkg/engine/engine.go | 388 -
vendor/k8s.io/helm/pkg/helm/client.go | 565 -
vendor/k8s.io/helm/pkg/helm/fake.go | 437 -
vendor/k8s.io/helm/pkg/helm/interface.go | 44 -
vendor/k8s.io/helm/pkg/helm/option.go | 535 -
vendor/k8s.io/helm/pkg/ignore/doc.go | 67 -
vendor/k8s.io/helm/pkg/manifest/splitter.go | 46 -
.../helm/pkg/proto/hapi/chart/chart.pb.go | 124 -
.../helm/pkg/proto/hapi/chart/config.pb.go | 129 -
.../helm/pkg/proto/hapi/chart/metadata.pb.go | 329 -
.../helm/pkg/proto/hapi/chart/template.pb.go | 88 -
.../helm/pkg/proto/hapi/release/hook.pb.go | 252 -
.../helm/pkg/proto/hapi/release/info.pb.go | 118 -
.../helm/pkg/proto/hapi/release/release.pb.go | 151 -
.../helm/pkg/proto/hapi/release/status.pb.go | 171 -
.../pkg/proto/hapi/release/test_run.pb.go | 150 -
.../pkg/proto/hapi/release/test_suite.pb.go | 103 -
.../helm/pkg/proto/hapi/services/tiller.pb.go | 2000 -
.../helm/pkg/proto/hapi/version/version.pb.go | 94 -
vendor/k8s.io/helm/pkg/releaseutil/sorter.go | 100 -
vendor/k8s.io/helm/pkg/renderutil/deps.go | 50 -
vendor/k8s.io/helm/pkg/renderutil/render.go | 93 -
.../k8s.io/helm/pkg/storage/errors/errors.go | 27 -
vendor/k8s.io/helm/pkg/version/compatible.go | 65 -
vendor/k8s.io/helm/pkg/version/version.go | 54 -
.../pkg/util/proto/validation/errors.go | 79 +
.../pkg/util/proto/validation/types.go | 299 +
.../pkg/util/proto/validation/validation.go | 30 +
vendor/k8s.io/kubectl/LICENSE | 201 +
.../k8s.io/kubectl/pkg/cmd/util/env_file.go | 103 +
vendor/k8s.io/kubectl/pkg/cmd/util/factory.go | 72 +
.../pkg/cmd/util/factory_client_access.go | 218 +
vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go | 915 +
.../pkg/cmd/util/kubectl_match_version.go | 129 +
.../kubectl/pkg/cmd/util/override_options.go | 90 +
.../k8s.io/kubectl/pkg/cmd/util/printing.go | 29 +
vendor/k8s.io/kubectl/pkg/scheme/install.go | 82 +
vendor/k8s.io/kubectl/pkg/scheme/scheme.go | 39 +
vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go | 215 +
.../kubectl/pkg/util/i18n/translations/OWNERS | 7 +
.../pkg/util/i18n/translations/README.md | 82 +
.../pkg/util/i18n/translations/extract.py | 105 +
.../pkg/util/i18n/translations/kubectl/OWNERS | 6 +
.../kubectl/de_DE/LC_MESSAGES/k8s.mo | Bin 0 -> 17420 bytes
.../kubectl/de_DE/LC_MESSAGES/k8s.po | 2920 +
.../kubectl/default/LC_MESSAGES/k8s.mo | Bin 0 -> 153024 bytes
.../kubectl/default/LC_MESSAGES/k8s.po | 5077 ++
.../kubectl/en_US/LC_MESSAGES/k8s.mo | Bin 0 -> 153024 bytes
.../kubectl/en_US/LC_MESSAGES/k8s.po | 5077 ++
.../kubectl/fr_FR/LC_MESSAGES/k8s.mo | Bin 0 -> 1233 bytes
.../kubectl/fr_FR/LC_MESSAGES/k8s.po | 103 +
.../kubectl/it_IT/LC_MESSAGES/k8s.mo | Bin 0 -> 20017 bytes
.../kubectl/it_IT/LC_MESSAGES/k8s.po | 3249 ++
.../kubectl/ja_JP/LC_MESSAGES/k8s.mo | Bin 0 -> 19210 bytes
.../kubectl/ja_JP/LC_MESSAGES/k8s.po | 3365 ++
.../kubectl/ko_KR/LC_MESSAGES/k8s.mo | Bin 0 -> 1274 bytes
.../kubectl/ko_KR/LC_MESSAGES/k8s.po | 96 +
.../kubectl/pt_BR/LC_MESSAGES/k8s.mo | Bin 0 -> 19980 bytes
.../kubectl/pt_BR/LC_MESSAGES/k8s.po | 3250 ++
.../i18n/translations/kubectl/template.pot | 3291 ++
.../kubectl/zh_CN/LC_MESSAGES/k8s.mo | Bin 0 -> 18566 bytes
.../kubectl/zh_CN/LC_MESSAGES/k8s.po | 3236 ++
.../kubectl/zh_TW/LC_MESSAGES/k8s.mo | Bin 0 -> 1187 bytes
.../kubectl/zh_TW/LC_MESSAGES/k8s.po | 81 +
.../test/default/LC_MESSAGES/k8s.mo | Bin 0 -> 563 bytes
.../test/default/LC_MESSAGES/k8s.po | 28 +
.../test/en_US/LC_MESSAGES/k8s.mo | Bin 0 -> 563 bytes
.../test/en_US/LC_MESSAGES/k8s.po | 28 +
.../kubectl/pkg/util/interrupt/interrupt.go | 104 +
vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS | 6 +
vendor/k8s.io/kubectl/pkg/util/openapi/doc.go | 21 +
.../kubectl/pkg/util/openapi/openapi.go | 177 +
.../pkg/util/openapi/openapi_getter.go | 82 +
.../pkg/util/templates/command_groups.go | 59 +
.../pkg/util/templates/help_flags_printer.go | 76 +
.../kubectl/pkg/util/templates/markdown.go | 116 +
.../kubectl/pkg/util/templates/normalizers.go | 97 +
.../kubectl/pkg/util/templates/templater.go | 319 +
.../kubectl/pkg/util/templates/templates.go | 104 +
vendor/k8s.io/kubectl/pkg/util/term/resize.go | 132 +
.../kubectl/pkg/util/term/resizeevents.go | 62 +
.../pkg/util/term/resizeevents_windows.go | 62 +
vendor/k8s.io/kubectl/pkg/util/term/term.go | 115 +
.../kubectl/pkg/util/term/term_writer.go | 146 +
.../k8s.io/kubectl/pkg/validation/schema.go | 153 +
.../kubectl/pkg/validation/validation.go | 144 +
vendor/k8s.io/utils/buffer/ring_growing.go | 72 +
vendor/k8s.io/utils/exec/README.md | 5 +
vendor/k8s.io/utils/exec/doc.go | 18 +
vendor/k8s.io/utils/exec/exec.go | 256 +
vendor/k8s.io/utils/exec/fixup_go118.go | 32 +
vendor/k8s.io/utils/exec/fixup_go119.go | 40 +
vendor/k8s.io/utils/pointer/OWNERS | 10 +
vendor/k8s.io/utils/pointer/README.md | 3 +
vendor/k8s.io/utils/pointer/pointer.go | 249 +
vendor/k8s.io/utils/trace/README.md | 67 +
vendor/k8s.io/utils/trace/trace.go | 319 +
vendor/modules.txt | 501 +-
vendor/oras.land/oras-go/LICENSE | 201 +
.../oras.land/oras-go/pkg/artifact/consts.go | 22 +
vendor/oras.land/oras-go/pkg/auth/client.go | 45 +
.../oras.land/oras-go/pkg/auth/client_opts.go | 123 +
.../oras-go/pkg/auth/docker/client.go | 123 +
.../oras-go/pkg/auth/docker/login.go | 103 +
.../oras-go/pkg/auth/docker/login_tls.go | 220 +
.../oras-go/pkg/auth/docker/logout.go | 42 +
.../oras-go/pkg/auth/docker/resolver.go | 86 +
.../oras.land/oras-go/pkg/content/consts.go | 57 +
.../oras-go/pkg/content/decompress.go | 151 +
.../oras.land/oras-go/pkg/content/errors.go | 33 +
vendor/oras.land/oras-go/pkg/content/file.go | 534 +
.../oras.land/oras-go/pkg/content/gunzip.go | 72 +
.../oras-go/pkg/content/interface.go | 26 +
.../oras.land/oras-go/pkg/content/iowriter.go | 112 +
.../oras.land/oras-go/pkg/content/manifest.go | 95 +
.../oras.land/oras-go/pkg/content/memory.go | 284 +
.../oras-go/pkg/content/multireader.go | 56 +
.../oras-go/pkg/content/multiwriter.go | 42 +
vendor/oras.land/oras-go/pkg/content/oci.go | 335 +
vendor/oras.land/oras-go/pkg/content/opts.go | 112 +
.../oras-go/pkg/content/passthrough.go | 286 +
.../oras.land/oras-go/pkg/content/readerat.go | 68 +
.../oras.land/oras-go/pkg/content/registry.go | 84 +
vendor/oras.land/oras-go/pkg/content/untar.go | 157 +
vendor/oras.land/oras-go/pkg/content/utils.go | 223 +
.../oras.land/oras-go/pkg/context/context.go | 24 +
.../oras.land/oras-go/pkg/context/logger.go | 50 +
vendor/oras.land/oras-go/pkg/oras/copy.go | 213 +
vendor/oras.land/oras-go/pkg/oras/errors.go | 42 +
vendor/oras.land/oras-go/pkg/oras/opts.go | 254 +
vendor/oras.land/oras-go/pkg/oras/provider.go | 79 +
vendor/oras.land/oras-go/pkg/oras/store.go | 213 +
.../oras-go/pkg/registry/reference.go | 177 +
.../oras-go/pkg/registry/remote/auth/cache.go | 158 +
.../pkg/registry/remote/auth/challenge.go | 166 +
.../pkg/registry/remote/auth/client.go | 367 +
.../pkg/registry/remote/auth/credential.go | 39 +
.../oras-go/pkg/registry/remote/auth/scope.go | 231 +
.../remote/internal/errutil/errors.go | 83 +
.../registry/remote/internal/syncutil/once.go | 69 +
.../oras-go/pkg/registry/remote/repository.go | 171 +
.../oras-go/pkg/registry/remote/url.go | 42 +
.../oras-go/pkg/registry/remote/utils.go | 72 +
.../oras-go/pkg/registry/repository.go | 57 +
vendor/oras.land/oras-go/pkg/target/target.go | 26 +
vendor/sigs.k8s.io/kustomize/api/LICENSE | 201 +
.../api/filters/annotations/annotations.go | 52 +
.../kustomize/api/filters/annotations/doc.go | 6 +
.../kustomize/api/filters/fieldspec/doc.go | 6 +
.../api/filters/fieldspec/fieldspec.go | 182 +
.../api/filters/filtersutil/setters.go | 105 +
.../kustomize/api/filters/fsslice/doc.go | 6 +
.../kustomize/api/filters/fsslice/fsslice.go | 47 +
.../api/filters/iampolicygenerator/doc.go | 6 +
.../iampolicygenerator/iampolicygenerator.go | 55 +
.../kustomize/api/filters/imagetag/doc.go | 12 +
.../api/filters/imagetag/imagetag.go | 72 +
.../kustomize/api/filters/imagetag/legacy.go | 104 +
.../kustomize/api/filters/imagetag/updater.go | 71 +
.../kustomize/api/filters/labels/doc.go | 6 +
.../kustomize/api/filters/labels/labels.go | 53 +
.../kustomize/api/filters/nameref/doc.go | 6 +
.../kustomize/api/filters/nameref/nameref.go | 413 +
.../api/filters/nameref/seqfilter.go | 60 +
.../kustomize/api/filters/namespace/doc.go | 9 +
.../api/filters/namespace/namespace.go | 217 +
.../api/filters/patchjson6902/doc.go | 6 +
.../filters/patchjson6902/patchjson6902.go | 65 +
.../api/filters/patchstrategicmerge/doc.go | 6 +
.../patchstrategicmerge.go | 36 +
.../kustomize/api/filters/prefix/doc.go | 6 +
.../kustomize/api/filters/prefix/prefix.go | 50 +
.../kustomize/api/filters/refvar/doc.go | 6 +
.../kustomize/api/filters/refvar/expand.go | 147 +
.../kustomize/api/filters/refvar/refvar.go | 113 +
.../kustomize/api/filters/replacement/doc.go | 7 +
.../api/filters/replacement/replacement.go | 244 +
.../kustomize/api/filters/replicacount/doc.go | 6 +
.../api/filters/replicacount/replicacount.go | 48 +
.../kustomize/api/filters/suffix/doc.go | 6 +
.../kustomize/api/filters/suffix/suffix.go | 50 +
.../api/filters/valueadd/valueadd.go | 134 +
.../kustomize/api/hasher/hasher.go | 155 +
vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go | 56 +
.../sigs.k8s.io/kustomize/api/image/image.go | 66 +
.../accumulator/loadconfigfromcrds.go | 198 +
.../accumulator/namereferencetransformer.go | 164 +
.../internal/accumulator/refvartransformer.go | 57 +
.../internal/accumulator/resaccumulator.go | 190 +
.../builtins/AnnotationsTransformer.go | 38 +
.../internal/builtins/ConfigMapGenerator.go | 39 +
.../api/internal/builtins/HashTransformer.go | 40 +
.../builtins/HelmChartInflationGenerator.go | 329 +
.../internal/builtins/IAMPolicyGenerator.go | 33 +
.../internal/builtins/ImageTagTransformer.go | 41 +
.../api/internal/builtins/LabelTransformer.go | 38 +
.../internal/builtins/NamespaceTransformer.go | 74 +
.../builtins/PatchJson6902Transformer.go | 105 +
.../PatchStrategicMergeTransformer.go | 89 +
.../api/internal/builtins/PatchTransformer.go | 153 +
.../internal/builtins/PrefixTransformer.go | 96 +
.../builtins/ReplacementTransformer.go | 78 +
.../builtins/ReplicaCountTransformer.go | 73 +
.../api/internal/builtins/SecretGenerator.go | 39 +
.../internal/builtins/SortOrderTransformer.go | 244 +
.../internal/builtins/SuffixTransformer.go | 96 +
.../internal/builtins/ValueAddTransformer.go | 141 +
.../kustomize/api/internal/builtins/doc.go | 8 +
.../api/internal/generators/configmap.go | 52 +
.../api/internal/generators/secret.go | 59 +
.../api/internal/generators/utils.go | 124 +
.../kustomize/api/internal/git/cloner.go | 50 +
.../kustomize/api/internal/git/gitrunner.go | 55 +
.../kustomize/api/internal/git/repospec.go | 387 +
.../api/internal/kusterr/yamlformaterror.go | 55 +
.../api/internal/plugins/builtinconfig/doc.go | 10 +
.../builtinconfig/loaddefaultconfig.go | 42 +
.../builtinconfig/namebackreferences.go | 99 +
.../builtinconfig/transformerconfig.go | 156 +
.../builtinplugintype_string.go | 41 +
.../plugins/builtinhelpers/builtins.go | 115 +
.../internal/plugins/execplugin/execplugin.go | 191 +
.../api/internal/plugins/fnplugin/fnplugin.go | 202 +
.../api/internal/plugins/loader/loader.go | 332 +
.../api/internal/plugins/utils/utils.go | 240 +
.../target/errmissingkustomization.go | 44 +
.../api/internal/target/kusttarget.go | 574 +
.../target/kusttarget_configplugin.go | 446 +
.../api/internal/target/multitransformer.go | 41 +
.../api/internal/utils/annotations.go | 29 +
.../api/internal/utils/errtimeout.go | 29 +
.../api/internal/utils/makeResIds.go | 68 +
.../api/internal/utils/stringslice.go | 44 +
.../kustomize/api/internal/utils/timedcall.go | 23 +
.../api/internal/validate/fieldvalidator.go | 68 +
.../builtinpluginconsts/commonannotations.go | 47 +
.../builtinpluginconsts/commonlabels.go | 113 +
.../builtinpluginconsts/defaultconfig.go | 42 +
.../api/konfig/builtinpluginconsts/doc.go | 8 +
.../api/konfig/builtinpluginconsts/images.go | 18 +
.../builtinpluginconsts/metadatalabels.go | 51 +
.../konfig/builtinpluginconsts/nameprefix.go | 11 +
.../builtinpluginconsts/namereference.go | 427 +
.../konfig/builtinpluginconsts/namespace.go | 20 +
.../konfig/builtinpluginconsts/namesuffix.go | 11 +
.../konfig/builtinpluginconsts/replicas.go | 23 +
.../builtinpluginconsts/templatelabels.go | 8 +
.../builtinpluginconsts/varreference.go | 223 +
.../sigs.k8s.io/kustomize/api/konfig/doc.go | 7 +
.../kustomize/api/konfig/general.go | 49 +
.../kustomize/api/konfig/plugins.go | 138 +
.../sigs.k8s.io/kustomize/api/krusty/doc.go | 11 +
.../kustomize/api/krusty/kustomizer.go | 163 +
.../kustomize/api/krusty/options.go | 70 +
vendor/sigs.k8s.io/kustomize/api/kv/kv.go | 197 +
.../kustomize/api/loader/errors.go | 11 +
.../kustomize/api/loader/fileloader.go | 349 +
.../kustomize/api/loader/loader.go | 35 +
.../kustomize/api/loader/loadrestrictions.go | 35 +
.../kustomize/api/provenance/provenance.go | 84 +
.../kustomize/api/provider/depprovider.go | 42 +
.../kustomize/api/resmap/factory.go | 145 +
.../kustomize/api/resmap/resmap.go | 333 +
.../kustomize/api/resmap/reswrangler.go | 764 +
.../sigs.k8s.io/kustomize/api/resource/doc.go | 5 +
.../kustomize/api/resource/factory.go | 293 +
.../kustomize/api/resource/idset.go | 30 +
.../kustomize/api/resource/origin.go | 106 +
.../kustomize/api/resource/resource.go | 534 +
.../builtinpluginloadingoptions_string.go | 25 +
.../kustomize/api/types/configmapargs.go | 10 +
vendor/sigs.k8s.io/kustomize/api/types/doc.go | 9 +
.../api/types/erronlybuiltinpluginsallowed.go | 29 +
.../kustomize/api/types/errunabletofind.go | 36 +
.../kustomize/api/types/fieldspec.go | 91 +
.../kustomize/api/types/generationbehavior.go | 46 +
.../kustomize/api/types/generatorargs.go | 27 +
.../kustomize/api/types/generatoroptions.go | 76 +
.../kustomize/api/types/helmchartargs.go | 185 +
.../kustomize/api/types/iampolicygenerator.go | 36 +
.../sigs.k8s.io/kustomize/api/types/image.go | 25 +
.../kustomize/api/types/kustomization.go | 351 +
.../kustomize/api/types/kvpairsources.go | 36 +
.../sigs.k8s.io/kustomize/api/types/labels.go | 30 +
.../kustomize/api/types/loadrestrictions.go | 24 +
.../api/types/loadrestrictions_string.go | 25 +
.../kustomize/api/types/objectmeta.go | 13 +
.../sigs.k8s.io/kustomize/api/types/pair.go | 10 +
.../sigs.k8s.io/kustomize/api/types/patch.go | 34 +
.../api/types/patchstrategicmerge.go | 9 +
.../kustomize/api/types/pluginconfig.go | 47 +
.../kustomize/api/types/pluginrestrictions.go | 62 +
.../api/types/pluginrestrictions_string.go | 25 +
.../kustomize/api/types/replacement.go | 87 +
.../kustomize/api/types/replacementfield.go | 9 +
.../kustomize/api/types/replica.go | 16 +
.../kustomize/api/types/secretargs.go | 19 +
.../kustomize/api/types/selector.go | 124 +
.../kustomize/api/types/sortoptions.go | 28 +
.../kustomize/api/types/typemeta.go | 11 +
vendor/sigs.k8s.io/kustomize/api/types/var.go | 211 +
vendor/sigs.k8s.io/kustomize/kyaml/LICENSE | 201 +
.../kustomize/kyaml/comments/comments.go | 83 +
.../kustomize/kyaml/errors/errors.go | 50 +
vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go | 10 +
.../kustomize/kyaml/fieldmeta/fieldmeta.go | 275 +
.../kustomize/kyaml/filesys/confirmeddir.go | 79 +
.../kustomize/kyaml/filesys/doc.go | 7 +
.../kustomize/kyaml/filesys/file.go | 15 +
.../kustomize/kyaml/filesys/fileinfo.go | 34 +
.../kustomize/kyaml/filesys/fileondisk.go | 27 +
.../kustomize/kyaml/filesys/filesystem.go | 153 +
.../kustomize/kyaml/filesys/fsnode.go | 647 +
.../kustomize/kyaml/filesys/fsondisk.go | 141 +
.../kustomize/kyaml/filesys/fsondisk_unix.go | 15 +
.../kyaml/filesys/fsondisk_windows.go | 18 +
.../kustomize/kyaml/filesys/util.go | 143 +
.../kyaml/fn/runtime/container/container.go | 208 +
.../kustomize/kyaml/fn/runtime/exec/doc.go | 5 +
.../kustomize/kyaml/fn/runtime/exec/exec.go | 54 +
.../kyaml/fn/runtime/runtimeutil/doc.go | 5 +
.../fn/runtime/runtimeutil/functiontypes.go | 312 +
.../fn/runtime/runtimeutil/runtimeutil.go | 281 +
.../kyaml/fn/runtime/runtimeutil/types.go | 8 +
.../kyaml/fn/runtime/starlark/context.go | 79 +
.../kyaml/fn/runtime/starlark/doc.go | 36 +
.../kyaml/fn/runtime/starlark/starlark.go | 180 +
.../forked/github.com/go-yaml/yaml/LICENSE | 50 +
.../forked/github.com/go-yaml/yaml/NOTICE} | 7 +-
.../forked/github.com/go-yaml/yaml/README.md | 150 +
.../forked/github.com/go-yaml/yaml/apic.go | 747 +
.../forked/github.com/go-yaml/yaml/decode.go | 1000 +
.../github.com/go-yaml/yaml/emitterc.go | 2028 +
.../forked/github.com/go-yaml/yaml/encode.go | 577 +
.../forked/github.com/go-yaml/yaml/parserc.go | 1258 +
.../forked/github.com/go-yaml/yaml/readerc.go | 434 +
.../forked/github.com/go-yaml/yaml/resolve.go | 326 +
.../github.com/go-yaml/yaml/scannerc.go | 3038 ++
.../forked/github.com/go-yaml/yaml/sorter.go | 134 +
.../forked/github.com/go-yaml/yaml/writerc.go | 48 +
.../forked/github.com/go-yaml/yaml/yaml.go | 708 +
.../forked/github.com/go-yaml/yaml/yamlh.go | 809 +
.../github.com/go-yaml/yaml/yamlprivateh.go | 198 +
.../github.com/qri-io/starlib/util/LICENSE | 21 +
.../github.com/qri-io/starlib/util/doc.go | 25 +
.../github.com/qri-io/starlib/util/util.go | 275 +
.../kustomize/kyaml/kio/byteio_reader.go | 349 +
.../kustomize/kyaml/kio/byteio_writer.go | 198 +
vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go | 35 +
.../kustomize/kyaml/kio/filters/filters.go | 210 +
.../kustomize/kyaml/kio/filters/fmtr.go | 314 +
.../kustomize/kyaml/kio/filters/grep.go | 117 +
.../kustomize/kyaml/kio/filters/local.go | 38 +
.../kustomize/kyaml/kio/filters/merge.go | 86 +
.../kustomize/kyaml/kio/filters/merge3.go | 317 +
.../kustomize/kyaml/kio/filters/modify.go | 4 +
.../kyaml/kio/filters/stripcomments.go | 32 +
.../kustomize/kyaml/kio/ignorefilesmatcher.go | 105 +
vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go | 447 +
.../kustomize/kyaml/kio/kioutil/kioutil.go | 420 +
.../kustomize/kyaml/kio/pkgio_reader.go | 360 +
.../kustomize/kyaml/kio/pkgio_writer.go | 150 +
.../sigs.k8s.io/kustomize/kyaml/kio/tree.go | 519 +
.../kustomize/kyaml/openapi/Makefile | 62 +
.../kustomize/kyaml/openapi/README.md | 84 +
.../openapi/kubernetesapi/openapiinfo.go | 18 +
.../openapi/kubernetesapi/v1_21_2/swagger.go | 249 +
.../openapi/kubernetesapi/v1_21_2/swagger.pb | 44195 ++++++++++++++++
.../kyaml/openapi/kustomizationapi/swagger.go | 248 +
.../openapi/kustomizationapi/swagger.json | 130 +
.../kustomize/kyaml/openapi/openapi.go | 776 +
.../kustomize/kyaml/order/syncorder.go | 121 +
.../sigs.k8s.io/kustomize/kyaml/resid/gvk.go | 255 +
.../kustomize/kyaml/resid/resid.go | 145 +
.../kustomize/kyaml/runfn/runfn.go | 549 +
.../kustomize/kyaml/sets/string.go | 64 +
.../kustomize/kyaml/sets/stringlist.go | 44 +
.../kustomize/kyaml/sliceutil/slice.go | 25 +
.../kustomize/kyaml/utils/pathsplitter.go | 71 +
.../sigs.k8s.io/kustomize/kyaml/yaml/alias.go | 109 +
.../kustomize/kyaml/yaml/compatibility.go | 100 +
.../sigs.k8s.io/kustomize/kyaml/yaml/const.go | 30 +
.../kustomize/kyaml/yaml/datamap.go | 121 +
.../sigs.k8s.io/kustomize/kyaml/yaml/doc.go | 49 +
.../kustomize/kyaml/yaml/filters.go | 146 +
.../sigs.k8s.io/kustomize/kyaml/yaml/fns.go | 878 +
.../k8sgen/pkg/labels/copied.deepcopy.go | 44 +
.../yaml/internal/k8sgen/pkg/labels/labels.go | 192 +
.../internal/k8sgen/pkg/labels/selector.go | 925 +
.../internal/k8sgen/pkg/selection/operator.go | 36 +
.../internal/k8sgen/pkg/util/errors/errors.go | 252 +
.../internal/k8sgen/pkg/util/sets/empty.go | 24 +
.../internal/k8sgen/pkg/util/sets/string.go | 206 +
.../pkg/util/validation/field/errors.go | 275 +
.../k8sgen/pkg/util/validation/field/path.go | 94 +
.../k8sgen/pkg/util/validation/validation.go | 506 +
.../sigs.k8s.io/kustomize/kyaml/yaml/kfns.go | 137 +
.../kustomize/kyaml/yaml/mapnode.go | 40 +
.../sigs.k8s.io/kustomize/kyaml/yaml/match.go | 333 +
.../kustomize/kyaml/yaml/merge2/merge2.go | 188 +
.../kyaml/yaml/merge2/smpdirective.go | 101 +
.../kyaml/yaml/merge2/smpdirective_string.go | 26 +
.../kustomize/kyaml/yaml/merge3/merge3.go | 45 +
.../kustomize/kyaml/yaml/merge3/visitor.go | 172 +
.../sigs.k8s.io/kustomize/kyaml/yaml/order.go | 107 +
.../sigs.k8s.io/kustomize/kyaml/yaml/rnode.go | 1368 +
.../kustomize/kyaml/yaml/schema/schema.go | 44 +
.../sigs.k8s.io/kustomize/kyaml/yaml/types.go | 299 +
.../sigs.k8s.io/kustomize/kyaml/yaml/util.go | 70 +
.../kyaml/yaml/walk/associative_sequence.go | 385 +
.../kustomize/kyaml/yaml/walk/map.go | 173 +
.../yaml/walk/nonassociative_sequence.go | 13 +
.../kustomize/kyaml/yaml/walk/scalar.go | 11 +
.../kustomize/kyaml/yaml/walk/visitor.go | 28 +
.../kustomize/kyaml/yaml/walk/walk.go | 186 +
1743 files changed, 382118 insertions(+), 13737 deletions(-)
create mode 100755 hack/ci-e2e-kind.sh
create mode 100644 hack/config/aws_config.sh
create mode 100644 hack/config/azure_config.sh
create mode 100644 hack/config/gcp_config.sh
create mode 100755 hack/deploy-azurite.sh
create mode 100755 hack/deploy-fakegcs.sh
create mode 100755 hack/deploy-localstack.sh
create mode 100644 hack/e2e-test/infrastructure/azurite/azurite.yaml
create mode 100644 hack/e2e-test/infrastructure/fake-gcs-server/fake-gcs-server.yaml
create mode 100644 hack/e2e-test/infrastructure/kind/cluster.yaml
create mode 100644 hack/e2e-test/infrastructure/localstack/localstack.yaml
create mode 100755 hack/e2e-test/run-e2e-test.sh
create mode 100755 hack/kind-up.sh
create mode 100644 hack/tools/bin/.gitkeep
rename test/e2e/{integrationcluster => }/backup_test.go (66%)
delete mode 100644 test/e2e/integrationcluster/utils.go
rename test/e2e/{integrationcluster => }/integrationcluster_suite_test.go (51%)
create mode 100644 test/e2e/utils.go
rename test/{e2e => }/integration/cloud_backup_test.go (99%)
rename test/{e2e => }/integration/integration_suite_test.go (100%)
rename test/{e2e => }/integration/utils.go (100%)
create mode 100644 vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE
create mode 100644 vendor/github.com/AdaLogics/go-fuzz-headers/README.md
create mode 100644 vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go
create mode 100644 vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go
create mode 100644 vendor/github.com/AdaLogics/go-fuzz-headers/sql.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE
create mode 100644 vendor/github.com/Azure/go-ansiterm/README.md
create mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/context.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/states.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/api.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
delete mode 100644 vendor/github.com/BurntSushi/toml/.travis.yml
delete mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE
delete mode 100644 vendor/github.com/BurntSushi/toml/Makefile
create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go
create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go
delete mode 100644 vendor/github.com/BurntSushi/toml/encoding_types.go
delete mode 100644 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
create mode 100644 vendor/github.com/BurntSushi/toml/error.go
create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go
rename vendor/github.com/BurntSushi/toml/{decode_meta.go => meta.go} (59%)
delete mode 100644 vendor/github.com/BurntSushi/toml/session.vim
rename vendor/github.com/BurntSushi/toml/{type_check.go => type_toml.go} (75%)
create mode 100644 vendor/github.com/MakeNowJust/heredoc/LICENSE
create mode 100644 vendor/github.com/MakeNowJust/heredoc/README.md
create mode 100644 vendor/github.com/MakeNowJust/heredoc/heredoc.go
delete mode 100644 vendor/github.com/Masterminds/semver/.travis.yml
delete mode 100644 vendor/github.com/Masterminds/semver/CHANGELOG.md
delete mode 100644 vendor/github.com/Masterminds/semver/Makefile
delete mode 100644 vendor/github.com/Masterminds/semver/README.md
delete mode 100644 vendor/github.com/Masterminds/semver/appveyor.yml
delete mode 100644 vendor/github.com/Masterminds/semver/constraints.go
delete mode 100644 vendor/github.com/Masterminds/semver/doc.go
create mode 100644 vendor/github.com/Masterminds/semver/v3/.gitignore
create mode 100644 vendor/github.com/Masterminds/semver/v3/.golangci.yml
create mode 100644 vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
rename vendor/github.com/Masterminds/semver/{ => v3}/LICENSE.txt (100%)
create mode 100644 vendor/github.com/Masterminds/semver/v3/Makefile
create mode 100644 vendor/github.com/Masterminds/semver/v3/README.md
create mode 100644 vendor/github.com/Masterminds/semver/v3/SECURITY.md
rename vendor/github.com/Masterminds/semver/{ => v3}/collection.go (100%)
create mode 100644 vendor/github.com/Masterminds/semver/v3/constraints.go
create mode 100644 vendor/github.com/Masterminds/semver/v3/doc.go
rename vendor/github.com/Masterminds/semver/{ => v3}/version.go (55%)
delete mode 100644 vendor/github.com/Masterminds/semver/version_fuzz.go
delete mode 100644 vendor/github.com/Masterminds/sprig/.travis.yml
delete mode 100644 vendor/github.com/Masterminds/sprig/Makefile
delete mode 100644 vendor/github.com/Masterminds/sprig/appveyor.yml
delete mode 100644 vendor/github.com/Masterminds/sprig/glide.yaml
delete mode 100644 vendor/github.com/Masterminds/sprig/numeric.go
delete mode 100644 vendor/github.com/Masterminds/sprig/regex.go
rename vendor/github.com/Masterminds/sprig/{ => v3}/.gitignore (100%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/CHANGELOG.md (74%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/LICENSE.txt (96%)
create mode 100644 vendor/github.com/Masterminds/sprig/v3/Makefile
rename vendor/github.com/Masterminds/sprig/{ => v3}/README.md (65%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/crypto.go (65%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/date.go (52%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/defaults.go (53%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/dict.go (63%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/doc.go (92%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/functions.go (58%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/list.go (58%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/network.go (75%)
create mode 100644 vendor/github.com/Masterminds/sprig/v3/numeric.go
rename vendor/github.com/Masterminds/sprig/{ => v3}/reflect.go (100%)
create mode 100644 vendor/github.com/Masterminds/sprig/v3/regex.go
rename vendor/github.com/Masterminds/sprig/{ => v3}/semver.go (90%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/strings.go (97%)
rename vendor/github.com/Masterminds/sprig/{ => v3}/url.go (64%)
create mode 100644 vendor/github.com/Masterminds/squirrel/.gitignore
create mode 100644 vendor/github.com/Masterminds/squirrel/.travis.yml
create mode 100644 vendor/github.com/Masterminds/squirrel/LICENSE
create mode 100644 vendor/github.com/Masterminds/squirrel/README.md
create mode 100644 vendor/github.com/Masterminds/squirrel/case.go
create mode 100644 vendor/github.com/Masterminds/squirrel/delete.go
create mode 100644 vendor/github.com/Masterminds/squirrel/delete_ctx.go
create mode 100644 vendor/github.com/Masterminds/squirrel/expr.go
create mode 100644 vendor/github.com/Masterminds/squirrel/insert.go
create mode 100644 vendor/github.com/Masterminds/squirrel/insert_ctx.go
create mode 100644 vendor/github.com/Masterminds/squirrel/part.go
create mode 100644 vendor/github.com/Masterminds/squirrel/placeholder.go
create mode 100644 vendor/github.com/Masterminds/squirrel/row.go
create mode 100644 vendor/github.com/Masterminds/squirrel/select.go
create mode 100644 vendor/github.com/Masterminds/squirrel/select_ctx.go
create mode 100644 vendor/github.com/Masterminds/squirrel/squirrel.go
create mode 100644 vendor/github.com/Masterminds/squirrel/squirrel_ctx.go
create mode 100644 vendor/github.com/Masterminds/squirrel/statement.go
create mode 100644 vendor/github.com/Masterminds/squirrel/stmtcacher.go
create mode 100644 vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go
create mode 100644 vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go
create mode 100644 vendor/github.com/Masterminds/squirrel/update.go
create mode 100644 vendor/github.com/Masterminds/squirrel/update_ctx.go
create mode 100644 vendor/github.com/Masterminds/squirrel/where.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE
create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
create mode 100644 vendor/github.com/asaskevich/govalidator/.gitignore
create mode 100644 vendor/github.com/asaskevich/govalidator/.travis.yml
create mode 100644 vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
create mode 100644 vendor/github.com/asaskevich/govalidator/LICENSE
create mode 100644 vendor/github.com/asaskevich/govalidator/README.md
create mode 100644 vendor/github.com/asaskevich/govalidator/arrays.go
create mode 100644 vendor/github.com/asaskevich/govalidator/converter.go
create mode 100644 vendor/github.com/asaskevich/govalidator/doc.go
create mode 100644 vendor/github.com/asaskevich/govalidator/error.go
create mode 100644 vendor/github.com/asaskevich/govalidator/numerics.go
create mode 100644 vendor/github.com/asaskevich/govalidator/patterns.go
create mode 100644 vendor/github.com/asaskevich/govalidator/types.go
create mode 100644 vendor/github.com/asaskevich/govalidator/utils.go
create mode 100644 vendor/github.com/asaskevich/govalidator/validator.go
create mode 100644 vendor/github.com/asaskevich/govalidator/wercker.yml
create mode 100644 vendor/github.com/chai2010/gettext-go/.travis.yml
create mode 100644 vendor/github.com/chai2010/gettext-go/LICENSE
create mode 100644 vendor/github.com/chai2010/gettext-go/README.md
create mode 100644 vendor/github.com/chai2010/gettext-go/doc.go
create mode 100644 vendor/github.com/chai2010/gettext-go/fs.go
create mode 100644 vendor/github.com/chai2010/gettext-go/fs_json.go
create mode 100644 vendor/github.com/chai2010/gettext-go/fs_os.go
create mode 100644 vendor/github.com/chai2010/gettext-go/fs_zip.go
create mode 100644 vendor/github.com/chai2010/gettext-go/gettext.go
create mode 100644 vendor/github.com/chai2010/gettext-go/locale.go
create mode 100644 vendor/github.com/chai2010/gettext-go/mo/doc.go
create mode 100644 vendor/github.com/chai2010/gettext-go/mo/encoder.go
create mode 100644 vendor/github.com/chai2010/gettext-go/mo/file.go
create mode 100644 vendor/github.com/chai2010/gettext-go/mo/header.go
create mode 100644 vendor/github.com/chai2010/gettext-go/mo/message.go
create mode 100644 vendor/github.com/chai2010/gettext-go/mo/util.go
create mode 100644 vendor/github.com/chai2010/gettext-go/plural/doc.go
create mode 100644 vendor/github.com/chai2010/gettext-go/plural/formula.go
create mode 100644 vendor/github.com/chai2010/gettext-go/plural/table.go
create mode 100644 vendor/github.com/chai2010/gettext-go/po/comment.go
create mode 100644 vendor/github.com/chai2010/gettext-go/po/doc.go
create mode 100644 vendor/github.com/chai2010/gettext-go/po/file.go
create mode 100644 vendor/github.com/chai2010/gettext-go/po/header.go
create mode 100644 vendor/github.com/chai2010/gettext-go/po/line_reader.go
create mode 100644 vendor/github.com/chai2010/gettext-go/po/message.go
create mode 100644 vendor/github.com/chai2010/gettext-go/po/re.go
create mode 100644 vendor/github.com/chai2010/gettext-go/po/util.go
create mode 100644 vendor/github.com/chai2010/gettext-go/tr.go
create mode 100644 vendor/github.com/chai2010/gettext-go/util.go
create mode 100644 vendor/github.com/containerd/containerd/LICENSE
create mode 100644 vendor/github.com/containerd/containerd/NOTICE
create mode 100644 vendor/github.com/containerd/containerd/archive/compression/compression.go
create mode 100644 vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go
create mode 100644 vendor/github.com/containerd/containerd/content/adaptor.go
create mode 100644 vendor/github.com/containerd/containerd/content/content.go
create mode 100644 vendor/github.com/containerd/containerd/content/helpers.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/locks.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/readerat.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/store.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/store_bsd.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/store_openbsd.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/store_unix.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/store_windows.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/test_helper.go
create mode 100644 vendor/github.com/containerd/containerd/content/local/writer.go
create mode 100644 vendor/github.com/containerd/containerd/errdefs/errors.go
create mode 100644 vendor/github.com/containerd/containerd/errdefs/grpc.go
create mode 100644 vendor/github.com/containerd/containerd/filters/adaptor.go
create mode 100644 vendor/github.com/containerd/containerd/filters/filter.go
create mode 100644 vendor/github.com/containerd/containerd/filters/parser.go
create mode 100644 vendor/github.com/containerd/containerd/filters/quote.go
create mode 100644 vendor/github.com/containerd/containerd/filters/scanner.go
create mode 100644 vendor/github.com/containerd/containerd/images/annotations.go
create mode 100644 vendor/github.com/containerd/containerd/images/diffid.go
create mode 100644 vendor/github.com/containerd/containerd/images/handlers.go
create mode 100644 vendor/github.com/containerd/containerd/images/image.go
create mode 100644 vendor/github.com/containerd/containerd/images/importexport.go
create mode 100644 vendor/github.com/containerd/containerd/images/labels.go
create mode 100644 vendor/github.com/containerd/containerd/images/mediatypes.go
create mode 100644 vendor/github.com/containerd/containerd/labels/labels.go
create mode 100644 vendor/github.com/containerd/containerd/labels/validate.go
create mode 100644 vendor/github.com/containerd/containerd/log/context_deprecated.go
create mode 100644 vendor/github.com/containerd/containerd/pkg/randutil/randutil.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/compare.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/database.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_darwin.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_unix.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_windows.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms_other.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms_windows.go
create mode 100644 vendor/github.com/containerd/containerd/reference/docker/helpers.go
create mode 100644 vendor/github.com/containerd/containerd/reference/docker/normalize.go
create mode 100644 vendor/github.com/containerd/containerd/reference/docker/reference.go
create mode 100644 vendor/github.com/containerd/containerd/reference/docker/regexp.go
create mode 100644 vendor/github.com/containerd/containerd/reference/docker/sort.go
create mode 100644 vendor/github.com/containerd/containerd/reference/reference.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/authorizer.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/converter.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/errcode.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/errdesc.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/handler.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/pusher.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/registry.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/resolver.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/scope.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/status.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/errors/errors.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/handlers.go
create mode 100644 vendor/github.com/containerd/containerd/remotes/resolver.go
create mode 100644 vendor/github.com/containerd/containerd/tracing/helpers.go
create mode 100644 vendor/github.com/containerd/containerd/tracing/log.go
create mode 100644 vendor/github.com/containerd/containerd/tracing/tracing.go
create mode 100644 vendor/github.com/containerd/containerd/version/version.go
create mode 100644 vendor/github.com/containerd/log/.golangci.yml
create mode 100644 vendor/github.com/containerd/log/LICENSE
create mode 100644 vendor/github.com/containerd/log/README.md
create mode 100644 vendor/github.com/containerd/log/context.go
create mode 100644 vendor/github.com/docker/cli/AUTHORS
create mode 100644 vendor/github.com/docker/cli/LICENSE
create mode 100644 vendor/github.com/docker/cli/NOTICE
create mode 100644 vendor/github.com/docker/cli/cli/config/config.go
create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file.go
create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file_unix.go
create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file_windows.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/credentials.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/file_store.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/native_store.go
create mode 100644 vendor/github.com/docker/cli/cli/config/types/authconfig.go
create mode 100644 vendor/github.com/docker/distribution/.dockerignore
create mode 100644 vendor/github.com/docker/distribution/.gitignore
create mode 100644 vendor/github.com/docker/distribution/.golangci.yml
create mode 100644 vendor/github.com/docker/distribution/.mailmap
create mode 100644 vendor/github.com/docker/distribution/BUILDING.md
create mode 100644 vendor/github.com/docker/distribution/CONTRIBUTING.md
create mode 100644 vendor/github.com/docker/distribution/Dockerfile
create mode 100644 vendor/github.com/docker/distribution/LICENSE
create mode 100644 vendor/github.com/docker/distribution/MAINTAINERS
create mode 100644 vendor/github.com/docker/distribution/Makefile
create mode 100644 vendor/github.com/docker/distribution/README.md
create mode 100644 vendor/github.com/docker/distribution/ROADMAP.md
create mode 100644 vendor/github.com/docker/distribution/blobs.go
create mode 100644 vendor/github.com/docker/distribution/digestset/set.go
create mode 100644 vendor/github.com/docker/distribution/doc.go
create mode 100644 vendor/github.com/docker/distribution/docker-bake.hcl
create mode 100644 vendor/github.com/docker/distribution/errors.go
create mode 100644 vendor/github.com/docker/distribution/manifests.go
create mode 100644 vendor/github.com/docker/distribution/metrics/prometheus.go
create mode 100644 vendor/github.com/docker/distribution/reference/helpers.go
create mode 100644 vendor/github.com/docker/distribution/reference/normalize.go
create mode 100644 vendor/github.com/docker/distribution/reference/reference.go
create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go
create mode 100644 vendor/github.com/docker/distribution/registry.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/doc.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/errors.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/routes.go
create mode 100644 vendor/github.com/docker/distribution/registry/api/v2/urls.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/api_version.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/session.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/blob_writer.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/errors.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/repository.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
create mode 100644 vendor/github.com/docker/distribution/registry/client/transport/transport.go
create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cache.go
create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
create mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
create mode 100644 vendor/github.com/docker/distribution/tags.go
create mode 100644 vendor/github.com/docker/distribution/vendor.conf
create mode 100644 vendor/github.com/docker/docker-credential-helpers/LICENSE
create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/client.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/command.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/error.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/version.go
create mode 100644 vendor/github.com/docker/docker/AUTHORS
create mode 100644 vendor/github.com/docker/docker/LICENSE
create mode 100644 vendor/github.com/docker/docker/NOTICE
create mode 100644 vendor/github.com/docker/docker/api/types/auth.go
create mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
create mode 100644 vendor/github.com/docker/docker/api/types/client.go
create mode 100644 vendor/github.com/docker/docker/api/types/configs.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/change_type.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/change_types.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/config.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/create_response.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/filesystem_change.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_exit_error.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_response.go
create mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go
create mode 100644 vendor/github.com/docker/docker/api/types/error_response.go
create mode 100644 vendor/github.com/docker/docker/api/types/error_response_ext.go
create mode 100644 vendor/github.com/docker/docker/api/types/filters/errors.go
create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go
create mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go
create mode 100644 vendor/github.com/docker/docker/api/types/id_response.go
create mode 100644 vendor/github.com/docker/docker/api/types/image_delete_response_item.go
create mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go
create mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go
create mode 100644 vendor/github.com/docker/docker/api/types/network/network.go
create mode 100644 vendor/github.com/docker/docker/api/types/plugin.go
create mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go
create mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go
create mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go
create mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go
create mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go
create mode 100644 vendor/github.com/docker/docker/api/types/port.go
create mode 100644 vendor/github.com/docker/docker/api/types/registry/authconfig.go
create mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go
create mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go
create mode 100644 vendor/github.com/docker/docker/api/types/service_update_response.go
create mode 100644 vendor/github.com/docker/docker/api/types/stats.go
create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go
create mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go
create mode 100644 vendor/github.com/docker/docker/api/types/types.go
create mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md
create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go
create mode 100644 vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
create mode 100644 vendor/github.com/docker/docker/api/types/volume/create_options.go
create mode 100644 vendor/github.com/docker/docker/api/types/volume/list_response.go
create mode 100644 vendor/github.com/docker/docker/api/types/volume/options.go
create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume.go
create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_update.go
create mode 100644 vendor/github.com/docker/docker/errdefs/defs.go
create mode 100644 vendor/github.com/docker/docker/errdefs/doc.go
create mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go
create mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go
create mode 100644 vendor/github.com/docker/docker/errdefs/is.go
create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer.go
create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers.go
create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go
create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers.go
create mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
create mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath.go
create mode 100644 vendor/github.com/docker/docker/registry/auth.go
create mode 100644 vendor/github.com/docker/docker/registry/config.go
create mode 100644 vendor/github.com/docker/docker/registry/config_unix.go
create mode 100644 vendor/github.com/docker/docker/registry/config_windows.go
create mode 100644 vendor/github.com/docker/docker/registry/endpoint_v1.go
create mode 100644 vendor/github.com/docker/docker/registry/errors.go
create mode 100644 vendor/github.com/docker/docker/registry/registry.go
create mode 100644 vendor/github.com/docker/docker/registry/search.go
create mode 100644 vendor/github.com/docker/docker/registry/service.go
create mode 100644 vendor/github.com/docker/docker/registry/service_v2.go
create mode 100644 vendor/github.com/docker/docker/registry/session.go
create mode 100644 vendor/github.com/docker/docker/registry/types.go
create mode 100644 vendor/github.com/docker/go-connections/LICENSE
create mode 100644 vendor/github.com/docker/go-connections/nat/nat.go
create mode 100644 vendor/github.com/docker/go-connections/nat/parse.go
create mode 100644 vendor/github.com/docker/go-connections/nat/sort.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
create mode 100644 vendor/github.com/docker/go-metrics/CONTRIBUTING.md
create mode 100644 vendor/github.com/docker/go-metrics/LICENSE
create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.docs
create mode 100644 vendor/github.com/docker/go-metrics/NOTICE
create mode 100644 vendor/github.com/docker/go-metrics/README.md
create mode 100644 vendor/github.com/docker/go-metrics/counter.go
create mode 100644 vendor/github.com/docker/go-metrics/docs.go
create mode 100644 vendor/github.com/docker/go-metrics/gauge.go
create mode 100644 vendor/github.com/docker/go-metrics/handler.go
create mode 100644 vendor/github.com/docker/go-metrics/helpers.go
create mode 100644 vendor/github.com/docker/go-metrics/namespace.go
create mode 100644 vendor/github.com/docker/go-metrics/register.go
create mode 100644 vendor/github.com/docker/go-metrics/timer.go
create mode 100644 vendor/github.com/docker/go-metrics/unit.go
create mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md
create mode 100644 vendor/github.com/docker/go-units/LICENSE
create mode 100644 vendor/github.com/docker/go-units/MAINTAINERS
create mode 100644 vendor/github.com/docker/go-units/README.md
create mode 100644 vendor/github.com/docker/go-units/circle.yml
create mode 100644 vendor/github.com/docker/go-units/duration.go
create mode 100644 vendor/github.com/docker/go-units/size.go
create mode 100644 vendor/github.com/docker/go-units/ulimit.go
create mode 100644 vendor/github.com/exponent-io/jsonpath/.gitignore
create mode 100644 vendor/github.com/exponent-io/jsonpath/.travis.yml
create mode 100644 vendor/github.com/exponent-io/jsonpath/LICENSE
create mode 100644 vendor/github.com/exponent-io/jsonpath/README.md
create mode 100644 vendor/github.com/exponent-io/jsonpath/decoder.go
create mode 100644 vendor/github.com/exponent-io/jsonpath/path.go
create mode 100644 vendor/github.com/exponent-io/jsonpath/pathaction.go
create mode 100644 vendor/github.com/fatih/color/LICENSE.md
create mode 100644 vendor/github.com/fatih/color/README.md
create mode 100644 vendor/github.com/fatih/color/color.go
create mode 100644 vendor/github.com/fatih/color/doc.go
create mode 100644 vendor/github.com/felixge/httpsnoop/.gitignore
create mode 100644 vendor/github.com/felixge/httpsnoop/.travis.yml
create mode 100644 vendor/github.com/felixge/httpsnoop/LICENSE.txt
create mode 100644 vendor/github.com/felixge/httpsnoop/Makefile
create mode 100644 vendor/github.com/felixge/httpsnoop/README.md
create mode 100644 vendor/github.com/felixge/httpsnoop/capture_metrics.go
create mode 100644 vendor/github.com/felixge/httpsnoop/docs.go
create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
delete mode 100644 vendor/github.com/ghodss/yaml/.gitignore
delete mode 100644 vendor/github.com/ghodss/yaml/.travis.yml
delete mode 100644 vendor/github.com/ghodss/yaml/README.md
delete mode 100644 vendor/github.com/ghodss/yaml/fields.go
delete mode 100644 vendor/github.com/ghodss/yaml/yaml.go
create mode 100644 vendor/github.com/go-errors/errors/.travis.yml
create mode 100644 vendor/github.com/go-errors/errors/LICENSE.MIT
create mode 100644 vendor/github.com/go-errors/errors/README.md
create mode 100644 vendor/github.com/go-errors/errors/error.go
create mode 100644 vendor/github.com/go-errors/errors/error_1_13.go
create mode 100644 vendor/github.com/go-errors/errors/error_backward.go
create mode 100644 vendor/github.com/go-errors/errors/parse_panic.go
create mode 100644 vendor/github.com/go-errors/errors/stackframe.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/.gitignore
create mode 100644 vendor/github.com/go-gorp/gorp/v3/.travis.yml
create mode 100644 vendor/github.com/go-gorp/gorp/v3/CONTRIBUTING.md
create mode 100644 vendor/github.com/go-gorp/gorp/v3/LICENSE
create mode 100644 vendor/github.com/go-gorp/gorp/v3/README.md
create mode 100644 vendor/github.com/go-gorp/gorp/v3/column.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/db.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/dialect.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/dialect_mysql.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/dialect_oracle.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/dialect_postgres.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/dialect_snowflake.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/dialect_sqlite.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/dialect_sqlserver.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/doc.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/errors.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/gorp.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/hooks.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/index.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/lockerror.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/logging.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/nulltypes.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/select.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/table.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/table_bindings.go
create mode 100644 vendor/github.com/go-gorp/gorp/v3/test_all.sh
create mode 100644 vendor/github.com/go-gorp/gorp/v3/transaction.go
create mode 100644 vendor/github.com/go-logr/stdr/LICENSE
create mode 100644 vendor/github.com/go-logr/stdr/README.md
create mode 100644 vendor/github.com/go-logr/stdr/stdr.go
create mode 100644 vendor/github.com/google/shlex/COPYING
create mode 100644 vendor/github.com/google/shlex/README
create mode 100644 vendor/github.com/google/shlex/shlex.go
create mode 100644 vendor/github.com/gorilla/mux/AUTHORS
create mode 100644 vendor/github.com/gorilla/mux/LICENSE
create mode 100644 vendor/github.com/gorilla/mux/README.md
create mode 100644 vendor/github.com/gorilla/mux/doc.go
create mode 100644 vendor/github.com/gorilla/mux/middleware.go
create mode 100644 vendor/github.com/gorilla/mux/mux.go
create mode 100644 vendor/github.com/gorilla/mux/regexp.go
create mode 100644 vendor/github.com/gorilla/mux/route.go
create mode 100644 vendor/github.com/gorilla/mux/test_helpers.go
create mode 100644 vendor/github.com/gosuri/uitable/.travis.yml
create mode 100644 vendor/github.com/gosuri/uitable/LICENSE
create mode 100644 vendor/github.com/gosuri/uitable/Makefile
create mode 100644 vendor/github.com/gosuri/uitable/README.md
create mode 100644 vendor/github.com/gosuri/uitable/table.go
create mode 100644 vendor/github.com/gosuri/uitable/util/strutil/strutil.go
create mode 100644 vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md
create mode 100644 vendor/github.com/gosuri/uitable/util/wordwrap/README.md
create mode 100644 vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go
create mode 100644 vendor/github.com/gregjones/httpcache/.travis.yml
create mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt
create mode 100644 vendor/github.com/gregjones/httpcache/README.md
create mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go
create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
create mode 100644 vendor/github.com/hashicorp/errwrap/README.md
create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go
create mode 100644 vendor/github.com/jmoiron/sqlx/.gitignore
create mode 100644 vendor/github.com/jmoiron/sqlx/.travis.yml
create mode 100644 vendor/github.com/jmoiron/sqlx/LICENSE
create mode 100644 vendor/github.com/jmoiron/sqlx/README.md
create mode 100644 vendor/github.com/jmoiron/sqlx/bind.go
create mode 100644 vendor/github.com/jmoiron/sqlx/doc.go
create mode 100644 vendor/github.com/jmoiron/sqlx/named.go
create mode 100644 vendor/github.com/jmoiron/sqlx/named_context.go
create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/README.md
create mode 100644 vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx.go
create mode 100644 vendor/github.com/jmoiron/sqlx/sqlx_context.go
create mode 100644 vendor/github.com/lann/builder/.gitignore
create mode 100644 vendor/github.com/lann/builder/.travis.yml
create mode 100644 vendor/github.com/lann/builder/LICENSE
create mode 100644 vendor/github.com/lann/builder/README.md
create mode 100644 vendor/github.com/lann/builder/builder.go
create mode 100644
vendor/github.com/lann/builder/reflect.go create mode 100644 vendor/github.com/lann/builder/registry.go create mode 100644 vendor/github.com/lann/ps/LICENSE create mode 100644 vendor/github.com/lann/ps/README.md create mode 100644 vendor/github.com/lann/ps/list.go create mode 100644 vendor/github.com/lann/ps/map.go create mode 100644 vendor/github.com/lann/ps/profile.sh create mode 100644 vendor/github.com/lib/pq/.gitignore create mode 100644 vendor/github.com/lib/pq/LICENSE.md create mode 100644 vendor/github.com/lib/pq/README.md create mode 100644 vendor/github.com/lib/pq/TESTS.md create mode 100644 vendor/github.com/lib/pq/array.go create mode 100644 vendor/github.com/lib/pq/buf.go create mode 100644 vendor/github.com/lib/pq/conn.go create mode 100644 vendor/github.com/lib/pq/conn_go115.go create mode 100644 vendor/github.com/lib/pq/conn_go18.go create mode 100644 vendor/github.com/lib/pq/connector.go create mode 100644 vendor/github.com/lib/pq/copy.go create mode 100644 vendor/github.com/lib/pq/doc.go create mode 100644 vendor/github.com/lib/pq/encode.go create mode 100644 vendor/github.com/lib/pq/error.go create mode 100644 vendor/github.com/lib/pq/krb.go create mode 100644 vendor/github.com/lib/pq/notice.go create mode 100644 vendor/github.com/lib/pq/notify.go create mode 100644 vendor/github.com/lib/pq/oid/doc.go create mode 100644 vendor/github.com/lib/pq/oid/types.go create mode 100644 vendor/github.com/lib/pq/rows.go create mode 100644 vendor/github.com/lib/pq/scram/scram.go create mode 100644 vendor/github.com/lib/pq/ssl.go create mode 100644 vendor/github.com/lib/pq/ssl_permissions.go create mode 100644 vendor/github.com/lib/pq/ssl_windows.go create mode 100644 vendor/github.com/lib/pq/url.go create mode 100644 vendor/github.com/lib/pq/user_other.go create mode 100644 vendor/github.com/lib/pq/user_posix.go create mode 100644 vendor/github.com/lib/pq/user_windows.go create mode 100644 vendor/github.com/lib/pq/uuid.go create mode 100644 vendor/github.com/liggitt/tabwriter/.travis.yml create mode 100644 vendor/github.com/liggitt/tabwriter/LICENSE create mode 100644 vendor/github.com/liggitt/tabwriter/README.md create mode 100644 vendor/github.com/liggitt/tabwriter/tabwriter.go create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE create mode 100644 vendor/github.com/mattn/go-colorable/README.md create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/mattn/go-runewidth/.travis.yml create mode 100644 vendor/github.com/mattn/go-runewidth/LICENSE create mode 100644 
vendor/github.com/mattn/go-runewidth/README.md create mode 100644 vendor/github.com/mattn/go-runewidth/go.test.sh create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_appengine.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_js.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_posix.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_table.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_windows.go delete mode 100644 vendor/github.com/mitchellh/copystructure/.travis.yml create mode 100644 vendor/github.com/mitchellh/go-wordwrap/LICENSE.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/README.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/wordwrap.go create mode 100644 vendor/github.com/moby/locker/LICENSE create mode 100644 vendor/github.com/moby/locker/README.md create mode 100644 vendor/github.com/moby/locker/locker.go create mode 100644 vendor/github.com/moby/term/.gitignore create mode 100644 vendor/github.com/moby/term/LICENSE create mode 100644 vendor/github.com/moby/term/README.md create mode 100644 vendor/github.com/moby/term/ascii.go create mode 100644 vendor/github.com/moby/term/doc.go create mode 100644 vendor/github.com/moby/term/proxy.go create mode 100644 vendor/github.com/moby/term/term.go create mode 100644 vendor/github.com/moby/term/term_unix.go create mode 100644 vendor/github.com/moby/term/term_windows.go create mode 100644 vendor/github.com/moby/term/termios_bsd.go create mode 100644 vendor/github.com/moby/term/termios_nonbsd.go create mode 100644 vendor/github.com/moby/term/termios_unix.go create mode 100644 vendor/github.com/moby/term/termios_windows.go create mode 100644 vendor/github.com/moby/term/windows/ansi_reader.go create mode 100644 vendor/github.com/moby/term/windows/ansi_writer.go create mode 100644 vendor/github.com/moby/term/windows/console.go create mode 100644 vendor/github.com/moby/term/windows/doc.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/.travis.yml create mode 100644 vendor/github.com/monochromegane/go-gitignore/LICENSE create mode 100644 vendor/github.com/monochromegane/go-gitignore/README.md create mode 100644 vendor/github.com/monochromegane/go-gitignore/depth_holder.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/full_scan_patterns.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/gitignore.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/index_scan_patterns.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/initial_holder.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/match.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/pattern.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/patterns.go create mode 100644 vendor/github.com/monochromegane/go-gitignore/util.go create mode 100644 vendor/github.com/morikuni/aec/LICENSE create mode 100644 vendor/github.com/morikuni/aec/README.md create mode 100644 vendor/github.com/morikuni/aec/aec.go create mode 100644 vendor/github.com/morikuni/aec/ansi.go create mode 100644 vendor/github.com/morikuni/aec/builder.go create mode 100644 vendor/github.com/morikuni/aec/sample.gif create mode 100644 vendor/github.com/morikuni/aec/sgr.go create mode 100644 vendor/github.com/opencontainers/go-digest/.mailmap create mode 100644 
vendor/github.com/opencontainers/go-digest/.pullapprove.yml create mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml create mode 100644 vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs create mode 100644 vendor/github.com/opencontainers/go-digest/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/go-digest/README.md create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go create mode 100644 vendor/github.com/opencontainers/go-digest/digest.go create mode 100644 vendor/github.com/opencontainers/go-digest/digester.go create mode 100644 vendor/github.com/opencontainers/go-digest/doc.go create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go create mode 100644 vendor/github.com/peterbourgon/diskv/LICENSE create mode 100644 vendor/github.com/peterbourgon/diskv/README.md create mode 100644 vendor/github.com/peterbourgon/diskv/compression.go create mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go create mode 100644 vendor/github.com/peterbourgon/diskv/index.go create mode 100644 vendor/github.com/rubenv/sql-migrate/.dockerignore create mode 100644 vendor/github.com/rubenv/sql-migrate/.gitignore create mode 100644 vendor/github.com/rubenv/sql-migrate/.golangci.yaml create mode 100644 vendor/github.com/rubenv/sql-migrate/Dockerfile create mode 100644 vendor/github.com/rubenv/sql-migrate/LICENSE create mode 100644 vendor/github.com/rubenv/sql-migrate/Makefile create mode 100644 vendor/github.com/rubenv/sql-migrate/README.md create mode 100644 vendor/github.com/rubenv/sql-migrate/doc.go create mode 100644 vendor/github.com/rubenv/sql-migrate/migrate.go create mode 100644 vendor/github.com/rubenv/sql-migrate/migrate_go116.go create mode 100644 vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE create mode 100644 vendor/github.com/rubenv/sql-migrate/sqlparse/README.md create mode 100644 vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go create mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore create mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml create mode 100644 vendor/github.com/russross/blackfriday/v2/LICENSE.txt create mode 100644 vendor/github.com/russross/blackfriday/v2/README.md create mode 100644 vendor/github.com/russross/blackfriday/v2/block.go create mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go create mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go create mode 100644 
vendor/github.com/russross/blackfriday/v2/html.go create mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go create mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go create mode 100644 vendor/github.com/russross/blackfriday/v2/node.go create mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go create mode 100644 vendor/github.com/shopspring/decimal/.gitignore create mode 100644 vendor/github.com/shopspring/decimal/.travis.yml create mode 100644 vendor/github.com/shopspring/decimal/CHANGELOG.md create mode 100644 vendor/github.com/shopspring/decimal/LICENSE create mode 100644 vendor/github.com/shopspring/decimal/README.md create mode 100644 vendor/github.com/shopspring/decimal/decimal-go.go create mode 100644 vendor/github.com/shopspring/decimal/decimal.go create mode 100644 vendor/github.com/shopspring/decimal/rounding.go create mode 100644 vendor/github.com/spf13/cast/.gitignore create mode 100644 vendor/github.com/spf13/cast/LICENSE create mode 100644 vendor/github.com/spf13/cast/Makefile create mode 100644 vendor/github.com/spf13/cast/README.md create mode 100644 vendor/github.com/spf13/cast/cast.go create mode 100644 vendor/github.com/spf13/cast/caste.go create mode 100644 vendor/github.com/spf13/cast/timeformattype_string.go rename vendor/{k8s.io/helm/LICENSE => github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt} (99%) create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/pointer.go create mode 100644 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonreference/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonreference/reference.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/.gitignore create mode 100644 vendor/github.com/xeipuuv/gojsonschema/.travis.yml create mode 100644 vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonschema/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonschema/draft.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/errors.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/format_checkers.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/glide.yaml create mode 100644 vendor/github.com/xeipuuv/gojsonschema/internalLog.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonContext.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/locales.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/result.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaPool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaType.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/subSchema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/types.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/utils.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/validation.go create mode 100644 vendor/github.com/xlab/treeprint/.gitignore create mode 100644 vendor/github.com/xlab/treeprint/LICENSE create mode 100644 vendor/github.com/xlab/treeprint/README.md create mode 100644 
vendor/github.com/xlab/treeprint/helpers.go create mode 100644 vendor/github.com/xlab/treeprint/struct.go create mode 100644 vendor/github.com/xlab/treeprint/treeprint.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go create mode 100644 vendor/go.opentelemetry.io/otel/.codespellignore create mode 100644 vendor/go.opentelemetry.io/otel/.codespellrc create mode 100644 vendor/go.opentelemetry.io/otel/.gitattributes create mode 100644 vendor/go.opentelemetry.io/otel/.gitignore create mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules create mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml create mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore create mode 100644 vendor/go.opentelemetry.io/otel/.markdownlint.yaml create mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md create mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS create mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md create mode 100644 vendor/go.opentelemetry.io/otel/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/Makefile create mode 100644 vendor/go.opentelemetry.io/otel/README.md create mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md create mode 100644 vendor/go.opentelemetry.io/otel/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/otel/attribute/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/encoder.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/filter.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/iterator.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/key.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/kv.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/set.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/type_string.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/value.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/context.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/codes.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/doc.go create mode 100644 
vendor/go.opentelemetry.io/otel/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go create mode 100644 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh create mode 100644 vendor/go.opentelemetry.io/otel/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/context.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/instruments.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/propagator.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/state.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/internal_logging.go create mode 100644 vendor/go.opentelemetry.io/otel/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/config.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/trace_context.go create mode 100644 vendor/go.opentelemetry.io/otel/requirements.txt create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go create mode 100644 
vendor/go.opentelemetry.io/otel/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/trace/config.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/context.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/nonrecording.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracestate.go create mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh create mode 100644 vendor/go.opentelemetry.io/otel/version.go create mode 100644 vendor/go.opentelemetry.io/otel/versions.yaml create mode 100644 vendor/go.starlark.net/LICENSE create mode 100644 vendor/go.starlark.net/internal/compile/compile.go create mode 100644 vendor/go.starlark.net/internal/compile/serial.go create mode 100644 vendor/go.starlark.net/internal/spell/spell.go create mode 100644 vendor/go.starlark.net/resolve/binding.go create mode 100644 vendor/go.starlark.net/resolve/resolve.go create mode 100644 vendor/go.starlark.net/starlark/debug.go create mode 100644 vendor/go.starlark.net/starlark/empty.s create mode 100644 vendor/go.starlark.net/starlark/eval.go create mode 100644 vendor/go.starlark.net/starlark/hashtable.go create mode 100644 vendor/go.starlark.net/starlark/int.go create mode 100644 vendor/go.starlark.net/starlark/int_generic.go create mode 100644 vendor/go.starlark.net/starlark/int_posix64.go create mode 100644 vendor/go.starlark.net/starlark/interp.go create mode 100644 vendor/go.starlark.net/starlark/library.go create mode 100644 vendor/go.starlark.net/starlark/profile.go create mode 100644 vendor/go.starlark.net/starlark/unpack.go create mode 100644 vendor/go.starlark.net/starlark/value.go create mode 100644 vendor/go.starlark.net/starlarkstruct/module.go create mode 100644 vendor/go.starlark.net/starlarkstruct/struct.go create mode 100644 vendor/go.starlark.net/syntax/grammar.txt create mode 100644 vendor/go.starlark.net/syntax/parse.go create mode 100644 vendor/go.starlark.net/syntax/quote.go create mode 100644 vendor/go.starlark.net/syntax/scan.go create mode 100644 vendor/go.starlark.net/syntax/syntax.go create mode 100644 vendor/go.starlark.net/syntax/walk.go create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 
vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 vendor/golang.org/x/sync/errgroup/go120.go create mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go create mode 100644 vendor/helm.sh/helm/v3/LICENSE create mode 100644 vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go create mode 100644 vendor/helm.sh/helm/v3/internal/resolver/resolver.go rename vendor/{k8s.io/helm/pkg => helm.sh/helm/v3/internal}/sympath/walk.go (91%) create mode 100644 vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go rename vendor/{github.com/ghodss/yaml/LICENSE => helm.sh/helm/v3/internal/third_party/dep/fs/rename.go} (55%) create mode 100644 vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go create mode 100644 vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go create mode 100644 vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go create mode 100644 vendor/helm.sh/helm/v3/internal/tlsutil/tls.go create mode 100644 vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go create mode 100644 vendor/helm.sh/helm/v3/internal/version/version.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/action.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/dependency.go rename vendor/{k8s.io/helm/pkg/manifest => helm.sh/helm/v3/pkg/action}/doc.go (64%) create mode 100644 vendor/helm.sh/helm/v3/pkg/action/get.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/get_metadata.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/get_values.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/history.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/hooks.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/install.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/lazyclient.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/lint.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/list.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/package.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/pull.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/push.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/registry_login.go rename vendor/{k8s.io/helm/pkg/renderutil/doc.go => helm.sh/helm/v3/pkg/action/registry_logout.go} (53%) create mode 100644 vendor/helm.sh/helm/v3/pkg/action/release_testing.go create mode 100644 
vendor/helm.sh/helm/v3/pkg/action/resource_policy.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/rollback.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/show.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/status.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/uninstall.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/upgrade.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/validate.go create mode 100644 vendor/helm.sh/helm/v3/pkg/action/verify.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chart/chart.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chart/dependency.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chart/errors.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chart/file.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chart/loader/load.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chart/metadata.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go rename vendor/{k8s.io/helm => helm.sh/helm/v3}/pkg/chartutil/chartfile.go (55%) create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/create.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go rename vendor/{k8s.io/helm => helm.sh/helm/v3}/pkg/chartutil/doc.go (61%) create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/errors.go rename vendor/{k8s.io/helm => helm.sh/helm/v3}/pkg/chartutil/expand.go (82%) create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/save.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go create mode 100644 vendor/helm.sh/helm/v3/pkg/chartutil/values.go create mode 100644 vendor/helm.sh/helm/v3/pkg/cli/environment.go create mode 100644 vendor/helm.sh/helm/v3/pkg/cli/roundtripper.go create mode 100644 vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go create mode 100644 vendor/helm.sh/helm/v3/pkg/downloader/doc.go create mode 100644 vendor/helm.sh/helm/v3/pkg/downloader/manager.go rename vendor/{k8s.io/helm => helm.sh/helm/v3}/pkg/engine/doc.go (65%) create mode 100644 vendor/helm.sh/helm/v3/pkg/engine/engine.go create mode 100644 vendor/helm.sh/helm/v3/pkg/engine/files.go create mode 100644 vendor/helm.sh/helm/v3/pkg/engine/funcs.go create mode 100644 vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go rename vendor/{k8s.io/helm/pkg/manifest/types.go => helm.sh/helm/v3/pkg/getter/doc.go} (65%) create mode 100644 vendor/helm.sh/helm/v3/pkg/getter/getter.go create mode 100644 vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go create mode 100644 vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go create mode 100644 vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go create mode 100644 vendor/helm.sh/helm/v3/pkg/helmpath/home.go create mode 100644 vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go create mode 100644 vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go create mode 100644 vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go create mode 100644 vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go create mode 100644 vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go create mode 100644 vendor/helm.sh/helm/v3/pkg/ignore/doc.go rename vendor/{k8s.io/helm => helm.sh/helm/v3}/pkg/ignore/rules.go (94%) create mode 100644 
vendor/helm.sh/helm/v3/pkg/kube/client.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/config.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/converter.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/factory.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/interface.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/ready.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/resource.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/result.go create mode 100644 vendor/helm.sh/helm/v3/pkg/kube/wait.go create mode 100644 vendor/helm.sh/helm/v3/pkg/lint/lint.go create mode 100644 vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go create mode 100644 vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go create mode 100644 vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go create mode 100644 vendor/helm.sh/helm/v3/pkg/lint/rules/template.go create mode 100644 vendor/helm.sh/helm/v3/pkg/lint/rules/values.go rename vendor/{k8s.io/helm/pkg/chartutil/transform.go => helm.sh/helm/v3/pkg/lint/support/doc.go} (66%) create mode 100644 vendor/helm.sh/helm/v3/pkg/lint/support/message.go create mode 100644 vendor/helm.sh/helm/v3/pkg/plugin/hooks.go create mode 100644 vendor/helm.sh/helm/v3/pkg/plugin/plugin.go create mode 100644 vendor/helm.sh/helm/v3/pkg/postrender/exec.go create mode 100644 vendor/helm.sh/helm/v3/pkg/postrender/postrender.go create mode 100644 vendor/helm.sh/helm/v3/pkg/provenance/doc.go create mode 100644 vendor/helm.sh/helm/v3/pkg/provenance/sign.go create mode 100644 vendor/helm.sh/helm/v3/pkg/pusher/doc.go create mode 100644 vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go create mode 100644 vendor/helm.sh/helm/v3/pkg/pusher/pusher.go create mode 100644 vendor/helm.sh/helm/v3/pkg/registry/client.go create mode 100644 vendor/helm.sh/helm/v3/pkg/registry/constants.go create mode 100644 vendor/helm.sh/helm/v3/pkg/registry/util.go create mode 100644 vendor/helm.sh/helm/v3/pkg/release/hook.go create mode 100644 vendor/helm.sh/helm/v3/pkg/release/info.go create mode 100644 vendor/helm.sh/helm/v3/pkg/release/mock.go create mode 100644 vendor/helm.sh/helm/v3/pkg/release/release.go create mode 100644 vendor/helm.sh/helm/v3/pkg/release/responses.go create mode 100644 vendor/helm.sh/helm/v3/pkg/release/status.go rename vendor/{k8s.io/helm => helm.sh/helm/v3}/pkg/releaseutil/filter.go (89%) create mode 100644 vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go rename vendor/{k8s.io/helm => helm.sh/helm/v3}/pkg/releaseutil/manifest.go (70%) create mode 100644 vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go create mode 100644 vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go create mode 100644 vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go create mode 100644 vendor/helm.sh/helm/v3/pkg/repo/doc.go create mode 100644 vendor/helm.sh/helm/v3/pkg/repo/index.go create mode 100644 vendor/helm.sh/helm/v3/pkg/repo/repo.go create mode 100644 vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go create mode 100644 vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go create mode 100644 vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go create mode 100644 vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go create mode 100644 vendor/helm.sh/helm/v3/pkg/storage/driver/records.go create mode 100644 vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go create mode 100644 
vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go create mode 100644 vendor/helm.sh/helm/v3/pkg/storage/driver/util.go create mode 100644 vendor/helm.sh/helm/v3/pkg/storage/storage.go create mode 100644 vendor/helm.sh/helm/v3/pkg/time/time.go create mode 100644 vendor/helm.sh/helm/v3/pkg/uploader/chart_uploader.go create mode 100644 vendor/helm.sh/helm/v3/pkg/uploader/doc.go create mode 100644 vendor/k8s.io/api/admission/v1/doc.go create mode 100644 vendor/k8s.io/api/admission/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/admission/v1/generated.proto create mode 100644 vendor/k8s.io/api/admission/v1/register.go create mode 100644 vendor/k8s.io/api/admission/v1/types.go create mode 100644 vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/admission/v1beta1/register.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/types.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/LICENSE create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go create mode 100644 
vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/diff/diff.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/duration/duration.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/version/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/version/version.go create mode 100644 vendor/k8s.io/apiserver/LICENSE create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go create mode 100644 vendor/k8s.io/cli-runtime/LICENSE create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/client_config.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go create mode 100644 
vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericiooptions/io_options.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/discard.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/doc.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/interface.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/json.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/managedfields.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/name.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/template.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/terminal.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/warningprinter.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/yaml.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/builder.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/client.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/doc.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/fake.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/fallback_query_param_verifier.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/helper.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/kustomizevisitor.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/mapper.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier_v3.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/result.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/scheme.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/selector.go create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/visitor.go create mode 100644 vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go create mode 100644 vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go create mode 100644 vendor/k8s.io/client-go/discovery/cached/memory/memcache.go create mode 100644 vendor/k8s.io/client-go/openapi/cached/client.go create mode 100644 vendor/k8s.io/client-go/openapi/cached/groupversion.go create mode 100644 
vendor/k8s.io/client-go/openapi3/root.go create mode 100644 vendor/k8s.io/client-go/scale/client.go create mode 100644 vendor/k8s.io/client-go/scale/doc.go create mode 100644 vendor/k8s.io/client-go/scale/interfaces.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsint/doc.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsint/register.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/doc.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/register.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/types.go create mode 100644 vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/scale/util.go create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go create mode 100644 vendor/k8s.io/client-go/tools/cache/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/cache/controller.go create mode 100644 vendor/k8s.io/client-go/tools/cache/delta_fifo.go create mode 100644 vendor/k8s.io/client-go/tools/cache/doc.go create mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache.go create mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go create mode 100644 vendor/k8s.io/client-go/tools/cache/fake_custom_store.go create mode 100644 vendor/k8s.io/client-go/tools/cache/fifo.go create mode 100644 vendor/k8s.io/client-go/tools/cache/heap.go create mode 100644 vendor/k8s.io/client-go/tools/cache/index.go create mode 100644 vendor/k8s.io/client-go/tools/cache/listers.go create mode 100644 vendor/k8s.io/client-go/tools/cache/listwatch.go create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_cache.go create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_detector.go create mode 100644 
vendor/k8s.io/client-go/tools/cache/object-names.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_metrics.go create mode 100644 vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go create mode 100644 vendor/k8s.io/client-go/tools/cache/shared_informer.go create mode 100644 vendor/k8s.io/client-go/tools/cache/store.go create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go create mode 100644 vendor/k8s.io/client-go/tools/cache/thread_safe_store.go create mode 100644 vendor/k8s.io/client-go/tools/cache/undelta_store.go create mode 100644 vendor/k8s.io/client-go/tools/pager/pager.go create mode 100644 vendor/k8s.io/client-go/tools/watch/informerwatcher.go create mode 100644 vendor/k8s.io/client-go/tools/watch/retrywatcher.go create mode 100644 vendor/k8s.io/client-go/tools/watch/until.go create mode 100644 vendor/k8s.io/client-go/util/jsonpath/doc.go create mode 100644 vendor/k8s.io/client-go/util/jsonpath/jsonpath.go create mode 100644 vendor/k8s.io/client-go/util/jsonpath/node.go create mode 100644 vendor/k8s.io/client-go/util/jsonpath/parser.go create mode 100644 vendor/k8s.io/component-base/LICENSE create mode 100644 vendor/k8s.io/component-base/version/OWNERS create mode 100644 vendor/k8s.io/component-base/version/base.go create mode 100644 vendor/k8s.io/component-base/version/dynamic.go create mode 100644 vendor/k8s.io/component-base/version/version.go delete mode 100644 vendor/k8s.io/helm/pkg/chartutil/capabilities.go delete mode 100644 vendor/k8s.io/helm/pkg/chartutil/capabilities_versions_generated.go delete mode 100644 vendor/k8s.io/helm/pkg/chartutil/create.go delete mode 100644 vendor/k8s.io/helm/pkg/chartutil/files.go delete mode 100644 vendor/k8s.io/helm/pkg/chartutil/load.go delete mode 100644 vendor/k8s.io/helm/pkg/chartutil/requirements.go delete mode 100644 vendor/k8s.io/helm/pkg/chartutil/save.go delete mode 100644 vendor/k8s.io/helm/pkg/chartutil/values.go delete mode 100644 vendor/k8s.io/helm/pkg/engine/engine.go delete mode 100644 vendor/k8s.io/helm/pkg/helm/client.go delete mode 100644 vendor/k8s.io/helm/pkg/helm/fake.go delete mode 100644 vendor/k8s.io/helm/pkg/helm/interface.go delete mode 100644 vendor/k8s.io/helm/pkg/helm/option.go delete mode 100644 vendor/k8s.io/helm/pkg/ignore/doc.go delete mode 100644 vendor/k8s.io/helm/pkg/manifest/splitter.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/chart/chart.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/chart/config.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/chart/template.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/hook.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/info.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/release.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/status.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/test_run.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/release/test_suite.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/services/tiller.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/proto/hapi/version/version.pb.go delete mode 100644 vendor/k8s.io/helm/pkg/releaseutil/sorter.go delete 
mode 100644 vendor/k8s.io/helm/pkg/renderutil/deps.go delete mode 100644 vendor/k8s.io/helm/pkg/renderutil/render.go delete mode 100644 vendor/k8s.io/helm/pkg/storage/errors/errors.go delete mode 100644 vendor/k8s.io/helm/pkg/version/compatible.go delete mode 100644 vendor/k8s.io/helm/pkg/version/version.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go create mode 100644 vendor/k8s.io/kubectl/LICENSE create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/env_file.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/factory.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/override_options.go create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/printing.go create mode 100644 vendor/k8s.io/kubectl/pkg/scheme/install.go create mode 100644 vendor/k8s.io/kubectl/pkg/scheme/scheme.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/OWNERS create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/README.md create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/OWNERS create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po create mode 100644 
vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.mo create mode 100644 vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.po create mode 100644 vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/doc.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/markdown.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/templater.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/templates.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/resize.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/term.go create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/term_writer.go create mode 100644 vendor/k8s.io/kubectl/pkg/validation/schema.go create mode 100644 vendor/k8s.io/kubectl/pkg/validation/validation.go create mode 100644 vendor/k8s.io/utils/buffer/ring_growing.go create mode 100644 vendor/k8s.io/utils/exec/README.md create mode 100644 vendor/k8s.io/utils/exec/doc.go create mode 100644 vendor/k8s.io/utils/exec/exec.go create mode 100644 vendor/k8s.io/utils/exec/fixup_go118.go create mode 100644 vendor/k8s.io/utils/exec/fixup_go119.go create mode 100644 vendor/k8s.io/utils/pointer/OWNERS create mode 100644 vendor/k8s.io/utils/pointer/README.md create mode 100644 vendor/k8s.io/utils/pointer/pointer.go create mode 100644 vendor/k8s.io/utils/trace/README.md create mode 100644 vendor/k8s.io/utils/trace/trace.go create mode 100644 vendor/oras.land/oras-go/LICENSE create mode 100644 vendor/oras.land/oras-go/pkg/artifact/consts.go create mode 100644 vendor/oras.land/oras-go/pkg/auth/client.go create mode 100644 vendor/oras.land/oras-go/pkg/auth/client_opts.go create mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/client.go create mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/login.go create mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go create mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/logout.go create mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/resolver.go create mode 100644 vendor/oras.land/oras-go/pkg/content/consts.go create mode 100644 vendor/oras.land/oras-go/pkg/content/decompress.go create mode 100644 vendor/oras.land/oras-go/pkg/content/errors.go create mode 100644 vendor/oras.land/oras-go/pkg/content/file.go create mode 100644 vendor/oras.land/oras-go/pkg/content/gunzip.go create mode 100644 vendor/oras.land/oras-go/pkg/content/interface.go create mode 100644 vendor/oras.land/oras-go/pkg/content/iowriter.go 
create mode 100644 vendor/oras.land/oras-go/pkg/content/manifest.go create mode 100644 vendor/oras.land/oras-go/pkg/content/memory.go create mode 100644 vendor/oras.land/oras-go/pkg/content/multireader.go create mode 100644 vendor/oras.land/oras-go/pkg/content/multiwriter.go create mode 100644 vendor/oras.land/oras-go/pkg/content/oci.go create mode 100644 vendor/oras.land/oras-go/pkg/content/opts.go create mode 100644 vendor/oras.land/oras-go/pkg/content/passthrough.go create mode 100644 vendor/oras.land/oras-go/pkg/content/readerat.go create mode 100644 vendor/oras.land/oras-go/pkg/content/registry.go create mode 100644 vendor/oras.land/oras-go/pkg/content/untar.go create mode 100644 vendor/oras.land/oras-go/pkg/content/utils.go create mode 100644 vendor/oras.land/oras-go/pkg/context/context.go create mode 100644 vendor/oras.land/oras-go/pkg/context/logger.go create mode 100644 vendor/oras.land/oras-go/pkg/oras/copy.go create mode 100644 vendor/oras.land/oras-go/pkg/oras/errors.go create mode 100644 vendor/oras.land/oras-go/pkg/oras/opts.go create mode 100644 vendor/oras.land/oras-go/pkg/oras/provider.go create mode 100644 vendor/oras.land/oras-go/pkg/oras/store.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/reference.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/auth/cache.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/auth/challenge.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/auth/client.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/auth/credential.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/auth/scope.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/repository.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/url.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/utils.go create mode 100644 vendor/oras.land/oras-go/pkg/registry/repository.go create mode 100644 vendor/oras.land/oras-go/pkg/target/target.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/LICENSE create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/annotations/annotations.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/annotations/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/fieldspec.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/filtersutil/setters.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/fsslice/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/fsslice/fsslice.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/iampolicygenerator.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/imagetag/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/imagetag/imagetag.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/imagetag/legacy.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/labels/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/labels/labels.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/nameref/doc.go create 
mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/nameref/seqfilter.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/namespace/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/namespace/namespace.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/patchstrategicmerge.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/prefix/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/prefix/prefix.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/refvar/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/refvar/expand.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/refvar/refvar.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/replacement/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/replicacount/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/replicacount/replicacount.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/suffix/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/suffix/suffix.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/filters/valueadd/valueadd.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/hasher/hasher.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/image/image.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/accumulator/namereferencetransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/accumulator/refvartransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/AnnotationsTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/ConfigMapGenerator.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/HashTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/IAMPolicyGenerator.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/ImageTagTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/LabelTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchStrategicMergeTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/PrefixTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplicaCountTransformer.go create mode 100644 
vendor/sigs.k8s.io/kustomize/api/internal/builtins/SecretGenerator.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/SuffixTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/ValueAddTransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/builtins/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/generators/configmap.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/generators/secret.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/generators/utils.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/git/gitrunner.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/git/repospec.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/kusterr/yamlformaterror.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtinplugintype_string.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtins.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/fnplugin/fnplugin.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/plugins/utils/utils.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/target/errmissingkustomization.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/target/multitransformer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/utils/annotations.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/utils/makeResIds.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/utils/stringslice.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/internal/validate/fieldvalidator.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/metadatalabels.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go create mode 100644 
vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/templatelabels.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/general.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/krusty/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/krusty/options.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/kv/kv.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/loader/errors.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/loader/loader.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/provider/depprovider.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/resmap/factory.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/resmap/resmap.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/resource/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/resource/factory.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/resource/idset.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/resource/origin.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/resource/resource.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/builtinpluginloadingoptions_string.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/configmapargs.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/erronlybuiltinpluginsallowed.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/errunabletofind.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/generationbehavior.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/generatoroptions.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/iampolicygenerator.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/image.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/kustomization.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/kvpairsources.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/labels.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions_string.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/objectmeta.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/pair.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/patch.go create 
mode 100644 vendor/sigs.k8s.io/kustomize/api/types/patchstrategicmerge.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions_string.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/replacement.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/replacementfield.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/replica.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/secretargs.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/selector.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/sortoptions.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/typemeta.go create mode 100644 vendor/sigs.k8s.io/kustomize/api/types/var.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/LICENSE create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/comments/comments.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/errors/errors.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fieldmeta/fieldmeta.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/confirmeddir.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/file.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileinfo.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileondisk.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsnode.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_unix.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_windows.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/filesys/util.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/exec.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/functiontypes.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/runtimeutil.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/types.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/context.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/starlark.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE rename vendor/{k8s.io/helm/pkg/version/doc.go => sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE} (77%) create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go 
create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/util.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_reader.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_writer.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/filters.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/fmtr.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/grep.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/local.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge3.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/modify.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/stripcomments.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/kioutil/kioutil.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_reader.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_writer.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/kio/tree.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/openapi/Makefile create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/openapi/README.md create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/openapiinfo.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2/swagger.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2/swagger.pb create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.json create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/order/syncorder.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/resid/gvk.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/resid/resid.go create mode 100644 
vendor/sigs.k8s.io/kustomize/kyaml/runfn/runfn.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/sets/string.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/sets/stringlist.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/sliceutil/slice.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/utils/pathsplitter.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/const.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/datamap.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/doc.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/filters.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/copied.deepcopy.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/labels.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/selector.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection/operator.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors/errors.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/empty.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/string.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/errors.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/path.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/validation.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/mapnode.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective_string.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/merge3.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/visitor.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/order.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/schema/schema.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/util.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/associative_sequence.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/nonassociative_sequence.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/scalar.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/visitor.go create mode 100644 vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/walk.go diff --git a/.ci/integration_test b/.ci/integration_test index a4ac53db4..113014fe8 100755 --- a/.ci/integration_test +++ b/.ci/integration_test @@ -276,7 +276,7 @@ function run_test_as_processes() { setup_test_cluster echo "Starting integration tests..." 
- cd test/e2e/integration + cd test/integration set +e ginkgo -r -mod=vendor @@ -295,81 +295,6 @@ function run_test_as_processes() { fi } -function run_test_on_cluster() { - if ! [ -x "$(command -v ginkgo)" ]; then - setup_ginkgo - fi - - export TEST_ID=${STORAGE_CONTAINER} - if [ "$STORAGE_CONTAINER" == "" ]; then - setup_awscli - get_test_id - setup-aws-infrastructure - fi - - # TODO: change the etcd wrapper version to a newer version which spawns etcd v3.4.34 - export ETCD_WRAPPER_VERSION=${ETCD_WRAPPER_VERSION:-"v0.2.0"} - echo "etcd-wrapper version: ${ETCD_WRAPPER_VERSION}" - - export ETCDBR_VERSION=${ETCDBR_VERSION:-${ETCDBR_VER:-"v0.12.1"}} - echo "etcd-backup-restore version: ${ETCDBR_VERSION}" - - echo "Starting integration tests on k8s cluster." - - set +e - - if [ -r "$INTEGRATION_TEST_KUBECONFIG" ]; then - KUBECONFIG=$INTEGRATION_TEST_KUBECONFIG STORAGE_CONTAINER=$TEST_ID ginkgo -v -timeout=15m -mod=vendor test/e2e/integrationcluster - TEST_RESULT=$? - echo "Successfully completed all tests." - else - echo "Invalid kubeconfig for integration test $INTEGRATION_TEST_KUBECONFIG" - TEST_RESULT=255 - fi - - set -e - - echo "Done with integration test on k8s cluster." - - if [ "$STORAGE_CONTAINER" == "" ]; then - echo "Deleting test bucket..." - cleanup-aws-infrastructure - fi -} - -function run_test_on_tm() { - if [ "$ACCESS_KEY_ID" == "" ] || [ "$SECRET_ACCESS_KEY_B64" == "" ] || [ "$AWS_REGION" == "" ] ; then - echo "AWS S3 credentials unavailable. Exiting." - exit 1 - fi - export SECRET_ACCESS_KEY=`echo $SECRET_ACCESS_KEY_B64 | base64 -d` - export REGION=$AWS_REGION - - get_tm_test_id - export STORAGE_CONTAINER=$TEST_ID - export ETCDBR_VER=$EFFECTIVE_VERSION - - setup_awscli - write_aws_secret "${ACCESS_KEY_ID}" "${SECRET_ACCESS_KEY}" "${REGION}" - create_s3_bucket - - export INTEGRATION_TEST_KUBECONFIG=$TM_KUBECONFIG_PATH/shoot.config - echo "Starting integration tests on TM cluster $PROJECT_NAMESPACE/$SHOOT_NAME." - run_test_on_cluster - echo "Done with integration test on TM cluster." 
- cleanup-aws-infrastructure -} - -case $1 in - tm) - run_test_on_tm - ;; - cluster) - run_test_on_cluster - ;; - *) - run_test_as_processes - ;; -esac +run_test_as_processes exit $TEST_RESULT diff --git a/.gitignore b/.gitignore index d94ff83f9..ddf52b884 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ # Build Binaries bin +hack/tools/bin test/output test/e2e_test_data default.bkp* @@ -28,6 +29,9 @@ compctedsnap.bkp* .vscode .idea/ +# kubeconfig +hack/e2e-test/infrastructure/kind/kubeconfig + # developers workspace tmp dev diff --git a/Makefile b/Makefile index f41413fff..0598d0424 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 VERSION ?= $(shell cat VERSION) +REPO_ROOT := $(shell dirname "$(realpath $(lastword $(MAKEFILE_LIST)))") REGISTRY ?= europe-docker.pkg.dev/gardener-project/snapshots IMAGE_REPOSITORY := $(REGISTRY)/gardener/etcdbrctl IMAGE_TAG := $(VERSION) @@ -11,10 +12,11 @@ PLATFORM ?= $(shell docker info --format '{{.OSType}}/{{.Architecture BIN_DIR := bin COVERPROFILE := test/output/coverprofile.out IMG ?= ${IMAGE_REPOSITORY}:${IMAGE_TAG} +KUBECONFIG_PATH :=$(REPO_ROOT)/hack/e2e-test/infrastructure/kind/kubeconfig .DEFAULT_GOAL := build-local -include hack/tools.mk +include $(REPO_ROOT)/hack/tools.mk .PHONY: revendor revendor: @@ -26,6 +28,8 @@ update-dependencies: @env go get -u @make revendor +kind-up kind-down ci-e2e-kind ci-e2e-kind-aws ci-e2e-kind-azure ci-e2e-kind-gcp: export KUBECONFIG = $(KUBECONFIG_PATH) + .PHONY: build build: @.ci/build @@ -74,11 +78,47 @@ perf-regression-test: integration-test: @.ci/integration_test -.PHONY: integration-test-cluster -integration-test-cluster: - @.ci/integration_test cluster - .PHONY: show-coverage show-coverage: @if [ ! -f $(COVERPROFILE) ]; then echo "$(COVERPROFILE) is not yet built. 
Please run 'COVER=true make test'"; false; fi @go tool cover -html $(COVERPROFILE) + +.PHONY: test-e2e +test-e2e: $(KIND) $(HELM) $(GINKGO) $(KUBECTL) + @"$(REPO_ROOT)/hack/e2e-test/run-e2e-test.sh" $(PROVIDERS) $(KUBECONFIG) + +.PHONY: kind-up +kind-up: $(KIND) + ./hack/kind-up.sh + +.PHONY: kind-down +kind-down: $(KIND) + kind delete cluster --name etcdbr-e2e + +.PHONY: deploy-localstack +deploy-localstack: $(KUBECTL) + ./hack/deploy-localstack.sh $(KUBECONFIG) + +.PHONY: deploy-fakegcs +deploy-fakegcs: $(KUBECTL) + ./hack/deploy-fakegcs.sh $(KUBECONFIG) + +.PHONY: deploy-azurite +deploy-azurite: $(KUBECTL) + ./hack/deploy-azurite.sh $(KUBECONFIG) + +.PHONY: ci-e2e-kind +ci-e2e-kind: + ./hack/ci-e2e-kind.sh $(PROVIDERS) + +.PHONY: ci-e2e-kind-aws +ci-e2e-kind-aws: + ./hack/ci-e2e-kind.sh aws + +.PHONY: ci-e2e-kind-azure +ci-e2e-kind-azure: + ./hack/ci-e2e-kind.sh azure + +.PHONY: ci-e2e-kind-gcp +ci-e2e-kind-gcp: + ./hack/ci-e2e-kind.sh gcp diff --git a/chart/etcd-backup-restore/templates/etcd-backup-secret.yaml b/chart/etcd-backup-restore/templates/etcd-backup-secret.yaml index 4fecc4050..3e78009ce 100644 --- a/chart/etcd-backup-restore/templates/etcd-backup-secret.yaml +++ b/chart/etcd-backup-restore/templates/etcd-backup-secret.yaml @@ -14,7 +14,9 @@ data: region: {{ .Values.backup.s3.region | b64enc }} secretAccessKey: {{ .Values.backup.s3.secretAccessKey | b64enc }} accessKeyID: {{ .Values.backup.s3.accessKeyID | b64enc }} + {{- if .Values.backup.s3.s3ForcePathStyle }} s3ForcePathStyle: {{ .Values.backup.s3.s3ForcePathStyle | b64enc}} + {{- end }} {{- if .Values.backup.s3.endpoint }} endpoint: {{ .Values.backup.s3.endpoint | b64enc }} {{- end }} diff --git a/chart/etcd-backup-restore/templates/etcd-statefulset.yaml b/chart/etcd-backup-restore/templates/etcd-statefulset.yaml index b66774ea0..978b3fe08 100644 --- a/chart/etcd-backup-restore/templates/etcd-statefulset.yaml +++ b/chart/etcd-backup-restore/templates/etcd-statefulset.yaml @@ -350,7 +350,7 @@ spec: - name: local-backup hostPath: path: {{ .Values.backup.local.path }}/{{ .Values.backup.storageContainer }} - type: Directory + type: DirectoryOrCreate {{- else }} - name: etcd-backup secret: @@ -366,3 +366,4 @@ spec: resources: requests: storage: {{ .Values.storageCapacity }} + \ No newline at end of file diff --git a/docs/development/testing_and_dependencies.md b/docs/development/testing_and_dependencies.md index b08ce9d9d..2752fbe06 100644 --- a/docs/development/testing_and_dependencies.md +++ b/docs/development/testing_and_dependencies.md @@ -33,24 +33,15 @@ By default, we run tests without computing code coverage. To get the code covera ### Integration tests -You can also run integration tests for etcd-backup-restore on any given Kubernetes cluster. The test creates namespace `integration-test` on the cluster and deploys the [etcd-backup-restore helm chart](../../chart/etcd-backup-restore) which in turn deploys the required secrets, configmap, services and finally the statefulset which contains the pod that runs etcd and backup-restore as a sidecar. +Integration tests are process-based tests, i.e. they spin up `etcd` and `etcdbr` as processes and run the tests to check the functionality. The tests expect that the AWS credentials are present in the `$HOME/.aws` directory. Make sure to provide the correct credentials before running the tests.
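+A minimal sketch of how these credentials can be provided, assuming the tests read the standard shared AWS credentials/config files (all values below are placeholders, not working credentials):
+
+```sh
+# Hypothetical example: create the shared AWS credential files under $HOME/.aws.
+# Substitute real values; the region should match the region of the test bucket.
+mkdir -p "$HOME/.aws"
+cat > "$HOME/.aws/credentials" <<EOF
+[default]
+aws_access_key_id = <your-access-key-id>
+aws_secret_access_key = <your-secret-access-key>
+EOF
+cat > "$HOME/.aws/config" <<EOF
+[default]
+region = <your-region>
+EOF
+```
+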
```sh -make integration-test-cluster +make integration-test ``` -:warning: Prerequisite for this command is to set the following environment variables: - -- INTEGRATION_TEST_KUBECONFIG: kubeconfig to the cluster on which you wish to run the test - -- ETCD_WRAPPER_VERSION: optional, defaults to `v0.2.0` -- ETCDBR_VERSION: optional, defaults to `v0.12.1` -- ACCESS_KEY_ID: S3 credentials -- SECRET_ACCESS_KEY: S3 credentials -- REGION: S3 credentials -- STORAGE_CONTAINER: S3 bucket name +### E2E tests -If you have a working setup of [TestMachinery](https://github.com/gardener/test-infra), you can run the integration tests on a TM-generated cluster as well. +The e2e tests for etcd-backup-restore are cluster-based tests located in the `test/e2e` package, run on Kubernetes clusters using both emulators and real cloud providers (AWS, GCP, Azure). These tests deploy the etcd-backup-restore helm chart to verify full functionality in various environments. ### Performance regression tests diff --git a/docs/development/tests.md b/docs/development/tests.md index 730b7c8da..b6b83ac1d 100644 --- a/docs/development/tests.md +++ b/docs/development/tests.md @@ -18,6 +18,14 @@ Integration tests include the basic working of: - **data validation**: corrupted etcd data should be marked for deletion and restoration should be triggered - **restoration**: etcd data should be restored correctly from latest set of snapshots (full + deltas) +**Note**: The tests expect that the AWS credentials are present in the `$HOME/.aws` directory. Make sure to provide the correct credentials before running the tests. + +These tests can be run locally with both `etcd` and `etcdbr` running as processes. To execute the tests, run the following command: + +```sh +make integration-test +``` + ### Unit tests Each package within this repo contains its own set of unit tests to test the functionality of the methods contained within the packages. @@ -25,3 +33,79 @@ Each package within this repo contains its own set of unit tests to test the fun ### Performance regression tests These tests help check any regression in performance in terms of memory consumption and CPU utilization. + +### End-to-end tests + +The e2e tests for etcd-backup-restore are cluster-based tests located in the `test/e2e` package. These tests are run on a Kubernetes cluster and test the full functionality of etcd-backup-restore. The tests create a provider namespace on the cluster and deploy the [etcd-backup-restore helm chart](../../chart/etcd-backup-restore), which in turn deploys the required secrets, configmap, services and finally the statefulset which deploys the pod that runs etcd and backup-restore as a sidecar. + +These tests are set up to be run with both emulators and real cloud providers. The emulators can be used for local development and testing, as well as for CI jobs that test code changes when a PR is raised. The real cloud providers can be used to ensure that the changes work as expected in an actual cloud environment. + +Currently, the tests can be run using the following cloud providers: + +- AWS +- GCP +- Azure + +#### Running the e2e tests with the emulators + +##### On a Kind Cluster + +To run the e2e tests with the emulators, run the following command: + +```sh +make ci-e2e-kind PROVIDERS="{providers}" +``` + +By default, when no provider is specified, the tests are run using the AWS emulator, i.e. LocalStack, as the storage provider.
The provider can be specified as comma-separated values of the cloud providers mentioned above, in lowercase. For example, to run the tests on AWS and GCP, run the following command: + +```sh +make ci-e2e-kind PROVIDERS="aws,gcp" +``` + +##### On any Kubernetes Cluster + +The e2e tests can also be run on any other cluster by running the command shown below. + +> **_NOTE:_** If using emulators for e2e tests, make sure to port-forward the snapstore service to the local machine before running the tests. + +- For AWS: `kubectl port-forward service/localstack 4566:4566` +- For GCP: `kubectl port-forward service/fake-gcs 4443:4443 8000:8000` +- For Azure: `kubectl port-forward service/azurite 10000:10000` + +```sh +make test-e2e PROVIDERS="{providers}" KUBECONFIG="{path-to-kubeconfig}" +``` + +#### Running the e2e tests with real cloud providers + +To run the tests with real cloud providers, the required credentials need to be set as environment variables before running the tests. See the sections below for the required environment variables for each cloud provider. To test with multiple providers, set the required environment variables for each provider. + +##### Set the required environment variables + +- AWS: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_DEFAULT_REGION` + +- GCP: + - `GOOGLE_APPLICATION_CREDENTIALS` + - `GCP_PROJECT_ID` + +- Azure: + - `STORAGE_ACCOUNT` + - `STORAGE_KEY` + +##### Run the tests + +To run the tests with a kind cluster, run the following command: + +```sh +make ci-e2e-kind PROVIDERS="{providers}" +``` + +To run the tests on any other cluster, run the following command: + +```sh +make test-e2e PROVIDERS="{providers}" KUBECONFIG="{path-to-kubeconfig}" +``` + diff --git a/go.mod b/go.mod index 74d3da499..f9a78226e 100644 --- a/go.mod +++ b/go.mod @@ -25,10 +25,10 @@ require ( go.uber.org/mock v0.5.0 go.uber.org/zap v1.27.0 google.golang.org/api v0.126.0 + helm.sh/helm/v3 v3.14.0 k8s.io/api v0.29.2 k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.2 - k8s.io/helm v2.17.0+incompatible // haven't upgraded yet k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 sigs.k8s.io/controller-runtime v0.17.5 sigs.k8s.io/yaml v1.4.0 @@ -37,24 +37,45 @@ require ( cloud.google.com/go v0.110.6 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/BurntSushi/toml v0.3.1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/sprig v2.22.0+incompatible // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/containerd/containerd v1.7.11 // indirect + github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/cli v24.0.6+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.7+incompatible // indirect + github.com/docker/docker-credential-helpers v0.7.0 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-metrics v0.0.1 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -70,38 +91,73 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/s2a-go v0.1.4 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/huandu/xstrings v1.5.0 // indirect - github.com/imdario/mergo v0.3.11 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmoiron/sqlx v1.3.5 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.9 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + 
github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/rubenv/sql-migrate v1.5.2 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/satori/go.uuid v1.2.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect github.com/soheilhy/cmux v0.1.5 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect + github.com/xlab/treeprint v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.32.0 // indirect golang.org/x/net v0.34.0 // indirect @@ -122,8 +178,16 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.29.2 // indirect + k8s.io/apiserver v0.29.2 // indirect + k8s.io/cli-runtime v0.29.0 // indirect + k8s.io/component-base v0.29.2 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubectl v0.29.0 // indirect + oras.land/oras-go v1.2.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 83af65ddc..9d33f2f08 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,8 @@ cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= 
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= @@ -18,31 +20,66 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ= github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= 
+github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/aliyun/aliyun-oss-go-sdk v2.1.8+incompatible h1:hLUNPbx10wawWW7DeNExvTrlb90db3UnnNTFKHZEFhE= github.com/aliyun/aliyun-oss-go-sdk v2.1.8+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod 
h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -52,6 +89,14 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= +github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= @@ -60,11 +105,33 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbp github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= +github.com/docker/cli 
v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= +github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= @@ -75,17 +142,35 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/felixge/httpsnoop v1.0.3 
h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= @@ -94,11 +179,20 @@ github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2Kv github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= +github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= +github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= +github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= +github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= +github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod 
h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= @@ -126,6 +220,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -135,6 +231,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -149,6 +246,9 @@ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/Z github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -161,32 +261,56 @@ github.com/gophercloud/gophercloud v0.17.0 h1:BgVw0saxyeHWH5us/SQe1ltp0GRnytjmOL github.com/gophercloud/gophercloud v0.17.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gophercloud/utils v0.0.0-20200204043447-9864b6f1f12f h1:JCE3TtmNKlOUeXXdxLe1ipU7F0GOxcj+BenaG4uiz8Y= github.com/gophercloud/utils v0.0.0-20200204043447-9864b6f1f12f/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux 
v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= -github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx 
v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= @@ -202,43 +326,112 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= +github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= +github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= +github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= +github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= 
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= @@ -246,15 +439,27 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= +github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= 
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -262,6 +467,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -275,18 +481,43 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= 
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.etcd.io/etcd v0.0.0-20240911181550-c123b3ea3db3 h1:Gaf7SDXngBwsrA//y4Bc1ADj2z4dShObCgrMkA9ugKs= go.etcd.io/etcd v0.0.0-20240911181550-c123b3ea3db3/go.mod h1:7RpRtGwPHiQ17cfUdrf1zCSxUzcFQdhUpBT4TukySrM= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -298,12 +529,14 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= golang.org/x/crypto v0.32.0/go.mod 
h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -315,13 +548,17 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -332,6 +569,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -348,24 +586,35 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -374,6 +623,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -436,11 +686,13 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -449,8 +701,13 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +helm.sh/helm/v3 v3.14.0 h1:TaZIH6uOchn7L27ptwnnuHJiFrT/BsD4dFdp/HLT2nM= +helm.sh/helm/v3 v3.14.0/go.mod h1:2itvvDv2WSZXTllknfQo6j7u3VVgMAvm8POCDgYH424= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= @@ -459,20 +716,32 @@ k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2I k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/apiserver v0.29.2 h1:+Z9S0dSNr+CjnVXQePG8TcBWHr3Q7BmAr7NraHvsMiQ= +k8s.io/apiserver v0.29.2/go.mod h1:B0LieKVoyU7ykQvPFm7XSdIHaCHSzCzQWPFa5bqbeMQ= +k8s.io/cli-runtime v0.29.0 h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4= +k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk= k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= -k8s.io/helm v2.17.0+incompatible h1:Bpn6o1wKLYqKM3+Osh8e+1/K2g/GsQJ4F4yNF2+deao= -k8s.io/helm v2.17.0+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= +k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= 
 k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
 k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
 k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI=
+k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs=
 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI=
 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY=
+oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324=
 sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw=
 sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
+sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY=
+sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=
+sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/hack/ci-e2e-kind.sh b/hack/ci-e2e-kind.sh
new file mode 100755
index 000000000..da5b6546a
--- /dev/null
+++ b/hack/ci-e2e-kind.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+set -o errexit
+set -o nounset
+set -o pipefail
+
+export ETCDBR_IMAGE="europe-docker.pkg.dev/gardener-project/public/gardener/etcdbrctl"
+export ETCD_WRAPPER_IMAGE="europe-docker.pkg.dev/gardener-project/public/gardener/etcd-wrapper"
+export ETCDBR_VERSION="dev-latest" # This version gets built from the current branch, loaded into the kind cluster and used for the e2e tests
+export ETCD_WRAPPER_VERSION="latest"
+
+TEST_PROVIDERS=${1:-"aws"}
+
+make kind-up
+
+trap 'make kind-down' EXIT
+
+make PROVIDERS=${TEST_PROVIDERS} \
+    KUBECONFIG=${KUBECONFIG} \
+    STEPS="setup,test" \
+    test-e2e
diff --git a/hack/config/aws_config.sh b/hack/config/aws_config.sh
new file mode 100644
index 000000000..59b2c540b
--- /dev/null
+++ b/hack/config/aws_config.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Check if all required AWS environment variables are set
+if [[ -z "${AWS_ACCESS_KEY_ID:-}" || -z "${AWS_SECRET_ACCESS_KEY:-}" || -z "${AWS_DEFAULT_REGION:-}" ]]; then
+    if [[ -n "${AWS_ACCESS_KEY_ID:-}" || -n "${AWS_SECRET_ACCESS_KEY:-}" || -n "${AWS_DEFAULT_REGION:-}" ]]; then
+        echo "Error: Partial AWS environment variables set. Please set all or none of AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_DEFAULT_REGION."
+        exit 1
+    fi
+    # Use Localstack if none of the AWS environment variables are set
+    export LOCALSTACK_HOST="localstack.default:4566"
+    export AWS_ENDPOINT_URL_S3="http://localhost:4566"
+    export AWS_ACCESS_KEY_ID="ACCESSKEYAWSUSER"
+    export AWS_SECRET_ACCESS_KEY="sEcreTKey"
+    export AWS_DEFAULT_REGION="us-east-2"
+    export USE_LOCALSTACK=true
+else
+    export USE_LOCALSTACK=false
+fi
+
+export AWS_APPLICATION_CREDENTIALS="/tmp/.aws"
+
+rm -rf "${AWS_APPLICATION_CREDENTIALS}"
+mkdir -p "${AWS_APPLICATION_CREDENTIALS}"
+echo -n "${AWS_ACCESS_KEY_ID}" > "${AWS_APPLICATION_CREDENTIALS}/accessKeyID"
+echo -n "${AWS_SECRET_ACCESS_KEY}" > "${AWS_APPLICATION_CREDENTIALS}/secretAccessKey"
+echo -n "${AWS_DEFAULT_REGION}" > "${AWS_APPLICATION_CREDENTIALS}/region"
+
+if [[ "${USE_LOCALSTACK}" == "true" ]]; then
+    echo -n "${AWS_ENDPOINT_URL_S3}" > "${AWS_APPLICATION_CREDENTIALS}/endpoint"
+    echo -n "true" > "${AWS_APPLICATION_CREDENTIALS}/s3ForcePathStyle"
+fi
diff --git a/hack/config/azure_config.sh b/hack/config/azure_config.sh
new file mode 100644
index 000000000..8f645712a
--- /dev/null
+++ b/hack/config/azure_config.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Check if all required Azure environment variables are set
+if [[ -z "${STORAGE_ACCOUNT:-}" || -z "${STORAGE_KEY:-}" ]]; then
+    if [[ -n "${STORAGE_ACCOUNT:-}" || -n "${STORAGE_KEY:-}" ]]; then
+        echo "Error: Partial Azure environment variables set. Please set both or none of STORAGE_ACCOUNT and STORAGE_KEY."
+        exit 1
+    fi
+    # Use Azurite if none of the Azure environment variables are set
+    AZURITE_DOMAIN_LOCAL="localhost:10000"
+    export AZURITE_DOMAIN="azurite-service.default:10000"
+    export STORAGE_ACCOUNT="devstoreaccount1"
+    export STORAGE_KEY="Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
+    export AZURE_STORAGE_CONNECTION_STRING="DefaultEndpointsProtocol=http;AccountName=${STORAGE_ACCOUNT};AccountKey=${STORAGE_KEY};BlobEndpoint=http://${AZURITE_DOMAIN_LOCAL}/${STORAGE_ACCOUNT};"
+    export USE_AZURITE=true
+else
+    export USE_AZURITE=false
+fi
+
+export AZURE_APPLICATION_CREDENTIALS="/tmp/.azure"
+rm -rf "${AZURE_APPLICATION_CREDENTIALS}"
+mkdir -p "${AZURE_APPLICATION_CREDENTIALS}"
+echo -n "${STORAGE_ACCOUNT}" > "${AZURE_APPLICATION_CREDENTIALS}/storageAccount"
+echo -n "${STORAGE_KEY}" > "${AZURE_APPLICATION_CREDENTIALS}/storageKey"
+
+if [[ "${USE_AZURITE}" == "true" ]]; then
+    echo -n "${AZURITE_DOMAIN_LOCAL}" > "${AZURE_APPLICATION_CREDENTIALS}/domain"
+    echo -n "true" > "${AZURE_APPLICATION_CREDENTIALS}/emulatorEnabled"
+fi
diff --git a/hack/config/gcp_config.sh b/hack/config/gcp_config.sh
new file mode 100644
index 000000000..0d2d4bd0c
--- /dev/null
+++ b/hack/config/gcp_config.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Check if all required GCP environment variables are set
+if [[ -z "${GCP_PROJECT_ID:-}" || -z "${GOOGLE_APPLICATION_CREDENTIALS:-}" ]]; then
+    if [[ -n "${GCP_PROJECT_ID:-}" || -n "${GOOGLE_APPLICATION_CREDENTIALS:-}" ]]; then
+        echo "Error: Partial GCP environment variables set. Please set both or none of GCP_PROJECT_ID and GOOGLE_APPLICATION_CREDENTIALS."
+        exit 1
+    fi
+    # Use fake GCS emulator if none of the GCP environment variables are set
+    export GOOGLE_EMULATOR_HOST="fake-gcs.default:8000"
+    GCP_SERVICE_ACCOUNT_JSON="place-your-service-account-json-here"
+    FAKEGCS_LOCAL_URL="http://localhost:8000/storage/v1/"
+    USE_FAKEGCS=true
+else
+    USE_FAKEGCS=false
+fi
+
+if [[ "${USE_FAKEGCS}" == "true" ]]; then
+    rm -rf /tmp/.gcp
+    mkdir -p /tmp/.gcp
+    echo -n "${FAKEGCS_LOCAL_URL}" > /tmp/.gcp/storageAPIEndpoint
+    echo -n "true" > /tmp/.gcp/emulatorEnabled
+    export GOOGLE_APPLICATION_CREDENTIALS="/tmp/.gcp/service-account.json"
+    # Write the placeholder service account JSON so the exported credentials path actually exists (assumption: the fake GCS emulator does not validate credentials)
+    echo -n "${GCP_SERVICE_ACCOUNT_JSON}" > "${GOOGLE_APPLICATION_CREDENTIALS}"
+fi
diff --git a/hack/deploy-azurite.sh b/hack/deploy-azurite.sh
new file mode 100755
index 000000000..21f1b8aad
--- /dev/null
+++ b/hack/deploy-azurite.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBECONFIG=$1
+
+kubectl --kubeconfig=${KUBECONFIG} apply -f ./hack/e2e-test/infrastructure/azurite/azurite.yaml
+kubectl --kubeconfig=${KUBECONFIG} rollout status deploy/azurite
+kubectl --kubeconfig=${KUBECONFIG} wait --for=condition=ready pod -l app=azurite --timeout=240s
diff --git a/hack/deploy-fakegcs.sh b/hack/deploy-fakegcs.sh
new file mode 100755
index 000000000..226cb73c2
--- /dev/null
+++ b/hack/deploy-fakegcs.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBECONFIG=$1
+
+kubectl --kubeconfig=${KUBECONFIG} apply -f ./hack/e2e-test/infrastructure/fake-gcs-server/fake-gcs-server.yaml
+kubectl --kubeconfig=${KUBECONFIG} rollout status deploy/fake-gcs
+kubectl --kubeconfig=${KUBECONFIG} wait --for=condition=ready pod -l app=fake-gcs --timeout=240s
diff --git a/hack/deploy-localstack.sh b/hack/deploy-localstack.sh
new file mode 100755
index 000000000..a926f09e5
--- /dev/null
+++ b/hack/deploy-localstack.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBECONFIG=$1
+
+kubectl --kubeconfig=${KUBECONFIG} apply -f ./hack/e2e-test/infrastructure/localstack/localstack.yaml
+kubectl --kubeconfig=${KUBECONFIG} rollout status deploy/localstack
+kubectl --kubeconfig=${KUBECONFIG} wait --for=condition=ready pod -l app=localstack --timeout=240s
diff --git a/hack/e2e-test/infrastructure/azurite/azurite.yaml b/hack/e2e-test/infrastructure/azurite/azurite.yaml
new file mode 100644
index 000000000..a9582f49e
--- /dev/null
+++ b/hack/e2e-test/infrastructure/azurite/azurite.yaml
@@ -0,0 +1,41 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: azurite
+  name: azurite
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: azurite
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: azurite
+    spec:
+      containers:
+      - name: azurite
+        image: mcr.microsoft.com/azure-storage/azurite:latest
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 10000
+          hostPort: 10000
+        command: ["azurite-blob", "--blobHost", "0.0.0.0", "--blobPort", "10000", "--disableProductStyleUrl"]
+      restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: azurite
+  name: azurite-service
+spec:
+  ports:
+  - name: storage-azurite-blobs
+    port: 10000
+    targetPort: 10000
10000 + selector: + app: azurite diff --git a/hack/e2e-test/infrastructure/fake-gcs-server/fake-gcs-server.yaml b/hack/e2e-test/infrastructure/fake-gcs-server/fake-gcs-server.yaml new file mode 100644 index 000000000..348a61768 --- /dev/null +++ b/hack/e2e-test/infrastructure/fake-gcs-server/fake-gcs-server.yaml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: fake-gcs + name: fake-gcs +spec: + replicas: 1 + selector: + matchLabels: + app: fake-gcs + strategy: + type: Recreate + template: + metadata: + labels: + app: fake-gcs + spec: + containers: + - name: fake-gcs-server + image: fsouza/fake-gcs-server:1.47 + imagePullPolicy: IfNotPresent + args: ["-scheme", "both", "-public-host" , "fake-gcs.default:8000"] + ports: + - containerPort: 4443 + hostPort: 4443 + - containerPort: 8000 + hostPort: 8000 + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: fake-gcs + name: fake-gcs +spec: + ports: + - name: "4443-https" + port: 4443 + targetPort: 4443 + - name: "8000-http" + port: 8000 + targetPort: 8000 + selector: + app: fake-gcs diff --git a/hack/e2e-test/infrastructure/kind/cluster.yaml b/hack/e2e-test/infrastructure/kind/cluster.yaml new file mode 100644 index 000000000..58499261b --- /dev/null +++ b/hack/e2e-test/infrastructure/kind/cluster.yaml @@ -0,0 +1,29 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: kindest/node:v1.27.1 + extraPortMappings: + # For localstack + # port-forward the 4566 port from host machine to localstack svc running inside the kind cluster + - containerPort: 4566 + hostPort: 4566 + listenAddress: "127.0.0.1" + protocol: TCP + # For Fake-GCS + # port-forward the 8000 port from host machine to fake-gcs svc running inside the kind cluster + - containerPort: 8000 + hostPort: 8000 + listenAddress: "127.0.0.1" + protocol: TCP + # port-forward the 4443 port from host machine to fake-gcs svc running inside the kind cluster + - containerPort: 4443 + hostPort: 4443 + listenAddress: "127.0.0.1" + protocol: TCP + # For Azurite + # port-forward the 10000 port from host machine to azurite svc running inside the kind cluster + - containerPort: 10000 + hostPort: 10000 + listenAddress: "127.0.0.1" + protocol: TCP diff --git a/hack/e2e-test/infrastructure/localstack/localstack.yaml b/hack/e2e-test/infrastructure/localstack/localstack.yaml new file mode 100644 index 000000000..6dbdfeb42 --- /dev/null +++ b/hack/e2e-test/infrastructure/localstack/localstack.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: localstack + name: localstack +spec: + replicas: 1 + selector: + matchLabels: + app: localstack + strategy: + type: Recreate + template: + metadata: + labels: + app: localstack + spec: + containers: + - env: + - name: DATA_DIR + value: /tmp + - name: DEBUG + value: "1" + - name: SERVICES + value: s3 + image: localstack/localstack:s3-latest + imagePullPolicy: IfNotPresent + name: localstack-service + ports: + - containerPort: 4566 + hostPort: 4566 + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: localstack + name: localstack +spec: + ports: + - name: "4566" + port: 4566 + targetPort: 4566 + selector: + app: localstack diff --git a/hack/e2e-test/run-e2e-test.sh b/hack/e2e-test/run-e2e-test.sh new file mode 100755 index 000000000..c1a2a2000 --- /dev/null +++ b/hack/e2e-test/run-e2e-test.sh @@ -0,0 +1,451 @@ +#!/usr/bin/env bash +# +# SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company 
and Gardener contributors +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o nounset +set -o pipefail + +function containsElement () { + array=(${1//,/ }) + for i in "${!array[@]}" + do + if [[ "${array[i]}" == "$2" ]]; then + return 0 + fi + done + return 1 +} + +if [[ $(uname) == 'Darwin' ]]; then + READLINK_BIN="greadlink" +else + READLINK_BIN="readlink" +fi + +# SOURCE_PATH - path to component repository root directory. +export SOURCE_PATH="$(${READLINK_BIN} -f "$(dirname ${0})/..")" + +TEST_ID_PREFIX="etcdbr-e2e-test" + +export GOBIN="${SOURCE_PATH}/bin" +export PATH="${GOBIN}:${PATH}" +SOURCE_PATH=$(dirname "${SOURCE_PATH}") +cd "${SOURCE_PATH}" + +# container_deletion_trap deletes the containers for the providers upon receiving a trap signal in the test() function +function container_deletion_trap() { + delete_containers $INFRA_PROVIDERS + exit 1 +} + +# get_test_id generates a unique test id based on the git commit +function get_test_id() { + git_commit=`git show -s --format="%H"` + export TEST_ID=${TEST_ID_PREFIX}-${git_commit} + echo "Test id: ${TEST_ID}" +} + +# cleanup_aws_container deletes the container for the AWS provider +function cleanup_aws_container() { + echo "Cleaning up AWS infrastructure..." + echo "Deleting test bucket..." + result=$(aws s3api get-bucket-location --bucket ${TEST_ID} 2>&1 || true) + if [[ $result == *NoSuchBucket* ]]; then + echo "Bucket doesn't exist. Might have been deleted already." + return + fi + if ! aws s3 rb s3://${TEST_ID} --force; then + echo "Failed to delete AWS test bucket." + return 1 + fi + echo "Successfully deleted AWS test bucket." + unset LOCALSTACK_HOST AWS_DEFAULT_REGION + echo "Cleaning up AWS infrastructure completed." +} + +# cleanup_gcp_container deletes the container for the GCP provider +function cleanup_gcp_container() { + echo "Cleaning up GCS infrastructure..." + if [[ -z ${GOOGLE_EMULATOR_HOST:-""} ]]; then + echo "Deleting GCS bucket ${TEST_ID} ..." + if ! gsutil rm -r gs://"${TEST_ID}"/; then + echo "Failed to delete GCS bucket ${TEST_ID}." + return 1 + fi + echo "Successfully deleted GCS bucket ${TEST_ID}." + else + echo "Deleting GCS bucket ${TEST_ID} ..." + gsutil -o "Credentials:gs_json_host=127.0.0.1" -o "Credentials:gs_json_port=4443" -o "Boto:https_validate_certificates=False" rm -rf gs://${TEST_ID}/ + echo "Successfully deleted GCS bucket ${TEST_ID}." + fi + + unset GOOGLE_EMULATOR_HOST GOOGLE_APPLICATION_CREDENTIALS GCP_PROJECT_ID + echo "Cleaning up GCS infrastructure completed." +} + +# cleanup_azure_container deletes the container for the Azure provider +function cleanup_azure_container() { + echo "Cleaning up Azure infrastructure..." + echo "Deleting test container..." + if [[ -n ${AZURITE_DOMAIN:-""} ]]; then + if ! az storage container delete --connection-string "${AZURE_STORAGE_CONNECTION_STRING}" --name "${TEST_ID}"; then + echo "Failed to delete Azure test container." + return 1 + fi + else + if ! az storage container delete --account-name "${STORAGE_ACCOUNT}" --account-key "${STORAGE_KEY}" --name "${TEST_ID}"; then + echo "Failed to delete Azure test container." + return 1 + fi + fi + echo "Successfully deleted test container." + unset STORAGE_ACCOUNT STORAGE_KEY AZURITE_DOMAIN AZURE_STORAGE_CONNECTION_STRING + echo "Cleaning up Azure infrastructure completed." +} + +# setup_awscli installs the awscli +function setup_awscli() { + if ! $(which aws > /dev/null); then + echo "Installing awscli..." + if pip3 install --break-system-packages awscli; then + echo "Successfully installed awscli."
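+ # Note: --break-system-packages lets pip3 install into PEP 668 "externally managed" Python environments (e.g. recent Debian/Ubuntu base images), which otherwise refuse system-wide pip installs.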
+ else + echo "Failed to install awscli." + return 1 + fi + else + echo "awscli is already installed." + fi +} + +# create_aws_container creates the container for the AWS provider +function create_aws_container() { + echo "Setting up AWS infrastructure..." + echo "Creating test bucket..." + result=$(aws s3api get-bucket-location --bucket ${TEST_ID} 2>&1 || true) + if [[ $result == *NoSuchBucket* ]]; then + echo "Creating S3 bucket ${TEST_ID} in region ${AWS_DEFAULT_REGION}" + accumulated_exit_code=0 + aws s3api create-bucket --bucket ${TEST_ID} --region ${AWS_DEFAULT_REGION} --create-bucket-configuration LocationConstraint=${AWS_DEFAULT_REGION} --acl private || accumulated_exit_code=1 + # Block public access to the S3 bucket + aws s3api put-public-access-block --bucket ${TEST_ID} --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" || accumulated_exit_code=1 + # Deny non-HTTPS requests to the S3 bucket, except for localstack which is exposed on an HTTP endpoint + if [[ -z "${LOCALSTACK_HOST:-}" ]]; then + aws s3api put-bucket-policy --bucket ${TEST_ID} --policy "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::${TEST_ID}\",\"arn:aws:s3:::${TEST_ID}/*\"],\"Condition\":{\"Bool\":{\"aws:SecureTransport\":\"false\"},\"NumericLessThan\":{\"s3:TlsVersion\":\"1.2\"}}}]}" || accumulated_exit_code=1 + fi + if [ $accumulated_exit_code -ne 0 ]; then + echo "Failed to create AWS test bucket." + return 1 + fi + else + echo $result + if [[ $result != *${AWS_DEFAULT_REGION}* ]]; then + echo "Bucket already exists in a different region. Please delete the bucket and try again." + return 1 + fi + fi + echo "Successfully created test bucket." + echo "Setting up AWS infrastructure completed." +} + +# usage_aws prints the usage for the AWS provider +function usage_aws { + cat < /dev/null); then + echo "Installing gsutil..." + pip3 install gsutil + echo "Successfully installed gsutil." + else + echo "gsutil is already installed." + fi +} + +# create_gcp_container creates the container for the GCP provider +function create_gcp_container() { + echo "Setting up GCS infrastructure..." + echo "Creating test bucket..." + if [[ -z ${GOOGLE_EMULATOR_HOST:-""} ]]; then + gsutil mb "gs://${TEST_ID}" + else + gsutil -o "Credentials:gs_json_host=127.0.0.1" -o "Credentials:gs_json_port=4443" -o "Boto:https_validate_certificates=False" mb "gs://${TEST_ID}" + fi + echo "Successfully created test bucket." + echo "Setting up GCS infrastructure completed." +} + +# usage_gcp prints the usage for the GCP provider +function usage_gcp() { + cat < /dev/null); then + echo "gcloud is not installed. Please install gcloud and try again." + return 1 + fi + echo "Authorizing access to Gcloud..." + if gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}" --project="${GCP_PROJECT_ID}"; then + echo "Successfully authorized Gcloud." + else + echo "Failed to authorize Gcloud." + return 1 + fi +} + +# setup_gcp_e2e sets up the GCP infrastructure for the e2e tests including deploying fake-gcs and/or installing the gsutil +function setup_gcp_e2e() { + if [[ -n ${GOOGLE_EMULATOR_HOST:-""} ]]; then + make deploy-fakegcs $KUBECONFIG + else + echo "GOOGLE_EMULATOR_HOST is not set. Using real GCS infra for testing." 
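+ # A real GCS run requires both GCP_PROJECT_ID and GOOGLE_APPLICATION_CREDENTIALS; print the GCP usage hints if either is missing.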
+ if [[ -z ${GCP_PROJECT_ID:-""} ]] || [[ -z ${GOOGLE_APPLICATION_CREDENTIALS:-""} ]]; then + usage_gcp + fi + authorize_gcloud + fi + setup_gsutil +} + +# create_azure_container creates the container for the Azure provider +function create_azure_container() { + echo "Setting up Azure infrastructure..." + echo "Creating test container..." + if [[ -n ${AZURITE_DOMAIN:-""} ]]; then + az storage container create --connection-string "${AZURE_STORAGE_CONNECTION_STRING}" --name "${TEST_ID}" + else + az storage container create --account-name "${STORAGE_ACCOUNT}" --account-key "${STORAGE_KEY}" --name "${TEST_ID}" + fi + echo "Successfully created test container." + echo "Setting up Azure infrastructure completed." +} + +# setup_azcli installs the azcli +function setup_azcli() { + if ! $(which az > /dev/null); then + echo "Installing azure-cli..." + apt update + apt install -y curl + curl -sL https://aka.ms/InstallAzureCLIDeb | bash + echo "Successfully installed azure-cli." + else + echo "azure-cli is already installed." + fi +} + +# usage_azure prints the usage for the Azure provider +function usage_azure() { + cat <=", 1)) Expect(numDeltas).Should(Equal(0)) populatorStopCh := make(chan struct{}) populatorDoneCh := make(chan struct{}) recorderStopCh := make(chan struct{}) recorderResultCh := make(chan SnapListResult) - go runEtcdPopulatorWithoutError(logger, populatorStopCh, populatorDoneCh, kubeconfigPath, releaseNamespace, podName, "etcd") + go runEtcdPopulatorWithoutError(logger, populatorStopCh, populatorDoneCh, kubeconfigPath, releaseNamespace, podName, debugContainerName) go recordCumulativeSnapList(logger, recorderStopCh, recorderResultCh, store) time.Sleep(70 * time.Second) close(populatorStopCh) @@ -77,7 +82,8 @@ var _ = Describe("Backup", func() { populatorDoneCh := make(chan struct{}) recorderStopCh := make(chan struct{}) recorderResultCh := make(chan SnapListResult) - go runEtcdPopulatorWithoutError(logger, populatorStopCh, populatorDoneCh, kubeconfigPath, releaseNamespace, podName, "etcd") + Expect(kubeconfigPath).ShouldNot(BeEmpty()) + go runEtcdPopulatorWithoutError(logger, populatorStopCh, populatorDoneCh, kubeconfigPath, releaseNamespace, podName, debugContainerName) go recordCumulativeSnapList(logger, recorderStopCh, recorderResultCh, store) time.Sleep(190 * time.Second) close(populatorStopCh) @@ -100,25 +106,23 @@ var _ = Describe("Backup", func() { Describe("Defragmentor", func() { It("should defragment the data", func() { - cmd := "ETCDCTL_API=3 etcdctl put defrag-1 val-1" - stdout, stderr, err := executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd := "for i in $(seq 0 499); do ETCDCTL_API=3 ./nonroot/hacks/etcdctl put defrag-1 val-$i; done" + _, stderr, err := executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) - Expect(stdout).Should(Equal("OK")) - cmd = "ETCDCTL_API=3 etcdctl del defrag-1" - stdout, stderr, err = executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + oldDbSize, oldRevision, err := getDbSizeAndRevision(kubeconfigPath, releaseNamespace, podName, debugContainerName) Expect(err).ShouldNot(HaveOccurred()) - Expect(stderr).Should(BeEmpty()) + fmt.Printf("Old db size is %d, old revision is %d\n", oldDbSize, oldRevision) - oldDbSize, oldRevision, err := getDbSizeAndRevision(kubeconfigPath, releaseNamespace, podName, "etcd") + cmd = "ETCDCTL_API=3 ./nonroot/hacks/etcdctl defrag" + _, stderr, err =
executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) + Expect(stderr).Should(BeEmpty()) - logger.Infof("waiting for defragmentation to occur atleast once") - time.Sleep(70 * time.Second) - - newDbSize, newRevision, err := getDbSizeAndRevision(kubeconfigPath, releaseNamespace, podName, "etcd") + newDbSize, newRevision, err := getDbSizeAndRevision(kubeconfigPath, releaseNamespace, podName, debugContainerName) Expect(err).ShouldNot(HaveOccurred()) + fmt.Printf("New db size is %d, new revision is %d\n", newDbSize, newRevision) Expect(newRevision).Should(BeNumerically("==", oldRevision)) Expect(newDbSize).Should(BeNumerically("<", oldDbSize)) @@ -131,13 +135,13 @@ var _ = Describe("Backup", func() { Expect(err).ShouldNot(HaveOccurred()) oldFullSnapTimestamp := fullSnap.CreatedOn.Unix() - cmd := "ETCDCTL_API=3 etcdctl put full-1 val-1" - stdout, stderr, err := executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd := "ETCDCTL_API=3 ./nonroot/hacks/etcdctl put full-1 val-1" + stdout, stderr, err := executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) Expect(stdout).Should(Equal("OK")) - snap, err := triggerOnDemandSnapshot(kubeconfigPath, releaseNamespace, podName, "backup-restore", backupClientPort, "full") + snap, err := triggerOnDemandSnapshot(kubeconfigPath, releaseNamespace, podName, debugContainerName, backupClientPort, "full") Expect(err).ShouldNot(HaveOccurred()) Expect(snap).ShouldNot(BeNil()) Expect(snap.Kind).Should(Equal(brtypes.SnapshotKindFull)) @@ -157,13 +161,13 @@ var _ = Describe("Backup", func() { oldDeltaSnapTimestamp = deltaSnapList[len(deltaSnapList)-1].CreatedOn.Unix() } - cmd := "ETCDCTL_API=3 etcdctl put delta-1 val-1" - stdout, stderr, err := executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd := "ETCDCTL_API=3 ./nonroot/hacks/etcdctl put delta-1 val-1" + stdout, stderr, err := executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) Expect(stdout).Should(Equal("OK")) - snap, err := triggerOnDemandSnapshot(kubeconfigPath, releaseNamespace, podName, "backup-restore", backupClientPort, "delta") + snap, err := triggerOnDemandSnapshot(kubeconfigPath, releaseNamespace, podName, debugContainerName, backupClientPort, "delta") Expect(err).ShouldNot(HaveOccurred()) Expect(snap).ShouldNot(BeNil()) Expect(snap.Kind).Should(Equal(brtypes.SnapshotKindDelta)) @@ -182,7 +186,7 @@ var _ = Describe("Backup", func() { fullSnap, deltaSnapList, err := miscellaneous.GetLatestFullSnapshotAndDeltaSnapList(store) Expect(err).ShouldNot(HaveOccurred()) - latestSnapshots, err := getLatestSnapshots(kubeconfigPath, releaseNamespace, podName, "backup-restore", backupClientPort) + latestSnapshots, err := getLatestSnapshots(kubeconfigPath, releaseNamespace, podName, debugContainerName, backupClientPort) Expect(err).ShouldNot(HaveOccurred()) // since there is a chance of taking a scheduled full snapshot @@ -227,18 +231,18 @@ var _ = Describe("Backup", func() { var i int JustBeforeEach(func() { i++ - cmd := fmt.Sprintf("ETCDCTL_API=3 etcdctl put init-%d val-%d", i, i) - stdout, stderr, err := executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd := fmt.Sprintf("ETCDCTL_API=3 ./nonroot/hacks/etcdctl put init-%d val-%d", i, i) + 
stdout, stderr, err := executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) Expect(stdout).Should(Equal("OK")) - _, err = triggerOnDemandSnapshot(kubeconfigPath, releaseNamespace, podName, "backup-restore", backupClientPort, "delta") + _, err = triggerOnDemandSnapshot(kubeconfigPath, releaseNamespace, podName, debugContainerName, backupClientPort, "delta") Expect(err).ShouldNot(HaveOccurred()) i++ - cmd = fmt.Sprintf("ETCDCTL_API=3 etcdctl put init-%d val-%d", i, i) - stdout, stderr, err = executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd = fmt.Sprintf("ETCDCTL_API=3 ./nonroot/hacks/etcdctl put init-%d val-%d", i, i) + stdout, stderr, err = executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) Expect(stdout).Should(Equal("OK")) @@ -253,20 +257,26 @@ var _ = Describe("Backup", func() { err = waitForPodToBeRunning(typedClient, podName, releaseNamespace) Expect(err).ShouldNot(HaveOccurred()) logger.Infof("waiting for %s endpoint to be ready", etcdEndpointName) - err = waitForEndpointPortsToBeReady(typedClient, etcdEndpointName, releaseNamespace, []int32{etcdClientPort}) + err = waitForEndpointPortsToBeReady(typedClient, etcdEndpointName, releaseNamespace, []int32{etcdClientPort, backupClientPort}) + Expect(err).ShouldNot(HaveOccurred()) + logger.Infof("pod %s and endpoint %s ready", podName, etcdEndpointName) + + logger.Infof("Attaching ephemeral container %s to pod %s/%s\n", debugContainerName, releaseNamespace, podName) + err = attachEphemeralContainer(kubeconfigPath, releaseNamespace, podName, debugContainerName, "etcd") Expect(err).ShouldNot(HaveOccurred()) - logger.Infof("waiting for %s endpoint to be ready", backupEndpointName) - err = waitForEndpointPortsToBeReady(typedClient, backupEndpointName, releaseNamespace, []int32{backupClientPort}) + logger.Infof("Ephemeral container %s attached to pod %s/%s\n", debugContainerName, releaseNamespace, podName) + logger.Infof("Installing etcdctl on ephemeral container %s\n", debugContainerName) + err = installEtcdctl(kubeconfigPath, releaseNamespace, podName, debugContainerName) Expect(err).ShouldNot(HaveOccurred()) - logger.Infof("pod %s and endpoints %s, %s ready", podName, etcdEndpointName, backupEndpointName) + logger.Infof("etcdctl installed on ephemeral container %s\n", debugContainerName) cmd := fmt.Sprintf("curl http://localhost:%d/initialization/status -s", backupClientPort) - stdout, stderr, err := executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "backup-restore", cmd) + stdout, stderr, err := executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stdout).Should(Equal("New")) - cmd = "ETCDCTL_API=3 etcdctl get init-1" - stdout, stderr, err = executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd = "ETCDCTL_API=3 ./nonroot/hacks/etcdctl get init-1" + stdout, stderr, err = executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) lines := strings.Split(stdout, "\n") @@ -274,8 +284,8 @@ var _ = Describe("Backup", func() { Expect(lines[0]).Should(Equal("init-1")) Expect(lines[1]).Should(Equal("val-1")) - cmd = "ETCDCTL_API=3 etcdctl get init-2" - stdout, stderr, err = 
executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd = "ETCDCTL_API=3 ./nonroot/hacks/etcdctl get init-2" + stdout, stderr, err = executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) lines = strings.Split(stdout, "\n") @@ -287,8 +297,8 @@ Context("when data is corrupt", func() { It("should restore data from latest snapshot", func() { testDataCorruptionRestoration := func() { - cmd := "rm -rf /var/etcd/data/new.etcd/member" - stdout, stderr, err := executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "backup-restore", cmd) + cmd := "export ETCD_PID=$(pgrep etcd-wrapper) && rm -r proc/${ETCD_PID}/root/var/etcd/data/new.etcd/member" + stdout, stderr, err := executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) Expect(stdout).Should(BeEmpty()) @@ -302,20 +312,27 @@ var _ = Describe("Backup", func() { err = waitForPodToBeRunning(typedClient, podName, releaseNamespace) Expect(err).ShouldNot(HaveOccurred()) logger.Infof("waiting for %s endpoint to be ready", etcdEndpointName) - err = waitForEndpointPortsToBeReady(typedClient, etcdEndpointName, releaseNamespace, []int32{etcdClientPort}) + err = waitForEndpointPortsToBeReady(typedClient, etcdEndpointName, releaseNamespace, []int32{etcdClientPort, backupClientPort}) Expect(err).ShouldNot(HaveOccurred()) - logger.Infof("waiting for %s endpoint to be ready", backupEndpointName) - err = waitForEndpointPortsToBeReady(typedClient, backupEndpointName, releaseNamespace, []int32{backupClientPort}) + logger.Infof("pod %s and endpoint %s ready", podName, etcdEndpointName) + + logger.Infof("Attaching ephemeral container %s to pod %s/%s\n", debugContainerName, releaseNamespace, podName) + err = attachEphemeralContainer(kubeconfigPath, releaseNamespace, podName, debugContainerName, "etcd") + Expect(err).ShouldNot(HaveOccurred()) + logger.Infof("Ephemeral container %s attached to pod %s/%s\n", debugContainerName, releaseNamespace, podName) + logger.Infof("Installing etcdctl on ephemeral container %s\n", debugContainerName) + err = installEtcdctl(kubeconfigPath, releaseNamespace, podName, debugContainerName) Expect(err).ShouldNot(HaveOccurred()) - logger.Infof("pod %s and endpoints %s, %s ready", podName, etcdEndpointName, backupEndpointName) + logger.Infof("etcdctl installed on ephemeral container %s\n", debugContainerName) cmd = fmt.Sprintf("curl http://localhost:%d/initialization/status -s", backupClientPort) - stdout, stderr, err = executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "backup-restore", cmd) + stdout, stderr, err = executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) + Expect(stderr).Should(BeEmpty()) Expect(stdout).Should(Equal("New")) - cmd = "ETCDCTL_API=3 etcdctl get init-3" - stdout, stderr, err = executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd = "ETCDCTL_API=3 ./nonroot/hacks/etcdctl get init-3" + stdout, stderr, err = executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) lines := strings.Split(stdout, "\n") @@ -323,8 +340,8 @@ Expect(lines[0]).Should(Equal("init-3"))
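// init-3 was captured by the on-demand delta snapshot in JustBeforeEach, so it must survive the restore; init-4 (checked below) was written after that snapshot and must be gone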
Expect(lines[1]).Should(Equal("val-3")) - cmd = "ETCDCTL_API=3 etcdctl get init-4" - stdout, stderr, err = executeRemoteCommand(kubeconfigPath, releaseNamespace, podName, "etcd", cmd) + cmd = "ETCDCTL_API=3 ./nonroot/hacks/etcdctl get init-4" + stdout, stderr, err = executeContainerCommand(kubeconfigPath, releaseNamespace, podName, debugContainerName, cmd) Expect(err).ShouldNot(HaveOccurred()) Expect(stderr).Should(BeEmpty()) Expect(stdout).Should(BeEmpty()) diff --git a/test/e2e/integrationcluster/utils.go b/test/e2e/integrationcluster/utils.go deleted file mode 100644 index 616273e40..000000000 --- a/test/e2e/integrationcluster/utils.go +++ /dev/null @@ -1,460 +0,0 @@ -// SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors -// -// SPDX-License-Identifier: Apache-2.0 - -package integrationcluster - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "os" - "strings" - "time" - - "github.com/gardener/etcd-backup-restore/pkg/snapstore" - brtypes "github.com/gardener/etcd-backup-restore/pkg/types" - "sigs.k8s.io/yaml" - - "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/remotecommand" - "k8s.io/helm/pkg/chartutil" - "k8s.io/helm/pkg/helm" -) - -// EndpointStatus stores result from output of etcdctl endpoint status command -type EndpointStatus []struct { - Endpoint string `json:"Endpoint"` - Status struct { - Header struct { - ClusterID int64 `json:"cluster_id"` - MemberID int64 `json:"member_id"` - Revision int64 `json:"revision"` - RaftTerm int64 `json:"raft_term"` - } `json:"header"` - Version string `json:"version"` - DbSize int64 `json:"dbSize"` - Leader int64 `json:"leader"` - RaftIndex int64 `json:"raftIndex"` - RaftTerm int64 `json:"raftTerm"` - } `json:"Status"` -} - -// SnapListResult stores the snaplist and any associated error -type SnapListResult struct { - Snapshots brtypes.SnapList `json:"snapshots"` - Error error `json:"error"` -} - -// LatestSnapshots stores the result from output of /snapshot/latest http call -type LatestSnapshots struct { - FullSnapshot *brtypes.Snapshot `json:"fullSnapshot"` - DeltaSnapshots []*brtypes.Snapshot `json:"deltaSnapshots"` -} - -func getEnvOrError(key string) (string, error) { - if value, ok := os.LookupEnv(key); ok { - return value, nil - } - - return "", fmt.Errorf("environment variable not found: %s", key) -} - -func getKubeconfig(kubeconfigPath string) (*rest.Config, error) { - return clientcmd.BuildConfigFromFlags("", kubeconfigPath) -} - -func getEnvOrFallback(key, fallback string) string { - if value, ok := os.LookupEnv(key); ok { - return value - } - - return fallback -} - -func getKubernetesTypedClient(logger *logrus.Logger, kubeconfigPath string) (*kubernetes.Clientset, error) { - config, err := getKubeconfig(kubeconfigPath) - if err != nil { - return nil, err - } - - typedClient, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, err - } - - return typedClient, nil -} - -func helmDeployChart(logger *logrus.Logger, timeout time.Duration, kubeconfigPath, chartPath, releaseName, releaseNamespace string, chartValues map[string]interface{}, waitForResourcesToBeReady bool) error { - client := helm.NewClient(helm.Host(kubeconfigPath)) - - chartRequested, err := chartutil.Load(chartPath) - if err != nil { - return 
err - } - - valuesToRender, err := yaml.Marshal(chartValues) - if err != nil { - return fmt.Errorf("failed to marshal values: %v", err) - } - - // Install the chart - _, err = client.InstallReleaseFromChart( - chartRequested, - releaseNamespace, - helm.ValueOverrides(valuesToRender), - helm.ReleaseName(releaseName), - helm.InstallWait(waitForResourcesToBeReady), - ) - - if err != nil { - logger.Infof("helm chart installation to release '%s' failed", releaseName) - return err - } - - logger.Infof("successfully installed release '%s'", releaseName) - return nil -} - -func waitForNamespaceToBeCreated(k8sClient *kubernetes.Clientset, name string) error { - var ( - err error - ) - namespaceClient := k8sClient.CoreV1().Namespaces() - - for { - _, err = namespaceClient.Get(context.TODO(), name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - continue - } else if err != nil { - return err - } - break - } - - return nil -} - -func waitForNamespaceToBeDeleted(k8sClient *kubernetes.Clientset, name string) error { - var ( - err error - ) - namespaceClient := k8sClient.CoreV1().Namespaces() - - for { - _, err = namespaceClient.Get(context.TODO(), name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - break - } else if err != nil { - return err - } - } - - return nil -} - -func waitForPodToBeRunning(k8sClient *kubernetes.Clientset, name, namespace string) error { - var ( - pod *corev1.Pod - err error - ) - podClient := k8sClient.CoreV1().Pods(namespace) - - for { - pod, err = podClient.Get(context.TODO(), name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - continue - } else if err != nil { - return err - } - if pod.Status.Phase == corev1.PodRunning { - break - } - } - - return nil -} - -func waitForEndpointPortsToBeReady(k8sClient *kubernetes.Clientset, name, namespace string, ports []int32) error { - var ( - endpoint *corev1.Endpoints - err error - ) - endpointClient := k8sClient.CoreV1().Endpoints(namespace) - portsMap := make(map[int32]bool) - -OuterLoop: - for { - endpoint, err = endpointClient.Get(context.TODO(), name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - continue - } else if err != nil { - return err - } - for _, port := range ports { - portsMap[port] = false - } - for _, subset := range endpoint.Subsets { - if len(subset.NotReadyAddresses) != 0 { - continue OuterLoop - } - for _, port := range subset.Ports { - if _, ok := portsMap[port.Port]; ok { - portsMap[port.Port] = true - } - } - } - for _, isFound := range portsMap { - if isFound == false { - continue OuterLoop - } - } - break - } - - return nil -} - -// getRemoteCommandExecutor builds and returns a remote command Executor from the given command on the specified container -func getRemoteCommandExecutor(kubeconfigPath, namespace, podName, containerName, command string) (remotecommand.Executor, error) { - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - return nil, err - } - - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, err - } - coreClient := clientset.CoreV1() - - req := coreClient.RESTClient(). - Post(). - Namespace(namespace). - Resource("pods"). - Name(podName). - SubResource("exec"). 
- VersionedParams(&corev1.PodExecOptions{ - Container: containerName, - Command: []string{ - "/bin/sh", - "-c", - command, - }, - Stdin: false, - Stdout: true, - Stderr: true, - }, scheme.ParameterCodec) - - exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) - if err != nil { - return nil, err - } - - return exec, nil -} - -// executeRemoteCommand executes a remote shell command on the given pod and container -// and returns the stdout and stderr logs -func executeRemoteCommand(kubeconfigPath, namespace, podName, containerName, command string) (string, string, error) { - exec, err := getRemoteCommandExecutor(kubeconfigPath, namespace, podName, containerName, command) - if err != nil { - return "", "", err - } - - buf := &bytes.Buffer{} - errBuf := &bytes.Buffer{} - err = exec.Stream(remotecommand.StreamOptions{ - Stdout: buf, - Stderr: errBuf, - }) - if err != nil { - return "", "", err - } - - return strings.TrimSpace(buf.String()), strings.TrimSpace(errBuf.String()), nil -} - -func getSnapstore(storageProvider, storageContainer, storePrefix string) (brtypes.SnapStore, error) { - snapstoreConfig := &brtypes.SnapstoreConfig{ - Provider: storageProvider, - Container: storageContainer, - Prefix: storePrefix, - } - store, err := snapstore.GetSnapstore(snapstoreConfig) - if err != nil { - return nil, err - } - - return store, nil -} - -func getTotalFullAndDeltaSnapshotCounts(snapList brtypes.SnapList) (int, int) { - var numFulls, numDeltas int - for _, snap := range snapList { - if snap.Kind == brtypes.SnapshotKindFull { - numFulls++ - } else if snap.Kind == brtypes.SnapshotKindDelta { - numDeltas++ - } - } - return numFulls, numDeltas -} - -func purgeSnapstore(store brtypes.SnapStore) error { - snapList, err := store.List(false) - if err != nil { - return err - } - - for _, snap := range snapList { - err = store.Delete(*snap) - if err != nil { - return err - } - } - - return nil -} - -func runEtcdPopulatorWithoutError(logger *logrus.Logger, stopCh <-chan struct{}, doneCh chan<- struct{}, kubeconfigPath, namespace, podName, containerName string) { - var ( - cmd string - stdout string - stderr string - err error - i = 1 - ) - - for { - select { - case <-stopCh: - logger.Infof("populator received stop channel. Populated etcd till (foo-%d, bar-%d). Exiting", i-1, i-1) - close(doneCh) - return - default: - cmd = fmt.Sprintf("ETCDCTL_API=3 etcdctl put foo-%d bar-%d", i, i) - stdout, stderr, err = executeRemoteCommand(kubeconfigPath, namespace, podName, containerName, cmd) - if err != nil || stderr != "" || stdout != "OK" { - logger.Infof("failed to put (foo-%d, bar-%d). Retrying", i, i) - continue - } - if i%10 == 0 { - logger.Infof("put (foo-%d, bar-%d) successful", i, i) - cmd = fmt.Sprintf("ETCDCTL_API=3 etcdctl del foo-%d", i) - stdout, stderr, err = executeRemoteCommand(kubeconfigPath, namespace, podName, containerName, cmd) - if err != nil || stderr != "" || stdout != "1" { - logger.Infof("failed to put (foo-%d, bar-%d). Retrying", i, i) - continue - } - } - i++ - time.Sleep(time.Duration(time.Millisecond * 500)) - } - } -} - -func recordCumulativeSnapList(logger *logrus.Logger, stopCh <-chan struct{}, resultCh chan<- SnapListResult, store brtypes.SnapStore) { - var snapListResult SnapListResult - - for { - select { - case <-stopCh: - logger.Infof("recorder received stop channel. 
Exiting") - resultCh <- snapListResult - return - default: - snaps, err := store.List(false) - if err != nil { - snapListResult.Error = err - resultCh <- snapListResult - return - } - - for _, snap := range snaps { - if snapshotInSnapList(snap, snapListResult.Snapshots) { - continue - } - snapListResult.Snapshots = append(snapListResult.Snapshots, snap) - } - - time.Sleep(time.Duration(time.Second)) - } - } -} - -func snapshotInSnapList(snapshot *brtypes.Snapshot, snapList brtypes.SnapList) bool { - for _, snap := range snapList { - if snap.SnapName == snapshot.SnapName { - return true - } - } - return false -} - -func getDbSizeAndRevision(kubeconfigPath, namespace, podName, containerName string) (int64, int64, error) { - var endpointStatus EndpointStatus - cmd := "ETCDCTL_API=3 etcdctl endpoint status -w=json" - stdout, stderr, err := executeRemoteCommand(kubeconfigPath, namespace, podName, "etcd", cmd) - if err != nil { - return 0, 0, err - } - if stderr != "" { - return 0, 0, fmt.Errorf("stderr: %s", stderr) - } - if err = json.Unmarshal([]byte(stdout), &endpointStatus); err != nil { - return 0, 0, err - } - dbSize := endpointStatus[0].Status.DbSize - revision := endpointStatus[0].Status.Header.Revision - - return dbSize, revision, nil -} - -func triggerOnDemandSnapshot(kubeconfigPath, namespace, podName, containerName string, port int, snapshotKind string) (*brtypes.Snapshot, error) { - var snapshot *brtypes.Snapshot - cmd := fmt.Sprintf("curl http://localhost:%d/snapshot/%s -s", port, snapshotKind) - stdout, _, err := executeRemoteCommand(kubeconfigPath, namespace, podName, containerName, cmd) - if err != nil { - return nil, err - } - if stdout == "" { - return nil, fmt.Errorf("empty response") - } - if err = json.Unmarshal([]byte(stdout), &snapshot); err != nil { - return nil, err - } - - return snapshot, nil -} - -func getLatestSnapshots(kubeconfigPath, namespace, podName, containerName string, port int) (*LatestSnapshots, error) { - var latestSnapshots *LatestSnapshots - cmd := fmt.Sprintf("curl http://localhost:%d/snapshot/latest -s", port) - stdout, _, err := executeRemoteCommand(kubeconfigPath, namespace, podName, containerName, cmd) - if err != nil { - return nil, err - } - if stdout == "" { - return nil, fmt.Errorf("empty response") - } - if err = json.Unmarshal([]byte(stdout), &latestSnapshots); err != nil { - return nil, err - } - - latestSnapshots.FullSnapshot.CreatedOn = latestSnapshots.FullSnapshot.CreatedOn.Truncate(time.Second) - for _, snap := range latestSnapshots.DeltaSnapshots { - snap.CreatedOn = snap.CreatedOn.Truncate(time.Second) - } - - return latestSnapshots, nil -} diff --git a/test/e2e/integrationcluster/integrationcluster_suite_test.go b/test/e2e/integrationcluster_suite_test.go similarity index 51% rename from test/e2e/integrationcluster/integrationcluster_suite_test.go rename to test/e2e/integrationcluster_suite_test.go index 08cedce58..0c90164aa 100644 --- a/test/e2e/integrationcluster/integrationcluster_suite_test.go +++ b/test/e2e/integrationcluster_suite_test.go @@ -2,72 +2,79 @@ // // SPDX-License-Identifier: Apache-2.0 -package integrationcluster +package e2e import ( "context" "fmt" "path" + "strings" "testing" "time" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" ) const ( - envSourcePath = "SOURCE_PATH" - envKubeconfigPath = "KUBECONFIG" - envEtcdVersion = "ETCD_VERSION" - envEtcdbrVersion = "ETCDBR_VERSION" - helmChartBasePath = "chart/etcd-backup-restore" - releaseName = "main" - releaseNamespace = "integration-test" - envStorageContainer = "STORAGE_CONTAINER" - envAccessKeyID = "ACCESS_KEY_ID" - envSecretAccessKey = "SECRET_ACCESS_KEY" - envRegion = "REGION" - timeoutPeriod = time.Second * 60 - etcdClientPort = 2379 - backupClientPort = 8080 + envSourcePath = "SOURCE_PATH" + envKubeconfigPath = "KUBECONFIG" + envEtcdbrVersion = "ETCDBR_VERSION" + envEtcdbrImage = "ETCDBR_IMAGE" + envEtcdWrapperVersion = "ETCD_WRAPPER_VERSION" + envEtcdWrapperImage = "ETCD_WRAPPER_IMAGE" + helmChartBasePath = "chart/etcd-backup-restore" + releaseNamePrefix = "main" + releaseNamespacePrefix = "e2e-test" + envStorageContainer = "STORAGE_CONTAINER" + envProvider = "PROVIDER" + debugContainerName = "debug" + timeoutPeriod = 5 * time.Minute + etcdClientPort = 2379 + backupClientPort = 8080 ) var ( logger = logrus.New() - err error kubeconfigPath string typedClient *kubernetes.Clientset storageContainer string storageProvider string - storePrefix = fmt.Sprintf("%s-etcd", releaseName) + releaseNamespace string + providerName string + storePrefix = fmt.Sprintf("%s-etcd", releaseNamePrefix) ) -func TestIntegrationcluster(t *testing.T) { +func TestE2E(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Integrationcluster Suite") + RunSpecs(t, "E2E Suite") } var _ = BeforeSuite(func() { var ( - etcdVersion = getEnvAndExpectNoError(envEtcdVersion) - etcdbrVersion = getEnvAndExpectNoError(envEtcdbrVersion) - region = getEnvAndExpectNoError(envRegion) - secretAccessKey = getEnvAndExpectNoError(envSecretAccessKey) - accessKeyID = getEnvAndExpectNoError(envAccessKeyID) - chartValues map[string]interface{} + etcdbrVersion = getEnvAndExpectNoError(envEtcdbrVersion) + etcdbrImage = getEnvAndExpectNoError(envEtcdbrImage) + etcdWrapperVersion = getEnvAndExpectNoError(envEtcdWrapperVersion) + etcdWrapperImage = getEnvAndExpectNoError(envEtcdWrapperImage) + chartPath = path.Join(getEnvOrFallback(envSourcePath, "."), helmChartBasePath) + chartValues map[string]interface{} ) - - kubeconfigPath = getEnvAndExpectNoError(envKubeconfigPath) storageContainer = getEnvAndExpectNoError(envStorageContainer) - storageProvider = "S3" // support for only S3 buckets at the moment + kubeconfigPath = getEnvAndExpectNoError(envKubeconfigPath) + Expect(kubeconfigPath).ShouldNot(BeEmpty()) + + providerName = getEnvAndExpectNoError(envProvider) + provider, err := getProvider(providerName) + Expect(err).ShouldNot(HaveOccurred()) + Expect(provider).ShouldNot(BeNil()) + logger.Infof("provider: %s", provider.name) + storageProvider = provider.storage.provider store, err := getSnapstore(storageProvider, storageContainer, storePrefix) Expect(err).ShouldNot(HaveOccurred()) @@ -76,10 +83,10 @@ var _ = BeforeSuite(func() { Expect(err).ShouldNot(HaveOccurred()) logger.Printf("setting up k8s client using KUBECONFIG=%s", kubeconfigPath) - typedClient, err = getKubernetesTypedClient(logger, kubeconfigPath) + typedClient, err = getKubernetesTypedClient(kubeconfigPath) Expect(err).NotTo(HaveOccurred()) namespacesClient := typedClient.CoreV1().Namespaces() - + releaseNamespace = fmt.Sprintf("%s-%s", releaseNamespacePrefix, providerName) logger.Infof("creating namespace %s", releaseNamespace) ns, err := namespacesClient.Create(context.TODO(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ 
@@ -93,39 +100,36 @@ var _ = BeforeSuite(func() { logger.Infof("created namespace %s", ns.Name) - chartPath := path.Join(getEnvOrFallback(envSourcePath, "."), helmChartBasePath) - - logger.Infof("deploying helm chart at %s", chartPath) - chartValues = map[string]interface{}{ "images": map[string]interface{}{ - "etcd": map[string]interface{}{ - "tag": etcdVersion, + "etcdWrapper": map[string]interface{}{ + "repository": etcdWrapperImage, + "tag": etcdWrapperVersion, + "pullPolicy": "IfNotPresent", }, "etcdBackupRestore": map[string]interface{}{ - "tag": etcdbrVersion, + "repository": etcdbrImage, + "tag": etcdbrVersion, + "pullPolicy": "Never", }, }, "backup": map[string]interface{}{ - "storageProvider": storageProvider, - "storageContainer": storageContainer, - "s3": map[string]interface{}{ - "region": region, - "secretAccessKey": secretAccessKey, - "accessKeyID": accessKeyID, - }, - "schedule": "*/1 * * * *", - "deltaSnapshotPeriod": "10s", - "maxBackups": 2, - "garbageCollectionPolicy": "LimitBased", - "garbageCollectionPeriod": "30s", - "defragmentationSchedule": "*/1 * * * *", + "storageProvider": storageProvider, + "storageContainer": storageContainer, + strings.ToLower(storageProvider): provider.storage.secretData, + "schedule": "*/1 * * * *", + "deltaSnapshotPeriod": "10s", + "maxBackups": 2, + "garbageCollectionPolicy": "LimitBased", + "garbageCollectionPeriod": "30s", + "defragmentationSchedule": "*/1 * * * *", }, } - - err = helmDeployChart(logger, timeoutPeriod, kubeconfigPath, chartPath, releaseName, releaseNamespace, chartValues, true) + err = helmDeployChart(logger, timeoutPeriod, kubeconfigPath, chartPath, fmt.Sprintf("%s-%s", releaseNamePrefix, providerName), releaseNamespace, chartValues, true) + if err != nil { + fmt.Printf("error deploying helm chart for provider %s: %v\n", providerName, err) + } Expect(err).NotTo(HaveOccurred()) - logger.Infof("deployed helm chart to release '%s'", releaseName) }) var _ = AfterSuite(func() { @@ -133,12 +137,10 @@ var _ = AfterSuite(func() { Expect(err).NotTo(HaveOccurred()) logger.Printf("setting up k8s client using KUBECONFIG=%s", kubeconfigPath) - typedClient, err = getKubernetesTypedClient(logger, kubeconfigPath) + typedClient, err = getKubernetesTypedClient(kubeconfigPath) Expect(err).NotTo(HaveOccurred()) - namespacesClient := typedClient.CoreV1().Namespaces() - logger.Infof("deleting namespace %s", releaseNamespace) - namespacesClient = typedClient.CoreV1().Namespaces() + namespacesClient := typedClient.CoreV1().Namespaces() err = namespacesClient.Delete(context.TODO(), releaseNamespace, metav1.DeleteOptions{}) if apierrors.IsNotFound(err) { logger.Infof("namespace %s does not exist", releaseNamespace) diff --git a/test/e2e/utils.go b/test/e2e/utils.go new file mode 100644 index 000000000..4cb88ca0b --- /dev/null +++ b/test/e2e/utils.go @@ -0,0 +1,620 @@ +// SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/gardener/etcd-backup-restore/pkg/snapstore" + brtypes "github.com/gardener/etcd-backup-restore/pkg/types" + + "github.com/sirupsen/logrus" + "go.etcd.io/etcd/etcdserver/etcdserverpb" + helmaction "helm.sh/helm/v3/pkg/action" + helmchart "helm.sh/helm/v3/pkg/chart" + helmloader "helm.sh/helm/v3/pkg/chart/loader" + helmkube "helm.sh/helm/v3/pkg/kube" + corev1 "k8s.io/api/core/v1" + apierrors 
"k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/remotecommand" +) + +// SnapListResult stores the snaplist and any associated error +type SnapListResult struct { + Snapshots brtypes.SnapList `json:"snapshots"` + Error error `json:"error"` +} + +// LatestSnapshots stores the result from output of /snapshot/latest http call +type LatestSnapshots struct { + FullSnapshot *brtypes.Snapshot `json:"fullSnapshot"` + DeltaSnapshots []*brtypes.Snapshot `json:"deltaSnapshots"` +} + +const ( + providerAWS = "aws" + providerAzure = "azure" + providerGCP = "gcp" + + // AWS related variables + envAWSCreds = "AWS_APPLICATION_CREDENTIALS" // #nosec G101 -- This is not a hardcoded password, but only the environment variable to the credentials. + envLocalStackHost = "LOCALSTACK_HOST" + fileS3AccessKeyID = "accessKeyID" + fileS3SecretAccessKey = "secretAccessKey" + fileS3Region = "region" + // GCP related variables + envGoogleCreds = "GOOGLE_APPLICATION_CREDENTIALS" // #nosec G101 -- This is not a hardcoded password, but only the environment variable to the credentials. + envFakeGCSHost = "GOOGLE_EMULATOR_HOST" + // Azure related variables + envAzureCreds = "AZURE_APPLICATION_CREDENTIALS" // #nosec G101 -- This is not a hardcoded password, but only the environment variable to the credentials. + envAzuriteDomain = "AZURITE_DOMAIN" + fileAzureStorageAccount = "storageAccount" + fileAzureStorageKey = "storageKey" +) + +type storage struct { + provider string + secretData map[string]interface{} +} + +type testProvider struct { + name string + storage *storage +} + +func getProvider(providerName string) (testProvider, error) { + var provider testProvider + switch providerName { + case providerAWS: + awsCredsDir := getEnvOrFallback(envAWSCreds, "") + if awsCredsDir == "" { + return testProvider{}, fmt.Errorf("aws credential directory not found") + } + s3AccessKeyID := readCredentialFileFromDir(awsCredsDir, fileS3AccessKeyID) + s3SecretAccessKey := readCredentialFileFromDir(awsCredsDir, fileS3SecretAccessKey) + s3Region := readCredentialFileFromDir(awsCredsDir, fileS3Region) + secretData := map[string]interface{}{ + "accessKeyID": s3AccessKeyID, + "secretAccessKey": s3SecretAccessKey, + "region": s3Region, + } + localStackHost := getEnvOrFallback(envLocalStackHost, "") + if localStackHost != "" { + secretData["endpoint"] = fmt.Sprintf("http://%s", localStackHost) + secretData["s3ForcePathStyle"] = "true" + } + provider = testProvider{ + name: "aws", + storage: &storage{ + provider: brtypes.SnapstoreProviderS3, + secretData: secretData, + }, + } + case providerGCP: + secretData := map[string]interface{}{} + fakeGCSHost := getEnvOrFallback(envFakeGCSHost, "") + if fakeGCSHost == "" { + file, err := os.ReadFile(os.Getenv(envGoogleCreds)) + if err != nil { + return testProvider{}, err + } + jsonStr := string(file) + secretData["serviceAccountJson"] = jsonStr + } else { + secretData["emulatorEnabled"] = "true" + secretData["storageAPIEndpoint"] = fmt.Sprintf("http://%s/storage/v1/", fakeGCSHost) + secretData["serviceAccountJson"] = "dummy-service-account-json" + } + provider = testProvider{ + name: "gcp", + storage: &storage{ + provider: brtypes.SnapstoreProviderGCS, + secretData: secretData, + }, + } + case providerAzure: + azureCredsDir := getEnvOrFallback(envAzureCreds, "") + if azureCredsDir == "" { + return testProvider{}, 
fmt.Errorf("azure credential directory not found") + } + azureStorageAccount := readCredentialFileFromDir(azureCredsDir, fileAzureStorageAccount) + azureStorageKey := readCredentialFileFromDir(azureCredsDir, fileAzureStorageKey) + secretData := map[string]interface{}{ + "storageAccount": azureStorageAccount, + "storageKey": azureStorageKey, + } + azuriteDomain := getEnvOrFallback(envAzuriteDomain, "") + if azuriteDomain != "" { + secretData["emulatorEnabled"] = "true" + secretData["domain"] = azuriteDomain + } + provider = testProvider{ + name: "azure", + storage: &storage{ + provider: brtypes.SnapstoreProviderABS, + secretData: secretData, + }, + } + default: + return testProvider{}, fmt.Errorf("unsupported provider: %s", providerName) + } + return provider, nil +} + +func getEnvOrError(key string) (string, error) { + if value, ok := os.LookupEnv(key); ok { + return value, nil + } + + return "", fmt.Errorf("environment variable not found: %s", key) +} + +func getKubeconfig(kubeconfigPath string) (*rest.Config, error) { + return clientcmd.BuildConfigFromFlags("", kubeconfigPath) +} + +func getEnvOrFallback(key, fallback string) string { + if value, ok := os.LookupEnv(key); ok { + return value + } + return fallback +} + +func readCredentialFileFromDir(dir string, filename string) string { + file, err := os.ReadFile(fmt.Sprintf("%s/%s", dir, filename)) + if err != nil { + return "" + } + return string(file) +} + +func getKubernetesTypedClient(kubeconfigPath string) (*kubernetes.Clientset, error) { + config, err := getKubeconfig(kubeconfigPath) + if err != nil { + return nil, err + } + + typedClient, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + + return typedClient, nil +} + +func helmDeployChart(logger *logrus.Logger, timeout time.Duration, kubeconfigPath, chartPath, releaseName, releaseNamespace string, chartValues map[string]interface{}, waitForResourcesToBeReady bool) error { + var ( + chart *helmchart.Chart + err error + ) + + chart, err = helmloader.Load(chartPath) + if err != nil { + return err + } + + actionConfig := new(helmaction.Configuration) + if err = actionConfig.Init(helmkube.GetConfig(kubeconfigPath, "", releaseNamespace), releaseNamespace, "configmap", func(format string, v ...interface{}) {}); err != nil { + return err + } + + installClient := helmaction.NewInstall(actionConfig) + installClient.Namespace = releaseNamespace + installClient.ReleaseName = releaseName + installClient.Wait = waitForResourcesToBeReady + installClient.Timeout = timeout + + rel, err := installClient.Run(chart, chartValues) + if err != nil { + logger.Infof("failed to install chart %s: %v", chartPath, err) + return err + } + + logger.Infof("successfully installed chart %s with release name %s", chartPath, rel.Name) + return nil +} + +func waitForNamespaceToBeCreated(k8sClient *kubernetes.Clientset, name string) error { + var ( + err error + ) + namespaceClient := k8sClient.CoreV1().Namespaces() + + for { + _, err = namespaceClient.Get(context.TODO(), name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + continue + } else if err != nil { + return err + } + break + } + + return nil +} + +func waitForNamespaceToBeDeleted(k8sClient *kubernetes.Clientset, name string) error { + var ( + err error + ) + namespaceClient := k8sClient.CoreV1().Namespaces() + + for { + _, err = namespaceClient.Get(context.TODO(), name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + break + } else if err != nil { + return err + } + } + + return nil +} + +func 
waitForPodToBeRunning(k8sClient *kubernetes.Clientset, name, namespace string) error { + var ( + pod *corev1.Pod + err error + ) + podClient := k8sClient.CoreV1().Pods(namespace) + + for { + pod, err = podClient.Get(context.TODO(), name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + continue + } else if err != nil { + return err + } + if pod.Status.Phase == corev1.PodRunning { + break + } + } + + return nil +} + +func waitForEndpointPortsToBeReady(k8sClient *kubernetes.Clientset, name, namespace string, ports []int32) error { + var ( + endpoint *corev1.Endpoints + err error + ) + endpointClient := k8sClient.CoreV1().Endpoints(namespace) + portsMap := make(map[int32]bool) + +OuterLoop: + for { + endpoint, err = endpointClient.Get(context.TODO(), name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + continue + } else if err != nil { + return err + } + for _, port := range ports { + portsMap[port] = false + } + for _, subset := range endpoint.Subsets { + if len(subset.NotReadyAddresses) != 0 { + continue OuterLoop + } + for _, port := range subset.Ports { + if _, ok := portsMap[port.Port]; ok { + portsMap[port.Port] = true + } + } + } + for _, isFound := range portsMap { + if !isFound { + continue OuterLoop + } + } + break + } + + return nil +} + +func installEtcdctl(kubeconfigPath, namespace, podName, containerName string) error { + cmd := "cd /nonroot/hacks && ./install_etcdctl" + _, _, err := executeContainerCommand(kubeconfigPath, namespace, podName, containerName, cmd) + if err != nil { + return err + } + return nil +} + +// attachEphemeralContainer attaches the debug container to the etcd pod, targeting the etcd container, to debug and/or execute commands +func attachEphemeralContainer(kubeconfigPath, namespace, podName, debugContainerName, targetContainer string) error { + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return err + } + + clientSet, err := kubernetes.NewForConfig(config) + if err != nil { + return err + } + + pod, err := clientSet.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return err + } + + if pod.Spec.EphemeralContainers == nil { + pod.Spec.EphemeralContainers = make([]corev1.EphemeralContainer, 0) + } else { + for _, container := range pod.Spec.EphemeralContainers { + if container.Name == debugContainerName { + // ephemeral container already exists + for _, ephContainerStatus := range pod.Status.EphemeralContainerStatuses { + if ephContainerStatus.Name == debugContainerName { + if ephContainerStatus.State.Running != nil { + return nil + } + } + } + return fmt.Errorf("ephemeral container %s already exists, but not running", debugContainerName) + } + } + } + + log.Printf("Creating ephemeral container %s\n", debugContainerName) + pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, corev1.EphemeralContainer{ + TargetContainerName: targetContainer, + EphemeralContainerCommon: corev1.EphemeralContainerCommon{ + Image: "europe-docker.pkg.dev/sap-se-gcp-k8s-delivery/releases-public/eu_gcr_io/gardener-project/gardener/ops-toolbelt:0.25.0-mod1", + Name: debugContainerName, + ImagePullPolicy: "IfNotPresent", + Command: []string{"/bin/bash", "-c", "--"}, + Args: []string{"trap : TERM INT; sleep 9999999999d & wait"}, + }, + }) + _, err = clientSet.CoreV1().Pods(namespace).UpdateEphemeralContainers(context.Background(), podName, pod, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("unable to create ephemeral container: %v", err) +
} + time.Sleep(1 * time.Minute) + // re-fetch the pod, since the object retrieved before UpdateEphemeralContainers does not carry the new container's status + pod, err = clientSet.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return err + } + for _, ephContainerStatus := range pod.Status.EphemeralContainerStatuses { + if ephContainerStatus.Name == debugContainerName { + if ephContainerStatus.State.Running == nil { + return fmt.Errorf("ephemeral container %s not running", debugContainerName) + } + } + } + return nil +} + +// executeContainerCommand executes a remote shell command on the given pod and container +// and returns the stdout and stderr logs +func executeContainerCommand(kubeconfigPath, podNamespace, podName, containerName, command string) (string, string, error) { + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return "", "", err + } + + clientSet, err := kubernetes.NewForConfig(config) + if err != nil { + return "", "", err + } + req := clientSet.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(podName). + Namespace(podNamespace). + SubResource("exec"). + Param("container", containerName). + VersionedParams(&corev1.PodExecOptions{ + Command: []string{"/bin/bash", "-c", command}, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: false, + }, scheme.ParameterCodec) + + exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) + if err != nil { + return "", "", err + } + + var stdout, stderr bytes.Buffer + err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{ + Stdout: &stdout, + Stderr: &stderr, + }) + if err != nil { + return "", "", err + } + + return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), nil +} + +func getSnapstore(storageProvider, storageContainer, storePrefix string) (brtypes.SnapStore, error) { + snapstoreConfig := &brtypes.SnapstoreConfig{ + Provider: storageProvider, + Container: storageContainer, + Prefix: storePrefix, + } + store, err := snapstore.GetSnapstore(snapstoreConfig) + if err != nil { + return nil, err + } + + return store, nil +} + +func getTotalFullAndDeltaSnapshotCounts(snapList brtypes.SnapList) (int, int) { + var numFulls, numDeltas int + for _, snap := range snapList { + if snap.Kind == brtypes.SnapshotKindFull { + numFulls++ + } else if snap.Kind == brtypes.SnapshotKindDelta { + numDeltas++ + } + } + return numFulls, numDeltas +} + +func purgeSnapstore(store brtypes.SnapStore) error { + snapList, err := store.List(false) + if err != nil { + return err + } + + for _, snap := range snapList { + err = store.Delete(*snap) + if err != nil { + return err + } + } + + return nil +} + +func runEtcdPopulatorWithoutError(logger *logrus.Logger, stopCh <-chan struct{}, doneCh chan<- struct{}, kubeconfigPath, namespace, podName, containerName string) { + var ( + cmd string + stdout string + stderr string + err error + i = 1 + ) + for { + select { + case <-stopCh: + logger.Infof("populator received stop channel. Populated etcd till (foo-%d, bar-%d). Exiting", i-1, i-1) + close(doneCh) + return + default: + cmd = fmt.Sprintf("ETCDCTL_API=3 ./nonroot/hacks/etcdctl put foo-%d bar-%d", i, i) + stdout, stderr, err = executeContainerCommand(kubeconfigPath, namespace, podName, containerName, cmd) + if err != nil || stderr != "" || stdout != "OK" { + logger.Infof("failed to put (foo-%d, bar-%d).
Retrying", i, i) + continue + } + if i%10 == 0 { + logger.Infof("put (foo-%d, bar-%d) successful", i, i) + cmd = fmt.Sprintf("ETCDCTL_API=3 ./nonroot/hacks/etcdctl del foo-%d", i) + stdout, stderr, err = executeContainerCommand(kubeconfigPath, namespace, podName, containerName, cmd) + if err != nil || stderr != "" || stdout != "1" { + logger.Infof("failed to delete (foo-%d, bar-%d). Retrying", i, i) + continue + } + } + i++ + time.Sleep(time.Duration(time.Millisecond * 500)) + } + } +} + +func recordCumulativeSnapList(logger *logrus.Logger, stopCh <-chan struct{}, resultCh chan<- SnapListResult, store brtypes.SnapStore) { + var snapListResult SnapListResult + + for { + select { + case <-stopCh: + logger.Infof("recorder received stop channel. Exiting") + resultCh <- snapListResult + return + default: + snaps, err := store.List(false) + if err != nil { + snapListResult.Error = err + resultCh <- snapListResult + return + } + + for _, snap := range snaps { + if snapshotInSnapList(snap, snapListResult.Snapshots) { + continue + } + snapListResult.Snapshots = append(snapListResult.Snapshots, snap) + } + + time.Sleep(time.Duration(time.Second)) + } + } +} + +func snapshotInSnapList(snapshot *brtypes.Snapshot, snapList brtypes.SnapList) bool { + for _, snap := range snapList { + if snap.SnapName == snapshot.SnapName { + return true + } + } + return false +} + +type endpointStatusResult struct { + Endpoint string `json:"Endpoint"` + Status etcdserverpb.StatusResponse `json:"Status"` +} + +func getDbSizeAndRevision(kubeconfigPath, namespace, podName, containerName string) (int64, int64, error) { + cmd := "ETCDCTL_API=3 ./nonroot/hacks/etcdctl endpoint status -w=json" + stdout, stderr, err := executeContainerCommand(kubeconfigPath, namespace, podName, containerName, cmd) + if err != nil { + return 0, 0, err + } + if stderr != "" { + return 0, 0, fmt.Errorf("stderr: %s", stderr) + } + var endpointStatusResults []endpointStatusResult + if err = json.Unmarshal([]byte(stdout), &endpointStatusResults); err != nil { + return 0, 0, err + } + statusResponse := endpointStatusResults[0].Status + dbSize := statusResponse.DbSize + revision := statusResponse.Header.Revision + return dbSize, revision, nil +} + +func triggerOnDemandSnapshot(kubeconfigPath, namespace, podName, containerName string, port int, snapshotKind string) (*brtypes.Snapshot, error) { + var snapshot *brtypes.Snapshot + cmd := fmt.Sprintf("curl http://localhost:%d/snapshot/%s -s", port, snapshotKind) + stdout, _, err := executeContainerCommand(kubeconfigPath, namespace, podName, containerName, cmd) + if err != nil { + return nil, err + } + if stdout == "" { + return nil, fmt.Errorf("empty response") + } + if err = json.Unmarshal([]byte(stdout), &snapshot); err != nil { + return nil, err + } + + return snapshot, nil +} + +func getLatestSnapshots(kubeconfigPath, namespace, podName, containerName string, port int) (*LatestSnapshots, error) { + var latestSnapshots *LatestSnapshots + cmd := fmt.Sprintf("curl http://localhost:%d/snapshot/latest -s", port) + stdout, _, err := executeContainerCommand(kubeconfigPath, namespace, podName, containerName, cmd) + if err != nil { + return nil, err + } + if stdout == "" { + return nil, fmt.Errorf("empty response") + } + if err = json.Unmarshal([]byte(stdout), &latestSnapshots); err != nil { + return nil, err + } + + latestSnapshots.FullSnapshot.CreatedOn = latestSnapshots.FullSnapshot.CreatedOn.Truncate(time.Second) + for _, snap := range latestSnapshots.DeltaSnapshots { + snap.CreatedOn = 
snap.CreatedOn.Truncate(time.Second) + } + + return latestSnapshots, nil +} diff --git a/test/e2e/integration/cloud_backup_test.go b/test/integration/cloud_backup_test.go similarity index 99% rename from test/e2e/integration/cloud_backup_test.go rename to test/integration/cloud_backup_test.go index 04b6d60bf..41dffdfa7 100644 --- a/test/e2e/integration/cloud_backup_test.go +++ b/test/integration/cloud_backup_test.go @@ -17,7 +17,8 @@ import ( "github.com/gardener/etcd-backup-restore/pkg/initializer/validator" "github.com/gardener/etcd-backup-restore/pkg/snapstore" brtypes "github.com/gardener/etcd-backup-restore/pkg/types" - . "github.com/gardener/etcd-backup-restore/test/e2e/integration" + . "github.com/gardener/etcd-backup-restore/test/integration" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/sirupsen/logrus" diff --git a/test/e2e/integration/integration_suite_test.go b/test/integration/integration_suite_test.go similarity index 100% rename from test/e2e/integration/integration_suite_test.go rename to test/integration/integration_suite_test.go diff --git a/test/e2e/integration/utils.go b/test/integration/utils.go similarity index 100% rename from test/e2e/integration/utils.go rename to test/integration/utils.go diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE b/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/README.md b/vendor/github.com/AdaLogics/go-fuzz-headers/README.md new file mode 100644 index 000000000..0a0d60c74 --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/README.md @@ -0,0 +1,93 @@ +# go-fuzz-headers +This repository contains various helper functions for go fuzzing. It is mostly used in combination with [go-fuzz](https://github.com/dvyukov/go-fuzz), but compatibility with fuzzing in the standard library will also be supported. Any coverage guided fuzzing engine that provides an array or slice of bytes can be used with go-fuzz-headers. + + +## Usage +Using go-fuzz-headers is easy. First create a new consumer with the bytes provided by the fuzzing engine: + +```go +import ( + fuzz "github.com/AdaLogics/go-fuzz-headers" +) +data := []byte{'R', 'a', 'n', 'd', 'o', 'm'} +f := fuzz.NewConsumer(data) + +``` + +This creates a `Consumer` that consumes the bytes of the input as it uses them to fuzz different types. 
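+
+For example, with the fuzzing support built into the Go standard library, a consumer can be created inside the fuzz target from the engine-provided bytes (a sketch; `ParseInput` stands in for the function under test):
+
+```go
+func FuzzParseInput(f *testing.F) {
+	f.Fuzz(func(t *testing.T, data []byte) {
+		fc := fuzz.NewConsumer(data)
+		input, err := fc.GetString()
+		if err != nil {
+			return
+		}
+		ParseInput(input)
+	})
+}
+```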
+ +After that, `f` can be used to easily create fuzzed instances of different types. Below are some examples: + +### Structs +One of the most useful features of go-fuzz-headers is its ability to fill structs with the data provided by the fuzzing engine. This is done with a single line: +```go +type Person struct { + Name string + Age int +} +p := Person{} +// Fill p with values based on the data provided by the fuzzing engine: +err := f.GenerateStruct(&p) +``` + +This includes nested structs too. In this example, the fuzz Consumer will also insert values in `p.BestFriend`: +```go +type PersonI struct { + Name string + Age int + BestFriend PersonII +} +type PersonII struct { + Name string + Age int +} +p := PersonI{} +err := f.GenerateStruct(&p) +``` + +If the consumer should insert values for unexported fields as well as exported, this can be enabled with: + +```go +f.AllowUnexportedFields() +``` + +...and disabled with: + +```go +f.DisallowUnexportedFields() +``` + +### Other types: + +Other useful APIs: + +```go +createdString, err := f.GetString() // Gets a string +createdInt, err := f.GetInt() // Gets an integer +createdByte, err := f.GetByte() // Gets a byte +createdBytes, err := f.GetBytes() // Gets a byte slice +createdBool, err := f.GetBool() // Gets a boolean +err := f.FuzzMap(target_map) // Fills a map +createdTarBytes, err := f.TarBytes() // Gets bytes of a valid tar archive +err := f.CreateFiles(inThisDir) // Fills inThisDir with files +createdString, err := f.GetStringFrom("anyCharInThisString", ofThisLength) // Gets a string that consists of chars from "anyCharInThisString" and has the exact length "ofThisLength" +``` + +Most APIs are added as they are needed. + +## Projects that use go-fuzz-headers +- [runC](https://github.com/opencontainers/runc) +- [Istio](https://github.com/istio/istio) +- [Vitess](https://github.com/vitessio/vitess) +- [Containerd](https://github.com/containerd/containerd) + +Feel free to add your own project to the list, if you use go-fuzz-headers to fuzz it. + + + + +## Status +The project is under development and will be updated regularly. + +## References +go-fuzz-headers' approach to fuzzing structs is strongly inspired by [gofuzz](https://github.com/google/gofuzz). \ No newline at end of file diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go new file mode 100644 index 000000000..adfeedf5e --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go @@ -0,0 +1,914 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gofuzzheaders + +import ( + "archive/tar" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "time" + "unsafe" +) + +var ( + MaxTotalLen uint32 = 2000000 + maxDepth = 100 +) + +func SetMaxTotalLen(newLen uint32) { + MaxTotalLen = newLen +} + +type ConsumeFuzzer struct { + data []byte + dataTotal uint32 + CommandPart []byte + RestOfArray []byte + NumberOfCalls int + position uint32 + fuzzUnexportedFields bool + curDepth int + Funcs map[reflect.Type]reflect.Value +} + +func IsDivisibleBy(n int, divisibleby int) bool { + return (n % divisibleby) == 0 +} + +func NewConsumer(fuzzData []byte) *ConsumeFuzzer { + return &ConsumeFuzzer{ + data: fuzzData, + dataTotal: uint32(len(fuzzData)), + Funcs: make(map[reflect.Type]reflect.Value), + curDepth: 0, + } +} + +func (f *ConsumeFuzzer) Split(minCalls, maxCalls int) error { + if f.dataTotal == 0 { + return errors.New("could not split") + } + numberOfCalls := int(f.data[0]) + if numberOfCalls < minCalls || numberOfCalls > maxCalls { + return errors.New("bad number of calls") + } + if int(f.dataTotal) < numberOfCalls+numberOfCalls+1 { + return errors.New("length of data does not match required parameters") + } + + // Define part 2 and 3 of the data array + commandPart := f.data[1 : numberOfCalls+1] + restOfArray := f.data[numberOfCalls+1:] + + // Just a small check. It is necessary + if len(commandPart) != numberOfCalls { + return errors.New("length of commandPart does not match number of calls") + } + + // Check if restOfArray is divisible by numberOfCalls + if !IsDivisibleBy(len(restOfArray), numberOfCalls) { + return errors.New("length of commandPart does not match number of calls") + } + f.CommandPart = commandPart + f.RestOfArray = restOfArray + f.NumberOfCalls = numberOfCalls + return nil +} + +func (f *ConsumeFuzzer) AllowUnexportedFields() { + f.fuzzUnexportedFields = true +} + +func (f *ConsumeFuzzer) DisallowUnexportedFields() { + f.fuzzUnexportedFields = false +} + +func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error { + e := reflect.ValueOf(targetStruct).Elem() + return f.fuzzStruct(e, false) +} + +func (f *ConsumeFuzzer) setCustom(v reflect.Value) error { + // First: see if we have a fuzz function for it. + doCustom, ok := f.Funcs[v.Type()] + if !ok { + return fmt.Errorf("could not find a custom function") + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return fmt.Errorf("could not use a custom function") + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return fmt.Errorf("could not use a custom function") + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return fmt.Errorf("could not use a custom function") + } + + verr := doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ + F: f, + })}) + + // check if we return an error + if verr[0].IsNil() { + return nil + } + return fmt.Errorf("could not use a custom function") +} + +func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error { + if f.curDepth >= maxDepth { + // return err or nil here? 
+ return nil + } + f.curDepth++ + defer func() { f.curDepth-- }() + + // We check if we should check for custom functions + if customFunctions && e.IsValid() && e.CanAddr() { + err := f.setCustom(e.Addr()) + if err != nil { + return err + } + } + + switch e.Kind() { + case reflect.Struct: + for i := 0; i < e.NumField(); i++ { + var v reflect.Value + if !e.Field(i).CanSet() { + if f.fuzzUnexportedFields { + v = reflect.NewAt(e.Field(i).Type(), unsafe.Pointer(e.Field(i).UnsafeAddr())).Elem() + } + if err := f.fuzzStruct(v, customFunctions); err != nil { + return err + } + } else { + v = e.Field(i) + if err := f.fuzzStruct(v, customFunctions); err != nil { + return err + } + } + } + case reflect.String: + str, err := f.GetString() + if err != nil { + return err + } + if e.CanSet() { + e.SetString(str) + } + case reflect.Slice: + var maxElements uint32 + // Byte slices should not be restricted + if e.Type().String() == "[]uint8" { + maxElements = 10000000 + } else { + maxElements = 50 + } + + randQty, err := f.GetUint32() + if err != nil { + return err + } + numOfElements := randQty % maxElements + if (f.dataTotal - f.position) < numOfElements { + numOfElements = f.dataTotal - f.position + } + + uu := reflect.MakeSlice(e.Type(), int(numOfElements), int(numOfElements)) + + for i := 0; i < int(numOfElements); i++ { + // If we have more than 10, then we can proceed with that. + if err := f.fuzzStruct(uu.Index(i), customFunctions); err != nil { + if i >= 10 { + if e.CanSet() { + e.Set(uu) + } + return nil + } else { + return err + } + } + } + if e.CanSet() { + e.Set(uu) + } + case reflect.Uint16: + newInt, err := f.GetUint16() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } + case reflect.Uint32: + newInt, err := f.GetUint32() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } + case reflect.Uint64: + newInt, err := f.GetInt() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + newInt, err := f.GetInt() + if err != nil { + return err + } + if e.CanSet() { + e.SetInt(int64(newInt)) + } + case reflect.Float32: + newFloat, err := f.GetFloat32() + if err != nil { + return err + } + if e.CanSet() { + e.SetFloat(float64(newFloat)) + } + case reflect.Float64: + newFloat, err := f.GetFloat64() + if err != nil { + return err + } + if e.CanSet() { + e.SetFloat(float64(newFloat)) + } + case reflect.Map: + if e.CanSet() { + e.Set(reflect.MakeMap(e.Type())) + const maxElements = 50 + randQty, err := f.GetInt() + if err != nil { + return err + } + numOfElements := randQty % maxElements + for i := 0; i < numOfElements; i++ { + key := reflect.New(e.Type().Key()).Elem() + if err := f.fuzzStruct(key, customFunctions); err != nil { + return err + } + val := reflect.New(e.Type().Elem()).Elem() + if err = f.fuzzStruct(val, customFunctions); err != nil { + return err + } + e.SetMapIndex(key, val) + } + } + case reflect.Ptr: + if e.CanSet() { + e.Set(reflect.New(e.Type().Elem())) + if err := f.fuzzStruct(e.Elem(), customFunctions); err != nil { + return err + } + return nil + } + case reflect.Uint8: + b, err := f.GetByte() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(b)) + } + } + return nil +} + +func (f *ConsumeFuzzer) GetStringArray() (reflect.Value, error) { + // The max size of the array: + const max uint32 = 20 + + arraySize := f.position + if arraySize > max { + arraySize = max + } + stringArray := 
reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("string")), int(arraySize), int(arraySize)) + if f.position+arraySize >= f.dataTotal { + return stringArray, errors.New("could not make string array") + } + + for i := 0; i < int(arraySize); i++ { + stringSize := uint32(f.data[f.position]) + if f.position+stringSize >= f.dataTotal { + return stringArray, nil + } + stringToAppend := string(f.data[f.position : f.position+stringSize]) + strVal := reflect.ValueOf(stringToAppend) + stringArray = reflect.Append(stringArray, strVal) + f.position += stringSize + } + return stringArray, nil +} + +func (f *ConsumeFuzzer) GetInt() (int, error) { + if f.position >= f.dataTotal { + return 0, errors.New("not enough bytes to create int") + } + returnInt := int(f.data[f.position]) + f.position++ + return returnInt, nil +} + +func (f *ConsumeFuzzer) GetByte() (byte, error) { + if f.position >= f.dataTotal { + return 0x00, errors.New("not enough bytes to get byte") + } + returnByte := f.data[f.position] + f.position++ + return returnByte, nil +} + +func (f *ConsumeFuzzer) GetNBytes(numberOfBytes int) ([]byte, error) { + if f.position >= f.dataTotal { + return nil, errors.New("not enough bytes to get byte") + } + returnBytes := make([]byte, 0, numberOfBytes) + for i := 0; i < numberOfBytes; i++ { + newByte, err := f.GetByte() + if err != nil { + return nil, err + } + returnBytes = append(returnBytes, newByte) + } + return returnBytes, nil +} + +func (f *ConsumeFuzzer) GetUint16() (uint16, error) { + u16, err := f.GetNBytes(2) + if err != nil { + return 0, err + } + littleEndian, err := f.GetBool() + if err != nil { + return 0, err + } + if littleEndian { + return binary.LittleEndian.Uint16(u16), nil + } + return binary.BigEndian.Uint16(u16), nil +} + +func (f *ConsumeFuzzer) GetUint32() (uint32, error) { + u32, err := f.GetNBytes(4) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(u32), nil +} + +func (f *ConsumeFuzzer) GetUint64() (uint64, error) { + u64, err := f.GetNBytes(8) + if err != nil { + return 0, err + } + littleEndian, err := f.GetBool() + if err != nil { + return 0, err + } + if littleEndian { + return binary.LittleEndian.Uint64(u64), nil + } + return binary.BigEndian.Uint64(u64), nil +} + +func (f *ConsumeFuzzer) GetBytes() ([]byte, error) { + var length uint32 + var err error + length, err = f.GetUint32() + if err != nil { + return nil, errors.New("not enough bytes to create byte array") + } + + if length == 0 { + length = 30 + } + bytesLeft := f.dataTotal - f.position + if bytesLeft <= 0 { + return nil, errors.New("not enough bytes to create byte array") + } + + // If the length is the same as bytes left, we will not overflow + // the remaining bytes. 
+ if length != bytesLeft { + length = length % bytesLeft + } + byteBegin := f.position + if byteBegin+length < byteBegin { + return nil, errors.New("numbers overflow") + } + f.position = byteBegin + length + return f.data[byteBegin:f.position], nil +} + +func (f *ConsumeFuzzer) GetString() (string, error) { + if f.position >= f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + length, err := f.GetUint32() + if err != nil { + return "nil", errors.New("not enough bytes to create string") + } + if f.position > MaxTotalLen { + return "nil", errors.New("created too large a string") + } + byteBegin := f.position + if byteBegin >= f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + if byteBegin+length > f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + if byteBegin > byteBegin+length { + return "nil", errors.New("numbers overflow") + } + f.position = byteBegin + length + return string(f.data[byteBegin:f.position]), nil +} + +func (f *ConsumeFuzzer) GetBool() (bool, error) { + if f.position >= f.dataTotal { + return false, errors.New("not enough bytes to create bool") + } + if IsDivisibleBy(int(f.data[f.position]), 2) { + f.position++ + return true, nil + } else { + f.position++ + return false, nil + } +} + +func (f *ConsumeFuzzer) FuzzMap(m interface{}) error { + return f.GenerateStruct(m) +} + +func returnTarBytes(buf []byte) ([]byte, error) { + return buf, nil + // Count files + var fileCounter int + tr := tar.NewReader(bytes.NewReader(buf)) + for { + _, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + fileCounter++ + } + if fileCounter >= 1 { + return buf, nil + } + return nil, fmt.Errorf("not enough files were created\n") +} + +func setTarHeaderFormat(hdr *tar.Header, f *ConsumeFuzzer) error { + ind, err := f.GetInt() + if err != nil { + hdr.Format = tar.FormatGNU + //return nil + } + switch ind % 4 { + case 0: + hdr.Format = tar.FormatUnknown + case 1: + hdr.Format = tar.FormatUSTAR + case 2: + hdr.Format = tar.FormatPAX + case 3: + hdr.Format = tar.FormatGNU + } + return nil +} + +func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error { + ind, err := f.GetInt() + if err != nil { + return err + } + switch ind % 13 { + case 0: + hdr.Typeflag = tar.TypeReg + case 1: + hdr.Typeflag = tar.TypeLink + linkname, err := f.GetString() + if err != nil { + return err + } + hdr.Linkname = linkname + case 2: + hdr.Typeflag = tar.TypeSymlink + linkname, err := f.GetString() + if err != nil { + return err + } + hdr.Linkname = linkname + case 3: + hdr.Typeflag = tar.TypeChar + case 4: + hdr.Typeflag = tar.TypeBlock + case 5: + hdr.Typeflag = tar.TypeDir + case 6: + hdr.Typeflag = tar.TypeFifo + case 7: + hdr.Typeflag = tar.TypeCont + case 8: + hdr.Typeflag = tar.TypeXHeader + case 9: + hdr.Typeflag = tar.TypeXGlobalHeader + case 10: + hdr.Typeflag = tar.TypeGNUSparse + case 11: + hdr.Typeflag = tar.TypeGNULongName + case 12: + hdr.Typeflag = tar.TypeGNULongLink + } + return nil +} + +func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) { + return f.GetBytes() + /*length, err := f.GetUint32() + if err != nil { + return nil, errors.New("not enough bytes to create byte array") + } + + // A bit of optimization to attempt to create a file body + // when we don't have as many bytes left as "length" + remainingBytes := f.dataTotal - f.position + if remainingBytes <= 0 { + return nil, errors.New("created too large a string") + } + if f.position+length > 
MaxTotalLen { + return nil, errors.New("created too large a string") + } + byteBegin := f.position + if byteBegin >= f.dataTotal { + return nil, errors.New("not enough bytes to create byte array") + } + if length == 0 { + return nil, errors.New("zero-length is not supported") + } + if byteBegin+length >= f.dataTotal { + return nil, errors.New("not enough bytes to create byte array") + } + if byteBegin+length < byteBegin { + return nil, errors.New("numbers overflow") + } + f.position = byteBegin + length + return f.data[byteBegin:f.position], nil*/ +} + +// getTarFileName is similar to GetString(), but creates string based +// on the length of f.data to reduce the likelihood of overflowing +// f.data. +func (f *ConsumeFuzzer) getTarFilename() (string, error) { + return f.GetString() + /*length, err := f.GetUint32() + if err != nil { + return "nil", errors.New("not enough bytes to create string") + } + + // A bit of optimization to attempt to create a file name + // when we don't have as many bytes left as "length" + remainingBytes := f.dataTotal - f.position + if remainingBytes <= 0 { + return "nil", errors.New("created too large a string") + } + if f.position > MaxTotalLen { + return "nil", errors.New("created too large a string") + } + byteBegin := f.position + if byteBegin >= f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + if byteBegin+length > f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + if byteBegin > byteBegin+length { + return "nil", errors.New("numbers overflow") + } + f.position = byteBegin + length + return string(f.data[byteBegin:f.position]), nil*/ +} + +type TarFile struct { + Hdr *tar.Header + Body []byte +} + +// TarBytes returns valid bytes for a tar archive +func (f *ConsumeFuzzer) TarBytes() ([]byte, error) { + numberOfFiles, err := f.GetInt() + if err != nil { + return nil, err + } + var tarFiles []*TarFile + tarFiles = make([]*TarFile, 0) + + const maxNoOfFiles = 100 + for i := 0; i < numberOfFiles%maxNoOfFiles; i++ { + var filename string + var filebody []byte + var sec, nsec int + var err error + + filename, err = f.getTarFilename() + if err != nil { + var sb strings.Builder + sb.WriteString("file-") + sb.WriteString(strconv.Itoa(i)) + filename = sb.String() + } + filebody, err = f.createTarFileBody() + if err != nil { + var sb strings.Builder + sb.WriteString("filebody-") + sb.WriteString(strconv.Itoa(i)) + filebody = []byte(sb.String()) + } + + sec, err = f.GetInt() + if err != nil { + sec = 1672531200 // beginning of 2023 + } + nsec, err = f.GetInt() + if err != nil { + nsec = 1703980800 // end of 2023 + } + + hdr := &tar.Header{ + Name: filename, + Size: int64(len(filebody)), + Mode: 0o600, + ModTime: time.Unix(int64(sec), int64(nsec)), + } + if err := setTarHeaderTypeflag(hdr, f); err != nil { + return []byte(""), err + } + if err := setTarHeaderFormat(hdr, f); err != nil { + return []byte(""), err + } + tf := &TarFile{ + Hdr: hdr, + Body: filebody, + } + tarFiles = append(tarFiles, tf) + } + + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + defer tw.Close() + + for _, tf := range tarFiles { + tw.WriteHeader(tf.Hdr) + tw.Write(tf.Body) + } + return buf.Bytes(), nil +} + +// This is similar to TarBytes, but it returns a series of +// files instead of raw tar bytes. The advantage of this +// api is that it is cheaper in terms of cpu power to +// modify or check the files in the fuzzer with TarFiles() +// because it avoids creating a tar reader. 
+func (f *ConsumeFuzzer) TarFiles() ([]*TarFile, error) { + numberOfFiles, err := f.GetInt() + if err != nil { + return nil, err + } + var tarFiles []*TarFile + tarFiles = make([]*TarFile, 0) + + const maxNoOfFiles = 100 + for i := 0; i < numberOfFiles%maxNoOfFiles; i++ { + filename, err := f.getTarFilename() + if err != nil { + return tarFiles, err + } + filebody, err := f.createTarFileBody() + if err != nil { + return tarFiles, err + } + + sec, err := f.GetInt() + if err != nil { + return tarFiles, err + } + nsec, err := f.GetInt() + if err != nil { + return tarFiles, err + } + + hdr := &tar.Header{ + Name: filename, + Size: int64(len(filebody)), + Mode: 0o600, + ModTime: time.Unix(int64(sec), int64(nsec)), + } + if err := setTarHeaderTypeflag(hdr, f); err != nil { + hdr.Typeflag = tar.TypeReg + } + if err := setTarHeaderFormat(hdr, f); err != nil { + return tarFiles, err // should not happend + } + tf := &TarFile{ + Hdr: hdr, + Body: filebody, + } + tarFiles = append(tarFiles, tf) + } + return tarFiles, nil +} + +// CreateFiles creates pseudo-random files in rootDir. +// It creates subdirs and places the files there. +// It is the callers responsibility to ensure that +// rootDir exists. +func (f *ConsumeFuzzer) CreateFiles(rootDir string) error { + numberOfFiles, err := f.GetInt() + if err != nil { + return err + } + maxNumberOfFiles := numberOfFiles % 4000 // This is completely arbitrary + if maxNumberOfFiles == 0 { + return errors.New("maxNumberOfFiles is nil") + } + + var noOfCreatedFiles int + for i := 0; i < maxNumberOfFiles; i++ { + // The file to create: + fileName, err := f.GetString() + if err != nil { + if noOfCreatedFiles > 0 { + // If files have been created, we don't return an error. + break + } else { + return errors.New("could not get fileName") + } + } + if strings.Contains(fileName, "..") || (len(fileName) > 0 && fileName[0] == 47) || strings.Contains(fileName, "\\") { + continue + } + fullFilePath := filepath.Join(rootDir, fileName) + + // Find the subdirectory of the file + if subDir := filepath.Dir(fileName); subDir != "" && subDir != "." { + // create the dir first; avoid going outside the root dir + if strings.Contains(subDir, "../") || (len(subDir) > 0 && subDir[0] == 47) || strings.Contains(subDir, "\\") { + continue + } + dirPath := filepath.Join(rootDir, subDir) + if _, err := os.Stat(dirPath); os.IsNotExist(err) { + err2 := os.MkdirAll(dirPath, 0o777) + if err2 != nil { + continue + } + } + fullFilePath = filepath.Join(dirPath, fileName) + } else { + // Create symlink + createSymlink, err := f.GetBool() + if err != nil { + if noOfCreatedFiles > 0 { + break + } else { + return errors.New("could not create the symlink") + } + } + if createSymlink { + symlinkTarget, err := f.GetString() + if err != nil { + return err + } + err = os.Symlink(symlinkTarget, fullFilePath) + if err != nil { + return err + } + // stop loop here, since a symlink needs no further action + noOfCreatedFiles++ + continue + } + // We create a normal file + fileContents, err := f.GetBytes() + if err != nil { + if noOfCreatedFiles > 0 { + break + } else { + return errors.New("could not create the file") + } + } + err = os.WriteFile(fullFilePath, fileContents, 0o666) + if err != nil { + continue + } + noOfCreatedFiles++ + } + } + return nil +} + +// GetStringFrom returns a string that can only consist of characters +// included in possibleChars. It returns an error if the created string +// does not have the specified length. 
+func (f *ConsumeFuzzer) GetStringFrom(possibleChars string, length int) (string, error) { + if (f.dataTotal - f.position) < uint32(length) { + return "", errors.New("not enough bytes to create a string") + } + output := make([]byte, 0, length) + for i := 0; i < length; i++ { + charIndex, err := f.GetInt() + if err != nil { + return string(output), err + } + output = append(output, possibleChars[charIndex%len(possibleChars)]) + } + return string(output), nil +} + +func (f *ConsumeFuzzer) GetRune() ([]rune, error) { + stringToConvert, err := f.GetString() + if err != nil { + return []rune("nil"), err + } + return []rune(stringToConvert), nil +} + +func (f *ConsumeFuzzer) GetFloat32() (float32, error) { + u32, err := f.GetNBytes(4) + if err != nil { + return 0, err + } + littleEndian, err := f.GetBool() + if err != nil { + return 0, err + } + if littleEndian { + u32LE := binary.LittleEndian.Uint32(u32) + return math.Float32frombits(u32LE), nil + } + u32BE := binary.BigEndian.Uint32(u32) + return math.Float32frombits(u32BE), nil +} + +func (f *ConsumeFuzzer) GetFloat64() (float64, error) { + u64, err := f.GetNBytes(8) + if err != nil { + return 0, err + } + littleEndian, err := f.GetBool() + if err != nil { + return 0, err + } + if littleEndian { + u64LE := binary.LittleEndian.Uint64(u64) + return math.Float64frombits(u64LE), nil + } + u64BE := binary.BigEndian.Uint64(u64) + return math.Float64frombits(u64BE), nil +} + +func (f *ConsumeFuzzer) CreateSlice(targetSlice interface{}) error { + return f.GenerateStruct(targetSlice) +} diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go b/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go new file mode 100644 index 000000000..8ca3a61b8 --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go @@ -0,0 +1,62 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofuzzheaders + +import ( + "fmt" + "reflect" +) + +type Continue struct { + F *ConsumeFuzzer +} + +func (f *ConsumeFuzzer) AddFuncs(fuzzFuncs []interface{}) { + for i := range fuzzFuncs { + v := reflect.ValueOf(fuzzFuncs[i]) + if v.Kind() != reflect.Func { + panic("Need only funcs!") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 1 { + fmt.Println(t.NumIn(), t.NumOut()) + + panic("Need 2 in and 1 out params. In must be the type. 
Out must be an error") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("fuzzFunc must take pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("fuzzFunc's second parameter must be type Continue") + } + f.Funcs[argT] = v + } +} + +func (f *ConsumeFuzzer) GenerateWithCustom(targetStruct interface{}) error { + e := reflect.ValueOf(targetStruct).Elem() + return f.fuzzStruct(e, true) +} + +func (c Continue) GenerateStruct(targetStruct interface{}) error { + return c.F.GenerateStruct(targetStruct) +} + +func (c Continue) GenerateStructWithCustom(targetStruct interface{}) error { + return c.F.GenerateWithCustom(targetStruct) +} diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go b/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go new file mode 100644 index 000000000..2afd49f84 --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go @@ -0,0 +1,556 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofuzzheaders + +import ( + "fmt" + "strings" +) + +// returns a keyword by index +func getKeyword(f *ConsumeFuzzer) (string, error) { + index, err := f.GetInt() + if err != nil { + return keywords[0], err + } + for i, k := range keywords { + if i == index { + return k, nil + } + } + return keywords[0], fmt.Errorf("could not get a kw") +} + +// Simple utility function to check if a string +// slice contains a string. 
+func containsString(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +// These keywords are used specifically for fuzzing Vitess +var keywords = []string{ + "accessible", "action", "add", "after", "against", "algorithm", + "all", "alter", "always", "analyze", "and", "as", "asc", "asensitive", + "auto_increment", "avg_row_length", "before", "begin", "between", + "bigint", "binary", "_binary", "_utf8mb4", "_utf8", "_latin1", "bit", + "blob", "bool", "boolean", "both", "by", "call", "cancel", "cascade", + "cascaded", "case", "cast", "channel", "change", "char", "character", + "charset", "check", "checksum", "coalesce", "code", "collate", "collation", + "column", "columns", "comment", "committed", "commit", "compact", "complete", + "compressed", "compression", "condition", "connection", "constraint", "continue", + "convert", "copy", "cume_dist", "substr", "substring", "create", "cross", + "csv", "current_date", "current_time", "current_timestamp", "current_user", + "cursor", "data", "database", "databases", "day", "day_hour", "day_microsecond", + "day_minute", "day_second", "date", "datetime", "dec", "decimal", "declare", + "default", "definer", "delay_key_write", "delayed", "delete", "dense_rank", + "desc", "describe", "deterministic", "directory", "disable", "discard", + "disk", "distinct", "distinctrow", "div", "double", "do", "drop", "dumpfile", + "duplicate", "dynamic", "each", "else", "elseif", "empty", "enable", + "enclosed", "encryption", "end", "enforced", "engine", "engines", "enum", + "error", "escape", "escaped", "event", "exchange", "exclusive", "exists", + "exit", "explain", "expansion", "export", "extended", "extract", "false", + "fetch", "fields", "first", "first_value", "fixed", "float", "float4", + "float8", "flush", "for", "force", "foreign", "format", "from", "full", + "fulltext", "function", "general", "generated", "geometry", "geometrycollection", + "get", "global", "gtid_executed", "grant", "group", "grouping", "groups", + "group_concat", "having", "header", "high_priority", "hosts", "hour", "hour_microsecond", + "hour_minute", "hour_second", "if", "ignore", "import", "in", "index", "indexes", + "infile", "inout", "inner", "inplace", "insensitive", "insert", "insert_method", + "int", "int1", "int2", "int3", "int4", "int8", "integer", "interval", + "into", "io_after_gtids", "is", "isolation", "iterate", "invoker", "join", + "json", "json_table", "key", "keys", "keyspaces", "key_block_size", "kill", "lag", + "language", "last", "last_value", "last_insert_id", "lateral", "lead", "leading", + "leave", "left", "less", "level", "like", "limit", "linear", "lines", + "linestring", "load", "local", "localtime", "localtimestamp", "lock", "logs", + "long", "longblob", "longtext", "loop", "low_priority", "manifest", + "master_bind", "match", "max_rows", "maxvalue", "mediumblob", "mediumint", + "mediumtext", "memory", "merge", "microsecond", "middleint", "min_rows", "minute", + "minute_microsecond", "minute_second", "mod", "mode", "modify", "modifies", + "multilinestring", "multipoint", "multipolygon", "month", "name", + "names", "natural", "nchar", "next", "no", "none", "not", "no_write_to_binlog", + "nth_value", "ntile", "null", "numeric", "of", "off", "offset", "on", + "only", "open", "optimize", "optimizer_costs", "option", "optionally", + "or", "order", "out", "outer", "outfile", "over", "overwrite", "pack_keys", + "parser", "partition", "partitioning", "password", "percent_rank", "plugins", + "point", "polygon", 
"precision", "primary", "privileges", "processlist", + "procedure", "query", "quarter", "range", "rank", "read", "reads", "read_write", + "real", "rebuild", "recursive", "redundant", "references", "regexp", "relay", + "release", "remove", "rename", "reorganize", "repair", "repeat", "repeatable", + "replace", "require", "resignal", "restrict", "return", "retry", "revert", + "revoke", "right", "rlike", "rollback", "row", "row_format", "row_number", + "rows", "s3", "savepoint", "schema", "schemas", "second", "second_microsecond", + "security", "select", "sensitive", "separator", "sequence", "serializable", + "session", "set", "share", "shared", "show", "signal", "signed", "slow", + "smallint", "spatial", "specific", "sql", "sqlexception", "sqlstate", + "sqlwarning", "sql_big_result", "sql_cache", "sql_calc_found_rows", + "sql_no_cache", "sql_small_result", "ssl", "start", "starting", + "stats_auto_recalc", "stats_persistent", "stats_sample_pages", "status", + "storage", "stored", "straight_join", "stream", "system", "vstream", + "table", "tables", "tablespace", "temporary", "temptable", "terminated", + "text", "than", "then", "time", "timestamp", "timestampadd", "timestampdiff", + "tinyblob", "tinyint", "tinytext", "to", "trailing", "transaction", "tree", + "traditional", "trigger", "triggers", "true", "truncate", "uncommitted", + "undefined", "undo", "union", "unique", "unlock", "unsigned", "update", + "upgrade", "usage", "use", "user", "user_resources", "using", "utc_date", + "utc_time", "utc_timestamp", "validation", "values", "variables", "varbinary", + "varchar", "varcharacter", "varying", "vgtid_executed", "virtual", "vindex", + "vindexes", "view", "vitess", "vitess_keyspaces", "vitess_metadata", + "vitess_migration", "vitess_migrations", "vitess_replication_status", + "vitess_shards", "vitess_tablets", "vschema", "warnings", "when", + "where", "while", "window", "with", "without", "work", "write", "xor", + "year", "year_month", "zerofill", +} + +// Keywords that could get an additional keyword +var needCustomString = []string{ + "DISTINCTROW", "FROM", // Select keywords: + "GROUP BY", "HAVING", "WINDOW", + "FOR", + "ORDER BY", "LIMIT", + "INTO", "PARTITION", "AS", // Insert Keywords: + "ON DUPLICATE KEY UPDATE", + "WHERE", "LIMIT", // Delete keywords + "INFILE", "INTO TABLE", "CHARACTER SET", // Load keywords + "TERMINATED BY", "ENCLOSED BY", + "ESCAPED BY", "STARTING BY", + "TERMINATED BY", "STARTING BY", + "IGNORE", + "VALUE", "VALUES", // Replace tokens + "SET", // Update tokens + "ENGINE =", // Drop tokens + "DEFINER =", "ON SCHEDULE", "RENAME TO", // Alter tokens + "COMMENT", "DO", "INITIAL_SIZE = ", "OPTIONS", +} + +var alterTableTokens = [][]string{ + {"CUSTOM_FUZZ_STRING"}, + {"CUSTOM_ALTTER_TABLE_OPTIONS"}, + {"PARTITION_OPTIONS_FOR_ALTER_TABLE"}, +} + +var alterTokens = [][]string{ + { + "DATABASE", "SCHEMA", "DEFINER = ", "EVENT", "FUNCTION", "INSTANCE", + "LOGFILE GROUP", "PROCEDURE", "SERVER", + }, + {"CUSTOM_FUZZ_STRING"}, + { + "ON SCHEDULE", "ON COMPLETION PRESERVE", "ON COMPLETION NOT PRESERVE", + "ADD UNDOFILE", "OPTIONS", + }, + {"RENAME TO", "INITIAL_SIZE = "}, + {"ENABLE", "DISABLE", "DISABLE ON SLAVE", "ENGINE"}, + {"COMMENT"}, + {"DO"}, +} + +var setTokens = [][]string{ + {"CHARACTER SET", "CHARSET", "CUSTOM_FUZZ_STRING", "NAMES"}, + {"CUSTOM_FUZZ_STRING", "DEFAULT", "="}, + {"CUSTOM_FUZZ_STRING"}, +} + +var dropTokens = [][]string{ + {"TEMPORARY", "UNDO"}, + { + "DATABASE", "SCHEMA", "EVENT", "INDEX", "LOGFILE GROUP", + "PROCEDURE", "FUNCTION", "SERVER", 
"SPATIAL REFERENCE SYSTEM", + "TABLE", "TABLESPACE", "TRIGGER", "VIEW", + }, + {"IF EXISTS"}, + {"CUSTOM_FUZZ_STRING"}, + {"ON", "ENGINE = ", "RESTRICT", "CASCADE"}, +} + +var renameTokens = [][]string{ + {"TABLE"}, + {"CUSTOM_FUZZ_STRING"}, + {"TO"}, + {"CUSTOM_FUZZ_STRING"}, +} + +var truncateTokens = [][]string{ + {"TABLE"}, + {"CUSTOM_FUZZ_STRING"}, +} + +var createTokens = [][]string{ + {"OR REPLACE", "TEMPORARY", "UNDO"}, // For create spatial reference system + { + "UNIQUE", "FULLTEXT", "SPATIAL", "ALGORITHM = UNDEFINED", "ALGORITHM = MERGE", + "ALGORITHM = TEMPTABLE", + }, + { + "DATABASE", "SCHEMA", "EVENT", "FUNCTION", "INDEX", "LOGFILE GROUP", + "PROCEDURE", "SERVER", "SPATIAL REFERENCE SYSTEM", "TABLE", "TABLESPACE", + "TRIGGER", "VIEW", + }, + {"IF NOT EXISTS"}, + {"CUSTOM_FUZZ_STRING"}, +} + +/* +// For future use. +var updateTokens = [][]string{ + {"LOW_PRIORITY"}, + {"IGNORE"}, + {"SET"}, + {"WHERE"}, + {"ORDER BY"}, + {"LIMIT"}, +} +*/ + +var replaceTokens = [][]string{ + {"LOW_PRIORITY", "DELAYED"}, + {"INTO"}, + {"PARTITION"}, + {"CUSTOM_FUZZ_STRING"}, + {"VALUES", "VALUE"}, +} + +var loadTokens = [][]string{ + {"DATA"}, + {"LOW_PRIORITY", "CONCURRENT", "LOCAL"}, + {"INFILE"}, + {"REPLACE", "IGNORE"}, + {"INTO TABLE"}, + {"PARTITION"}, + {"CHARACTER SET"}, + {"FIELDS", "COLUMNS"}, + {"TERMINATED BY"}, + {"OPTIONALLY"}, + {"ENCLOSED BY"}, + {"ESCAPED BY"}, + {"LINES"}, + {"STARTING BY"}, + {"TERMINATED BY"}, + {"IGNORE"}, + {"LINES", "ROWS"}, + {"CUSTOM_FUZZ_STRING"}, +} + +// These Are everything that comes after "INSERT" +var insertTokens = [][]string{ + {"LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", "IGNORE"}, + {"INTO"}, + {"PARTITION"}, + {"CUSTOM_FUZZ_STRING"}, + {"AS"}, + {"ON DUPLICATE KEY UPDATE"}, +} + +// These are everything that comes after "SELECT" +var selectTokens = [][]string{ + {"*", "CUSTOM_FUZZ_STRING", "DISTINCTROW"}, + {"HIGH_PRIORITY"}, + {"STRAIGHT_JOIN"}, + {"SQL_SMALL_RESULT", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT"}, + {"SQL_NO_CACHE", "SQL_CALC_FOUND_ROWS"}, + {"CUSTOM_FUZZ_STRING"}, + {"FROM"}, + {"WHERE"}, + {"GROUP BY"}, + {"HAVING"}, + {"WINDOW"}, + {"ORDER BY"}, + {"LIMIT"}, + {"CUSTOM_FUZZ_STRING"}, + {"FOR"}, +} + +// These are everything that comes after "DELETE" +var deleteTokens = [][]string{ + {"LOW_PRIORITY", "QUICK", "IGNORE", "FROM", "AS"}, + {"PARTITION"}, + {"WHERE"}, + {"ORDER BY"}, + {"LIMIT"}, +} + +var alter_table_options = []string{ + "ADD", "COLUMN", "FIRST", "AFTER", "INDEX", "KEY", "FULLTEXT", "SPATIAL", + "CONSTRAINT", "UNIQUE", "FOREIGN KEY", "CHECK", "ENFORCED", "DROP", "ALTER", + "NOT", "INPLACE", "COPY", "SET", "VISIBLE", "INVISIBLE", "DEFAULT", "CHANGE", + "CHARACTER SET", "COLLATE", "DISABLE", "ENABLE", "KEYS", "TABLESPACE", "LOCK", + "FORCE", "MODIFY", "SHARED", "EXCLUSIVE", "NONE", "ORDER BY", "RENAME COLUMN", + "AS", "=", "ASC", "DESC", "WITH", "WITHOUT", "VALIDATION", "ADD PARTITION", + "DROP PARTITION", "DISCARD PARTITION", "IMPORT PARTITION", "TRUNCATE PARTITION", + "COALESCE PARTITION", "REORGANIZE PARTITION", "EXCHANGE PARTITION", + "ANALYZE PARTITION", "CHECK PARTITION", "OPTIMIZE PARTITION", "REBUILD PARTITION", + "REPAIR PARTITION", "REMOVE PARTITIONING", "USING", "BTREE", "HASH", "COMMENT", + "KEY_BLOCK_SIZE", "WITH PARSER", "AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG_ROW_LENGTH", + "CHECKSUM", "INSERT_METHOD", "ROW_FORMAT", "DYNAMIC", "FIXED", "COMPRESSED", "REDUNDANT", + "COMPACT", "SECONDARY_ENGINE_ATTRIBUTE", "STATS_AUTO_RECALC", "STATS_PERSISTENT", + "STATS_SAMPLE_PAGES", "ZLIB", "LZ4", 
"ENGINE_ATTRIBUTE", "KEY_BLOCK_SIZE", "MAX_ROWS", + "MIN_ROWS", "PACK_KEYS", "PASSWORD", "COMPRESSION", "CONNECTION", "DIRECTORY", + "DELAY_KEY_WRITE", "ENCRYPTION", "STORAGE", "DISK", "MEMORY", "UNION", +} + +// Creates an 'alter table' statement. 'alter table' is an exception +// in that it has its own function. The majority of statements +// are created by 'createStmt()'. +func createAlterTableStmt(f *ConsumeFuzzer) (string, error) { + maxArgs, err := f.GetInt() + if err != nil { + return "", err + } + maxArgs = maxArgs % 30 + if maxArgs == 0 { + return "", fmt.Errorf("could not create alter table stmt") + } + + var stmt strings.Builder + stmt.WriteString("ALTER TABLE ") + for i := 0; i < maxArgs; i++ { + // Calculate if we get existing token or custom string + tokenType, err := f.GetInt() + if err != nil { + return "", err + } + if tokenType%4 == 1 { + customString, err := f.GetString() + if err != nil { + return "", err + } + stmt.WriteString(" " + customString) + } else { + tokenIndex, err := f.GetInt() + if err != nil { + return "", err + } + stmt.WriteString(" " + alter_table_options[tokenIndex%len(alter_table_options)]) + } + } + return stmt.String(), nil +} + +func chooseToken(tokens []string, f *ConsumeFuzzer) (string, error) { + index, err := f.GetInt() + if err != nil { + return "", err + } + var token strings.Builder + token.WriteString(tokens[index%len(tokens)]) + if token.String() == "CUSTOM_FUZZ_STRING" { + customFuzzString, err := f.GetString() + if err != nil { + return "", err + } + return customFuzzString, nil + } + + // Check if token requires an argument + if containsString(needCustomString, token.String()) { + customFuzzString, err := f.GetString() + if err != nil { + return "", err + } + token.WriteString(" " + customFuzzString) + } + return token.String(), nil +} + +var stmtTypes = map[string][][]string{ + "DELETE": deleteTokens, + "INSERT": insertTokens, + "SELECT": selectTokens, + "LOAD": loadTokens, + "REPLACE": replaceTokens, + "CREATE": createTokens, + "DROP": dropTokens, + "RENAME": renameTokens, + "TRUNCATE": truncateTokens, + "SET": setTokens, + "ALTER": alterTokens, + "ALTER TABLE": alterTableTokens, // ALTER TABLE has its own set of tokens +} + +var stmtTypeEnum = map[int]string{ + 0: "DELETE", + 1: "INSERT", + 2: "SELECT", + 3: "LOAD", + 4: "REPLACE", + 5: "CREATE", + 6: "DROP", + 7: "RENAME", + 8: "TRUNCATE", + 9: "SET", + 10: "ALTER", + 11: "ALTER TABLE", +} + +func createStmt(f *ConsumeFuzzer) (string, error) { + stmtIndex, err := f.GetInt() + if err != nil { + return "", err + } + stmtIndex = stmtIndex % len(stmtTypes) + + queryType := stmtTypeEnum[stmtIndex] + tokens := stmtTypes[queryType] + + // We have custom creator for ALTER TABLE + if queryType == "ALTER TABLE" { + query, err := createAlterTableStmt(f) + if err != nil { + return "", err + } + return query, nil + } + + // Here we are creating a query that is not + // an 'alter table' query. For available + // queries, see "stmtTypes" + + // First specify the first query keyword: + var query strings.Builder + query.WriteString(queryType) + + // Next create the args for the + queryArgs, err := createStmtArgs(tokens, f) + if err != nil { + return "", err + } + query.WriteString(" " + queryArgs) + return query.String(), nil +} + +// Creates the arguments of a statements. In a select statement +// that would be everything after "select". 
+func createStmtArgs(tokenslice [][]string, f *ConsumeFuzzer) (string, error) {
+	var query, token strings.Builder
+
+	// We go through the tokens in the tokenslice,
+	// create the respective token and add it to
+	// "query"
+	for _, tokens := range tokenslice {
+		// For extra randomization, the fuzzer can
+		// choose to not include this token.
+		includeThisToken, err := f.GetBool()
+		if err != nil {
+			return "", err
+		}
+		if !includeThisToken {
+			continue
+		}
+
+		// There may be several tokens to choose from:
+		if len(tokens) > 1 {
+			chosenToken, err := chooseToken(tokens, f)
+			if err != nil {
+				return "", err
+			}
+			query.WriteString(" " + chosenToken)
+		} else {
+			// Reset the builder so tokens from earlier
+			// iterations do not leak into this one.
+			token.Reset()
+			token.WriteString(tokens[0])
+
+			// In case the token is "CUSTOM_FUZZ_STRING"
+			// we will then create a non-structured string
+			if token.String() == "CUSTOM_FUZZ_STRING" {
+				customFuzzString, err := f.GetString()
+				if err != nil {
+					return "", err
+				}
+				query.WriteString(" " + customFuzzString)
+				continue
+			}
+
+			// Check if token requires an argument.
+			// Tokens that take an argument can be found
+			// in 'needCustomString'. If so, we add a
+			// non-structured string to the token.
+			if containsString(needCustomString, token.String()) {
+				customFuzzString, err := f.GetString()
+				if err != nil {
+					return "", err
+				}
+				token.WriteString(fmt.Sprintf(" %s", customFuzzString))
+			}
+			query.WriteString(fmt.Sprintf(" %s", token.String()))
+		}
+	}
+	return query.String(), nil
+}
+
+// Creates a semi-structured query. It creates a string
+// that is a combination of the keywords and random strings.
+func createQuery(f *ConsumeFuzzer) (string, error) {
+	queryLen, err := f.GetInt()
+	if err != nil {
+		return "", err
+	}
+	maxLen := queryLen % 60
+	if maxLen == 0 {
+		return "", fmt.Errorf("could not create a query")
+	}
+	var query strings.Builder
+	for i := 0; i < maxLen; i++ {
+		// Get a new token:
+		useKeyword, err := f.GetBool()
+		if err != nil {
+			return "", err
+		}
+		if useKeyword {
+			keyword, err := getKeyword(f)
+			if err != nil {
+				return "", err
+			}
+			query.WriteString(" " + keyword)
+		} else {
+			customString, err := f.GetString()
+			if err != nil {
+				return "", err
+			}
+			query.WriteString(" " + customString)
+		}
+	}
+	if query.String() == "" {
+		return "", fmt.Errorf("could not create a query")
+	}
+	return query.String(), nil
+}
+
+// GetSQLString is the API that users interact with.
+// +// Usage: +// +// f := NewConsumer(data) +// sqlString, err := f.GetSQLString() +func (f *ConsumeFuzzer) GetSQLString() (string, error) { + var query string + veryStructured, err := f.GetBool() + if err != nil { + return "", err + } + if veryStructured { + query, err = createStmt(f) + if err != nil { + return "", err + } + } else { + query, err = createQuery(f) + if err != nil { + return "", err + } + } + return query, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE new file mode 100644 index 000000000..e3d9a64d1 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md new file mode 100644 index 000000000..261c041e7 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/README.md @@ -0,0 +1,12 @@ +# go-ansiterm + +This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. + +For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. + +The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). + +See parser_test.go for examples exercising the state machine and generating appropriate function calls. + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
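The README above describes the split between the parser and the event handler. The following minimal sketch is illustrative only and not part of the vendored sources: it assumes the vendored github.com/Azure/go-ansiterm package added by this patch, and the cursorLogger type with its no-op stubs is invented for the example. Feeding the parser the characters ESC, [, 2, A should end with CUU(2) being dispatched to the handler:

package main

import (
	"fmt"

	"github.com/Azure/go-ansiterm"
)

// cursorLogger is a hypothetical ansiterm.AnsiEventHandler; only CUU does
// anything interesting here, the remaining methods are no-op stubs.
type cursorLogger struct{}

func (cursorLogger) Print(b byte) error     { return nil }
func (cursorLogger) Execute(b byte) error   { return nil }
func (cursorLogger) CUU(n int) error        { fmt.Println("cursor up", n); return nil }
func (cursorLogger) CUD(int) error          { return nil }
func (cursorLogger) CUF(int) error          { return nil }
func (cursorLogger) CUB(int) error          { return nil }
func (cursorLogger) CNL(int) error          { return nil }
func (cursorLogger) CPL(int) error          { return nil }
func (cursorLogger) CHA(int) error          { return nil }
func (cursorLogger) VPA(int) error          { return nil }
func (cursorLogger) CUP(int, int) error     { return nil }
func (cursorLogger) HVP(int, int) error     { return nil }
func (cursorLogger) DECTCEM(bool) error     { return nil }
func (cursorLogger) DECOM(bool) error       { return nil }
func (cursorLogger) DECCOLM(bool) error     { return nil }
func (cursorLogger) ED(int) error           { return nil }
func (cursorLogger) EL(int) error           { return nil }
func (cursorLogger) IL(int) error           { return nil }
func (cursorLogger) DL(int) error           { return nil }
func (cursorLogger) ICH(int) error          { return nil }
func (cursorLogger) DCH(int) error          { return nil }
func (cursorLogger) SGR([]int) error        { return nil }
func (cursorLogger) SU(int) error           { return nil }
func (cursorLogger) SD(int) error           { return nil }
func (cursorLogger) DA([]string) error      { return nil }
func (cursorLogger) DECSTBM(int, int) error { return nil }
func (cursorLogger) IND() error             { return nil }
func (cursorLogger) RI() error              { return nil }
func (cursorLogger) Flush() error           { return nil }

func main() {
	// "Ground" matches the state name registered in CreateParser.
	parser := ansiterm.CreateParser("Ground", cursorLogger{})
	// ESC [ 2 A drives the states escape -> csiEntry -> csiParam -> ground,
	// and the ground transition dispatches CUU with parameter 2.
	if _, err := parser.Parse([]byte("\x1b[2A")); err != nil {
		fmt.Println("parse error:", err)
	}
}

Running this would print "cursor up 2", matching the Cursor Up flow the README describes.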
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 000000000..96504a33b --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor (i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + _ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = '(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" + + FILL_CHARACTER = ' ' +) + +func 
getByteRange(start byte, end byte) []byte {
+	bytes := make([]byte, 0, 32)
+	for i := start; i <= end; i++ {
+		bytes = append(bytes, byte(i))
+	}
+
+	return bytes
+}
+
+var toGroundBytes = getToGroundBytes()
+var executors = getExecuteBytes()
+
+// SPACE          20+A0 hex  Always and everywhere a blank space
+// Intermediate   20-2F hex   !"#$%&'()*+,-./
+var intermeds = getByteRange(0x20, 0x2F)
+
+// Parameters     30-3F hex  0123456789:;<=>?
+// CSI Parameters 30-39, 3B hex 0123456789;
+var csiParams = getByteRange(0x30, 0x3F)
+
+var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
+
+// Uppercase      40-5F hex  @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+var upperCase = getByteRange(0x40, 0x5F)
+
+// Lowercase      60-7E hex  `abcdefghijklmnopqrstuvwxyz{|}~
+var lowerCase = getByteRange(0x60, 0x7E)
+
+// Alphabetics    40-7E hex  (all of upper and lower case)
+var alphabetics = append(upperCase, lowerCase...)
+
+var printables = getByteRange(0x20, 0x7F)
+
+var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
+var escapeToGroundBytes = getEscapeToGroundBytes()
+
+// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
+// byte ranges below
+
+func getEscapeToGroundBytes() []byte {
+	escapeToGroundBytes := getByteRange(0x30, 0x4F)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
+	return escapeToGroundBytes
+}
+
+func getExecuteBytes() []byte {
+	executeBytes := getByteRange(0x00, 0x17)
+	executeBytes = append(executeBytes, 0x19)
+	executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
+	return executeBytes
+}
+
+func getToGroundBytes() []byte {
+	groundBytes := []byte{0x18}
+	groundBytes = append(groundBytes, 0x1A)
+	groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
+	groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 000000000..8d66e777c --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 000000000..bcbe00d0c --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 000000000..7ed5e01c3 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 000000000..1c719db9e --- 
/dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 000000000..6390abd23 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 000000000..98087b38c --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + ED(int) 
error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 000000000..52451e946 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 000000000..593b10ab6 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + oscState.parser.logf("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 000000000..03cec7ada --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,151 @@ +package ansiterm + +import ( + "errors" + "log" + "os" +) + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state + + logf func(string, ...interface{}) +} + +type Option func(*AnsiParser) + +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f + } +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + for _, o := range opts { + o(ap) + } + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("ansiParser.log") + logger := log.New(logFile, "", log.LstdFlags) + if ap.logf != nil { + l := ap.logf + ap.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) 
+			}
+		} else {
+			ap.logf = logger.Printf
+		}
+	}
+
+	if ap.logf == nil {
+		ap.logf = func(string, ...interface{}) {}
+	}
+
+	ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
+	ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
+	ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
+	ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
+	ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
+	ap.error = errorState{baseState{name: "Error", parser: ap}}
+	ap.ground = groundState{baseState{name: "Ground", parser: ap}}
+	ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
+
+	ap.stateMap = []state{
+		ap.csiEntry,
+		ap.csiParam,
+		ap.dcsEntry,
+		ap.escape,
+		ap.escapeIntermediate,
+		ap.error,
+		ap.ground,
+		ap.oscString,
+	}
+
+	ap.currState = getState(initialState, ap.stateMap)
+
+	ap.logf("CreateParser: parser %p", ap)
+	return ap
+}
+
+func getState(name string, states []state) state {
+	for _, el := range states {
+		if el.Name() == name {
+			return el
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
+	for i, b := range bytes {
+		if err := ap.handle(b); err != nil {
+			return i, err
+		}
+	}
+
+	return len(bytes), ap.eventHandler.Flush()
+}
+
+func (ap *AnsiParser) handle(b byte) error {
+	ap.context.currentChar = b
+	newState, err := ap.currState.Handle(b)
+	if err != nil {
+		return err
+	}
+
+	if newState == nil {
+		ap.logf("WARNING: newState is nil")
+		return errors.New("new state of 'nil' is invalid")
+	}
+
+	if newState != ap.currState {
+		if err := ap.changeState(newState); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) changeState(newState state) error {
+	ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+	// Exit old state
+	if err := ap.currState.Exit(); err != nil {
+		ap.logf("Exit state '%s' failed with: '%v'", ap.currState.Name(), err)
+		return err
+	}
+
+	// Perform transition action
+	if err := ap.currState.Transition(newState); err != nil {
+		ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
+		return err
+	}
+
+	// Enter new state
+	if err := newState.Enter(); err != nil {
+		ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
+		return err
+	}
+
+	ap.currState = newState
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
new file mode 100644
index 000000000..de0a1f9cd
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -0,0 +1,99 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func parseParams(bytes []byte) ([]string, error) {
+	paramBuff := make([]byte, 0, 0)
+	params := []string{}
+
+	for _, v := range bytes {
+		if v == ';' {
+			if len(paramBuff) > 0 {
+				// Completed parameter, append it to the list
+				s := string(paramBuff)
+				params = append(params, s)
+				paramBuff = make([]byte, 0, 0)
+			}
+		} else {
+			paramBuff = append(paramBuff, v)
+		}
+	}
+
+	// Last parameter may not be terminated with ';'
+	if len(paramBuff) > 0 {
+		s := string(paramBuff)
+		params = append(params, s)
+	}
+
+	return params, nil
+}
+
+func parseCmd(context ansiContext) (string, error) {
+	return string(context.currentChar), nil
+}
+
+func getInt(params []string, dflt int) int {
+	i := getInts(params, 1, dflt)[0]
+	return i
+}
+
+func getInts(params []string, minCount int, dflt int) []int {
+	ints := []int{}
+
+	for _, v := range params {
+		i, _ := strconv.Atoi(v)
+		// Zero is mapped to the default value in VT100.
+		if i == 0 {
+			i = dflt
+		}
+		ints = append(ints, i)
+	}
+
+	if len(ints) < minCount {
+		remaining := minCount - len(ints)
+		for i := 0; i < remaining; i++ {
+			ints = append(ints, dflt)
+		}
+	}
+
+	return ints
+}
+
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+	switch param {
+	case "?3":
+		return ap.eventHandler.DECCOLM(set)
+	case "?6":
+		return ap.eventHandler.DECOM(set)
+	case "?25":
+		return ap.eventHandler.DECTCEM(set)
+	}
+	return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], true)
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], false)
+	}
+
+	return nil
+}
+
+func getEraseParam(params []string) int {
+	param := getInt(params, 0)
+	if param < 0 || 3 < param {
+		param = 0
+	}
+
+	return param
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 000000000..0bb5e51e9
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,119 @@
+package ansiterm
+
+func (ap *AnsiParser) collectParam() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectParam %#x", currChar)
+	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectInter %#x", currChar)
+	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	intermeds := ap.context.interBuffer
+	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
+	ap.logf("escDispatch: %v(%v)", cmd, intermeds)
+
+	switch cmd {
+	case "D": // IND
+		return ap.eventHandler.IND()
+	case "E": // NEL, equivalent to CRLF
+		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+		if err == nil {
+			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+		}
+		return err
+	case "M": // RI
+		return ap.eventHandler.RI()
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) csiDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	params, _ := parseParams(ap.context.paramBuffer)
+	ap.logf("Parsed params: %v with length: %d", params, len(params))
+
+	ap.logf("csiDispatch: %v(%v)", cmd, params)
+
+	switch cmd {
+	case "@":
+		return ap.eventHandler.ICH(getInt(params, 1))
+	case "A":
+		return ap.eventHandler.CUU(getInt(params, 1))
+	case "B":
+		return ap.eventHandler.CUD(getInt(params, 1))
+	case "C":
+		return ap.eventHandler.CUF(getInt(params, 1))
+	case "D":
+		return ap.eventHandler.CUB(getInt(params, 1))
+	case "E":
+		return ap.eventHandler.CNL(getInt(params, 1))
+	case "F":
+		return ap.eventHandler.CPL(getInt(params, 1))
+	case "G":
+		return ap.eventHandler.CHA(getInt(params, 1))
+	case "H":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.CUP(x, y)
+	case "J":
+		param := getEraseParam(params)
+		return ap.eventHandler.ED(param)
+	case "K":
+		param := getEraseParam(params)
+		return ap.eventHandler.EL(param)
+	case "L":
+		return ap.eventHandler.IL(getInt(params, 1))
+	case "M":
+		return ap.eventHandler.DL(getInt(params, 1))
+	case "P":
+		return ap.eventHandler.DCH(getInt(params, 1))
+	case "S":
+		return ap.eventHandler.SU(getInt(params, 1))
+	case "T":
+		return
ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return ap.eventHandler.DECSTBM(top, bottom) + default: + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &ansiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 000000000..f2ea1fcd1 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type stateID int + +type state interface { + Enter() error + Exit() error + Handle(byte) (state, error) + Name() string + Transition(state) error +} + +type baseState struct { + name string + parser *AnsiParser +} + +func (base baseState) Enter() error { + return nil +} + +func (base baseState) Exit() error { + return nil +} + +func (base baseState) Handle(b byte) (s state, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.csiEntry, nil + case b == DCS_ENTRY: + return base.parser.dcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.escape, nil + case b == OSC_STRING: + return base.parser.oscString, nil + case sliceContains(toGroundBytes, b): + return base.parser.ground, nil + } + + return nil, nil +} + +func (base baseState) Name() string { + return base.name +} + +func (base baseState) Transition(s state) error { + if s == base.parser.ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type dcsEntryState struct { + baseState +} + +type errorState struct { + baseState +} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 000000000..392114493 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 000000000..5599082ae --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,196 @@ +// +build windows + +package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/Azure/go-ansiterm" + windows "golang.org/x/sys/windows" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue + } + + param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) + if err != nil { + return defaultValue + } + + return int16(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. +func isAnsiCommandChar(b byte) bool { + switch { + case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: + return true + case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: + // non-CSI escape sequence terminator + return true + case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: + // String escape sequence terminator + return true + } + return false +} + +func isXtermOscSequence(command []byte, current byte) bool { + return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) +} + +func isCharacterSelectionCmdChar(b byte) bool { + return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) +} + +// bytesToHex converts a slice of bytes to a human-readable string. +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + +// ensureInRange adjusts the passed value, if necessary, to ensure it is within +// the passed min / max range. +func ensureInRange(n int16, min int16, max int16) int16 { + if n < min { + return min + } else if n > max { + return max + } else { + return n + } +} + +func GetStdFile(nFile int) (*os.File, uintptr) { + var file *os.File + + // syscall uses negative numbers + // windows package uses very big uint32 + // Keep these switches split so we don't have to convert ints too much. + switch uint32(nFile) { + case windows.STD_INPUT_HANDLE: + file = os.Stdin + case windows.STD_OUTPUT_HANDLE: + file = os.Stdout + case windows.STD_ERROR_HANDLE: + file = os.Stderr + default: + switch nFile { + case syscall.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } + } + + fd, err := syscall.GetStdHandle(nFile) + if err != nil { + panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) + } + + return file, uintptr(fd) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go new file mode 100644 index 000000000..6055e33b9 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -0,0 +1,327 @@ +// +build windows + +package winterm + +import ( + "fmt" + "syscall" + "unsafe" +) + +//=========================================================================================================== +// IMPORTANT NOTE: +// +// The methods below make extensive use of the "unsafe" package to obtain the required pointers. +// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack +// variables) the pointers reference *before* the API completes. 
+//
+// As a result, in those cases, the code must hint that the variables remain in active use by invoking the
+// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+// require unsafe pointers.
+//
+// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
+// the garbage collector the variables remain in use if:
+//
+// -- The value is not a pointer (e.g., int32, struct)
+// -- The value is not referenced by the method after passing the pointer to Windows
+//
+// See http://golang.org/doc/go1.3.
+//===========================================================================================================
+
+var (
+	kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+
+	getConsoleCursorInfoProc       = kernel32DLL.NewProc("GetConsoleCursorInfo")
+	setConsoleCursorInfoProc       = kernel32DLL.NewProc("SetConsoleCursorInfo")
+	setConsoleCursorPositionProc   = kernel32DLL.NewProc("SetConsoleCursorPosition")
+	setConsoleModeProc             = kernel32DLL.NewProc("SetConsoleMode")
+	getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+	setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
+	scrollConsoleScreenBufferProc  = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
+	setConsoleTextAttributeProc    = kernel32DLL.NewProc("SetConsoleTextAttribute")
+	setConsoleWindowInfoProc       = kernel32DLL.NewProc("SetConsoleWindowInfo")
+	writeConsoleOutputProc         = kernel32DLL.NewProc("WriteConsoleOutputW")
+	readConsoleInputProc           = kernel32DLL.NewProc("ReadConsoleInputW")
+	waitForSingleObjectProc        = kernel32DLL.NewProc("WaitForSingleObject")
+)
+
+// Windows Console constants
+const (
+	// Console modes
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+	ENABLE_PROCESSED_INPUT        = 0x0001
+	ENABLE_LINE_INPUT             = 0x0002
+	ENABLE_ECHO_INPUT             = 0x0004
+	ENABLE_WINDOW_INPUT           = 0x0008
+	ENABLE_MOUSE_INPUT            = 0x0010
+	ENABLE_INSERT_MODE            = 0x0020
+	ENABLE_QUICK_EDIT_MODE        = 0x0040
+	ENABLE_EXTENDED_FLAGS         = 0x0080
+	ENABLE_AUTO_POSITION          = 0x0100
+	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
+
+	ENABLE_PROCESSED_OUTPUT            = 0x0001
+	ENABLE_WRAP_AT_EOL_OUTPUT          = 0x0002
+	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+	DISABLE_NEWLINE_AUTO_RETURN        = 0x0008
+	ENABLE_LVB_GRID_WORLDWIDE          = 0x0010
+
+	// Character attributes
+	// Note:
+	// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
+	//    Clearing all foreground or background colors results in black; setting all creates white.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
+	FOREGROUND_BLUE      uint16 = 0x0001
+	FOREGROUND_GREEN     uint16 = 0x0002
+	FOREGROUND_RED       uint16 = 0x0004
+	FOREGROUND_INTENSITY uint16 = 0x0008
+	FOREGROUND_MASK      uint16 = 0x000F
+
+	BACKGROUND_BLUE      uint16 = 0x0010
+	BACKGROUND_GREEN     uint16 = 0x0020
+	BACKGROUND_RED       uint16 = 0x0040
+	BACKGROUND_INTENSITY uint16 = 0x0080
+	BACKGROUND_MASK      uint16 = 0x00F0
+
+	COMMON_LVB_MASK          uint16 = 0xFF00
+	COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
+	COMMON_LVB_UNDERSCORE    uint16 = 0x8000
+
+	// Input event types
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	KEY_EVENT                = 0x0001
+	MOUSE_EVENT              = 0x0002
+	WINDOW_BUFFER_SIZE_EVENT = 0x0004
+	MENU_EVENT               = 0x0008
+	FOCUS_EVENT              = 0x0010
+
+	// WaitForSingleObject return codes
+	WAIT_ABANDONED = 0x00000080
+	WAIT_FAILED    = 0xFFFFFFFF
+	WAIT_SIGNALED  = 0x00000000
+	WAIT_TIMEOUT   = 0x00000102
+
+	// WaitForSingleObject wait duration
+	WAIT_INFINITE       = 0xFFFFFFFF
+	WAIT_ONE_SECOND     = 1000
+	WAIT_HALF_SECOND    = 500
+	WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+	CHAR_INFO struct {
+		UnicodeChar uint16
+		Attributes  uint16
+	}
+
+	CONSOLE_CURSOR_INFO struct {
+		Size    uint32
+		Visible int32
+	}
+
+	CONSOLE_SCREEN_BUFFER_INFO struct {
+		Size              COORD
+		CursorPosition    COORD
+		Attributes        uint16
+		Window            SMALL_RECT
+		MaximumWindowSize COORD
+	}
+
+	COORD struct {
+		X int16
+		Y int16
+	}
+
+	SMALL_RECT struct {
+		Left   int16
+		Top    int16
+		Right  int16
+		Bottom int16
+	}
+
+	// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	INPUT_RECORD struct {
+		EventType uint16
+		KeyEvent  KEY_EVENT_RECORD
+	}
+
+	KEY_EVENT_RECORD struct {
+		KeyDown         int32
+		RepeatCount     uint16
+		VirtualKeyCode  uint16
+		VirtualScanCode uint16
+		UnicodeChar     uint16
+		ControlKeyState uint32
+	}
+
+	WINDOW_BUFFER_SIZE struct {
+		Size COORD
+	}
+)
+
+// boolToBOOL converts a Go bool into a Windows int32.
+func boolToBOOL(f bool) int32 {
+	if f {
+		return int32(1)
+	} else {
+		return int32(0)
+	}
+}
+
+// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorInfo sets the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
+func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorPosition sets the location of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
+func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleMode gets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+	err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+	return mode, err
+}
+
+// SetConsoleMode sets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. +func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. +func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 000000000..cbec8f728 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. 
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 000000000..3ee06ea72 --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 000000000..244b5fa25 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, 
coordEnd.Y = xEnd, yEnd-1
+
+		err = h.clearRect(attributes, coordStart, coordEnd)
+		if err != nil {
+			return err
+		}
+
+		xCurrent = 0
+		yCurrent = yEnd
+	}
+
+	// Clear remaining partial ending line
+	coordStart.X, coordStart.Y = xCurrent, yCurrent
+	coordEnd.X, coordEnd.Y = xEnd, yEnd
+
+	err = h.clearRect(attributes, coordStart, coordEnd)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
+	region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
+	width := toCoord.X - fromCoord.X + 1
+	height := toCoord.Y - fromCoord.Y + 1
+	size := uint32(width) * uint32(height)
+
+	if size <= 0 {
+		return nil
+	}
+
+	buffer := make([]CHAR_INFO, size)
+
+	char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
+	for i := 0; i < int(size); i++ {
+		buffer[i] = char
+	}
+
+	err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
new file mode 100644
index 000000000..2d27fa1d0
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
@@ -0,0 +1,118 @@
+// +build windows
+
+package winterm
+
+// effectiveSr gets the current effective scroll region in buffer coordinates
+func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
+	top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
+	bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
+	if top >= bottom {
+		top = window.Top
+		bottom = window.Bottom
+	}
+	return scrollRegion{top: top, bottom: bottom}
+}
+
+func (h *windowsAnsiEventHandler) scrollUp(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	return h.scroll(param, sr, info)
+}
+
+func (h *windowsAnsiEventHandler) scrollDown(param int) error {
+	return h.scrollUp(-param)
+}
+
+func (h *windowsAnsiEventHandler) deleteLines(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	start := info.CursorPosition.Y
+	sr := h.effectiveSr(info.Window)
+	// Lines cannot be inserted or deleted outside the scrolling region.
+	if start >= sr.top && start <= sr.bottom {
+		sr.top = start
+		return h.scroll(param, sr, info)
+	} else {
+		return nil
+	}
+}
+
+func (h *windowsAnsiEventHandler) insertLines(param int) error {
+	return h.deleteLines(-param)
+}
+
+// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
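+// A worked example (illustrative numbers, not upstream text): for
+// sr = {top: 5, bottom: 20} and param = 3 (scroll up three lines),
+// destOrigin.Y is 2, so rows 8..20 land on rows 5..17 and the vacated
+// rows 18..20 are filled with blanks in the current attributes.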
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
+	h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
+
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    sr.top,
+		Bottom: sr.bottom,
+		Left:   0,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: 0,
+		Y: sr.top - int16(param),
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	return h.scrollLine(param, info.CursorPosition, info)
+}
+
+func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
+	return h.deleteCharacters(-param)
+}
+
+// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
+func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    position.Y,
+		Bottom: position.Y,
+		Left:   position.X,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: position.X - int16(columns),
+		Y: position.Y,
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
new file mode 100644
index 000000000..afa7635d7
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package winterm
+
+// addInRange increments a value by the passed quantity while ensuring the values
+// always remain within the supplied min / max range.
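+// For example, addInRange(100, 2, 100, 124) returns 102, while
+// addInRange(100, 40, 100, 124) is clamped to the maximum, 124.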
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 000000000..2d40fb75a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,743 @@ +// +build windows + +package winterm + +import ( + "bytes" + "log" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" +) + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD + logf func(string, ...interface{}) +} + +type Option func(*windowsAnsiEventHandler) + +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f + } +} + +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + h := &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + h.logf("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. 
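+		// Only the carriage return (when requested) is honored below; the
+		// line feed itself is dropped.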
+ h.logf("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + h.logf("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + h.logf("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
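+		// When simulateLF reports handled == false, Windows can perform
+		// the line feed itself, so the raw LF byte is written out below.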
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + h.logf("set window failed: %v", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
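+	// As with ED, erasing is delegated to clearRange using the currently
+	// active text attributes.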
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.logf("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + h.logf("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
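+	// For example, an incoming `ESC [ 5 ; 20 r` stores sr.top = 4 and
+	// sr.bottom = 19.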
+	h.clearWrap()
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("RI: []")
+	h.clearWrap()
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	if info.CursorPosition.Y == sr.top {
+		return h.scrollDown(1)
+	}
+
+	return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+	h.logf("IND: []")
+	return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+	h.curInfo = nil
+	if h.buffer.Len() > 0 {
+		h.logf("Flush: [%s]", h.buffer.Bytes())
+		if _, err := h.buffer.WriteTo(h.file); err != nil {
+			return err
+		}
+	}
+
+	if h.wrapNext && !h.drewMarginByte {
+		h.logf("Flush: drawing margin byte '%c'", h.marginByte)
+
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return err
+		}
+
+		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+		size := COORD{1, 1}
+		position := COORD{0, 0}
+		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+			return err
+		}
+		h.drewMarginByte = true
+	}
+	return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+	if h.curInfo == nil {
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return COORD{}, nil, err
+		}
+		h.curInfo = info
+		h.curPos = info.CursorPosition
+	}
+	return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+	if h.curInfo == nil {
+		panic("failed to call getCurrentInfo before calling updatePos")
+	}
+	h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+	h.wrapNext = false
+	h.drewMarginByte = false
+}
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
index 0cd380037..fe79e3add 100644
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -1,5 +1,2 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
-toml.test
+/toml.test
+/toml-test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
deleted file mode 100644
index 8b8afc4f0..000000000
--- a/vendor/github.com/BurntSushi/toml/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go:
-  - 1.1
-  - 1.2
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
-  - tip
-install:
-  - go install ./...
- - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 6efcfd0ce..000000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) - diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848d3..000000000 --- a/vendor/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 7c1b37ecc..3651cfa96 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,46 +1,26 @@ -## TOML parser and encoder for Go with reflection - TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) - -Spec: https://github.com/toml-lang/toml +reflection interface similar to Go's standard library `json` and `xml` packages. -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). -Documentation: https://godoc.org/github.com/BurntSushi/toml +Documentation: https://godocs.io/github.com/BurntSushi/toml -Installation: +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). -```bash -go get github.com/BurntSushi/toml -``` +This library requires Go 1.13 or newer; add it to your go.mod with: -Try the toml validator: + % go get github.com/BurntSushi/toml@latest -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` +It also comes with a TOML validator CLI tool: -[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. + % go install github.com/BurntSushi/toml/cmd/tomlv@latest + % tomlv some-toml-file.toml ### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. 
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
+For the simplest example, consider some TOML file as just a list of keys and
+values:
 
 ```toml
 Age = 25
@@ -50,29 +30,23 @@ Perfection = [ 6, 28, 496, 8128 ]
 DOB = 1987-07-05T05:45:00Z
 ```
 
-Which could be defined in Go as:
+Which can be decoded with:
 
 ```go
 type Config struct {
-	Age        int
-	Cats       []string
-	Pi         float64
-	Perfection []int
-	DOB        time.Time // requires `import time`
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time
 }
-```
-
-And then decoded with:
-
-```go
 var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
-	// handle error
-}
+_, err := toml.Decode(tomlData, &conf)
 ```
 
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
+You can also use struct tags if your struct field name doesn't map to a TOML key
+value directly:
 
 ```toml
 some_key_NAME = "wat"
@@ -80,139 +54,67 @@ some_key_NAME = "wat"
 
 ```go
 type TOML struct {
-	ObscureKey string `toml:"some_key_NAME"`
+	ObscureKey string `toml:"some_key_NAME"`
 }
 ```
 
-### Using the `encoding.TextUnmarshaler` interface
+Beware that like other decoders **only exported fields** are considered when
+encoding and decoding; private fields are silently ignored.
 
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
+### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
+
+Here's an example that automatically parses values in a `mail.Address`:
 
 ```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
-	Name     string
-	Duration duration
-}
-type songs struct {
-	Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
-	log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
-	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
+contacts = [
+	"Donald Duck <donald@duckburg.com>",
+	"Scrooge McDuck <scrooge@duckburg.com>",
+]
 ```
 
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
+Can be decoded with:
 
 ```go
-type duration struct {
-	time.Duration
+// Create address type which satisfies the encoding.TextUnmarshaler interface.
+type address struct {
+	*mail.Address
 }
 
-func (d *duration) UnmarshalText(text []byte) error {
+func (a *address) UnmarshalText(text []byte) error {
 	var err error
-	d.Duration, err = time.ParseDuration(string(text))
+	a.Address, err = mail.ParseAddress(string(text))
 	return err
 }
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
-	# You can indent as you please. Tabs or spaces. TOML don't care.
-	[servers.alpha]
-	ip = "10.0.0.1"
-	dc = "eqdc10"
-
-	[servers.beta]
-	ip = "10.0.0.2"
-	dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
-	"alpha",
-	"omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
-	Title   string
-	Owner   ownerInfo
-	DB      database `toml:"database"`
-	Servers map[string]server
-	Clients clients
-}
-type ownerInfo struct {
-	Name string
-	Org  string `toml:"organization"`
-	Bio  string
-	DOB  time.Time
-}
-
-type database struct {
-	Server  string
-	Ports   []int
-	ConnMax int `toml:"connection_max"`
-	Enabled bool
-}
-
-type server struct {
-	IP string
-	DC string
-}
-
-type clients struct {
-	Data  [][]interface{}
-	Hosts []string
+// Decode it.
+func decode() {
+	blob := `
+	contacts = [
+		"Donald Duck <donald@duckburg.com>",
+		"Scrooge McDuck <scrooge@duckburg.com>",
+	]
+	`
+
+	var contacts struct {
+		Contacts []address
+	}
+
+	_, err := toml.Decode(blob, &contacts)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, c := range contacts.Contacts {
+		fmt.Printf("%#v\n", c.Address)
+	}
+
+	// Output:
+	// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
+	// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
 }
 ```
 
-Note that a case insensitive match will be tried if an exact match can't be
-found.
+To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
+a similar way.
 
-A working example of the above can be found in `_examples/example.{go,toml}`.
+### More complex usage
+
+See the [`_example/`](/_example) directory for a more complex example.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index b0fd51d5b..4d38f3bfc 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -1,146 +1,188 @@
 package toml
 
 import (
+	"bytes"
+	"encoding"
+	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"math"
+	"os"
 	"reflect"
+	"strconv"
 	"strings"
 	"time"
 )
 
-func e(format string, args ...interface{}) error {
-	return fmt.Errorf("toml: "+format, args...)
-}
-
 // Unmarshaler is the interface implemented by objects that can unmarshal a
 // TOML description of themselves.
 type Unmarshaler interface {
 	UnmarshalTOML(interface{}) error
 }
 
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
-	_, err := Decode(string(p), v)
+// Unmarshal decodes the contents of data in TOML format into a pointer v.
+//
+// See [Decoder] for a description of the decoding process.
+func Unmarshal(data []byte, v interface{}) error {
+	_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
 	return err
 }
 
+// Decode the TOML data in to the pointer v.
+//
+// See [Decoder] for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+	return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile reads the contents of a file and decodes it with [Decode].
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+	fp, err := os.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
+
 // Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
 //
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+// This type can be used for any value, which will cause decoding to be delayed. +// You can use [PrimitiveDecode] to "manually" decode these values. // -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. // -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. type Primitive struct { undecoded interface{} context Key } -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 +) -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) +// Decoder decodes TOML data. // -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. // -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) +// TOML table arrays correspond to either a slice of structs or a slice of maps. // -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. // -// TOML datetimes correspond to Go `time.Time` values. +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. // -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. // -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. 
See the -// Unmarshaler example for a demonstration with time duration strings. +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// email addresses. // -// Key mapping +// # Key mapping // -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. // -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. // -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. +func (dec *Decoder) Decode(v interface{}) (MetaData, error) { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { - return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) } if rv.IsNil() { - return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) - } - p, err := parse(data) - if err != nil { - return MetaData{}, err + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText. 
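+	// For example, this admits &struct{ Title string }{},
+	// &map[string]interface{}{}, or new(interface{}), while a plain *int
+	// is rejected with the error below.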
+ rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) } - return md, md.unify(p.mapping, indirect(rv)) -} -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. -func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) if err != nil { return MetaData{}, err } - return Decode(string(bs), v) -} -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) + p, err := parse(string(data)) if err != nil { return MetaData{}, err } - return Decode(string(bs), v) + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) } // unify performs a sort of type unification based on the structure of `rv`, @@ -149,9 +191,9 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == primitiveType { // Save the undecoded data and the key context into the primitive // value. context := make(Key, len(md.context)) @@ -163,36 +205,24 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return nil } - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + return v.UnmarshalTOML(data) } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. 
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { + if v, ok := rvi.(encoding.TextUnmarshaler); ok { return md.unifyText(data, v) } - // BUG(burntsushi) + + // TODO: // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). k := rv.Kind() - // laziness if k >= reflect.Int && k <= reflect.Uint64 { return md.unifyInt(data, rv) } @@ -218,17 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - // we only support empty interfaces. - if rv.NumMethod() > 0 { - return e("unsupported type %s", rv.Type()) + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: + case reflect.Float32, reflect.Float64: return md.unifyFloat64(data, rv) } - return e("unsupported type %s", rv.Kind()) + return md.e("unsupported type %s", rv.Kind()) } func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { @@ -237,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { if mapping == nil { return nil } - return e("type mismatch for %s: expected table but found %T", + return md.e("type mismatch for %s: expected table but found %T", rv.Type().String(), mapping) } @@ -259,17 +286,18 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { for _, i := range f.index { subv = indirect(subv.Field(i)) } + if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true + md.decoded[md.context.add(key).String()] = struct{}{} md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { + + err := md.unify(datum, subv) + if err != nil { return err } md.context = md.context[0 : len(md.context)-1] } else if f.name != "" { - // Bad user! No soup for you! 
- return e("cannot write unexported field %s.%s", - rv.Type().String(), f.name) + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) } } } @@ -277,28 +305,43 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { } func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) + } + tmap, ok := mapping.(map[string]interface{}) if !ok { if tmap == nil { return nil } - return badtype("map", mapping) + return md.badtype("map", mapping) } if rv.IsNil() { rv.Set(reflect.MakeMap(rv.Type())) } for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true + md.decoded[md.context.add(k).String()] = struct{}{} md.context = append(md.context, k) - rvkey := indirect(reflect.New(rv.Type().Key())) rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { + + err := md.unify(v, indirect(rvval)) + if err != nil { return err } md.context = md.context[0 : len(md.context)-1] - rvkey.SetString(k) + rvkey := indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + rv.SetMapIndex(rvkey, rvval) } return nil @@ -310,12 +353,10 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { if !datav.IsValid() { return nil } - return badtype("slice", data) + return md.badtype("slice", data) } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) } return md.unifySliceArray(datav, rv) } @@ -326,7 +367,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { if !datav.IsValid() { return nil } - return badtype("slice", data) + return md.badtype("slice", data) } n := datav.Len() if rv.IsNil() || rv.Cap() < n { @@ -337,37 +378,45 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { } func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { return err } } return nil } -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } return nil } - return badtype("time.Time", data) -} -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { if s, ok := data.(string); ok { rv.SetString(s) return nil } - return badtype("string", data) + return md.badtype("string", data) } func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + rvk := rv.Kind() 
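+	// rvk decides below whether the stricter float32 range check applies.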
+ if num, ok := data.(float64); ok { - switch rv.Kind() { + switch rvk { case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } fallthrough case reflect.Float64: rv.SetFloat(num) @@ -376,54 +425,60 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { } return nil } - return badtype("float", data) + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) } func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("value %d is out of range for int8", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("value %d is out of range for int16", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("value %d is out of range for int32", num) - } + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. + if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. 
- case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("value %d is out of range for uint8", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("value %d is out of range for uint16", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("value %d is out of range for uint32", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") + rv.SetInt(int64(dur)) + return nil } - return nil } - return badtype("integer", data) + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil } func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { @@ -431,7 +486,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { rv.SetBool(b) return nil } - return badtype("boolean", data) + return md.badtype("boolean", data) } func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { @@ -439,10 +494,16 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { return nil } -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { var s string switch sdata := data.(type) { - case TextMarshaler: + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: text, err := sdata.MarshalText() if err != nil { return err @@ -459,7 +520,7 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { case float64: s = fmt.Sprintf("%f", sdata) default: - return badtype("primitive (string-like)", data) + return md.badtype("primitive (string-like)", data) } if err := v.UnmarshalText([]byte(s)); err != nil { return err @@ -467,22 +528,54 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { return nil } +func (md *MetaData) badtype(dst string, data interface{}) error { + return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...interface{}) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + // rvalue returns a reflect.Value of `v`. All pointers are resolved. 
func rvalue(v interface{}) reflect.Value { return indirect(reflect.ValueOf(v)) } // indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. // -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). func indirect(v reflect.Value) reflect.Value { if v.Kind() != reflect.Ptr { if v.CanSet() { pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { return pv } } @@ -498,12 +591,12 @@ func isUnifiable(rv reflect.Value) bool { if rv.CanSet() { return true } - if _, ok := rv.Interface().(TextUnmarshaler); ok { + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { return true } return false } - -func badtype(expected string, data interface{}) error { - return e("cannot load TOML value of type %T into a Go %s", data, expected) -} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 000000000..086d0b686 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,19 @@ +//go:build go1.16 +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 000000000..b9e309717 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,29 @@ +package toml + +import ( + "encoding" + "io" +) + +// TextMarshaler is an alias for encoding.TextMarshaler. +// +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go index b371f396e..81a7c0fe9 100644 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -1,27 +1,11 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. 
There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
+// Package toml implements decoding and encoding of TOML files.
+//
+// This package supports TOML v1.0.0, as specified at https://toml.io
+//
+// There is also support for delaying decoding with the Primitive type, and
+// querying the set of keys in a TOML document with the MetaData type.
+//
+// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+// and can be used to verify if a TOML document is valid. It can also be used to
+// print the type of each key.
 package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index d905c21a2..9cd25d757 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -2,57 +2,127 @@ package toml
 
 import (
 	"bufio"
+	"encoding"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"reflect"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/BurntSushi/toml/internal"
 )
 
 type tomlEncodeError struct{ error }
 
 var (
-	errArrayMixedElementTypes = errors.New(
-		"toml: cannot encode array with mixed element types")
-	errArrayNilElement = errors.New(
-		"toml: cannot encode array with nil element")
-	errNonString = errors.New(
-		"toml: cannot encode a map with non-string key type")
-	errAnonNonStruct = errors.New(
-		"toml: cannot encode an anonymous field that is not a struct")
-	errArrayNoTable = errors.New(
-		"toml: TOML array element cannot contain a table")
-	errNoKey = errors.New(
-		"toml: top-level values must be Go maps or structs")
-	errAnything = errors.New("") // used in testing
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
 )
 
-var quotedReplacer = strings.NewReplacer(
-	"\t", "\\t",
-	"\n", "\\n",
-	"\r", "\\r",
+var dblQuotedReplacer = strings.NewReplacer(
 	"\"", "\\\"",
 	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+ "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, ) -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Encoder encodes a Go to a TOML document. +// +// The mapping between Go values and TOML values should be precisely the same as +// for [Decode]. +// +// time.Time is encoded as a RFC 3339 string, and time.Duration as its string +// representation. +// +// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to +// encoding the value as custom TOML. // -// The indentation level can be controlled with the Indent field. +// If you want to write arbitrary binary data then you will need to use +// something like base64 since TOML does not have any binary types. +// +// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes +// are encoded first. +// +// Go maps will be sorted alphabetically by key for deterministic output. +// +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and string with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// +// Encoding Go values without a corresponding TOML representation will return an +// error. Examples of this includes maps with non-string keys, slices with nil +// elements, embedded non-struct types, and nested slices containing maps or +// structs. (e.g. [][]map[string]string is not allowed but []map[string]string +// is okay, as is []map[string][]string). +// +// NOTE: only exported keys are encoded due to the use of reflection. Unexported +// keys are silently discarded. type Encoder struct { - // A single indentation level. By default it is two spaces. + // String to use for a single indentation level; default is two spaces. Indent string - // hasWritten is whether we have written any output to w yet. - hasWritten bool w *bufio.Writer + hasWritten bool // written any output to w yet? } -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. +// NewEncoder create a new Encoder. func NewEncoder(w io.Writer) *Encoder { return &Encoder{ w: bufio.NewWriter(w), @@ -60,32 +130,14 @@ func NewEncoder(w io.Writer) *Encoder { } } -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. -// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. 
+// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
 //
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
 func (enc *Encoder) Encode(v interface{}) error {
 	rv := eindirect(reflect.ValueOf(v))
-	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+	err := enc.safeEncode(Key([]string{}), rv)
+	if err != nil {
 		return err
 	}
 	return enc.w.Flush()
@@ -106,13 +158,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
 }
 
 func (enc *Encoder) encode(key Key, rv reflect.Value) {
-	// Special case. Time needs to be in ISO8601 format.
-	// Special case. If we can marshal the type to text, then we used that.
-	// Basically, this prevents the encoder for handling these types as
-	// generic structs (or whatever the underlying type of a TextMarshaler is).
-	switch rv.Interface().(type) {
-	case time.Time, TextMarshaler:
-		enc.keyEqElement(key, rv)
+	// If we can marshal the type to text, then we use that. This prevents the
+	// encoder from handling these types as generic structs (or whatever the
+	// underlying type of a TextMarshaler is).
+	switch {
+	case isMarshaler(rv):
+		enc.writeKeyValue(key, rv, false)
+		return
+	case rv.Type() == primitiveType: // TODO: #76 would make this superfluous once implemented.
+		enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
 		return
 	}
 
@@ -123,12 +177,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
 		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
 		reflect.Uint64,
 		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
-		enc.keyEqElement(key, rv)
+		enc.writeKeyValue(key, rv, false)
 	case reflect.Array, reflect.Slice:
 		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
 			enc.eArrayOfTables(key, rv)
 		} else {
-			enc.keyEqElement(key, rv)
+			enc.writeKeyValue(key, rv, false)
 		}
 	case reflect.Interface:
 		if rv.IsNil() {
@@ -148,55 +202,114 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
 	case reflect.Struct:
 		enc.eTable(key, rv)
 	default:
-		panic(e("unsupported type for key '%s': %s", key, k))
+		encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
 	}
 }
 
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
+// eElement encodes any value that can be an array element.
 func (enc *Encoder) eElement(rv reflect.Value) {
 	switch v := rv.Interface().(type) {
-	case time.Time:
-		// Special case time.Time as a primitive. Has to come before
-		// TextMarshaler below because time.Time implements
-		// encoding.TextMarshaler, but we need to always use UTC.
-		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
+	case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
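+		// The internal.Local* sentinel locations (see internal/tz.go) mark
+		// datetimes that were parsed without a timezone offset, so that they
+		// can be written back out without one.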
+ format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. - if s, err := v.MarshalText(); err != nil { + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { encPanic(err) - } else { - enc.writeQuoted(string(s)) } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. + enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) } + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) case reflect.Bool: enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: enc.wf(strconv.FormatUint(rv.Uint(), 10)) case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } case reflect.Array, reflect.Slice: enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) case reflect.Interface: enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) default: - panic(e("unexpected primitive type: %s", rv.Kind())) + encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) } } -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. 
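+//
+// For example, "3" becomes "3.0", while "3.14" is returned unchanged.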
func floatAddDecimal(fstr string) string { if !strings.Contains(fstr, ".") { return fstr + ".0" @@ -205,14 +318,14 @@ func floatAddDecimal(fstr string) string { } func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) } func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() enc.wf("[") for i := 0; i < length; i++ { - elem := rv.Index(i) + elem := eindirect(rv.Index(i)) enc.eElement(elem) if i != length-1 { enc.wf(", ") @@ -226,44 +339,43 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { encPanic(errNoKey) } for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) + trv := eindirect(rv.Index(i)) if isNil(trv) { continue } - panicIfInvalidKey(key) enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.wf("%s[[%s]]", enc.indentStr(key), key) enc.newline() - enc.eMapOrStruct(key, trv) + enc.eMapOrStruct(key, trv, false) } } func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) if len(key) == 1 { // Output an extra newline between top-level tables. // (The newline isn't written if nothing else has been written though.) enc.newline() } if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.wf("%s[%s]", enc.indentStr(key), key) enc.newline() } - enc.eMapOrStruct(key, rv) + enc.eMapOrStruct(key, rv, false) } -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { case reflect.Map: - enc.eMap(key, rv) + enc.eMap(key, rv, inline) case reflect.Struct: - enc.eStruct(key, rv) + enc.eStruct(key, rv, inline) default: + // Should never happen? panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) } } -func (enc *Encoder) eMap(key Key, rv reflect.Value) { +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { rt := rv.Type() if rt.Key().Kind() != reflect.String { encPanic(errNonString) @@ -274,68 +386,100 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) { var mapKeysDirect, mapKeysSub []string for _, mapKey := range rv.MapKeys() { k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { mapKeysSub = append(mapKeysSub, k) } else { mapKeysDirect = append(mapKeysDirect, k) } } - var writeMapKeys = func(mapKeys []string) { + var writeMapKeys = func(mapKeys []string, trailC bool) { sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. 
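+		// Inline tables write their "k = v" pairs separated by ", " on one
+		// line; regular tables recurse through enc.encode with the key
+		// extended by the map key.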
+		for i, mapKey := range mapKeys {
+			val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
+			if isNil(val) {
 				continue
 			}
-			enc.encode(key.add(mapKey), mrv)
+
+			if inline {
+				enc.writeKeyValue(Key{mapKey}, val, true)
+				if trailC || i != len(mapKeys)-1 {
+					enc.wf(", ")
+				}
+			} else {
+				enc.encode(key.add(mapKey), val)
+			}
 		}
 	}
-	writeMapKeys(mapKeysDirect)
-	writeMapKeys(mapKeysSub)
+
+	if inline {
+		enc.wf("{")
+	}
+	writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+	writeMapKeys(mapKeysSub, false)
+	if inline {
+		enc.wf("}")
+	}
 }
 
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+const is32Bit = (32 << (^uint(0) >> 63)) == 32
+
+func pointerTo(t reflect.Type) reflect.Type {
+	if t.Kind() == reflect.Ptr {
+		return pointerTo(t.Elem())
+	}
+	return t
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
 	// Write keys for fields directly under this key first, because if we write
-	// a field that creates a new table, then all keys under it will be in that
+	// a field that creates a new table then all keys under it will be in that
 	// table (not the one we're writing here).
-	rt := rv.Type()
-	var fieldsDirect, fieldsSub [][]int
-	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+	//
+	// Fields is a [][]int: for fieldsDirect this always has one entry (the
+	// struct index). For fieldsSub it contains two entries: the parent field
+	// index from rv, and the field indexes for the fields of the sub.
+	var (
+		rt                      = rv.Type()
+		fieldsDirect, fieldsSub [][]int
+		addFields               func(rt reflect.Type, rv reflect.Value, start []int)
+	)
 	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
 		for i := 0; i < rt.NumField(); i++ {
 			f := rt.Field(i)
-			// skip unexported fields
-			if f.PkgPath != "" && !f.Anonymous {
+			isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
+			if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
+				continue
+			}
+			opts := getOptions(f.Tag)
+			if opts.skip {
 				continue
 			}
-			frv := rv.Field(i)
-			if f.Anonymous {
-				t := f.Type
-				switch t.Kind() {
-				case reflect.Struct:
-					// Treat anonymous struct fields with
-					// tag names as though they are not
-					// anonymous, like encoding/json does.
-					if getOptions(f.Tag).name == "" {
-						addFields(t, frv, f.Index)
-						continue
-					}
-				case reflect.Ptr:
-					if t.Elem().Kind() == reflect.Struct &&
-						getOptions(f.Tag).name == "" {
-						if !frv.IsNil() {
-							addFields(t.Elem(), frv.Elem(), f.Index)
-						}
-						continue
-					}
-					// Fall through to the normal field encoding logic below
-					// for non-struct anonymous fields.
+
+			frv := eindirect(rv.Field(i))
+
+			if is32Bit {
+				// Copy so it works correctly on 32-bit archs; not clear why this
+				// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
+				// This also works fine on 64-bit, but 32-bit archs are somewhat
+				// rare and this is a wee bit faster.
+				copyStart := make([]int, len(start))
+				copy(copyStart, start)
+				start = copyStart
+			}
+
+			// Treat anonymous struct fields with tag names as though they are
+			// not anonymous, like encoding/json does.
+			//
+			// Non-struct anonymous fields use the normal encoding logic.
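+			//
+			// For example, an embedded Base struct has its fields written
+			// into the parent table, unless the field is given an explicit
+			// toml tag name.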
+ if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue } } - if typeIsHash(tomlTypeOfGo(frv)) { + if typeIsTable(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { fieldsDirect = append(fieldsDirect, append(start, f.Index...)) @@ -344,48 +488,81 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) { } addFields(rt, rv, nil) - var writeFields = func(fields [][]int) { + writeFields := func(fields [][]int) { for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { continue } - opts := getOptions(sft.Tag) - if opts.skip { + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. continue } - keyName := sft.Name + + keyName := fieldType.Name if opts.name != "" { keyName = opts.name } - if opts.omitempty && isEmpty(sf) { - continue - } - if opts.omitzero && isZero(sf) { + + if opts.omitzero && isZero(fieldVal) { continue } - enc.encode(key.add(keyName), sf) + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } } } + + if inline { + enc.wf("{") + } writeFields(fieldsDirect) writeFields(fieldsSub) + if inline { + enc.wf("}") + } } -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. func tomlTypeOfGo(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() { return nil } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + switch rv.Kind() { case reflect.Bool: return tomlBool @@ -397,7 +574,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { case reflect.Float32, reflect.Float64: return tomlFloat case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { + if isTableArray(rv) { return tomlArrayHash } return tomlArray @@ -407,54 +584,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { return tomlString case reflect.Map: return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") } } -// tomlArrayType returns the element type of a TOML array. 
The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slize). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
-	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
-		return nil
-	}
-	firstType := tomlTypeOfGo(rv.Index(0))
-	if firstType == nil {
-		encPanic(errArrayNilElement)
+func isMarshaler(rv reflect.Value) bool {
+	return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
+}
+
+// isTableArray reports if all entries in the array or slice are tables.
+func isTableArray(arr reflect.Value) bool {
+	if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
+		return false
 	}
-	rvlen := rv.Len()
-	for i := 1; i < rvlen; i++ {
-		elem := rv.Index(i)
-		switch elemType := tomlTypeOfGo(elem); {
-		case elemType == nil:
+	ret := true
+	for i := 0; i < arr.Len(); i++ {
+		tt := tomlTypeOfGo(eindirect(arr.Index(i)))
+		// Don't allow nil.
+		if tt == nil {
 			encPanic(errArrayNilElement)
-		case !typeEqual(firstType, elemType):
-			encPanic(errArrayMixedElementTypes)
 		}
-	}
-	// If we have a nested array, then we must make sure that the nested
-	// array contains ONLY primitives.
-	// This checks arbitrarily nested arrays.
-	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
-		nest := tomlArrayType(eindirect(rv.Index(0)))
-		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
-			encPanic(errArrayNoTable)
+
+		if ret && !typeEqual(tomlHash, tt) {
+			ret = false
 		}
 	}
-	return firstType
+	return ret
 }
 
 type tagOptions struct {
@@ -499,8 +657,26 @@ func isEmpty(rv reflect.Value) bool {
 	switch rv.Kind() {
 	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
 		return rv.Len() == 0
+	case reflect.Struct:
+		if rv.Type().Comparable() {
+			return reflect.Zero(rv.Type()).Interface() == rv.Interface()
+		}
+		// Need to also check if all the fields are empty, otherwise something
+		// like this with uncomparable types will always return true:
+		//
+		//   type a struct{ field b }
+		//   type b struct{ s []string }
+		//   s := a{field: b{s: []string{"AAA"}}}
+		for i := 0; i < rv.NumField(); i++ {
+			if !isEmpty(rv.Field(i)) {
+				return false
+			}
+		}
+		return true
 	case reflect.Bool:
 		return !rv.Bool()
+	case reflect.Ptr:
+		return rv.IsNil()
 	}
 	return false
 }
@@ -511,18 +687,34 @@ func (enc *Encoder) newline() {
 	}
 }
 
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+// Write a key/value pair:
+//
+//	key = <value>
+//
+// This is also used for "k = v" in inline tables; so something like this will
+// be written in three calls:
+//
+//	┌───────────────────┐
+//	│ ┌───┐ ┌────┐│
+//	v v v v vv
+//	key = {k = 1, k2 = 2}
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	/// Marshaler used on top-level document; call eElement() to just call
+	/// Marshal{TOML,Text}.
 	if len(key) == 0 {
-		encPanic(errNoKey)
+		enc.eElement(val)
+		return
 	}
-	panicIfInvalidKey(key)
 	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
 	enc.eElement(val)
-	enc.newline()
+	if !inline {
+		enc.newline()
+	}
 }
 
 func (enc *Encoder) wf(format string, v ...interface{}) {
-	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+	_, err := fmt.Fprintf(enc.w, format, v...)
+ if err != nil { encPanic(err) } enc.hasWritten = true @@ -536,13 +728,25 @@ func encPanic(err error) { panic(tomlEncodeError{err}) } +// Resolve any level of pointers to the actual value (e.g. **string → string). func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. + if pv := v.Addr(); isMarshaler(pv) { + return pv + } + } return v } + + if v.IsNil() { + return v + } + + return eindirect(v.Elem()) } func isNil(rv reflect.Value) bool { @@ -553,16 +757,3 @@ func isNil(rv reflect.Value) bool { return false } } - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd60..000000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d04..000000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 000000000..efd68865b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,279 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax such as +// invalid syntax, duplicate keys, etc. +// +// In addition to the error message itself, you can also print detailed location +// information with context by using [ErrorWithPosition]: +// +// toml: error: Key 'fruit' was already created and cannot be used as an array. 
+//
+// At line 4, column 2-7:
+//
+//	2 | fruit = []
+//	3 |
+//	4 | [[fruit]] # Not allowed
+//	    ^^^^^
+//
+// [ErrorWithUsage] can be used to print the above with some more detailed usage
+// guidance:
+//
+//	toml: error: newlines not allowed within inline tables
+//
+//	At line 1, column 18:
+//
+//	1 | x = [{ key = 42 #
+//	                    ^
+//
+//	Error help:
+//
+//	Inline tables must always be on a single line:
+//
+//	    table = {key = 42, second = 43}
+//
+//	It is invalid to split them over multiple lines like so:
+//
+//	    # INVALID
+//	    table = {
+//	        key = 42,
+//	        second = 43
+//	    }
+//
+//	Use regular tables for this:
+//
+//	    [table]
+//	    key = 42
+//	    second = 43
+type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error.
+	LastKey  string   // Last parsed key, may be blank.
+
+	// Line the error occurred.
+	//
+	// Deprecated: use [Position].
+	Line int
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition returns the error with detailed location context.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case.
+		return pe.Error()
+	}
+
+	var (
+		lines = strings.Split(pe.input, "\n")
+		col   = pe.column(lines)
+		b     = new(strings.Builder)
+	)
+
+	msg := pe.Message
+	if msg == "" {
+		msg = pe.err.Error()
+	}
+
+	// TODO: don't show control characters as literals? This may not show up
+	// well everywhere.
+
+	if pe.Position.Len == 1 {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
+			msg, pe.Position.Line, col+1)
+	} else {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
+			msg, pe.Position.Line, col, col+pe.Position.Len)
+	}
+	if pe.Position.Line > 2 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+	}
+	if pe.Position.Line > 1 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+	}
+	fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
+	fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+	return b.String()
+}
+
+// ErrorWithUsage returns the error with detailed location context and usage
+// guidance.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithUsage() string {
+	m := pe.ErrorWithPosition()
+	if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
+		lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
+		for i := range lines {
+			if lines[i] != "" {
+				lines[i] = " " + lines[i]
+			}
+		}
+		return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
+	}
+	return m
+}
+
+func (pe ParseError) column(lines []string) int {
+	var pos, col int
+	for i := range lines {
+		ll := len(lines[i]) + 1 // +1 for the removed newline
+		if pos+ll >= pe.Position.Start {
+			col = pe.Position.Start - pos
+			if col < 0 { // Should never happen, but just in case.
+				col = 0
+			}
+			break
+		}
+		pos += ll
+	}
+
+	return col
+}
+
+type (
+	errLexControl       struct{ r rune }
+	errLexEscape        struct{ r rune }
+	errLexUTF8          struct{ b byte }
+	errLexInvalidNum    struct{ v string }
+	errLexInvalidDate   struct{ v string }
+	errLexInlineTableNL struct{}
+	errLexStringNL      struct{}
+	errParseRange       struct {
+		i    interface{} // int or float
+		size string      // "int64", "uint16", etc.
+	}
+	errParseDuration struct{ d string }
+)
+
+func (e errLexControl) Error() string {
+	return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
+}
+func (e errLexControl) Usage() string { return "" }
+
+func (e errLexEscape) Error() string        { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
+func (e errLexEscape) Usage() string        { return usageEscape }
+func (e errLexUTF8) Error() string          { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
+func (e errLexUTF8) Usage() string          { return "" }
+func (e errLexInvalidNum) Error() string    { return fmt.Sprintf("invalid number: %q", e.v) }
+func (e errLexInvalidNum) Usage() string    { return "" }
+func (e errLexInvalidDate) Error() string   { return fmt.Sprintf("invalid date: %q", e.v) }
+func (e errLexInvalidDate) Usage() string   { return "" }
+func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
+func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
+func (e errLexStringNL) Error() string      { return "strings cannot contain newlines" }
+func (e errLexStringNL) Usage() string      { return usageStringNewline }
+func (e errParseRange) Error() string       { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
+func (e errParseRange) Usage() string       { return usageIntOverflow }
+func (e errParseDuration) Error() string    { return fmt.Sprintf("invalid duration: %q", e.d) }
+func (e errParseDuration) Usage() string    { return usageDuration }
+
+const usageEscape = `
+A '\' inside a "-delimited string is interpreted as an escape character.
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- write two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+	table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+	# INVALID
+	table = {
+		key = 42,
+		second = 43
+	}
+
+Use regular tables for this:
+
+	[table]
+	key = 42
+	second = 43
+`
+
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+	# INVALID
+	string = "Hello,
+	world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+	string = """Hello,
+	world!"""
+`
+
+const usageIntOverflow = `
+This number is too large; this may be an error in the TOML, but it can also be
+a bug in the program that uses an integer type that is too small.
+
+The maximum and minimum values are:
+
+	size   │ lowest         │ highest
+	───────┼────────────────┼──────────
+	int8   │ -128           │ 127
+	int16  │ -32,768        │ 32,767
+	int32  │ -2,147,483,648 │ 2,147,483,647
+	int64  │ -9.2 × 10¹⁸    │ 9.2 × 10¹⁸
+	uint8  │ 0              │ 255
+	uint16 │ 0              │ 65,535
+	uint32 │ 0              │ 4,294,967,295
+	uint64 │ 0              │ 1.8 × 10¹⁹
+
+int refers to int32 on 32-bit systems and int64 on 64-bit systems.
+`
+
+const usageDuration = `
+A duration must be as "number<unit>", without any spaces. Valid units are:
+
+	ns      nanoseconds (billionth of a second)
+	us, µs  microseconds (millionth of a second)
+	ms      milliseconds (thousandth of a second)
+	s       seconds
+	m       minutes
+	h       hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 000000000..022f15bc2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by setting these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var (
+	localOffset   = func() int { _, o := time.Now().Zone(); return o }()
+	LocalDatetime = time.FixedZone("datetime-local", localOffset)
+	LocalDate     = time.FixedZone("date-local", localOffset)
+	LocalTime     = time.FixedZone("time-local", localOffset)
+)
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index e0a742a88..3545a6ad6 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -2,6 +2,8 @@ package toml
 
 import (
 	"fmt"
+	"reflect"
+	"runtime"
 	"strings"
 	"unicode"
 	"unicode/utf8"
@@ -29,61 +31,49 @@ const (
 	itemArrayTableStart
 	itemArrayTableEnd
 	itemKeyStart
+	itemKeyEnd
 	itemCommentStart
 	itemInlineTableStart
 	itemInlineTableEnd
 )
 
-const (
-	eof = 0
-	comma = ','
-	tableStart = '['
-	tableEnd = ']'
-	arrayTableStart = '['
-	arrayTableEnd = ']'
-	tableSep = '.'
- keySep = '=' - arrayStart = '[' - arrayEnd = ']' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' - inlineTableStart = '{' - inlineTableEnd = '}' -) +const eof = 0 type stateFn func(lx *lexer) stateFn +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item - - // Allow for backing up up to three runes. - // This is necessary because TOML contains 3-rune tokens (""" and '''). - prevWidths [3]int - nprev int // how many of prevWidths are in use - // If we emit an eof, we can still back up, but it is not OK to call - // next again. - atEOF bool + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. stack []stateFn } type item struct { - typ itemType - val string - line int + typ itemType + val string + err error + pos Position } func (lx *lexer) nextItem() item { @@ -93,17 +83,19 @@ func (lx *lexer) nextItem() item { return item default: lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) } } } -func lex(input string) *lexer { +func lex(input string, tomlNext bool) *lexer { lx := &lexer{ - input: input, - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, } return lx } @@ -125,19 +117,36 @@ func (lx *lexer) current() string { return lx.input[lx.start:lx.pos] } +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} + // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
+	if lx.start > lx.pos {
+		lx.error(errLexUTF8{lx.input[lx.pos]})
+		return
+	}
+	lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
 	lx.start = lx.pos
 }
 
 func (lx *lexer) emitTrim(typ itemType) {
-	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+	lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())}
 	lx.start = lx.pos
 }
 
 func (lx *lexer) next() (r rune) {
 	if lx.atEOF {
-		panic("next called after EOF")
+		panic("BUG in lexer: next called after EOF")
 	}
 	if lx.pos >= len(lx.input) {
 		lx.atEOF = true
@@ -147,12 +156,25 @@ func (lx *lexer) next() (r rune) {
 	if lx.input[lx.pos] == '\n' {
 		lx.line++
 	}
+	lx.prevWidths[3] = lx.prevWidths[2]
 	lx.prevWidths[2] = lx.prevWidths[1]
 	lx.prevWidths[1] = lx.prevWidths[0]
-	if lx.nprev < 3 {
+	if lx.nprev < 4 {
 		lx.nprev++
 	}
+
 	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+	if r == utf8.RuneError {
+		lx.error(errLexUTF8{lx.input[lx.pos]})
+		return utf8.RuneError
+	}
+
+	// Note: don't use peek() here, as this calls next().
+	if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) {
+		lx.errorControlChar(r)
+		return utf8.RuneError
+	}
+
 	lx.prevWidths[0] = w
 	lx.pos += w
 	return r
@@ -163,19 +185,21 @@ func (lx *lexer) ignore() {
 	lx.start = lx.pos
 }
 
-// backup steps back one rune. Can be called only twice between calls to next.
+// backup steps back one rune. Can be called up to 4 times between calls to next.
 func (lx *lexer) backup() {
 	if lx.atEOF {
 		lx.atEOF = false
 		return
 	}
 	if lx.nprev < 1 {
-		panic("backed up too far")
+		panic("BUG in lexer: backed up too far")
 	}
 	w := lx.prevWidths[0]
 	lx.prevWidths[0] = lx.prevWidths[1]
 	lx.prevWidths[1] = lx.prevWidths[2]
+	lx.prevWidths[2] = lx.prevWidths[3]
 	lx.nprev--
+
 	lx.pos -= w
 	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
 		lx.line--
@@ -211,18 +235,58 @@ func (lx *lexer) skip(pred func(rune) bool) {
 	}
 }
 
-// errorf stops all lexing by emitting an error and returning `nil`.
+// error stops all lexing by emitting an error and returning `nil`.
+//
 // Note that any value that is a character is escaped if it's a special
 // character (newlines, tabs, etc.).
+func (lx *lexer) error(err error) stateFn {
+	if lx.atEOF {
+		return lx.errorPrevLine(err)
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
+	return nil
+}
+
+// errorPrevLine is like error(), but sets the position to the last column of
+// the previous line.
+//
+// This is so that unexpected EOF or NL errors don't show on a new blank line.
+func (lx *lexer) errorPrevLine(err error) stateFn {
+	pos := lx.getPos()
+	pos.Line--
+	pos.Len = 1
+	pos.Start = lx.pos - 1
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorPos is like error(), but allows explicitly setting the position.
+func (lx *lexer) errorPos(start, length int, err error) stateFn {
+	pos := lx.getPos()
+	pos.Start = start
+	pos.Len = length
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorf is like error, and creates a new error.
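+//
+// Like error(), at EOF the position is moved to the last column of the
+// previous line so the error does not point at a blank line.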
func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} return nil } +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + // lexTop consumes elements at the top level of TOML data. func lexTop(lx *lexer) stateFn { r := lx.next() @@ -230,10 +294,10 @@ func lexTop(lx *lexer) stateFn { return lexSkip(lx, lexTop) } switch r { - case commentStart: + case '#': lx.push(lexTop) return lexCommentStart - case tableStart: + case '[': return lexTableStart case eof: if lx.pos > lx.start { @@ -256,7 +320,7 @@ func lexTop(lx *lexer) stateFn { func lexTopEnd(lx *lexer) stateFn { r := lx.next() switch { - case r == commentStart: + case r == '#': // a comment will read to a newline for us. lx.push(lexTop) return lexCommentStart @@ -269,8 +333,9 @@ func lexTopEnd(lx *lexer) stateFn { lx.emit(itemEOF) return nil } - return lx.errorf("expected a top-level item to end with a newline, "+ - "comment, or EOF, but got %q instead", r) + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) } // lexTable lexes the beginning of a table. Namely, it makes sure that @@ -279,7 +344,7 @@ func lexTopEnd(lx *lexer) stateFn { // It also handles the case that this is an item in an array of tables. // e.g., '[[name]]'. func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { + if lx.peek() == '[' { lx.next() lx.emit(itemArrayTableStart) lx.push(lexArrayTableEnd) @@ -296,9 +361,8 @@ func lexTableEnd(lx *lexer) stateFn { } func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("expected end of table array name delimiter %q, "+ - "but got %q instead", arrayTableEnd, r) + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) } lx.emit(itemArrayTableEnd) return lexTopEnd @@ -307,31 +371,18 @@ func lexArrayTableEnd(lx *lexer) stateFn { func lexTableNameStart(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("unexpected end of table name " + - "(table names cannot be empty)") - case r == tableSep: - return lx.errorf("unexpected table separator " + - "(table names cannot be empty)") - case r == stringStart || r == rawStringStart: + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': lx.ignore() lx.push(lexTableNameEnd) - return lexValue // reuse string lexing + return lexQuotedName default: - return lexBareTableName - } -} - -// lexBareTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. 
-func lexBareTableName(lx *lexer) stateFn {
-	r := lx.next()
-	if isBareKeyChar(r) {
-		return lexBareTableName
+		lx.push(lexTableNameEnd)
+		return lexBareName
 	}
-	lx.backup()
-	lx.emit(itemText)
-	return lexTableNameEnd
 }
 
 // lexTableNameEnd reads the end of a piece of a table name, optionally
@@ -341,69 +392,107 @@ func lexTableNameEnd(lx *lexer) stateFn {
 	switch r := lx.next(); {
 	case isWhitespace(r):
 		return lexTableNameEnd
-	case r == tableSep:
+	case r == '.':
 		lx.ignore()
 		return lexTableNameStart
-	case r == tableEnd:
+	case r == ']':
 		return lx.pop()
 	default:
-		return lx.errorf("expected '.' or ']' to end table name, "+
-			"but got %q instead", r)
+		return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
 	}
 }
 
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
-	r := lx.peek()
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r, lx.tomlNext) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
 	switch {
-	case r == keySep:
-		return lx.errorf("unexpected key separator %q", keySep)
-	case isWhitespace(r) || isNL(r):
-		lx.next()
-		return lexSkip(lx, lexKeyStart)
-	case r == stringStart || r == rawStringStart:
-		lx.ignore()
-		lx.emit(itemKeyStart)
-		lx.push(lexKeyEnd)
-		return lexValue // reuse string lexing
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == '"':
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == '\'':
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
 	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == '"' || r == '\'':
 		lx.ignore()
+		fallthrough
+	default: // Bare key
 		lx.emit(itemKeyStart)
-		return lexBareKey
+		return lexKeyNameStart
 	}
 }
 
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.backup() - lx.emit(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emit(itemText) - return lexKeyEnd +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName default: - return lx.errorf("bare keys cannot contain %q", r) + lx.push(lexKeyEnd) + return lexBareName } } // lexKeyEnd consumes the end of a key and trims whitespace (up to the key // separator). func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) case isWhitespace(r): return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) default: - return lx.errorf("expected key separator %q, but got %q instead", - keySep, r) + return lx.errorf("expected '.' or '=', but got %q instead", r) } } @@ -422,17 +511,17 @@ func lexValue(lx *lexer) stateFn { return lexNumberOrDateStart } switch r { - case arrayStart: + case '[': lx.ignore() lx.emit(itemArray) return lexArrayValue - case inlineTableStart: + case '{': lx.ignore() lx.emit(itemInlineTableStart) return lexInlineTableValue - case stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { + case '"': + if lx.accept('"') { + if lx.accept('"') { lx.ignore() // Ignore """ return lexMultilineString } @@ -440,9 +529,9 @@ func lexValue(lx *lexer) stateFn { } lx.ignore() // ignore the '"' return lexString - case rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { lx.ignore() // Ignore """ return lexMultilineRawString } @@ -450,10 +539,15 @@ func lexValue(lx *lexer) stateFn { } lx.ignore() // ignore the "'" return lexRawString - case '+', '-': - return lexNumberStart case '.': // special error case, be kind to users return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart } if unicode.IsLetter(r) { // Be permissive here; lexBool will give a nice error if the @@ -463,6 +557,9 @@ func lexValue(lx *lexer) stateFn { lx.backup() return lexBool } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } return lx.errorf("expected value but found %q instead", r) } @@ -473,14 +570,12 @@ func lexArrayValue(lx *lexer) stateFn { switch { case isWhitespace(r) || isNL(r): return lexSkip(lx, lexArrayValue) - case r == commentStart: + case r == '#': lx.push(lexArrayValue) return lexCommentStart - case r == comma: + case r == ',': return lx.errorf("unexpected comma") - case r == arrayEnd: - // NOTE(caleb): The spec isn't clear about whether you can have - // a trailing comma or not, so we'll allow it. + case r == ']': return lexArrayEnd } @@ -493,23 +588,20 @@ func lexArrayValue(lx *lexer) stateFn { // the next value (or the end of the array): it ignores whitespace and newlines // and expects either a ',' or a ']'. 
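+// A trailing comma before the closing ']' is accepted, as the TOML spec
+// allows trailing commas in arrays.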
func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { + switch r := lx.next(); { case isWhitespace(r) || isNL(r): return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: + case r == '#': lx.push(lexArrayValueEnd) return lexCommentStart - case r == comma: + case r == ',': lx.ignore() return lexArrayValue // move on to the next value - case r == arrayEnd: + case r == ']': return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) } - return lx.errorf( - "expected a comma or array terminator %q, but got %q instead", - arrayEnd, r, - ) } // lexArrayEnd finishes the lexing of an array. @@ -528,13 +620,16 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': lx.push(lexInlineTableValue) return lexCommentStart - case r == comma: + case r == ',': return lx.errorf("unexpected comma") - case r == inlineTableEnd: + case r == '}': return lexInlineTableEnd } lx.backup() @@ -546,23 +641,39 @@ func lexInlineTableValue(lx *lexer) stateFn { // key/value pair and the next pair (or the end of the table): // it ignores whitespace and expects either a ',' or a '}'. func lexInlineTableValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { + switch r := lx.next(); { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': lx.push(lexInlineTableValueEnd) return lexCommentStart - case r == comma: + case r == ',': lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } + return lx.errorf("trailing comma not allowed in inline tables") + } return lexInlineTableValue - case r == inlineTableEnd: + case r == '}': return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) } - return lx.errorf("expected a comma or an inline table terminator %q, "+ - "but got %q instead", inlineTableEnd, r) +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" } // lexInlineTableEnd finishes the lexing of an inline table. @@ -579,13 +690,13 @@ func lexString(lx *lexer) stateFn { r := lx.next() switch { case r == eof: - return lx.errorf("unexpected EOF") + return lx.errorf(`unexpected EOF; expected '"'`) case isNL(r): - return lx.errorf("strings cannot contain newlines") + return lx.errorPrevLine(errLexStringNL{}) case r == '\\': lx.push(lexString) return lexStringEscape - case r == stringEnd: + case r == '"': lx.backup() lx.emit(itemString) lx.next() @@ -598,19 +709,47 @@ func lexString(lx *lexer) stateFn { // lexMultilineString consumes the inner contents of a string. It assumes that // the beginning '"""' has already been consumed and ignored. 
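+//
+// Up to two adjacent '"' may appear inside the string, and up to five at the
+// very end (the closing delimiter plus two literal quotes); six or more
+// closing quotes are an error.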
func lexMultilineString(lx *lexer) stateFn {
-	switch lx.next() {
+	r := lx.next()
+	switch r {
+	default:
+		return lexMultilineString
 	case eof:
-		return lx.errorf("unexpected EOF")
+		return lx.errorf(`unexpected EOF; expected '"""'`)
 	case '\\':
 		return lexMultilineStringEscape
-	case stringEnd:
-		if lx.accept(stringEnd) {
-			if lx.accept(stringEnd) {
-				lx.backup()
+	case '"':
+		/// Found " → try to read two more "".
+		if lx.accept('"') {
+			if lx.accept('"') {
+				/// Peek ahead: the string can contain " and "", including at the
+				/// end: """str"""""
+				/// 6 or more at the end, however, is an error.
+				if lx.peek() == '"' {
+					/// Check if we already lexed 5 "s; if so we have 6 now, and
+					/// that's just too many, man!
+					///
+					/// Second check is for the edge case:
+					///
+					///            two quotes allowed.
+					///            vv
+					///   """lol \""""""
+					///          ^^  ^^^---- closing three
+					///          escaped
+					///
+					/// Ugly, but it works.
+					if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) {
+						return lx.errorf(`unexpected '""""""'`)
+					}
+					lx.backup()
+					lx.backup()
+					return lexMultilineString
+				}
+
+				lx.backup() /// backup: don't include the """ in the item.
 				lx.backup()
 				lx.backup()
 				lx.emit(itemMultilineString)
-				lx.next()
+				lx.next() /// Read over """ again and discard it.
 				lx.next()
 				lx.next()
 				lx.ignore()
@@ -618,8 +757,8 @@
 			}
 			lx.backup()
 		}
+		return lexMultilineString
 	}
-	return lexMultilineString
 }
 
 // lexRawString consumes a raw string. Nothing can be escaped in such a string.
@@ -627,35 +766,54 @@
 func lexRawString(lx *lexer) stateFn {
 	r := lx.next()
 	switch {
+	default:
+		return lexRawString
 	case r == eof:
-		return lx.errorf("unexpected EOF")
+		return lx.errorf(`unexpected EOF; expected "'"`)
 	case isNL(r):
-		return lx.errorf("strings cannot contain newlines")
-	case r == rawStringEnd:
+		return lx.errorPrevLine(errLexStringNL{})
+	case r == '\'':
 		lx.backup()
 		lx.emit(itemRawString)
 		lx.next()
 		lx.ignore()
 		return lx.pop()
 	}
-	return lexRawString
 }
 
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
+// string. It assumes that the beginning triple-' has already been consumed and
 // ignored.
 func lexMultilineRawString(lx *lexer) stateFn {
-	switch lx.next() {
+	r := lx.next()
+	switch r {
+	default:
+		return lexMultilineRawString
 	case eof:
-		return lx.errorf("unexpected EOF")
-	case rawStringEnd:
-		if lx.accept(rawStringEnd) {
-			if lx.accept(rawStringEnd) {
-				lx.backup()
+		return lx.errorf(`unexpected EOF; expected "'''"`)
+	case '\'':
+		/// Found ' → try to read two more ''.
+		if lx.accept('\'') {
+			if lx.accept('\'') {
+				/// Peek ahead: the string can contain ' and '', including at the
+				/// end: '''str'''''
+				/// 6 or more at the end, however, is an error.
+				if lx.peek() == '\'' {
+					/// Check if we already lexed 5 's; if so we have 6 now, and
+					/// that's just too many, man!
+					if strings.HasSuffix(lx.current(), "'''''") {
+						return lx.errorf(`unexpected "''''''"`)
+					}
+					lx.backup()
+					lx.backup()
+					return lexMultilineRawString
+				}
+
+				lx.backup() /// backup: don't include the ''' in the item.
 				lx.backup()
 				lx.backup()
 				lx.emit(itemRawMultilineString)
-				lx.next()
+				lx.next() /// Read over ''' again and discard it.
lx.next() lx.next() lx.ignore() @@ -663,15 +821,14 @@ func lexMultilineRawString(lx *lexer) stateFn { } lx.backup() } + return lexMultilineRawString } - return lexMultilineRawString } // lexMultilineStringEscape consumes an escaped character. It assumes that the // preceding '\\' has already been consumed. func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { + if isNL(lx.next()) { /// \ escaping newline. return lexMultilineString } lx.backup() @@ -682,6 +839,11 @@ func lexMultilineStringEscape(lx *lexer) stateFn { func lexStringEscape(lx *lexer) stateFn { r := lx.next() switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough case 'b': fallthrough case 't': @@ -694,16 +856,36 @@ func lexStringEscape(lx *lexer) stateFn { fallthrough case '"': fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough case '\\': return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape case 'u': return lexShortUnicodeEscape case 'U': return lexLongUnicodeEscape } - return lx.errorf("invalid escape character %q; only the following "+ - "escape characters are allowed: "+ - `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) + return lx.error(errLexEscape{r}) +} + +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected two hexadecimal digits after '\x', but got %q instead`, + lx.current()) + } + } + return lx.pop() } func lexShortUnicodeEscape(lx *lexer) stateFn { @@ -711,8 +893,9 @@ func lexShortUnicodeEscape(lx *lexer) stateFn { for i := 0; i < 4; i++ { r = lx.next() if !isHexadecimal(r) { - return lx.errorf(`expected four hexadecimal digits after '\u', `+ - "but got %q instead", lx.current()) + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) } } return lx.pop() @@ -723,28 +906,33 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { for i := 0; i < 8; i++ { r = lx.next() if !isHexadecimal(r) { - return lx.errorf(`expected eight hexadecimal digits after '\U', `+ - "but got %q instead", lx.current()) + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) } } return lx.pop() } -// lexNumberOrDateStart consumes either an integer, a float, or datetime. +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. func lexNumberOrDateStart(lx *lexer) stateFn { r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } switch r { - case '_': - return lexNumber - case 'e', 'E': - return lexFloat - case '.': - return lx.errorf("floats must start with a digit, not '.'") + case '0': + return lexBaseNumberOrDate } - return lx.errorf("expected a digit but got %q", r) + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate } // lexNumberOrDate consumes either an integer, float or datetime. 
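
The hunks above and below split the old single `lexNumberStart`/`lexNumber` path into dedicated states for hexadecimal, octal, binary, and decimal values. As a rough illustration of the TOML 1.0 inputs those states accept, here is a minimal sketch using the package's public `Decode` API (the key and field names are made up for the example):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// Each value exercises one of the new lexer states: lexHexInteger,
	// lexOctalInteger, lexBinaryInteger, lexDecimalNumber, and the
	// inf/nan handling in lexDecimalNumberStart.
	var v struct {
		Hex int64
		Oct int64
		Bin int64
		Big int64
		Neg float64
	}
	_, err := toml.Decode(`
hex = 0xDEAD_BEEF
oct = 0o755
bin = 0b1101_0101
big = 5_349_221
neg = -inf
`, &v)
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%+v\n", v) // {Hex:3735928559 Oct:493 Bin:213 Big:5349221 Neg:-Inf}
}
```
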
@@ -754,10 +942,10 @@ func lexNumberOrDate(lx *lexer) stateFn {
 		return lexNumberOrDate
 	}
 	switch r {
-	case '-':
+	case '-', ':':
 		return lexDatetime
 	case '_':
-		return lexNumber
+		return lexDecimalNumber
 	case '.', 'e', 'E':
 		return lexFloat
 	}
@@ -775,41 +963,156 @@ func lexDatetime(lx *lexer) stateFn {
 		return lexDatetime
 	}
 	switch r {
-	case '-', 'T', ':', '.', 'Z', '+':
+	case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
 		return lexDatetime
 	}
 
 	lx.backup()
-	lx.emit(itemDatetime)
+	lx.emitTrim(itemDatetime)
 	return lx.pop()
 }
 
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
-	// We MUST see a digit. Even floats have to start with a digit.
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
 	r := lx.next()
-	if !isDigit(r) {
-		if r == '.' {
-			return lx.errorf("floats must start with a digit, not '.'")
+	if isHexadecimal(r) {
+		return lexHexInteger
+	}
+	switch r {
+	case '_':
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isOctal(r) {
+		return lexOctalInteger
+	}
+	switch r {
+	case '_':
+		return lexOctalInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isBinary(r) {
+		return lexBinaryInteger
+	}
+	switch r {
+	case '_':
+		return lexBinaryInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+	switch r {
+	case '.', 'e', 'E':
+		return lexFloat
+	case '_':
+		return lexDecimalNumber
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumberStart consumes the first digit of a number beginning with a
+// sign. It assumes the sign has already been consumed. Values which start with
+// a sign are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+	r := lx.next()
+
+	// Special error cases to give users better error messages
+	switch r {
+	case 'i':
+		if !lx.accept('n') || !lx.accept('f') {
+			return lx.errorf("invalid float: '%s'", lx.current())
 		}
-		return lx.errorf("expected a digit but got %q", r)
+		lx.emit(itemFloat)
+		return lx.pop()
+	case 'n':
+		if !lx.accept('a') || !lx.accept('n') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case '0':
+		p := lx.peek()
+		switch p {
+		case 'b', 'o', 'x':
+			return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+		}
+	case '.':
+		return lx.errorf("floats must start with a digit, not '.'")
 	}
-	return lexNumber
+
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+
+	return lx.errorf("expected a digit but got %q", r)
 }
 
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
 	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
 	if isDigit(r) {
-		return lexNumber
+		return lexNumberOrDate
 	}
 	switch r {
 	case '_':
-		return lexNumber
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
 	case '.', 'e', 'E':
 		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			return lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			return lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			return lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
 	}
 
 	lx.backup()
@@ -867,49 +1170,31 @@ func lexCommentStart(lx *lexer) stateFn {
 // It will consume *up to* the first newline character, and pass control
 // back to the last state on the stack.
 func lexComment(lx *lexer) stateFn {
-	r := lx.peek()
-	if isNL(r) || r == eof {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
 		lx.emit(itemText)
 		return lx.pop()
+	default:
+		return lexComment
 	}
-	lx.next()
-	return lexComment
 }
 
 // lexSkip ignores all slurped input and moves on to the next state.
 func lexSkip(lx *lexer, nextState stateFn) stateFn {
-	return func(lx *lexer) stateFn {
-		lx.ignore()
-		return nextState
-	}
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
-	return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
-	return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
-	return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
-	return (r >= '0' && r <= '9') ||
-		(r >= 'a' && r <= 'f') ||
-		(r >= 'A' && r <= 'F')
+	lx.ignore()
+	return nextState
 }
 
-func isBareKeyChar(r rune) bool {
-	return (r >= 'A' && r <= 'Z') ||
-		(r >= 'a' && r <= 'z') ||
-		(r >= '0' && r <= '9') ||
-		r == '_' ||
-		r == '-'
+func (s stateFn) String() string {
+	name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+	if i := strings.LastIndexByte(name, '.'); i > -1 {
+		name = name[i+1:]
+	}
+	if s == nil {
+		name = "<nil>"
+	}
+	return name + "()"
}
 
@@ -938,12 +1223,18 @@ func (itype itemType) String() string {
 		return "TableEnd"
 	case itemKeyStart:
 		return "KeyStart"
+	case itemKeyEnd:
+		return "KeyEnd"
 	case itemArray:
 		return "Array"
 	case itemArrayEnd:
 		return "ArrayEnd"
 	case itemCommentStart:
 		return "CommentStart"
+	case itemInlineTableStart:
+		return "InlineTableStart"
+	case itemInlineTableEnd:
+		return "InlineTableEnd"
 	}
 	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
 }
@@ -951,3 +1242,42 @@ func (itype itemType) String() string {
 func (item item) String() string {
 	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
 }
+
+func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
+func isNL(r rune) bool         { return r == '\n' || r == '\r' }
+func isControl(r rune) bool { // Control characters except \t, \r, \n
+	switch r {
+	case '\t', '\r', '\n':
+		return false
+	default:
+		return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+	}
+}
+func isDigit(r rune) bool  { return r >= '0' && r <= '9' }
+func isBinary(r rune) bool { return r == '0' || r == '1' }
+func isOctal(r rune) bool  { return r >= '0' && r <= '7' }
+func isHexadecimal(r rune) bool {
+	return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune, tomlNext bool) bool {
+	if tomlNext {
+		return (r >= 'A' && r <= 'Z') ||
+			(r >= 'a' && r <= 'z') ||
+			(r >= '0' && r <= '9') ||
+			r == '_' || r == '-' ||
+			r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
+			(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
+			(r >= 0x037f && r <= 0x1fff) ||
+			(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
+			(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
+			(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
+			(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
+			(r >= 0x10000 && r <= 0xeffff)
+	}
+
+	return (r >= 'A' && r <= 'Z') ||
+		(r >= 'a' && r <= 'z') ||
+		(r >= '0' && r <= '9') ||
+		r == '_' || r == '-'
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/meta.go
similarity index 59%
rename from vendor/github.com/BurntSushi/toml/decode_meta.go
rename to vendor/github.com/BurntSushi/toml/meta.go
index b9914a679..2e78b24e9 100644
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ b/vendor/github.com/BurntSushi/toml/meta.go
@@ -1,33 +1,40 @@
 package toml
 
-import "strings"
+import (
+	"strings"
+)
 
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
+// MetaData allows access to meta information about TOML data that's not
+// accessible otherwise.
+//
+// It allows checking if a key is defined in the TOML data, whether any keys
+// were undecoded, and the TOML type of a key.
 type MetaData struct {
+	context Key // Used only during decoding.
+
+	keyInfo map[string]keyInfo
 	mapping map[string]interface{}
-	types   map[string]tomlType
 	keys    []Key
-	decoded map[string]bool
-	context Key // Used only during decoding.
+	decoded map[string]struct{}
+	data    []byte // Input file; for errors.
 }
 
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
+// IsDefined reports if the key exists in the TOML data.
 //
-//	// access the TOML key 'a.b.c'
-//	IsDefined("a", "b", "c")
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
 //
-// IsDefined will return false if an empty key given. Keys are case sensitive.
+// Returns false for an empty key.
 func (md *MetaData) IsDefined(key ...string) bool {
 	if len(key) == 0 {
 		return false
 	}
 
-	var hash map[string]interface{}
-	var ok bool
-	var hashOrVal interface{} = md.mapping
+	var (
+		hash      map[string]interface{}
+		ok        bool
+		hashOrVal interface{} = md.mapping
+	)
 	for _, k := range key {
 		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
 			return false
@@ -41,58 +48,20 @@ func (md *MetaData) IsDefined(key ...string) bool {
 
 // Type returns a string representation of the type of the key specified.
 //
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
+// Type will return the empty string if given an empty key or a key that does
+// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() } return "" } -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } - return k[i] -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - // Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. // -// The list will have the same order as the keys appeared in the TOML data. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. // // All keys returned are non-empty. func (md *MetaData) Keys() []Key { @@ -102,7 +71,7 @@ func (md *MetaData) Keys() []Key { // Undecoded returns all keys that have not been decoded in the order in which // they appear in the original TOML document. // -// This includes keys that haven't been decoded because of a Primitive value. +// This includes keys that haven't been decoded because of a [Primitive] value. // Once the Primitive value is decoded, the keys will be considered decoded. // // Also note that decoding into an empty interface will result in no decoding, @@ -113,9 +82,40 @@ func (md *MetaData) Keys() []Key { func (md *MetaData) Undecoded() []Key { undecoded := make([]Key, 0, len(md.keys)) for _, key := range md.keys { - if !md.decoded[key.String()] { + if _, ok := md.decoded[key.String()]; !ok { undecoded = append(undecoded, key) } } return undecoded } + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. +type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c, false) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 50869ef92..9c1915369 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -2,57 +2,80 @@ package toml import ( "fmt" + "os" "strconv" "strings" "time" - "unicode" "unicode/utf8" + + "github.com/BurntSushi/toml/internal" ) type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. 
- ordered []Key - - // the full key for the current hash in scope - context Key + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + tomlNext bool - // the base key name for everything except hashes - currentKey string + ordered []Key // List of keys in the order that they appear in the TOML data. - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. - implicits map[string]bool + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]interface{} // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). } -type parseError string - -func (pe parseError) Error() string { - return string(pe) +type keyInfo struct { + pos Position + tomlType tomlType } func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + defer func() { if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr return } panic(r) } }() + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 + data = data[2:] + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. 
+ ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + p = &parser{ + keyInfo: make(map[string]keyInfo), mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), + lx: lex(data, tomlNext), ordered: make([]Key, 0), - implicits: make(map[string]bool), + implicits: make(map[string]struct{}), + tomlNext: tomlNext, } for { item := p.next() @@ -65,20 +88,57 @@ func parse(data string) (p *parser, err error) { return p, nil } +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) } func (p *parser) next() item { it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) if it.typ == itemError { - p.panicf("%s", it.val) + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) } return it } +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + func (p *parser) bug(format string, v ...interface{}) { panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) } @@ -97,44 +157,60 @@ func (p *parser) assertEqual(expected, got itemType) { func (p *parser) topLevel(item item) { switch item.typ { - case itemCommentStart: - p.approxLine = item.line + case itemCommentStart: // # .. p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line + case itemTableStart: // [ .. ] + name := p.nextPos() var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) } - p.assertEqual(itemTableEnd, kg.typ) + p.assertEqual(itemTableEnd, name.typ) - p.establishContext(key, false) - p.setType("", tomlHash) + p.addContext(key, false) + p.setType("", tomlHash, item.pos) p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line + case itemArrayTableStart: // [[ .. 
]] + name := p.nextPos() var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) } - p.assertEqual(itemArrayTableEnd, kg.typ) + p.assertEqual(itemArrayTableEnd, name.typ) - p.establishContext(key, true) - p.setType("", tomlArrayHash) + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set value. + vItem := p.next() + val, typ := p.value(vItem, false) + p.set(p.currentKey, val, typ, vItem.pos) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext p.currentKey = "" default: p.bug("Unexpected type at top level: %s", item.typ) @@ -148,180 +224,271 @@ func (p *parser) keyString(it item) string { return it.val case itemString, itemMultilineString, itemRawString, itemRawMultilineString: - s, _ := p.value(it) + s, _ := p.value(it, false) return s.(string) default: p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") } + panic("unreachable") } +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. -func (p *parser) value(it item) (interface{}, tomlType) { +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { switch it.typ { case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) case itemBool: switch it.val { case "true": return true, p.typeOfPrimitive(it) case "false": return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - if !numUnderscoresOK(it.val) { - p.panicf("Invalid integer %q: underscores must be surrounded by digits", - it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseInt(val, 10, 64) - if err != nil { - // Distinguish integer values. 
Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) } - return num, p.typeOfPrimitive(it) - case itemFloat: - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicf("Invalid float %q: underscores must be "+ - "surrounded by digits", it.val) - } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. - p.panicf("Invalid float %q: '.' 
must be followed "+ - "by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.panicf("Invalid float value: %q", it.val) - } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) } - return num, p.typeOfPrimitive(it) - case itemDatetime: - var t time.Time - var ok bool - var err error - for _, format := range []string{ - "2006-01-02T15:04:05Z07:00", - "2006-01-02T15:04:05", - "2006-01-02", - } { - t, err = time.ParseInLocation(format, it.val, time.Local) - if err == nil { - ok = true - break - } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) } - if !ok { - p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location + next bool +}{ + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) + + var ( + types []tomlType - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. 
See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue } - return array, p.typeOfArray(types) - case itemInlineTableStart: - var ( - hash = make(map[string]interface{}) - outerContext = p.context - outerKey = p.currentKey - ) - p.context = append(p.context, p.currentKey) - p.currentKey = "" - for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { - if it.typ != itemKeyStart { - p.bug("Expected key start but instead found %q, around line %d", - it.val, p.approxLine) - } - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + + // XXX: types isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + _ = types + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" - // retrieve key - k := p.next() - p.approxLine = k.line - kname := p.keyString(k) + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) - // retrieve value - p.currentKey = kname - val, typ := p.value(p.next()) - // make sure we keep metadata up to date - p.setType(kname, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - hash[kname] = val + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue } - p.context = outerContext - p.currentKey = outerKey - return hash, tomlHash + + /// Read all key parts. + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ, it.pos) + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false } // numUnderscoresOK checks whether each underscore in s is surrounded by // characters that are not underscores. 
func numUnderscoresOK(s string) bool {
+	switch s {
+	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+		return true
+	}
 	accept := false
 	for _, r := range s {
 		if r == '_' {
 			if !accept {
 				return false
 			}
-			accept = false
-			continue
 		}
-		accept = true
+
+		// isHexadecimal is a superset of all the permissible characters
+		// surrounding an underscore.
+		accept = isHexadecimal(r)
 	}
 	return accept
 }
@@ -338,13 +505,12 @@ func numPeriodsOK(s string) bool {
 	return !period
 }
 
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
 //
 // Establishing the context also makes sure that the key isn't a duplicate, and
 // will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
+func (p *parser) addContext(key Key, array bool) {
 	var ok bool
 
 	// Always start at the top level and drill down for our context.
@@ -383,7 +549,7 @@
 	// list of tables for it.
 	k := key[len(key)-1]
 	if _, ok := hashContext[k]; !ok {
-		hashContext[k] = make([]map[string]interface{}, 0, 5)
+		hashContext[k] = make([]map[string]interface{}, 0, 4)
 	}
 
 	// Add a new table. But make sure the key hasn't already been used
 	// for something else.
 	if hash, ok := hashContext[k].([]map[string]interface{}); ok {
 		hashContext[k] = append(hash, make(map[string]interface{}))
 	} else {
-		p.panicf("Key '%s' was already created and cannot be used as "+
-			"an array.", keyContext)
+		p.panicf("Key '%s' was already created and cannot be used as an array.", key)
 	}
 	} else {
 		p.setValue(key[len(key)-1], make(map[string]interface{}))
@@ -400,15 +565,22 @@
 	p.context = append(p.context, key[len(key)-1])
 }
 
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
+	p.setValue(key, val)
+	p.setType(key, typ, pos)
+}
+
 // setValue sets the given key to the given value in the current context.
 // It will make sure that the key hasn't already been defined, and will
 // account for implicit key groups.
 func (p *parser) setValue(key string, value interface{}) {
-	var tmpHash interface{}
-	var ok bool
-
-	hash := p.mapping
-	keyContext := make(Key, 0)
+	var (
+		tmpHash    interface{}
+		ok         bool
+		hash       = p.mapping
+		keyContext Key
+	)
 	for _, k := range p.context {
 		keyContext = append(keyContext, k)
 		if tmpHash, ok = hash[k]; !ok {
@@ -422,24 +594,26 @@
 		case map[string]interface{}:
 			hash = t
 		default:
-			p.bug("Expected hash to have type 'map[string]interface{}', but "+
-				"it has '%T' instead.", tmpHash)
+			p.panicf("Key '%s' has already been defined.", keyContext)
 		}
 	}
 	keyContext = append(keyContext, key)
 
 	if _, ok := hash[key]; ok {
-		// Typically, if the given key has already been set, then we have
-		// to raise an error since duplicate keys are disallowed. However,
-		// it's possible that a key was previously defined implicitly. In this
-		// case, it is allowed to be redefined concretely. (See the
-		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+		// Normally redefining keys isn't allowed, but the key could have been
+		// defined implicitly and it's allowed to be redefined concretely. (See
+		// the `valid/implicit-and-explicit-after.toml` test in toml-test.)
 		//
 		// But we have to make sure to stop marking it as an implicit. (So that
 		// another redefinition provokes an error.)
 		//
 		// Note that since it has already been defined (as a hash), we don't
 		// want to overwrite it. So our business is done.
+		if p.isArray(keyContext) {
+			p.removeImplicit(keyContext)
+			hash[key] = value
+			return
+		}
 		if p.isImplicit(keyContext) {
 			p.removeImplicit(keyContext)
 			return
 		}
@@ -449,41 +623,37 @@
 		// key, which is *always* wrong.
 		p.panicf("Key '%s' has already been defined.", keyContext)
 	}
+
 	hash[key] = value
 }
 
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
+// setType sets the type of a particular value at a given key. It should be
+// called immediately AFTER setValue.
 //
 // Note that if `key` is empty, then the type given will be applied to the
 // current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
+func (p *parser) setType(key string, typ tomlType, pos Position) {
 	keyContext := make(Key, 0, len(p.context)+1)
-	for _, k := range p.context {
-		keyContext = append(keyContext, k)
-	}
+	keyContext = append(keyContext, p.context...)
 	if len(key) > 0 { // allow type setting for hashes
 		keyContext = append(keyContext, key)
 	}
-	p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
-	p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
-	p.implicits[key.String()] = false
+	// Special case to make empty keys ("" = 1) work.
+	// Without it, it will set "" rather than `""`.
+	// TODO: why is this needed? And why is this only needed here?
+	if len(keyContext) == 0 {
+		keyContext = Key{""}
+	}
+	p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
}
 
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
-	return p.implicits[key.String()]
-}
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
+func (p *parser) addImplicit(key Key)     { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key)  { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool    { return p.keyInfo[key.String()].tomlType == tomlArray }
+func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) }
 
 // current returns the full key name of the current context.
func (p *parser) current() string { @@ -497,24 +667,67 @@ func (p *parser) current() string { } func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] } - return s[1:] + return s } -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. +func (p *parser) stripEscapedNewlines(s string) string { + var b strings.Builder + var i int + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() + } + i += ix + + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue + } + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } } + if j == i+1 { + // Not a whitespace escape. + i++ + continue + } + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. + // (It's a bad escape sequence, but we can let + // replaceEscapes catch it.) + i++ + continue + } + b.WriteString(s[:i]) + s = s[j:] + i = 0 } - return strings.Join(esc, "") } -func (p *parser) replaceEscapes(str string) string { - var replaced []rune +func (p *parser) replaceEscapes(it item, str string) string { + replaced := make([]rune, 0, len(str)) s := []byte(str) r := 0 for r < len(s) { @@ -532,7 +745,8 @@ func (p *parser) replaceEscapes(str string) string { switch s[r] { default: p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" + case ' ', '\t': + p.panicItemf(it, "invalid escape: '\\%c'", s[r]) case 'b': replaced = append(replaced, rune(0x0008)) r += 1 @@ -548,24 +762,35 @@ func (p *parser) replaceEscapes(str string) string { case 'r': replaced = append(replaced, rune(0x000D)) r += 1 + case 'e': + if p.tomlNext { + replaced = append(replaced, rune(0x001B)) + r += 1 + } case '"': replaced = append(replaced, rune(0x0022)) r += 1 case '\\': replaced = append(replaced, rune(0x005C)) r += 1 + case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) + replaced = append(replaced, escaped) + r += 3 + } case 'u': // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+5). (Because the lexer guarantees this // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) replaced = append(replaced, escaped) r += 5 case 'U': // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+9). (Because the lexer guarantees this // for us.) 
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) replaced = append(replaced, escaped) r += 9 } @@ -573,20 +798,14 @@ func (p *parser) replaceEscapes(str string) string { return string(replaced) } -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { +func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { s := string(bs) hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) } if !utf8.ValidRune(rune(hex)) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) } return rune(hex) } - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164be0..000000000 --- a/vendor/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go index 608997c22..254ca82e5 100644 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field { next := []field{{typ: t}} // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} + var count map[reflect.Type]int + var nextCount map[reflect.Type]int // Types already visited at an earlier level. visited := map[reflect.Type]bool{} diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_toml.go similarity index 75% rename from vendor/github.com/BurntSushi/toml/type_check.go rename to vendor/github.com/BurntSushi/toml/type_toml.go index c73f8afc1..4e90d7737 100644 --- a/vendor/github.com/BurntSushi/toml/type_check.go +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool { return t1.typeString() == t2.typeString() } -func typeIsHash(t tomlType) bool { +func typeIsTable(t tomlType) bool { return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) } @@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType { p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) panic("unreachable") } - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. -// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. 
-	if len(types) == 0 {
-		return tomlArray
-	}
-
-	theType := types[0]
-	for _, t := range types[1:] {
-		if !typeEqual(theType, t) {
-			p.panicf("Array contains values of type '%s' and '%s', but "+
-				"arrays must be homogeneous.", theType, t)
-		}
-	}
-	return tomlArray
-}
diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE
new file mode 100644
index 000000000..6d0eb9d5d
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2019 TSUYUSATO Kitsune
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md
new file mode 100644
index 000000000..e9924d297
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/README.md
@@ -0,0 +1,52 @@
+# heredoc
+
+[![Build Status](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc?status.svg)](https://godoc.org/github.com/MakeNowJust/heredoc)
+
+## About
+
+Package heredoc provides here-documents with the leading indentation removed.
+
+## Install
+
+```console
+$ go get github.com/MakeNowJust/heredoc
+```
+
+## Import
+
+```go
+// usual
+import "github.com/MakeNowJust/heredoc"
+```
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/MakeNowJust/heredoc"
+)
+
+func main() {
+	fmt.Println(heredoc.Doc(`
+		Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+		sed do eiusmod tempor incididunt ut labore et dolore magna
+		aliqua. Ut enim ad minim veniam, ...
+	`))
+	// Output:
+	// Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+	// sed do eiusmod tempor incididunt ut labore et dolore magna
+	// aliqua. Ut enim ad minim veniam, ...
+	//
+}
+```
+
+## API Document
+
+ - [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc)
+
+## License
+
+This software is released under the MIT License, see LICENSE.
diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go
new file mode 100644
index 000000000..1fc046955
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go
@@ -0,0 +1,105 @@
+// Copyright (c) 2014-2019 TSUYUSATO Kitsune
+// This software is released under the MIT License.
+// http://opensource.org/licenses/mit-license.php

+// Package heredoc provides creation of here-documents from raw strings.
+//
+// Golang supports raw-string syntax.
+//
+//	doc := `
+//		Foo
+//		Bar
+//	`
+//
+// But raw strings cannot recognize indentation, so such content becomes an
+// indented string, equivalent to
+//
+//	"\n\tFoo\n\tBar\n"
+//
+// I don't want this!
+//
+// However this problem is solved by package heredoc.
+//
+//	doc := heredoc.Doc(`
+//		Foo
+//		Bar
+//	`)
+//
+// Is equivalent to
+//
+//	"Foo\nBar\n"
+package heredoc
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// Doc returns the unindented string as a here-document.
+func Doc(raw string) string {
+	skipFirstLine := false
+	if len(raw) > 0 && raw[0] == '\n' {
+		raw = raw[1:]
+	} else {
+		skipFirstLine = true
+	}
+
+	lines := strings.Split(raw, "\n")
+
+	minIndentSize := getMinIndent(lines, skipFirstLine)
+	lines = removeIndentation(lines, minIndentSize, skipFirstLine)
+
+	return strings.Join(lines, "\n")
+}
+
+// getMinIndent calculates the minimum indentation in lines, excluding empty lines.
+func getMinIndent(lines []string, skipFirstLine bool) int {
+	minIndentSize := maxInt
+
+	for i, line := range lines {
+		if i == 0 && skipFirstLine {
+			continue
+		}
+
+		indentSize := 0
+		for _, r := range []rune(line) {
+			if unicode.IsSpace(r) {
+				indentSize += 1
+			} else {
+				break
+			}
+		}
+
+		if len(line) == indentSize {
+			if i == len(lines)-1 && indentSize < minIndentSize {
+				lines[i] = ""
+			}
+		} else if indentSize < minIndentSize {
+			minIndentSize = indentSize
+		}
+	}
+	return minIndentSize
+}
+
+// removeIndentation removes n characters from the front of each line in lines.
+// Skips the first line if skipFirstLine is true, and skips empty lines.
+func removeIndentation(lines []string, n int, skipFirstLine bool) []string {
+	for i, line := range lines {
+		if i == 0 && skipFirstLine {
+			continue
+		}
+
+		if len(lines[i]) >= n {
+			lines[i] = line[n:]
+		}
+	}
+	return lines
+}
+
+// Docf returns the unindented and formatted string as a here-document.
+// Formatting is done as for fmt.Printf().
+func Docf(raw string, args ...interface{}) string {
+	return fmt.Sprintf(Doc(raw), args...)
+}
diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml
deleted file mode 100644
index 096369d44..000000000
--- a/vendor/github.com/Masterminds/semver/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-language: go
-
-go:
-  - 1.6.x
-  - 1.7.x
-  - 1.8.x
-  - 1.9.x
-  - 1.10.x
-  - 1.11.x
-  - 1.12.x
-  - tip
-
-# Setting sudo access to false will let Travis CI use containers rather than
-# VMs to run the tests.
For more details see: -# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ -# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ -sudo: false - -script: - - make setup - - make test - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md deleted file mode 100644 index e405c9a84..000000000 --- a/vendor/github.com/Masterminds/semver/CHANGELOG.md +++ /dev/null @@ -1,109 +0,0 @@ -# 1.5.0 (2019-09-11) - -## Added - -- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) - -## Changed - -- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) -- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) -- #72: Adding docs comment pointing to vert for a cli -- #71: Update the docs on pre-release comparator handling -- #89: Test with new go versions (thanks @thedevsaddam) -- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) - -## Fixed - -- #78: Fix unchecked error in example code (thanks @ravron) -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case -- #97: Fixed copyright file for proper display on GitHub -- #107: Fix handling prerelease when sorting alphanum and num -- #109: Fixed where Validate sometimes returns wrong message on error - -# 1.4.2 (2018-04-10) - -## Changed -- #72: Updated the docs to point to vert for a console appliaction -- #71: Update the docs on pre-release comparator handling - -## Fixed -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case - -# 1.4.1 (2018-04-02) - -## Fixed -- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) - -# 1.4.0 (2017-10-04) - -## Changed -- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) - -# 1.3.1 (2017-07-10) - -## Fixed -- Fixed #57: number comparisons in prerelease sometimes inaccurate - -# 1.3.0 (2017-05-02) - -## Added -- #45: Added json (un)marshaling support (thanks @mh-cbon) -- Stability marker. See https://masterminds.github.io/stability/ - -## Fixed -- #51: Fix handling of single digit tilde constraint (thanks @dgodd) - -## Changed -- #55: The godoc icon moved from png to svg - -# 1.2.3 (2017-04-03) - -## Fixed -- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * - -# Release 1.2.2 (2016-12-13) - -## Fixed -- #34: Fixed issue where hyphen range was not working with pre-release parsing. - -# Release 1.2.1 (2016-11-28) - -## Fixed -- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" - properly. - -# Release 1.2.0 (2016-11-04) - -## Added -- #20: Added MustParse function for versions (thanks @adamreese) -- #15: Added increment methods on versions (thanks @mh-cbon) - -## Fixed -- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and - might not satisfy the intended compatibility. The change here ignores pre-releases - on constraint checks (e.g., ~ or ^) when a pre-release is not part of the - constraint. For example, `^1.2.3` will ignore pre-releases while - `^1.2.3-alpha` will include them. 
- -# Release 1.1.1 (2016-06-30) - -## Changed -- Issue #9: Speed up version comparison performance (thanks @sdboyer) -- Issue #8: Added benchmarks (thanks @sdboyer) -- Updated Go Report Card URL to new location -- Updated Readme to add code snippet formatting (thanks @mh-cbon) -- Updating tagging to v[SemVer] structure for compatibility with other tools. - -# Release 1.1.0 (2016-03-11) - -- Issue #2: Implemented validation to provide reasons a versions failed a - constraint. - -# Release 1.0.1 (2015-12-31) - -- Fixed #1: * constraint failing on valid versions. - -# Release 1.0.0 (2015-10-20) - -- Initial release diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile deleted file mode 100644 index a7a1b4e36..000000000 --- a/vendor/github.com/Masterminds/semver/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -.PHONY: setup -setup: - go get -u gopkg.in/alecthomas/gometalinter.v1 - gometalinter.v1 --install - -.PHONY: test -test: validate lint - @echo "==> Running tests" - go test -v - -.PHONY: validate -validate: - @echo "==> Running static validations" - @gometalinter.v1 \ - --disable-all \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || exit_code=1 - -.PHONY: lint -lint: - @echo "==> Running linters" - @gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... || : diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md deleted file mode 100644 index 1b52d2f43..000000000 --- a/vendor/github.com/Masterminds/semver/README.md +++ /dev/null @@ -1,194 +0,0 @@ -# SemVer - -The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: - -* Parse semantic versions -* Sort semantic versions -* Check if a semantic version fits within a set of constraints -* Optionally work with a `v` prefix - -[![Stability: -Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) -[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) - -If you are looking for a command line tool for version comparisons please see -[vert](https://github.com/Masterminds/vert) which uses this library. - -## Parsing Semantic Versions - -To parse a semantic version use the `NewVersion` function. For example, - -```go - v, err := semver.NewVersion("1.2.3-beta.1+build345") -``` - -If there is an error the version wasn't parseable. The version object has methods -to get the parts of the version, compare it to other versions, convert the -version back into a string, and get the original string. For more details -please see the [documentation](https://godoc.org/github.com/Masterminds/semver). 
- -## Sorting Semantic Versions - -A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) -package from the standard library. For example, - -```go - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(semver.Collection(vs)) -``` - -## Checking Version Constraints - -Checking a version against version constraints is one of the most featureful -parts of the package. - -```go - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) -``` - -## Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of comma separated and comparisons. These are then separated by || separated or -comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. - -The basic comparisons are: - -* `=`: equal (aliased to no operator) -* `!=`: not equal -* `>`: greater than -* `<`: less than -* `>=`: greater than or equal to -* `<=`: less than or equal to - -## Working With Pre-release Versions - -Pre-releases, for those not familiar with them, are used for software releases -prior to stable or generally available releases. Examples of pre-releases include -development, alpha, beta, and release candidate releases. A pre-release may be -a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the -order of precidence, pre-releases come before their associated releases. In this -example `1.2.3-beta.1 < 1.2.3`. - -According to the Semantic Version specification pre-releases may not be -API compliant with their release counterpart. It says, - -> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. - -SemVer comparisons without a pre-release comparator will skip pre-release versions. -For example, `>=1.2.3` will skip pre-releases when looking at a list of releases -while `>=1.2.3-0` will evaluate and find pre-releases. - -The reason for the `0` as a pre-release version in the example comparison is -because pre-releases can only contain ASCII alphanumerics and hyphens (along with -`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/)) - -Understanding ASCII sort ordering is important because A-Z comes before a-z. That -means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case -sensitivity doesn't apply here. This is due to ASCII sort ordering which is what -the spec specifies. - -## Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - -* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` -* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` - -## Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. 
When used on the `=` operator it falls -back to the pack level comparison (see tilde below). For example, - -* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `>= 1.2.x` is equivalent to `>= 1.2.0` -* `<= 2.x` is equivalent to `< 3` -* `*` is equivalent to `>= 0.0.0` - -## Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. -For example, - -* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` -* `~1` is equivalent to `>= 1, < 2` -* `~2.3` is equivalent to `>= 2.3, < 2.4` -* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `~1.x` is equivalent to `>= 1, < 2` - -## Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes. This is useful -when comparisons of API versions as a major change is API breaking. For example, - -* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0` -* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -* `^2.3` is equivalent to `>= 2.3, < 3` -* `^2.x` is equivalent to `>= 2.0.0, < 3` - -# Validation - -In addition to testing a version against a constraint, a version can be validated -against a constraint. When validation fails a slice of errors containing why a -version didn't meet the constraint is returned. For example, - -```go - c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - - // Validate a version against a constraint. - a, msgs := c.Validate(v) - // a is false - for _, m := range msgs { - fmt.Println(m) - - // Loops over the errors which would read - // "1.3 is greater than 1.2.3" - // "1.3 is less than 1.4" - } -``` - -# Fuzzing - - [dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing. - -1. `go-fuzz-build` -2. `go-fuzz -workdir=fuzz` - -# Contribute - -If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) -or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml deleted file mode 100644 index b2778df15..000000000 --- a/vendor/github.com/Masterminds/semver/appveyor.yml +++ /dev/null @@ -1,44 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\semver -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go version - - go env - - go get -u gopkg.in/alecthomas/gometalinter.v1 - - set PATH=%PATH%;%GOPATH%\bin - - gometalinter.v1.exe --install - -build_script: - - go install -v ./... - -test_script: - - "gometalinter.v1 \ - --disable-all \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || exit_code=1" - - "gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... 
|| :" - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go deleted file mode 100644 index b94b93413..000000000 --- a/vendor/github.com/Masterminds/semver/constraints.go +++ /dev/null @@ -1,423 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "regexp" - "strings" -) - -// Constraints is one or more constraint that a semantic version can be -// checked against. -type Constraints struct { - constraints [][]*constraint -} - -// NewConstraint returns a Constraints instance that a Version instance can -// be checked against. If there is a parse error it will be returned. -func NewConstraint(c string) (*Constraints, error) { - - // Rewrite - ranges into a comparison operation. - c = rewriteRange(c) - - ors := strings.Split(c, "||") - or := make([][]*constraint, len(ors)) - for k, v := range ors { - cs := strings.Split(v, ",") - result := make([]*constraint, len(cs)) - for i, s := range cs { - pc, err := parseConstraint(s) - if err != nil { - return nil, err - } - - result[i] = pc - } - or[k] = result - } - - o := &Constraints{constraints: or} - return o, nil -} - -// Check tests if a version satisfies the constraints. -func (cs Constraints) Check(v *Version) bool { - // loop over the ORs and check the inner ANDs - for _, o := range cs.constraints { - joy := true - for _, c := range o { - if !c.check(v) { - joy = false - break - } - } - - if joy { - return true - } - } - - return false -} - -// Validate checks if a version satisfies a constraint. If not a slice of -// reasons for the failure are returned in addition to a bool. -func (cs Constraints) Validate(v *Version) (bool, []error) { - // loop over the ORs and check the inner ANDs - var e []error - - // Capture the prerelease message only once. When it happens the first time - // this var is marked - var prerelesase bool - for _, o := range cs.constraints { - joy := true - for _, c := range o { - // Before running the check handle the case there the version is - // a prerelease and the check is not searching for prereleases. 
- if c.con.pre == "" && v.pre != "" { - if !prerelesase { - em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - e = append(e, em) - prerelesase = true - } - joy = false - - } else { - - if !c.check(v) { - em := fmt.Errorf(c.msg, v, c.orig) - e = append(e, em) - joy = false - } - } - } - - if joy { - return true, []error{} - } - } - - return false, e -} - -var constraintOps map[string]cfunc -var constraintMsg map[string]string -var constraintRegex *regexp.Regexp - -func init() { - constraintOps = map[string]cfunc{ - "": constraintTildeOrEqual, - "=": constraintTildeOrEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "=>": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "=<": constraintLessThanEqual, - "~": constraintTilde, - "~>": constraintTilde, - "^": constraintCaret, - } - - constraintMsg = map[string]string{ - "": "%s is not equal to %s", - "=": "%s is not equal to %s", - "!=": "%s is equal to %s", - ">": "%s is less than or equal to %s", - "<": "%s is greater than or equal to %s", - ">=": "%s is less than %s", - "=>": "%s is less than %s", - "<=": "%s is greater than %s", - "=<": "%s is greater than %s", - "~": "%s does not have same major and minor version as %s", - "~>": "%s does not have same major and minor version as %s", - "^": "%s does not have same major version as %s", - } - - ops := make([]string, 0, len(constraintOps)) - for k := range constraintOps { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegex = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - cvRegex)) - - constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( - `\s*(%s)\s+-\s+(%s)\s*`, - cvRegex, cvRegex)) -} - -// An individual constraint -type constraint struct { - // The callback function for the restraint. It performs the logic for - // the constraint. - function cfunc - - msg string - - // The version used in the constraint check. For example, if a constraint - // is '<= 2.0.0' the con a version instance representing 2.0.0. - con *Version - - // The original parsed version (e.g., 4.x from != 4.x) - orig string - - // When an x is used as part of the version (e.g., 1.x) - minorDirty bool - dirty bool - patchDirty bool -} - -// Check if a version meets the constraint -func (c *constraint) check(v *Version) bool { - return c.function(v, c) -} - -type cfunc func(v *Version, c *constraint) bool - -func parseConstraint(c string) (*constraint, error) { - m := constraintRegex.FindStringSubmatch(c) - if m == nil { - return nil, fmt.Errorf("improper constraint: %s", c) - } - - ver := m[2] - orig := ver - minorDirty := false - patchDirty := false - dirty := false - if isX(m[3]) { - ver = "0.0.0" - dirty = true - } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { - minorDirty = true - dirty = true - ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) - } else if isX(strings.TrimPrefix(m[5], ".")) { - dirty = true - patchDirty = true - ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) - } - - con, err := NewVersion(ver) - if err != nil { - - // The constraintRegex should catch any regex parsing errors. So, - // we should never get here. 
- return nil, errors.New("constraint Parser Error") - } - - cs := &constraint{ - function: constraintOps[m[1]], - msg: constraintMsg[m[1]], - con: con, - orig: orig, - minorDirty: minorDirty, - patchDirty: patchDirty, - dirty: dirty, - } - return cs, nil -} - -// Constraint functions -func constraintNotEqual(v *Version, c *constraint) bool { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if c.con.Major() != v.Major() { - return true - } - if c.con.Minor() != v.Minor() && !c.minorDirty { - return true - } else if c.minorDirty { - return false - } - - return false - } - - return !v.Equal(c.con) -} - -func constraintGreaterThan(v *Version, c *constraint) bool { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - return v.Compare(c.con) == 1 -} - -func constraintLessThan(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if !c.dirty { - return v.Compare(c.con) < 0 - } - - if v.Major() > c.con.Major() { - return false - } else if v.Minor() > c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -func constraintGreaterThanEqual(v *Version, c *constraint) bool { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - return v.Compare(c.con) >= 0 -} - -func constraintLessThanEqual(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if !c.dirty { - return v.Compare(c.con) <= 0 - } - - if v.Major() > c.con.Major() { - return false - } else if v.Minor() > c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -// ~*, ~>* --> >= 0.0.0 (any) -// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 -// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 -// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 -// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 -// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -func constraintTilde(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if v.LessThan(c.con) { - return false - } - - // ~0.0.0 is a special case where all constraints are accepted. It's - // equivalent to >= 0.0.0. 
- if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && - !c.minorDirty && !c.patchDirty { - return true - } - - if v.Major() != c.con.Major() { - return false - } - - if v.Minor() != c.con.Minor() && !c.minorDirty { - return false - } - - return true -} - -// When there is a .x (dirty) status it automatically opts in to ~. Otherwise -// it's a straight = -func constraintTildeOrEqual(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if c.dirty { - c.msg = constraintMsg["~"] - return constraintTilde(v, c) - } - - return v.Equal(c.con) -} - -// ^* --> (any) -// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 -// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 -// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 -// ^1.2.3 --> >=1.2.3, <2.0.0 -// ^1.2.0 --> >=1.2.0, <2.0.0 -func constraintCaret(v *Version, c *constraint) bool { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false - } - - if v.LessThan(c.con) { - return false - } - - if v.Major() != c.con.Major() { - return false - } - - return true -} - -var constraintRangeRegex *regexp.Regexp - -const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` - -func isX(x string) bool { - switch x { - case "x", "*", "X": - return true - default: - return false - } -} - -func rewriteRange(i string) string { - m := constraintRangeRegex.FindAllStringSubmatch(i, -1) - if m == nil { - return i - } - o := i - for _, v := range m { - t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) - o = strings.Replace(o, v[0], t, 1) - } - - return o -} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go deleted file mode 100644 index 6a6c24c6d..000000000 --- a/vendor/github.com/Masterminds/semver/doc.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. - -Specifically it provides the ability to: - - * Parse semantic versions - * Sort semantic versions - * Check if a semantic version fits within a set of constraints - * Optionally work with a `v` prefix - -Parsing Semantic Versions - -To parse a semantic version use the `NewVersion` function. For example, - - v, err := semver.NewVersion("1.2.3-beta.1+build345") - -If there is an error the version wasn't parseable. The version object has methods -to get the parts of the version, compare it to other versions, convert the -version back into a string, and get the original string. For more details -please see the documentation at https://godoc.org/github.com/Masterminds/semver. - -Sorting Semantic Versions - -A set of versions can be sorted using the `sort` package from the standard library. 
-For example, - - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(semver.Collection(vs)) - -Checking Version Constraints - -Checking a version against version constraints is one of the most featureful -parts of the package. - - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parseable. - } - - v, err := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) - -Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of comma separated and comparisons. These are then separated by || separated or -comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. - -The basic comparisons are: - - * `=`: equal (aliased to no operator) - * `!=`: not equal - * `>`: greater than - * `<`: less than - * `>=`: greater than or equal to - * `<=`: less than or equal to - -Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - - * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` - * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` - -Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. When used on the `=` operator it falls -back to the pack level comparison (see tilde below). For example, - - * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` - * `>= 1.2.x` is equivalent to `>= 1.2.0` - * `<= 2.x` is equivalent to `<= 3` - * `*` is equivalent to `>= 0.0.0` - -Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. -For example, - - * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` - * `~1` is equivalent to `>= 1, < 2` - * `~2.3` is equivalent to `>= 2.3, < 2.4` - * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` - * `~1.x` is equivalent to `>= 1, < 2` - -Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes. This is useful -when comparisons of API versions as a major change is API breaking. 
For example, - - * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` - * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` - * `^2.3` is equivalent to `>= 2.3, < 3` - * `^2.x` is equivalent to `>= 2.0.0, < 3` -*/ -package semver diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 000000000..6b061e617 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 000000000..fbc633259 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,27 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - govet + - staticcheck + - errcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 000000000..f12626423 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,214 @@ +# Changelog + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. 
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. + +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. 
The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt similarity index 100% rename from vendor/github.com/Masterminds/semver/LICENSE.txt rename to vendor/github.com/Masterminds/semver/v3/LICENSE.txt diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 000000000..0e7b5c713 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,30 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 000000000..eab8cac3b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,258 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Package Versions + +Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version. 
+ +There are three major versions fo the `semver` package. + +* 3.x.x is the stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the original release. It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue parsing the +version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. Getting the original string is useful if the semantic version was coerced +into a valid form. + +## Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + +```go +raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} +vs := make([]*semver.Version, len(raw)) +for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v +} + +sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses `Constraints`. There are some important +differences to notes between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer that is valid with the + comparison section of the spec at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. 
+ +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go +c, err := semver.NewConstraint(">= 1.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parsable. +} +// Check if the version meets the constraints. The a variable will be true. +a := c.Check(v) +``` + +### Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of space or comma separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of prereleases include +development, alpha, beta, and release candidate releases. A prerelease may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, prereleases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification prereleases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons using constraints without a prerelease comparator will skip +prerelease versions. For example, `>=1.2.3` will skip prereleases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. 
+These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). + +## Security + +Security is an important consideration for this project. The project currently +uses the following tools to help discover security issues: + +* [CodeQL](https://github.com/Masterminds/semver) +* [gosec](https://github.com/securego/gosec) +* Daily Fuzz testing + +If you believe you have found a security vulnerability you can privately disclose +it through the [GitHub security page](https://github.com/Masterminds/semver/security). 
diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md new file mode 100644 index 000000000..a30a66b1f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +The following versions of semver are currently supported: + +| Version | Supported | +| ------- | ------------------ | +| 3.x | :white_check_mark: | +| 2.x | :x: | +| 1.x | :x: | + +Fixes are only released for the latest minor version in the form of a patch release. + +## Reporting a Vulnerability + +You can privately disclose a vulnerability through GitHubs +[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories) +mechanism. diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go similarity index 100% rename from vendor/github.com/Masterminds/semver/collection.go rename to vendor/github.com/Masterminds/semver/v3/collection.go diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 000000000..8461c7ed9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,594 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. 
When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (cs *Constraints) UnmarshalText(text []byte) error { + temp, err := NewConstraint(string(text)) + if err != nil { + return err + } + + *cs = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. 
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = fmt.Sprintf("0.0.0%s", m[6]) + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. 
See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	var eq bool
+
+	if !c.dirty {
+		eq = v.Compare(c.con) == 1
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	if v.Major() > c.con.Major() {
+		return true, nil
+	} else if v.Major() < c.con.Major() {
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.minorDirty {
+		// This is a range case such as >11. When the version is something like
+		// 11.1.0 it is not > 11. For that we would need 12 or higher.
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	} else if c.patchDirty {
+		// This is for ranges such as >11.1. A version of 11.1.1 is not greater,
+		// while one of 11.2.1 is.
+		eq = v.Minor() > c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+	}
+
+	// If we have gotten here we are not comparing prereleases and can use the
+	// Compare function to accomplish that.
+	eq = v.Compare(c.con) == 1
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) < 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
+
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	eq := v.Compare(c.con) >= 0
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
+	// If there is a pre-release on the version but the constraint isn't looking
+	// for them assume that pre-releases are not compatible. See issue 21 for
+	// more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+	}
+
+	// This less than handles prereleases
+	if v.LessThan(c.con) {
+		return false, fmt.Errorf("%s is less than %s", v, c.orig)
+	}
+
+	var eq bool
+
+	// ^ when the major > 0 is >=x.y.z < x+1
+	if c.con.Major() > 0 || c.minorDirty {
+
+		// ^ has to be within a major range for > 0. Everything less than was
+		// filtered out with the LessThan call above. This filters out those
+		// that are greater but not within the same major range.
+		eq = v.Major() == c.con.Major()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+
+	// ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+	if c.con.Major() == 0 && v.Major() > 0 {
+		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+	}
+	// If the constraint minor is > 0, the minor is not dirty
+	if c.con.Minor() > 0 || c.patchDirty {
+		eq = v.Minor() == c.con.Minor()
+		if eq {
+			return true, nil
+		}
+		return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+	}
+	// ^ when the constraint minor is 0 and the version minor is > 0 is =0.0.z
+	if c.con.Minor() == 0 && v.Minor() > 0 {
+		return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+	}
+
+	// At this point the major is 0 and the minor is 0 and not dirty. The patch
+	// is not dirty either, so we need to check if the patch versions are equal;
+	// if they are not, the constraint is not met.
+	eq = c.con.Patch() == v.Patch()
+	if eq {
+		return true, nil
+	}
+	return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+	switch x {
+	case "x", "*", "X":
+		return true
+	default:
+		return false
+	}
+}
+
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 000000000..74f97caa5
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+  - Parse semantic versions
+  - Sort semantic versions
+  - Check if a semantic version fits within a set of constraints
+  - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version, an error is returned if there is an issue parsing the
+version. For example,
+
+	v, err := semver.NewVersion("1.2.3-beta.1+b345")
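+
+Strict parsing, by contrast, rejects anything that is not a complete
+three-part semantic version. A minimal sketch of the difference (the variable
+names here are purely illustrative):
+
+	_, strictErr := semver.StrictNewVersion("1.2") // fails: the patch part is missing
+	coerced, _ := semver.NewVersion("1.2")         // succeeds, coerced to 1.2.0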
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+	raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+	vs := make([]*semver.Version, len(raw))
+	for i, r := range raw {
+		v, err := semver.NewVersion(r)
+		if err != nil {
+			panic(err) // handle the parse error as appropriate
+		}
+
+		vs[i] = v
+	}
+
+	sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other is using Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+    and others it will follow the specification and always include prereleases
+    within the comparison. It will provide an answer consistent with the comparison
+    spec section at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+    different set of rules that are common for ranges with tools like npm/js
+    and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+    simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+    ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+	c, err := semver.NewConstraint(">= 1.2.3")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+	// Check if the version meets the constraints. The variable a will be true.
+	a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`.
+
+The basic comparisons are:
+
+  - `=`: equal (aliased to no operator)
+  - `!=`: not equal
+  - `>`: greater than
+  - `<`: less than
+  - `>=`: greater than or equal to
+  - `<=`: less than or equal to
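+
+For example, a compound constraint can be checked directly. This is a minimal
+sketch using only the functions documented in this package:
+
+	c, err := semver.NewConstraint(">= 1.2, < 3.0.0 || >= 4.2.3")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+	// true: 1.4.7 is >= 1.2 and < 3.0.0
+	_ = c.Check(semver.MustParse("1.4.7"))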
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+  - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+  - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+  - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `>= 1.2.x` is equivalent to `>= 1.2.0`
+  - `<= 2.x` is equivalent to `< 3`
+  - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+  - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+  - `~1` is equivalent to `>= 1, < 2`
+  - `~2.3` is equivalent to `>= 2.3 < 2.4`
+  - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+  - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions, as a
+major change is API breaking. For example,
+
+  - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+  - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+  - `^2.3` is equivalent to `>= 2.3, < 3`
+  - `^2.x` is equivalent to `>= 2.0.0, < 3`
+  - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+  - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+  - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+  - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+  - `^0` is equivalent to `>=0.0.0 <1.0.0`
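+
+For example, a sketch of caret checking; both results follow from the
+equivalences above:
+
+	c, _ := semver.NewConstraint("^1.2.3")
+	c.Check(semver.MustParse("1.4.0")) // true: within >= 1.2.3, < 2.0.0
+	c.Check(semver.MustParse("2.0.0")) // false: the major version changed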
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails, a slice of errors is returned
+explaining why the version did not meet the constraint. For example,
+
+	c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+	if err != nil {
+		// Handle constraint not being parsable.
+	}
+
+	v, err := semver.NewVersion("1.3")
+	if err != nil {
+		// Handle version not being parsable.
+	}
+
+	// Validate a version against a constraint.
+	a, msgs := c.Validate(v)
+	// a is false
+	for _, m := range msgs {
+		fmt.Println(m)
+
+		// Loops over the errors which would read
+		// "1.3 is greater than 1.2.3"
+		// "1.3 is less than 1.4"
+	}
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
similarity index 55%
rename from vendor/github.com/Masterminds/semver/version.go
rename to vendor/github.com/Masterminds/semver/v3/version.go
index 400d4f934..7c4bed334 100644
--- a/vendor/github.com/Masterminds/semver/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -2,6 +2,7 @@ package semver
 
 import (
 	"bytes"
+	"database/sql/driver"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -13,13 +14,23 @@ import (
 
 // The compiled version of the regex created at init() is cached here so it
 // only needs to be created once.
 var versionRegex *regexp.Regexp
-var validPrereleaseRegex *regexp.Regexp
 
 var (
 	// ErrInvalidSemVer is returned when a version is found to be invalid
 	// during parsing.
 	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
 
+	// ErrEmptyString is returned when an empty string is passed in for parsing.
+	ErrEmptyString = errors.New("Version string empty")
+
+	// ErrInvalidCharacters is returned when invalid characters are found as
+	// part of a version
+	ErrInvalidCharacters = errors.New("Invalid characters in version")
+
+	// ErrSegmentStartsZero is returned when a version segment starts with 0.
+	// This is invalid in SemVer.
+	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+
 	// ErrInvalidMetadata is returned when the metadata is an invalid format
 	ErrInvalidMetadata = errors.New("Invalid Metadata string")
 
@@ -27,30 +38,123 @@ var (
 	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
 )
 
-// SemVerRegex is the regular expression used to parse a semantic version.
-const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+// semVerRegex is the regular expression used to parse a semantic version.
+const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
 	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
 	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
 
-// ValidPrerelease is the regular expression which validates
-// both prerelease and metadata values.
-const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$`
-
 // Version represents a single semantic version.
 type Version struct {
-	major, minor, patch int64
+	major, minor, patch uint64
 	pre                 string
 	metadata            string
 	original            string
 }
 
 func init() {
-	versionRegex = regexp.MustCompile("^" + SemVerRegex + "$")
-	validPrereleaseRegex = regexp.MustCompile(ValidPrerelease)
+	versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+}
+
+const (
+	num     string = "0123456789"
+	allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
+)
+
+// StrictNewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. Only parses valid semantic versions.
+// Performs checking that can find errors within the version.
+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x
+// releases of semver did, use the NewVersion() function.
+func StrictNewVersion(v string) (*Version, error) {
+	// Parsing here does not use RegEx in order to increase performance and reduce
+	// allocations.
+
+	if len(v) == 0 {
+		return nil, ErrEmptyString
+	}
+
+	// Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
+	parts := strings.SplitN(v, ".", 3)
+	if len(parts) != 3 {
+		return nil, ErrInvalidSemVer
+	}
+
+	sv := &Version{
+		original: v,
+	}
+
+	// check for prerelease or build metadata
+	var extra []string
+	if strings.ContainsAny(parts[2], "-+") {
+		// Start with the build metadata first as it needs to be on the right
+		extra = strings.SplitN(parts[2], "+", 2)
+		if len(extra) > 1 {
+			// build metadata found
+			sv.metadata = extra[1]
+			parts[2] = extra[0]
+		}
+
+		extra = strings.SplitN(parts[2], "-", 2)
+		if len(extra) > 1 {
+			// prerelease found
+			sv.pre = extra[1]
+			parts[2] = extra[0]
+		}
+	}
+
+	// Validate that the number segments are valid. This includes only having
+	// positive numbers and no leading 0's.
+ for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil } // NewVersion parses a given version and returns an instance of Version or -// an error if unable to parse the version. +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). func NewVersion(v string) (*Version, error) { m := versionRegex.FindStringSubmatch(v) if m == nil { @@ -63,36 +167,65 @@ func NewVersion(v string) (*Version, error) { original: v, } - var temp int64 - temp, err := strconv.ParseInt(m[1], 10, 64) + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } - sv.major = temp if m[2] != "" { - temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } - sv.minor = temp } else { sv.minor = 0 } if m[3] != "" { - temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } - sv.patch = temp } else { sv.patch = 0 } + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + return sv, nil } +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + // MustParse parses a given version and panics on error. func MustParse(v string) *Version { sv, err := NewVersion(v) @@ -107,7 +240,7 @@ func MustParse(v string) *Version { // See the Original() method to retrieve the original value. Semantic Versions // don't contain a leading v per the spec. Instead it's optional on // implementation. -func (v *Version) String() string { +func (v Version) String() string { var buf bytes.Buffer fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) @@ -127,33 +260,32 @@ func (v *Version) Original() string { } // Major returns the major version. 
-func (v *Version) Major() int64 { +func (v Version) Major() uint64 { return v.major } // Minor returns the minor version. -func (v *Version) Minor() int64 { +func (v Version) Minor() uint64 { return v.minor } // Patch returns the patch version. -func (v *Version) Patch() int64 { +func (v Version) Patch() uint64 { return v.patch } // Prerelease returns the pre-release version. -func (v *Version) Prerelease() string { +func (v Version) Prerelease() string { return v.pre } // Metadata returns the metadata on the version. -func (v *Version) Metadata() string { +func (v Version) Metadata() string { return v.metadata } // originalVPrefix returns the original 'v' prefix if any. -func (v *Version) originalVPrefix() string { - +func (v Version) originalVPrefix() string { // Note, only lowercase v is supported as a prefix by the parser. if v.original != "" && v.original[:1] == "v" { return v.original[:1] @@ -165,7 +297,7 @@ func (v *Version) originalVPrefix() string { // If the current version does not have prerelease/metadata information, // it unsets metadata and prerelease values, increments patch number. // If the current version has any of prerelease or metadata information, -// it unsets both values and keeps curent patch value +// it unsets both values and keeps current patch value func (v Version) IncPatch() Version { vNext := v // according to http://semver.org/#spec-item-9 @@ -217,11 +349,13 @@ func (v Version) IncMajor() Version { } // SetPrerelease defines the prerelease value. -// Value must not include the required 'hypen' prefix. +// Value must not include the required 'hyphen' prefix. func (v Version) SetPrerelease(prerelease string) (Version, error) { vNext := v - if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { - return vNext, ErrInvalidPrerelease + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } } vNext.pre = prerelease vNext.original = v.originalVPrefix() + "" + vNext.String() @@ -232,8 +366,10 @@ func (v Version) SetPrerelease(prerelease string) (Version, error) { // Value must not include the required 'plus' prefix. func (v Version) SetMetadata(metadata string) (Version, error) { vNext := v - if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { - return vNext, ErrInvalidMetadata + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } } vNext.metadata = metadata vNext.original = v.originalVPrefix() + "" + vNext.String() @@ -261,7 +397,9 @@ func (v *Version) Equal(o *Version) bool { // the version smaller, equal, or larger than the other version. // // Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is -// lower than the version without a prerelease. +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. func (v *Version) Compare(o *Version) int { // Compare the major, minor, and patch version for differences. If a // difference is found return the comparison. @@ -308,16 +446,54 @@ func (v *Version) UnmarshalJSON(b []byte) error { v.pre = temp.pre v.metadata = temp.metadata v.original = temp.original - temp = nil return nil } // MarshalJSON implements JSON.Marshaler interface. 
-func (v *Version) MarshalJSON() ([]byte, error) { +func (v Version) MarshalJSON() ([]byte, error) { return json.Marshal(v.String()) } -func compareSegment(v, o int64) int { +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(text []byte) error { + temp, err := NewVersion(string(text)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (v Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { if v < o { return -1 } @@ -329,7 +505,6 @@ func compareSegment(v, o int64) int { } func comparePrerelease(v, o string) int { - // split the prelease versions by their part. The separator, per the spec, // is a . sparts := strings.Split(v, ".") @@ -421,5 +596,44 @@ func comparePrePart(s, o string) int { return 1 } return -1 +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." +func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil } diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go deleted file mode 100644 index b42bcd62b..000000000 --- a/vendor/github.com/Masterminds/semver/version_fuzz.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build gofuzz - -package semver - -func Fuzz(data []byte) int { - if _, err := NewVersion(string(data)); err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/Masterminds/sprig/.travis.yml b/vendor/github.com/Masterminds/sprig/.travis.yml deleted file mode 100644 index b9da8b825..000000000 --- a/vendor/github.com/Masterminds/sprig/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -language: go - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -# Setting sudo access to false will let Travis CI use containers rather than -# VMs to run the tests. 
For more details see: -# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ -# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ -sudo: false - -script: - - make setup test - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/sprig/Makefile b/vendor/github.com/Masterminds/sprig/Makefile deleted file mode 100644 index 63a93fdf7..000000000 --- a/vendor/github.com/Masterminds/sprig/Makefile +++ /dev/null @@ -1,13 +0,0 @@ - -HAS_GLIDE := $(shell command -v glide;) - -.PHONY: test -test: - go test -v . - -.PHONY: setup -setup: -ifndef HAS_GLIDE - go get -u github.com/Masterminds/glide -endif - glide install diff --git a/vendor/github.com/Masterminds/sprig/appveyor.yml b/vendor/github.com/Masterminds/sprig/appveyor.yml deleted file mode 100644 index d545a987a..000000000 --- a/vendor/github.com/Masterminds/sprig/appveyor.yml +++ /dev/null @@ -1,26 +0,0 @@ - -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\sprig -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go get -u github.com/Masterminds/glide - - set PATH=%GOPATH%\bin;%PATH% - - go version - - go env - -build_script: - - glide install - - go install ./... - -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/sprig/glide.yaml b/vendor/github.com/Masterminds/sprig/glide.yaml deleted file mode 100644 index f317d2b2b..000000000 --- a/vendor/github.com/Masterminds/sprig/glide.yaml +++ /dev/null @@ -1,19 +0,0 @@ -package: github.com/Masterminds/sprig -import: -- package: github.com/Masterminds/goutils - version: ^1.0.0 -- package: github.com/google/uuid - version: ^1.0.0 -- package: golang.org/x/crypto - subpackages: - - scrypt -- package: github.com/Masterminds/semver - version: ^v1.2.2 -- package: github.com/stretchr/testify - version: ^v1.2.2 -- package: github.com/imdario/mergo - version: ~0.3.7 -- package: github.com/huandu/xstrings - version: ^1.2 -- package: github.com/mitchellh/copystructure - version: ^1.0.0 diff --git a/vendor/github.com/Masterminds/sprig/numeric.go b/vendor/github.com/Masterminds/sprig/numeric.go deleted file mode 100644 index f4af4af2a..000000000 --- a/vendor/github.com/Masterminds/sprig/numeric.go +++ /dev/null @@ -1,169 +0,0 @@ -package sprig - -import ( - "fmt" - "math" - "reflect" - "strconv" -) - -// toFloat64 converts 64-bit floats -func toFloat64(v interface{}) float64 { - if str, ok := v.(string); ok { - iv, err := strconv.ParseFloat(str, 64) - if err != nil { - return 0 - } - return iv - } - - val := reflect.Indirect(reflect.ValueOf(v)) - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return float64(val.Int()) - case reflect.Uint8, reflect.Uint16, reflect.Uint32: - return float64(val.Uint()) - case reflect.Uint, reflect.Uint64: - return float64(val.Uint()) - case reflect.Float32, reflect.Float64: - return val.Float() - case reflect.Bool: - if val.Bool() == true { - return 1 - } - return 0 - default: - return 0 - } -} - -func toInt(v interface{}) int { - //It's not optimal. Bud I don't want duplicate toInt64 code. 
- return int(toInt64(v)) -} - -// toInt64 converts integer types to 64-bit integers -func toInt64(v interface{}) int64 { - if str, ok := v.(string); ok { - iv, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return 0 - } - return iv - } - - val := reflect.Indirect(reflect.ValueOf(v)) - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return val.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32: - return int64(val.Uint()) - case reflect.Uint, reflect.Uint64: - tv := val.Uint() - if tv <= math.MaxInt64 { - return int64(tv) - } - // TODO: What is the sensible thing to do here? - return math.MaxInt64 - case reflect.Float32, reflect.Float64: - return int64(val.Float()) - case reflect.Bool: - if val.Bool() == true { - return 1 - } - return 0 - default: - return 0 - } -} - -func max(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb > aa { - aa = bb - } - } - return aa -} - -func min(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb < aa { - aa = bb - } - } - return aa -} - -func until(count int) []int { - step := 1 - if count < 0 { - step = -1 - } - return untilStep(0, count, step) -} - -func untilStep(start, stop, step int) []int { - v := []int{} - - if stop < start { - if step >= 0 { - return v - } - for i := start; i > stop; i += step { - v = append(v, i) - } - return v - } - - if step <= 0 { - return v - } - for i := start; i < stop; i += step { - v = append(v, i) - } - return v -} - -func floor(a interface{}) float64 { - aa := toFloat64(a) - return math.Floor(aa) -} - -func ceil(a interface{}) float64 { - aa := toFloat64(a) - return math.Ceil(aa) -} - -func round(a interface{}, p int, r_opt ...float64) float64 { - roundOn := .5 - if len(r_opt) > 0 { - roundOn = r_opt[0] - } - val := toFloat64(a) - places := toFloat64(p) - - var round float64 - pow := math.Pow(10, places) - digit := pow * val - _, div := math.Modf(digit) - if div >= roundOn { - round = math.Ceil(digit) - } else { - round = math.Floor(digit) - } - return round / pow -} - -// converts unix octal to decimal -func toDecimal(v interface{}) int64 { - result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) - if err != nil { - return 0 - } - return result -} diff --git a/vendor/github.com/Masterminds/sprig/regex.go b/vendor/github.com/Masterminds/sprig/regex.go deleted file mode 100644 index 2016f6633..000000000 --- a/vendor/github.com/Masterminds/sprig/regex.go +++ /dev/null @@ -1,35 +0,0 @@ -package sprig - -import ( - "regexp" -) - -func regexMatch(regex string, s string) bool { - match, _ := regexp.MatchString(regex, s) - return match -} - -func regexFindAll(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.FindAllString(s, n) -} - -func regexFind(regex string, s string) string { - r := regexp.MustCompile(regex) - return r.FindString(s) -} - -func regexReplaceAll(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllString(s, repl) -} - -func regexReplaceAllLiteral(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllLiteralString(s, repl) -} - -func regexSplit(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.Split(s, n) -} diff --git a/vendor/github.com/Masterminds/sprig/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore similarity index 100% rename from 
vendor/github.com/Masterminds/sprig/.gitignore rename to vendor/github.com/Masterminds/sprig/v3/.gitignore diff --git a/vendor/github.com/Masterminds/sprig/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md similarity index 74% rename from vendor/github.com/Masterminds/sprig/CHANGELOG.md rename to vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md index 6a79fbde4..2ce45dd4e 100644 --- a/vendor/github.com/Masterminds/sprig/CHANGELOG.md +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -1,5 +1,106 @@ # Changelog +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. 
That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + ## Release 2.22.0 (2019-10-02) ### Added diff --git a/vendor/github.com/Masterminds/sprig/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt similarity index 96% rename from vendor/github.com/Masterminds/sprig/LICENSE.txt rename to vendor/github.com/Masterminds/sprig/v3/LICENSE.txt index 5c95accc2..f311b1eaa 100644 --- a/vendor/github.com/Masterminds/sprig/LICENSE.txt +++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt @@ -1,5 +1,4 @@ -Sprig -Copyright (C) 2013 Masterminds +Copyright (C) 2013-2020 Masterminds Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 000000000..78d409cde --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . 
diff --git a/vendor/github.com/Masterminds/sprig/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md similarity index 65% rename from vendor/github.com/Masterminds/sprig/README.md rename to vendor/github.com/Masterminds/sprig/v3/README.md index b70569585..3e22c60e1 100644 --- a/vendor/github.com/Masterminds/sprig/README.md +++ b/vendor/github.com/Masterminds/sprig/v3/README.md @@ -1,6 +1,9 @@ # Sprig: Template functions for Go templates + +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) [![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) -[![Build Status](https://travis-ci.org/Masterminds/sprig.svg?branch=master)](https://travis-ci.org/Masterminds/sprig) +[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions) The Go language comes with a [built-in template language](http://golang.org/pkg/text/template/), but not @@ -11,6 +14,25 @@ It is inspired by the template functions found in [Twig](http://twig.sensiolabs.org/documentation) and in various JavaScript libraries, such as [underscore.js](http://underscorejs.org/). +## IMPORTANT NOTES + +Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In +its v0.3.9 release, there was a behavior change that impacts merging template +functions in sprig. It is currently recommended to use v0.3.10 or later of that package. +Using v0.3.9 will cause sprig tests to fail. + +## Package Versions + +There are two active major versions of the `sprig` package. + +* v3 is currently stable release series on the `master` branch. The Go API should + remain compatible with v2, the current stable version. Behavior change behind + some functions is the reason for the new major version. +* v2 is the previous stable release series. It has been more than three years since + the initial release of v2. You can read the documentation and see the code + on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. + Bug fixes to this major version will continue for some time. 
+ ## Usage **Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for @@ -28,7 +50,7 @@ To load the Sprig `FuncMap`: ```go import ( - "github.com/Masterminds/sprig" + "github.com/Masterminds/sprig/v3" "html/template" ) diff --git a/vendor/github.com/Masterminds/sprig/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go similarity index 65% rename from vendor/github.com/Masterminds/sprig/crypto.go rename to vendor/github.com/Masterminds/sprig/v3/crypto.go index 7a418ba88..13a5cd559 100644 --- a/vendor/github.com/Masterminds/sprig/crypto.go +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -2,10 +2,12 @@ package sprig import ( "bytes" + "crypto" "crypto/aes" "crypto/cipher" "crypto/dsa" "crypto/ecdsa" + "crypto/ed25519" "crypto/elliptic" "crypto/hmac" "crypto/rand" @@ -21,13 +23,16 @@ import ( "encoding/pem" "errors" "fmt" - "io" "hash/adler32" + "io" "math/big" "net" "time" + "strings" + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" "golang.org/x/crypto/scrypt" ) @@ -46,14 +51,38 @@ func adler32sum(input string) string { return fmt.Sprintf("%d", hash) } +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + // uuidv4 provides a safe and secure UUID v4 implementation func uuidv4() string { - return fmt.Sprintf("%s", uuid.New()) + return uuid.New().String() } -var master_password_seed = "com.lyndir.masterpassword" +var masterPasswordSeed = "com.lyndir.masterpassword" -var password_type_templates = map[string][][]byte{ +var passwordTypeTemplates = map[string][][]byte{ "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), @@ -66,7 +95,7 @@ var password_type_templates = map[string][][]byte{ "pin": {[]byte("nnnn")}, } -var template_characters = map[byte]string{ +var templateCharacters = map[byte]string{ 'V': "AEIOU", 'C': "BCDFGHJKLMNPQRSTVWXYZ", 'v': "aeiou", @@ -78,14 +107,14 @@ var template_characters = map[byte]string{ 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", } -func derivePassword(counter uint32, password_type, password, user, site string) string { - var templates = password_type_templates[password_type] +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] if templates == nil { - return fmt.Sprintf("cannot find password template %s", password_type) + return fmt.Sprintf("cannot find password template %s", passwordType) } var buffer bytes.Buffer - buffer.WriteString(master_password_seed) + buffer.WriteString(masterPasswordSeed) binary.Write(&buffer, binary.BigEndian, uint32(len(user))) 
buffer.WriteString(user) @@ -95,7 +124,7 @@ func derivePassword(counter uint32, password_type, password, user, site string) return fmt.Sprintf("failed to derive password: %s", err) } - buffer.Truncate(len(master_password_seed)) + buffer.Truncate(len(masterPasswordSeed)) binary.Write(&buffer, binary.BigEndian, uint32(len(site))) buffer.WriteString(site) binary.Write(&buffer, binary.BigEndian, counter) @@ -107,9 +136,9 @@ func derivePassword(counter uint32, password_type, password, user, site string) buffer.Truncate(0) for i, element := range temp { - pass_chars := template_characters[element] - pass_char := pass_chars[int(seed[i+1])%len(pass_chars)] - buffer.WriteByte(pass_char) + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) } return buffer.String() @@ -133,6 +162,8 @@ func generatePrivateKey(typ string) string { case "ecdsa": // again, good enough for government work priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) default: return "Unknown type " + typ } @@ -143,6 +174,8 @@ func generatePrivateKey(typ string) string { return string(pem.EncodeToMemory(pemBlockForKey(priv))) } +// DSAKeyFormat stores the format for DSA keys. +// Used by pemBlockForKey type DSAKeyFormat struct { Version int P, Q, G, Y, X *big.Int @@ -163,7 +196,73 @@ func pemBlockForKey(priv interface{}) *pem.Block { b, _ := x509.MarshalECPrivateKey(k) return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} default: - return nil + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) } } @@ -197,14 +296,10 @@ func buildCustomCertificate(b64cert string, b64key string) (certificate, error) ) } - 
decodedKey, _ := pem.Decode(key) - if decodedKey == nil { - return crt, errors.New("unable to decode key") - } - _, err = x509.ParsePKCS1PrivateKey(decodedKey.Bytes) + _, err = parsePrivateKeyPEM(string(key)) if err != nil { return crt, fmt.Errorf( - "error parsing prive key: decodedKey.Bytes: %s", + "error parsing private key: %s", err, ) } @@ -218,6 +313,31 @@ func buildCustomCertificate(b64cert string, b64key string) (certificate, error) func generateCertificateAuthority( cn string, daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, ) (certificate, error) { ca := certificate{} @@ -231,24 +351,44 @@ func generateCertificateAuthority( x509.KeyUsageCertSign template.IsCA = true + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { priv, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { - return ca, fmt.Errorf("error generating rsa key: %s", err) + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} - ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) if err != nil { - return ca, err + return certificate{}, fmt.Errorf("parsing private key: %s", err) } - - return ca, nil + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) } -func generateSelfSignedCertificate( +func generateSelfSignedCertificateWithKeyInternal( cn string, ips []interface{}, alternateDNS []interface{}, daysValid int, + priv crypto.PrivateKey, ) (certificate, error) { cert := certificate{} @@ -257,25 +397,47 @@ func generateSelfSignedCertificate( return cert, err } + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { priv, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { - return cert, fmt.Errorf("error generating rsa key: %s", err) + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} - cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) if 
err != nil { - return cert, err + return certificate{}, fmt.Errorf("parsing private key: %s", err) } - - return cert, nil + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) } -func generateSignedCertificate( +func generateSignedCertificateWithKeyInternal( cn string, ips []interface{}, alternateDNS []interface{}, daysValid int, ca certificate, + priv crypto.PrivateKey, ) (certificate, error) { cert := certificate{} @@ -290,14 +452,10 @@ func generateSignedCertificate( err, ) } - decodedSignerKey, _ := pem.Decode([]byte(ca.Key)) - if decodedSignerKey == nil { - return cert, errors.New("unable to decode key") - } - signerKey, err := x509.ParsePKCS1PrivateKey(decodedSignerKey.Bytes) + signerKey, err := parsePrivateKeyPEM(ca.Key) if err != nil { return cert, fmt.Errorf( - "error parsing prive key: decodedSignerKey.Bytes: %s", + "error parsing private key: %s", err, ) } @@ -307,35 +465,31 @@ func generateSignedCertificate( return cert, err } - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return cert, fmt.Errorf("error generating rsa key: %s", err) - } - cert.Cert, cert.Key, err = getCertAndKey( template, priv, signerCert, signerKey, ) - if err != nil { - return cert, err - } - return cert, nil + return cert, err } func getCertAndKey( template *x509.Certificate, - signeeKey *rsa.PrivateKey, + signeeKey crypto.PrivateKey, parent *x509.Certificate, - signingKey *rsa.PrivateKey, + signingKey crypto.PrivateKey, ) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } derBytes, err := x509.CreateCertificate( rand.Reader, template, parent, - &signeeKey.PublicKey, + signeePubKey, signingKey, ) if err != nil { @@ -353,15 +507,12 @@ func getCertAndKey( keyBuffer := bytes.Buffer{} if err := pem.Encode( &keyBuffer, - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(signeeKey), - }, + pemBlockForKey(signeeKey), ); err != nil { return "", "", fmt.Errorf("error pem-encoding key: %s", err) } - return string(certBuffer.Bytes()), string(keyBuffer.Bytes()), nil + return certBuffer.String(), keyBuffer.String(), nil } func getBaseCertTemplate( diff --git a/vendor/github.com/Masterminds/sprig/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go similarity index 52% rename from vendor/github.com/Masterminds/sprig/date.go rename to vendor/github.com/Masterminds/sprig/v3/date.go index d1d6155d7..ed022ddac 100644 --- a/vendor/github.com/Masterminds/sprig/date.go +++ b/vendor/github.com/Masterminds/sprig/v3/date.go @@ -55,6 +55,14 @@ func dateModify(fmt string, date time.Time) time.Time { return date.Add(d) } +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + func dateAgo(date interface{}) string { var t time.Time @@ -73,11 +81,72 @@ func dateAgo(date interface{}) string { return duration.String() } +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case 
time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + func toDate(fmt, str string) time.Time { t, _ := time.ParseInLocation(fmt, str, time.Local) return t } +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + func unixEpoch(date time.Time) string { return strconv.FormatInt(date.Unix(), 10) } diff --git a/vendor/github.com/Masterminds/sprig/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go similarity index 53% rename from vendor/github.com/Masterminds/sprig/defaults.go rename to vendor/github.com/Masterminds/sprig/v3/defaults.go index ed6a8ab29..b9f979666 100644 --- a/vendor/github.com/Masterminds/sprig/defaults.go +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -1,10 +1,18 @@ package sprig import ( + "bytes" "encoding/json" + "math/rand" "reflect" + "strings" + "time" ) +func init() { + rand.Seed(time.Now().UnixNano()) +} + // dfault checks whether `given` is set, and returns default if not set. // // This returns `d` if `given` appears not to be set, and `given` otherwise. @@ -37,7 +45,7 @@ func empty(given interface{}) bool { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return g.Len() == 0 case reflect.Bool: - return g.Bool() == false + return !g.Bool() case reflect.Complex64, reflect.Complex128: return g.Complex() == 0 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -61,18 +69,90 @@ func coalesce(v ...interface{}) interface{} { return nil } +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. 
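+// An illustrative contrast, not part of the upstream file: in a template,
+// mustFromJson surfaces a parse error to the engine, while fromJson swallows it:
+//   {{ (mustFromJson `{"replicas":3}`).replicas }}   renders 3
+//   {{ fromJson `not-json` }}                        renders <no value>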
+func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + // toJson encodes an item into a JSON string func toJson(v interface{}) string { output, _ := json.Marshal(v) return string(output) } +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + // toPrettyJson encodes an item into a pretty (indented) JSON string func toPrettyJson(v interface{}) string { output, _ := json.MarshalIndent(v, "", " ") return string(output) } +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + // ternary returns the first value if the last value is true, otherwise returns the second value. func ternary(vt interface{}, vf interface{}, v bool) interface{} { if v { diff --git a/vendor/github.com/Masterminds/sprig/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go similarity index 63% rename from vendor/github.com/Masterminds/sprig/dict.go rename to vendor/github.com/Masterminds/sprig/v3/dict.go index 738405b43..ade889698 100644 --- a/vendor/github.com/Masterminds/sprig/dict.go +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -5,6 +5,13 @@ import ( "github.com/mitchellh/copystructure" ) +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { d[key] = value return d @@ -90,6 +97,15 @@ func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface return dst } +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { for _, src := range srcs { if err := mergo.MergeWithOverwrite(&dst, src); err != nil { @@ -100,6 +116,15 @@ func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) return dst } +func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + func values(dict map[string]interface{}) []interface{} { values := []interface{}{} for _, value := range dict { @@ -110,10 +135,40 @@ func values(dict map[string]interface{}) []interface{} { } func deepCopy(i interface{}) interface{} { - c, err := copystructure.Copy(i) + c, err := mustDeepCopy(i) if err != nil { panic("deepCopy error: " + err.Error()) } return c } + +func mustDeepCopy(i interface{}) (interface{}, error) { + return 
copystructure.Copy(i) +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/Masterminds/sprig/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go similarity index 92% rename from vendor/github.com/Masterminds/sprig/doc.go rename to vendor/github.com/Masterminds/sprig/v3/doc.go index 8f8f1d737..aabb9d448 100644 --- a/vendor/github.com/Masterminds/sprig/doc.go +++ b/vendor/github.com/Masterminds/sprig/v3/doc.go @@ -1,5 +1,5 @@ /* -Sprig: Template functions for Go. +Package sprig provides template functions for Go. This package contains a number of utility functions for working with data inside of Go `html/template` and `text/template` files. diff --git a/vendor/github.com/Masterminds/sprig/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go similarity index 58% rename from vendor/github.com/Masterminds/sprig/functions.go rename to vendor/github.com/Masterminds/sprig/v3/functions.go index 7b5b0af86..57fcec1d9 100644 --- a/vendor/github.com/Masterminds/sprig/functions.go +++ b/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -3,8 +3,10 @@ package sprig import ( "errors" "html/template" + "math/rand" "os" "path" + "path/filepath" "reflect" "strconv" "strings" @@ -13,9 +15,10 @@ import ( util "github.com/Masterminds/goutils" "github.com/huandu/xstrings" + "github.com/shopspring/decimal" ) -// Produce the function map. +// FuncMap produces the function map. // // Use this to pass the functions into the template engine: // @@ -63,7 +66,7 @@ func GenericFuncMap() map[string]interface{} { } // These functions are not guaranteed to evaluate to the same result for given input, because they -// refer to the environemnt or global state. +// refer to the environment or global state. var nonhermeticFunctions = []string{ // Date functions "date", @@ -80,6 +83,7 @@ var nonhermeticFunctions = []string{ "randAlpha", "randAscii", "randNumeric", + "randBytes", "uuidv4", // OS @@ -94,17 +98,22 @@ var genericMap = map[string]interface{}{ "hello": func() string { return "Hello!" 
}, // Date functions - "date": date, - "date_in_zone": dateInZone, - "date_modify": dateModify, - "now": func() time.Time { return time.Now() }, - "htmlDate": htmlDate, - "htmlDateInZone": htmlDateInZone, - "dateInZone": dateInZone, - "dateModify": dateModify, - "ago": dateAgo, - "toDate": toDate, - "unixEpoch": unixEpoch, + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, // Strings "abbrev": abbrev, @@ -158,6 +167,7 @@ var genericMap = map[string]interface{}{ "int64": toInt64, "int": toInt, "float64": toFloat64, + "seq": seq, "toDecimal": toDecimal, //"gt": func(a, b int) bool {return a > b}, @@ -194,9 +204,28 @@ var genericMap = map[string]interface{}{ } return val }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, "biggest": max, "max": max, "min": min, + "maxf": maxf, + "minf": minf, "ceil": ceil, "floor": floor, "round": round, @@ -207,14 +236,24 @@ var genericMap = map[string]interface{}{ "sortAlpha": sortAlpha, // Defaults - "default": dfault, - "empty": empty, - "coalesce": coalesce, - "compact": compact, - "deepCopy": deepCopy, - "toJson": toJson, - "toPrettyJson": toPrettyJson, - "ternary": ternary, + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, // Reflection "typeOf": typeOf, @@ -225,19 +264,26 @@ var genericMap = map[string]interface{}{ "deepEqual": reflect.DeepEqual, // OS: - "env": func(s string) string { return os.Getenv(s) }, - "expandenv": func(s string) string { return os.ExpandEnv(s) }, + "env": os.Getenv, + "expandenv": os.ExpandEnv, // Network: "getHostByName": getHostByName, - // File Paths: + // Paths: "base": path.Base, "dir": path.Dir, "clean": path.Clean, "ext": path.Ext, "isAbs": path.IsAbs, + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + // Encoding: "b64enc": base64encode, "b64dec": base64decode, @@ -245,42 +291,65 @@ var genericMap = 
map[string]interface{}{ "b32dec": base32decode, // Data Structures: - "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. - "list": list, - "dict": dict, - "set": set, - "unset": unset, - "hasKey": hasKey, - "pluck": pluck, - "keys": keys, - "pick": pick, - "omit": omit, - "merge": merge, - "mergeOverwrite": mergeOverwrite, - "values": values, + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, "append": push, "push": push, - "prepend": prepend, - "first": first, - "rest": rest, - "last": last, - "initial": initial, - "reverse": reverse, - "uniq": uniq, - "without": without, - "has": has, - "slice": slice, - "concat": concat, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, "genPrivateKey": generatePrivateKey, "derivePassword": derivePassword, "buildCustomCert": buildCustomCertificate, "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, "encryptAES": encryptAES, "decryptAES": decryptAES, + "randBytes": randBytes, // UUIDs: "uuidv4": uuidv4, @@ -293,12 +362,19 @@ var genericMap = map[string]interface{}{ "fail": func(msg string) (string, error) { return "", errors.New(msg) }, // Regex - "regexMatch": regexMatch, - "regexFindAll": regexFindAll, - "regexFind": regexFind, - "regexReplaceAll": regexReplaceAll, - "regexReplaceAllLiteral": regexReplaceAllLiteral, - "regexSplit": regexSplit, + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, // URLs: "urlParse": urlParse, diff --git a/vendor/github.com/Masterminds/sprig/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go similarity index 58% rename from vendor/github.com/Masterminds/sprig/list.go rename to vendor/github.com/Masterminds/sprig/v3/list.go index c0381bbb6..ca0fbb789 100644 --- a/vendor/github.com/Masterminds/sprig/list.go +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -2,6 +2,7 @@ package sprig import ( "fmt" + "math" "reflect" "sort" ) @@ -15,6 +16,15 @@ func 
list(v ...interface{}) []interface{} { } func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -26,14 +36,23 @@ func push(list interface{}, v interface{}) []interface{} { nl[i] = l2.Index(i).Interface() } - return append(nl, v) + return append(nl, v), nil default: - panic(fmt.Sprintf("Cannot push on type %s", tp)) + return nil, fmt.Errorf("Cannot push on type %s", tp) } } func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { //return append([]interface{}{v}, list...) tp := reflect.TypeOf(list).Kind() @@ -47,14 +66,67 @@ func prepend(list interface{}, v interface{}) []interface{} { nl[i] = l2.Index(i).Interface() } - return append([]interface{}{v}, nl...) + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil default: - panic(fmt.Sprintf("Cannot prepend on type %s", tp)) + return nil, fmt.Errorf("Cannot chunk type %s", tp) } } func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -62,16 +134,25 @@ func last(list interface{}) interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } - return l2.Index(l - 1).Interface() + return l2.Index(l - 1).Interface(), nil default: - panic(fmt.Sprintf("Cannot find last on type %s", tp)) + return nil, fmt.Errorf("Cannot find last on type %s", tp) } } func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -79,16 +160,25 @@ func first(list interface{}) interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } - return l2.Index(0).Interface() + return l2.Index(0).Interface(), nil default: - panic(fmt.Sprintf("Cannot find first on type %s", tp)) + return nil, fmt.Errorf("Cannot find first on type %s", tp) } } func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -96,7 +186,7 @@ func 
rest(list interface{}) []interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } nl := make([]interface{}, l-1) @@ -104,13 +194,22 @@ func rest(list interface{}) []interface{} { nl[i-1] = l2.Index(i).Interface() } - return nl + return nl, nil default: - panic(fmt.Sprintf("Cannot find rest on type %s", tp)) + return nil, fmt.Errorf("Cannot find rest on type %s", tp) } } func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -118,7 +217,7 @@ func initial(list interface{}) []interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } nl := make([]interface{}, l-1) @@ -126,9 +225,9 @@ func initial(list interface{}) []interface{} { nl[i] = l2.Index(i).Interface() } - return nl + return nl, nil default: - panic(fmt.Sprintf("Cannot find initial on type %s", tp)) + return nil, fmt.Errorf("Cannot find initial on type %s", tp) } } @@ -145,6 +244,15 @@ func sortAlpha(list interface{}) []string { } func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { tp := reflect.TypeOf(v).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -157,13 +265,22 @@ func reverse(v interface{}) []interface{} { nl[l-i-1] = l2.Index(i).Interface() } - return nl + return nl, nil default: - panic(fmt.Sprintf("Cannot find reverse on type %s", tp)) + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) } } func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -179,13 +296,22 @@ func compact(list interface{}) []interface{} { } } - return nl + return nl, nil default: - panic(fmt.Sprintf("Cannot compact on type %s", tp)) + return nil, fmt.Errorf("Cannot compact on type %s", tp) } } func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -201,9 +327,9 @@ func uniq(list interface{}) []interface{} { } } - return dest + return dest, nil default: - panic(fmt.Sprintf("Cannot find uniq on type %s", tp)) + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) } } @@ -217,6 +343,15 @@ func inList(haystack []interface{}, needle interface{}) bool { } func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
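+	// Illustrative note, not in the upstream source: `without` keeps sprig's
+	// panic-on-error contract for templates, while `mustWithout` returns the
+	// error instead; e.g. {{ without (list 1 2 3) 2 }} renders [1 3].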
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -232,15 +367,24 @@ func without(list interface{}, omit ...interface{}) []interface{} { } } - return res + return res, nil default: - panic(fmt.Sprintf("Cannot find without on type %s", tp)) + return nil, fmt.Errorf("Cannot find without on type %s", tp) } } func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { if haystack == nil { - return false + return false, nil } tp := reflect.TypeOf(haystack).Kind() switch tp { @@ -251,13 +395,13 @@ func has(needle interface{}, haystack interface{}) bool { for i := 0; i < l; i++ { item = l2.Index(i).Interface() if reflect.DeepEqual(needle, item) { - return true + return true, nil } } - return false + return false, nil default: - panic(fmt.Sprintf("Cannot find has on type %s", tp)) + return false, fmt.Errorf("Cannot find has on type %s", tp) } } @@ -267,6 +411,15 @@ func has(needle interface{}, haystack interface{}) bool { // slice $list 3 5 -> list[3:5] // slice $list 3 -> list[3:5] = list[3:] func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { tp := reflect.TypeOf(list).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -274,7 +427,7 @@ func slice(list interface{}, indices ...interface{}) interface{} { l := l2.Len() if l == 0 { - return nil + return nil, nil } var start, end int @@ -287,9 +440,9 @@ func slice(list interface{}, indices ...interface{}) interface{} { end = toInt(indices[1]) } - return l2.Slice(start, end).Interface() + return l2.Slice(start, end).Interface(), nil default: - panic(fmt.Sprintf("list should be type of slice or array but %s", tp)) + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) } } diff --git a/vendor/github.com/Masterminds/sprig/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go similarity index 75% rename from vendor/github.com/Masterminds/sprig/network.go rename to vendor/github.com/Masterminds/sprig/v3/network.go index d786cc736..108d78a94 100644 --- a/vendor/github.com/Masterminds/sprig/network.go +++ b/vendor/github.com/Masterminds/sprig/v3/network.go @@ -7,6 +7,6 @@ import ( func getHostByName(name string) string { addrs, _ := net.LookupHost(name) - //TODO: add error handing when release v3 cames out + //TODO: add error handling when release v3 comes out return addrs[rand.Intn(len(addrs))] } diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go new file mode 100644 index 000000000..f68e4182e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go @@ -0,0 +1,186 @@ +package sprig + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/spf13/cast" + "github.com/shopspring/decimal" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + return cast.ToFloat64(v) +} + +func toInt(v interface{}) int { + return cast.ToInt(v) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + return cast.ToInt64(v) +} + +func max(a interface{}, i ...interface{}) int64 { 
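+	// Illustrative note, not in the upstream source: arguments are coerced via
+	// toInt64, so mixed numeric types compare fine; e.g. {{ max 1 7 3 }} -> 7.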
aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} + +// performs a float and subsequent decimal.Decimal conversion on inputs, +// and iterates through a and b executing the mathematical operation f +func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { + prt := decimal.NewFromFloat(toFloat64(a)) + for _, x := range b { + dx := decimal.NewFromFloat(toFloat64(x)) + prt = f(prt, dx) + } + rslt, _ := prt.Float64() + return rslt +} diff --git a/vendor/github.com/Masterminds/sprig/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/Masterminds/sprig/reflect.go rename to vendor/github.com/Masterminds/sprig/v3/reflect.go diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 000000000..fab551018 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" 
+) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/Masterminds/sprig/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go similarity index 90% rename from vendor/github.com/Masterminds/sprig/semver.go rename to vendor/github.com/Masterminds/sprig/v3/semver.go index c2bf8a1fd..3fbe08aa6 100644 --- a/vendor/github.com/Masterminds/sprig/semver.go +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -1,7 +1,7 @@ package sprig import ( - sv2 "github.com/Masterminds/semver" + sv2 "github.com/Masterminds/semver/v3" ) func semverCompare(constraint, version string) (bool, error) { diff --git a/vendor/github.com/Masterminds/sprig/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go similarity index 97% rename from vendor/github.com/Masterminds/sprig/strings.go rename to vendor/github.com/Masterminds/sprig/v3/strings.go index 943fa3e8a..e0ae628c8 100644 --- a/vendor/github.com/Masterminds/sprig/strings.go +++ b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -154,9 +154,9 @@ func strslice(v interface{}) []string { default: if v == nil { return []string{} - } else { - return []string{strval(v)} } + + return []string{strval(v)} } } } @@ -187,10 +187,13 @@ func strval(v interface{}) string { } func trunc(c int, s string) string { - if len(s) <= c { - return s + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] } - return s[0:c] + return s } func join(sep string, v interface{}) string { diff --git a/vendor/github.com/Masterminds/sprig/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go similarity index 64% rename from vendor/github.com/Masterminds/sprig/url.go 
rename to vendor/github.com/Masterminds/sprig/v3/url.go index 5f22d801f..b8e120e19 100644 --- a/vendor/github.com/Masterminds/sprig/url.go +++ b/vendor/github.com/Masterminds/sprig/v3/url.go @@ -7,7 +7,8 @@ import ( ) func dictGetOrEmpty(dict map[string]interface{}, key string) string { - value, ok := dict[key]; if !ok { + value, ok := dict[key] + if !ok { return "" } tp := reflect.TypeOf(value).Kind() @@ -20,19 +21,19 @@ func dictGetOrEmpty(dict map[string]interface{}, key string) string { // parses given URL to return dict object func urlParse(v string) map[string]interface{} { dict := map[string]interface{}{} - parsedUrl, err := url.Parse(v) + parsedURL, err := url.Parse(v) if err != nil { panic(fmt.Sprintf("unable to parse url: %s", err)) } - dict["scheme"] = parsedUrl.Scheme - dict["host"] = parsedUrl.Host - dict["hostname"] = parsedUrl.Hostname() - dict["path"] = parsedUrl.Path - dict["query"] = parsedUrl.RawQuery - dict["opaque"] = parsedUrl.Opaque - dict["fragment"] = parsedUrl.Fragment - if parsedUrl.User != nil { - dict["userinfo"] = parsedUrl.User.String() + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() } else { dict["userinfo"] = "" } @@ -42,25 +43,24 @@ func urlParse(v string) map[string]interface{} { // join given dict to URL string func urlJoin(d map[string]interface{}) string { - resUrl := url.URL{ + resURL := url.URL{ Scheme: dictGetOrEmpty(d, "scheme"), Host: dictGetOrEmpty(d, "host"), Path: dictGetOrEmpty(d, "path"), RawQuery: dictGetOrEmpty(d, "query"), Opaque: dictGetOrEmpty(d, "opaque"), Fragment: dictGetOrEmpty(d, "fragment"), - } userinfo := dictGetOrEmpty(d, "userinfo") - var user *url.Userinfo = nil + var user *url.Userinfo if userinfo != "" { - tempUrl, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) if err != nil { panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) } - user = tempUrl.User + user = tempURL.User } - resUrl.User = user - return resUrl.String() + resURL.User = user + return resURL.String() } diff --git a/vendor/github.com/Masterminds/squirrel/.gitignore b/vendor/github.com/Masterminds/squirrel/.gitignore new file mode 100644 index 000000000..4a0699f0b --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/.gitignore @@ -0,0 +1 @@ +squirrel.test \ No newline at end of file diff --git a/vendor/github.com/Masterminds/squirrel/.travis.yml b/vendor/github.com/Masterminds/squirrel/.travis.yml new file mode 100644 index 000000000..7bb6da487 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/.travis.yml @@ -0,0 +1,30 @@ +language: go + +go: + - 1.11.x + - 1.12.x + - 1.13.x + +services: + - mysql + - postgresql + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +before_script: + - mysql -e 'CREATE DATABASE squirrel;' + - psql -c 'CREATE DATABASE squirrel;' -U postgres + +script: + - go test + - cd integration + - go test -args -driver sqlite3 + - go test -args -driver mysql -dataSource travis@/squirrel + - go test -args -driver postgres -dataSource 'postgres://postgres@localhost/squirrel?sslmode=disable' + +notifications: + irc: "irc.freenode.net#masterminds" diff --git a/vendor/github.com/Masterminds/squirrel/LICENSE b/vendor/github.com/Masterminds/squirrel/LICENSE new file mode 100644 index 000000000..b459007fd --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/LICENSE @@ -0,0 +1,23 @@ +MIT License + +Squirrel: The Masterminds +Copyright (c) 2014-2015, Lann Martin. Copyright (C) 2015-2016, Google. Copyright (C) 2015, Matt Farina and Matt Butcher. + + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Masterminds/squirrel/README.md b/vendor/github.com/Masterminds/squirrel/README.md new file mode 100644 index 000000000..1d37f282f --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/README.md @@ -0,0 +1,142 @@ +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +### Squirrel is "complete". +Bug fixes will still be merged (slowly). Bug reports are welcome, but I will not necessarily respond to them. If another fork (or substantially similar project) actively improves on what Squirrel does, let me know and I may link to it here. 
+ + +# Squirrel - fluent SQL generator for Go + +```go +import "github.com/Masterminds/squirrel" +``` + + +[![GoDoc](https://godoc.org/github.com/Masterminds/squirrel?status.png)](https://godoc.org/github.com/Masterminds/squirrel) +[![Build Status](https://api.travis-ci.org/Masterminds/squirrel.svg?branch=master)](https://travis-ci.org/Masterminds/squirrel) + +**Squirrel is not an ORM.** For an application of Squirrel, check out +[structable, a table-struct mapper](https://github.com/Masterminds/structable) + + +Squirrel helps you build SQL queries from composable parts: + +```go +import sq "github.com/Masterminds/squirrel" + +users := sq.Select("*").From("users").Join("emails USING (email_id)") + +active := users.Where(sq.Eq{"deleted_at": nil}) + +sql, args, err := active.ToSql() + +sql == "SELECT * FROM users JOIN emails USING (email_id) WHERE deleted_at IS NULL" +``` + +```go +sql, args, err := sq. + Insert("users").Columns("name", "age"). + Values("moe", 13).Values("larry", sq.Expr("? + 5", 12)). + ToSql() + +sql == "INSERT INTO users (name,age) VALUES (?,?),(?,? + 5)" +``` + +Squirrel can also execute queries directly: + +```go +stooges := users.Where(sq.Eq{"username": []string{"moe", "larry", "curly", "shemp"}}) +three_stooges := stooges.Limit(3) +rows, err := three_stooges.RunWith(db).Query() + +// Behaves like: +rows, err := db.Query("SELECT * FROM users WHERE username IN (?,?,?,?) LIMIT 3", + "moe", "larry", "curly", "shemp") +``` + +Squirrel makes conditional query building a breeze: + +```go +if len(q) > 0 { + users = users.Where("name LIKE ?", fmt.Sprint("%", q, "%")) +} +``` + +Squirrel wants to make your life easier: + +```go +// StmtCache caches Prepared Stmts for you +dbCache := sq.NewStmtCache(db) + +// StatementBuilder keeps your syntax neat +mydb := sq.StatementBuilder.RunWith(dbCache) +select_users := mydb.Select("*").From("users") +``` + +Squirrel loves PostgreSQL: + +```go +psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar) + +// You use question marks for placeholders... +sql, _, _ := psql.Select("*").From("elephants").Where("name IN (?,?)", "Dumbo", "Verna").ToSql() + +/// ...squirrel replaces them using PlaceholderFormat. +sql == "SELECT * FROM elephants WHERE name IN ($1,$2)" + + +/// You can retrieve id ... +query := sq.Insert("nodes"). + Columns("uuid", "type", "data"). + Values(node.Uuid, node.Type, node.Data). + Suffix("RETURNING \"id\""). + RunWith(m.db). + PlaceholderFormat(sq.Dollar) + +query.QueryRow().Scan(&node.id) +``` + +You can escape question marks by inserting two question marks: + +```sql +SELECT * FROM nodes WHERE meta->'format' ??| array[?,?] +``` + +will generate with the Dollar Placeholder: + +```sql +SELECT * FROM nodes WHERE meta->'format' ?| array[$1,$2] +``` + +## FAQ + +* **How can I build an IN query on composite keys / tuples, e.g. `WHERE (col1, col2) IN ((1,2),(3,4))`? ([#104](https://github.com/Masterminds/squirrel/issues/104))** + + Squirrel does not explicitly support tuples, but you can get the same effect with e.g.: + + ```go + sq.Or{ + sq.Eq{"col1": 1, "col2": 2}, + sq.Eq{"col1": 3, "col2": 4}} + ``` + + ```sql + WHERE (col1 = 1 AND col2 = 2) OR (col1 = 3 AND col2 = 4) + ``` + + (which should produce the same query plan as the tuple version) + +* **Why doesn't `Eq{"mynumber": []uint8{1,2,3}}` turn into an `IN` query? ([#114](https://github.com/Masterminds/squirrel/issues/114))** + + Values of type `[]byte` are handled specially by `database/sql`. 
In Go, [`byte` is just an alias of `uint8`](https://golang.org/pkg/builtin/#byte), so there is no way to distinguish `[]uint8` from `[]byte`. + +* **Some features are poorly documented!** + + This isn't a frequent complaints section! + +* **Some features are poorly documented?** + + Yes. The tests should be considered a part of the documentation; take a look at those for ideas on how to express more complex queries. + +## License + +Squirrel is released under the +[MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/Masterminds/squirrel/case.go b/vendor/github.com/Masterminds/squirrel/case.go new file mode 100644 index 000000000..299e14b9d --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/case.go @@ -0,0 +1,128 @@ +package squirrel + +import ( + "bytes" + "errors" + + "github.com/lann/builder" +) + +func init() { + builder.Register(CaseBuilder{}, caseData{}) +} + +// sqlizerBuffer is a helper that allows writing many Sqlizers one by one +// without constant checks for errors that may come from Sqlizer +type sqlizerBuffer struct { + bytes.Buffer + args []interface{} + err error +} + +// WriteSql converts a Sqlizer to SQL and writes it to the buffer +func (b *sqlizerBuffer) WriteSql(item Sqlizer) { + if b.err != nil { + return + } + + var str string + var args []interface{} + str, args, b.err = nestedToSql(item) + + if b.err != nil { + return + } + + b.WriteString(str) + b.WriteByte(' ') + b.args = append(b.args, args...) +} + +func (b *sqlizerBuffer) ToSql() (string, []interface{}, error) { + return b.String(), b.args, b.err +} + +// whenPart is a helper structure to describe SQL's "WHEN ... THEN ..." expression +type whenPart struct { + when Sqlizer + then Sqlizer +} + +func newWhenPart(when interface{}, then interface{}) whenPart { + return whenPart{newPart(when), newPart(then)} +} + +// caseData holds all the data required to build a CASE SQL construct +type caseData struct { + What Sqlizer + WhenParts []whenPart + Else Sqlizer +} + +// ToSql implements Sqlizer +func (d *caseData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.WhenParts) == 0 { + err = errors.New("case expression must contain at least one WHEN clause") + + return + } + + sql := sqlizerBuffer{} + + sql.WriteString("CASE ") + if d.What != nil { + sql.WriteSql(d.What) + } + + for _, p := range d.WhenParts { + sql.WriteString("WHEN ") + sql.WriteSql(p.when) + sql.WriteString("THEN ") + sql.WriteSql(p.then) + } + + if d.Else != nil { + sql.WriteString("ELSE ") + sql.WriteSql(d.Else) + } + + sql.WriteString("END") + + return sql.ToSql() +} + +// CaseBuilder builds an SQL CASE construct which can be used as part of a query. +type CaseBuilder builder.Builder + +// ToSql builds the query into a SQL string and bound args. +func (b CaseBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(caseData) + return data.ToSql() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. +func (b CaseBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// what sets optional value for CASE construct "CASE [value] ..." +func (b CaseBuilder) what(expr interface{}) CaseBuilder { + return builder.Set(b, "What", newPart(expr)).(CaseBuilder) +} + +// When adds "WHEN ... THEN ..." 
part to CASE construct +func (b CaseBuilder) When(when interface{}, then interface{}) CaseBuilder { + // TODO: performance hint: replace slice of WhenPart with just slice of parts + // where even indices of the slice belong to "when"s and odd indices belong to "then"s + return builder.Append(b, "WhenParts", newWhenPart(when, then)).(CaseBuilder) +} + +// Else sets optional "ELSE ..." part for CASE construct +func (b CaseBuilder) Else(expr interface{}) CaseBuilder { + return builder.Set(b, "Else", newPart(expr)).(CaseBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/delete.go b/vendor/github.com/Masterminds/squirrel/delete.go new file mode 100644 index 000000000..f3f31e63e --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/delete.go @@ -0,0 +1,191 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +type deleteData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes []Sqlizer + From string + WhereParts []Sqlizer + OrderBys []string + Limit string + Offset string + Suffixes []Sqlizer +} + +func (d *deleteData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *deleteData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.From) == 0 { + err = fmt.Errorf("delete statements must specify a From table") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, err = appendToSql(d.Prefixes, sql, " ", args) + if err != nil { + return + } + + sql.WriteString(" ") + } + + sql.WriteString("DELETE FROM ") + sql.WriteString(d.From) + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderBys) > 0 { + sql.WriteString(" ORDER BY ") + sql.WriteString(strings.Join(d.OrderBys, ", ")) + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Suffixes, sql, " ", args) + if err != nil { + return + } + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +// Builder + +// DeleteBuilder builds SQL DELETE statements. +type DeleteBuilder builder.Builder + +func init() { + builder.Register(DeleteBuilder{}, deleteData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b DeleteBuilder) PlaceholderFormat(f PlaceholderFormat) DeleteBuilder { + return builder.Set(b, "PlaceholderFormat", f).(DeleteBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b DeleteBuilder) RunWith(runner BaseRunner) DeleteBuilder { + return setRunWith(b, runner).(DeleteBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b DeleteBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(deleteData) + return data.Exec() +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b DeleteBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(deleteData) + return data.ToSql() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. 
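+// A usage sketch (illustrative, not part of the vendored file; `sq` is this package):
+//   sql, args := sq.Delete("users").Where(sq.Eq{"id": 1}).MustSql()
+//   // sql == "DELETE FROM users WHERE id = ?"; args == []interface{}{1}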
+func (b DeleteBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// Prefix adds an expression to the beginning of the query +func (b DeleteBuilder) Prefix(sql string, args ...interface{}) DeleteBuilder { + return b.PrefixExpr(Expr(sql, args...)) +} + +// PrefixExpr adds an expression to the very beginning of the query +func (b DeleteBuilder) PrefixExpr(expr Sqlizer) DeleteBuilder { + return builder.Append(b, "Prefixes", expr).(DeleteBuilder) +} + +// From sets the table to be deleted from. +func (b DeleteBuilder) From(from string) DeleteBuilder { + return builder.Set(b, "From", from).(DeleteBuilder) +} + +// Where adds WHERE expressions to the query. +// +// See SelectBuilder.Where for more information. +func (b DeleteBuilder) Where(pred interface{}, args ...interface{}) DeleteBuilder { + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(DeleteBuilder) +} + +// OrderBy adds ORDER BY expressions to the query. +func (b DeleteBuilder) OrderBy(orderBys ...string) DeleteBuilder { + return builder.Extend(b, "OrderBys", orderBys).(DeleteBuilder) +} + +// Limit sets a LIMIT clause on the query. +func (b DeleteBuilder) Limit(limit uint64) DeleteBuilder { + return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(DeleteBuilder) +} + +// Offset sets an OFFSET clause on the query. +func (b DeleteBuilder) Offset(offset uint64) DeleteBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(DeleteBuilder) +} + +// Suffix adds an expression to the end of the query +func (b DeleteBuilder) Suffix(sql string, args ...interface{}) DeleteBuilder { + return b.SuffixExpr(Expr(sql, args...)) +} + +// SuffixExpr adds an expression to the end of the query +func (b DeleteBuilder) SuffixExpr(expr Sqlizer) DeleteBuilder { + return builder.Append(b, "Suffixes", expr).(DeleteBuilder) +} + +func (b DeleteBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(deleteData) + return data.Query() +} + +func (d *deleteData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} diff --git a/vendor/github.com/Masterminds/squirrel/delete_ctx.go b/vendor/github.com/Masterminds/squirrel/delete_ctx.go new file mode 100644 index 000000000..de83c55df --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/delete_ctx.go @@ -0,0 +1,69 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" + + "github.com/lann/builder" +) + +func (d *deleteData) ExecContext(ctx context.Context) (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(ExecerContext) + if !ok { + return nil, NoContextSupport + } + return ExecContextWith(ctx, ctxRunner, d) +} + +func (d *deleteData) QueryContext(ctx context.Context) (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(QueryerContext) + if !ok { + return nil, NoContextSupport + } + return QueryContextWith(ctx, ctxRunner, d) +} + +func (d *deleteData) QueryRowContext(ctx context.Context) RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRowerContext) + if !ok { + if _, ok := d.RunWith.(QueryerContext); !ok { + return &Row{err: RunnerNotQueryRunner} + } + return &Row{err: NoContextSupport} + } + return QueryRowContextWith(ctx, queryRower, d) +} + +// ExecContext builds and ExecContexts the query with the Runner set by 
RunWith. +func (b DeleteBuilder) ExecContext(ctx context.Context) (sql.Result, error) { + data := builder.GetStruct(b).(deleteData) + return data.ExecContext(ctx) +} + +// QueryContext builds and QueryContexts the query with the Runner set by RunWith. +func (b DeleteBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(deleteData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b DeleteBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(deleteData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b DeleteBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/expr.go b/vendor/github.com/Masterminds/squirrel/expr.go new file mode 100644 index 000000000..eba1b4579 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/expr.go @@ -0,0 +1,419 @@ +package squirrel + +import ( + "bytes" + "database/sql/driver" + "fmt" + "reflect" + "sort" + "strings" +) + +const ( + // Portable true/false literals. + sqlTrue = "(1=1)" + sqlFalse = "(1=0)" +) + +type expr struct { + sql string + args []interface{} +} + +// Expr builds an expression from a SQL fragment and arguments. +// +// Ex: +// Expr("FROM_UNIXTIME(?)", t) +func Expr(sql string, args ...interface{}) Sqlizer { + return expr{sql: sql, args: args} +} + +func (e expr) ToSql() (sql string, args []interface{}, err error) { + simple := true + for _, arg := range e.args { + if _, ok := arg.(Sqlizer); ok { + simple = false + } + } + if simple { + return e.sql, e.args, nil + } + + buf := &bytes.Buffer{} + ap := e.args + sp := e.sql + + var isql string + var iargs []interface{} + + for err == nil && len(ap) > 0 && len(sp) > 0 { + i := strings.Index(sp, "?") + if i < 0 { + // no more placeholders + break + } + if len(sp) > i+1 && sp[i+1:i+2] == "?" { + // escaped "??"; append it and step past + buf.WriteString(sp[:i+2]) + sp = sp[i+2:] + continue + } + + if as, ok := ap[0].(Sqlizer); ok { + // sqlizer argument; expand it and append the result + isql, iargs, err = as.ToSql() + buf.WriteString(sp[:i]) + buf.WriteString(isql) + args = append(args, iargs...) + } else { + // normal argument; append it and the placeholder + buf.WriteString(sp[:i+1]) + args = append(args, ap[0]) + } + + // step past the argument and placeholder + ap = ap[1:] + sp = sp[i+1:] + } + + // append the remaining sql and arguments + buf.WriteString(sp) + return buf.String(), append(args, ap...), err +} + +type concatExpr []interface{} + +func (ce concatExpr) ToSql() (sql string, args []interface{}, err error) { + for _, part := range ce { + switch p := part.(type) { + case string: + sql += p + case Sqlizer: + pSql, pArgs, err := p.ToSql() + if err != nil { + return "", nil, err + } + sql += pSql + args = append(args, pArgs...) + default: + return "", nil, fmt.Errorf("%#v is not a string or Sqlizer", part) + } + } + return +} + +// ConcatExpr builds an expression by concatenating strings and other expressions. 
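+// (Illustrative gloss, not in the upstream comment: plain strings are pasted
+// verbatim, while Sqlizer parts contribute both their SQL text and their args.)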
+// +// Ex: +// name_expr := Expr("CONCAT(?, ' ', ?)", firstName, lastName) +// ConcatExpr("COALESCE(full_name,", name_expr, ")") +func ConcatExpr(parts ...interface{}) concatExpr { + return concatExpr(parts) +} + +// aliasExpr helps to alias part of SQL query generated with underlying "expr" +type aliasExpr struct { + expr Sqlizer + alias string +} + +// Alias allows to define alias for column in SelectBuilder. Useful when column is +// defined as complex expression like IF or CASE +// Ex: +// .Column(Alias(caseStmt, "case_column")) +func Alias(expr Sqlizer, alias string) aliasExpr { + return aliasExpr{expr, alias} +} + +func (e aliasExpr) ToSql() (sql string, args []interface{}, err error) { + sql, args, err = e.expr.ToSql() + if err == nil { + sql = fmt.Sprintf("(%s) AS %s", sql, e.alias) + } + return +} + +// Eq is syntactic sugar for use with Where/Having/Set methods. +type Eq map[string]interface{} + +func (eq Eq) toSQL(useNotOpr bool) (sql string, args []interface{}, err error) { + if len(eq) == 0 { + // Empty Sql{} evaluates to true. + sql = sqlTrue + return + } + + var ( + exprs []string + equalOpr = "=" + inOpr = "IN" + nullOpr = "IS" + inEmptyExpr = sqlFalse + ) + + if useNotOpr { + equalOpr = "<>" + inOpr = "NOT IN" + nullOpr = "IS NOT" + inEmptyExpr = sqlTrue + } + + sortedKeys := getSortedKeys(eq) + for _, key := range sortedKeys { + var expr string + val := eq[key] + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + r := reflect.ValueOf(val) + if r.Kind() == reflect.Ptr { + if r.IsNil() { + val = nil + } else { + val = r.Elem().Interface() + } + } + + if val == nil { + expr = fmt.Sprintf("%s %s NULL", key, nullOpr) + } else { + if isListType(val) { + valVal := reflect.ValueOf(val) + if valVal.Len() == 0 { + expr = inEmptyExpr + if args == nil { + args = []interface{}{} + } + } else { + for i := 0; i < valVal.Len(); i++ { + args = append(args, valVal.Index(i).Interface()) + } + expr = fmt.Sprintf("%s %s (%s)", key, inOpr, Placeholders(valVal.Len())) + } + } else { + expr = fmt.Sprintf("%s %s ?", key, equalOpr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (eq Eq) ToSql() (sql string, args []interface{}, err error) { + return eq.toSQL(false) +} + +// NotEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(NotEq{"id": 1}) == "id <> 1" +type NotEq Eq + +func (neq NotEq) ToSql() (sql string, args []interface{}, err error) { + return Eq(neq).toSQL(true) +} + +// Like is syntactic sugar for use with LIKE conditions. +// Ex: +// .Where(Like{"name": "%irrel"}) +type Like map[string]interface{} + +func (lk Like) toSql(opr string) (sql string, args []interface{}, err error) { + var exprs []string + for key, val := range lk { + expr := "" + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + err = fmt.Errorf("cannot use null with like operators") + return + } else { + if isListType(val) { + err = fmt.Errorf("cannot use array or slice with like operators") + return + } else { + expr = fmt.Sprintf("%s %s ?", key, opr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (lk Like) ToSql() (sql string, args []interface{}, err error) { + return lk.toSql("LIKE") +} + +// NotLike is syntactic sugar for use with LIKE conditions. 
+// Ex: +// .Where(NotLike{"name": "%irrel"}) +type NotLike Like + +func (nlk NotLike) ToSql() (sql string, args []interface{}, err error) { + return Like(nlk).toSql("NOT LIKE") +} + +// ILike is syntactic sugar for use with ILIKE conditions. +// Ex: +// .Where(ILike{"name": "sq%"}) +type ILike Like + +func (ilk ILike) ToSql() (sql string, args []interface{}, err error) { + return Like(ilk).toSql("ILIKE") +} + +// NotILike is syntactic sugar for use with ILIKE conditions. +// Ex: +// .Where(NotILike{"name": "sq%"}) +type NotILike Like + +func (nilk NotILike) ToSql() (sql string, args []interface{}, err error) { + return Like(nilk).toSql("NOT ILIKE") +} + +// Lt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Lt{"id": 1}) +type Lt map[string]interface{} + +func (lt Lt) toSql(opposite, orEq bool) (sql string, args []interface{}, err error) { + var ( + exprs []string + opr = "<" + ) + + if opposite { + opr = ">" + } + + if orEq { + opr = fmt.Sprintf("%s%s", opr, "=") + } + + sortedKeys := getSortedKeys(lt) + for _, key := range sortedKeys { + var expr string + val := lt[key] + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + err = fmt.Errorf("cannot use null with less than or greater than operators") + return + } + if isListType(val) { + err = fmt.Errorf("cannot use array or slice with less than or greater than operators") + return + } + expr = fmt.Sprintf("%s %s ?", key, opr) + args = append(args, val) + + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (lt Lt) ToSql() (sql string, args []interface{}, err error) { + return lt.toSql(false, false) +} + +// LtOrEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(LtOrEq{"id": 1}) == "id <= 1" +type LtOrEq Lt + +func (ltOrEq LtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(ltOrEq).toSql(false, true) +} + +// Gt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Gt{"id": 1}) == "id > 1" +type Gt Lt + +func (gt Gt) ToSql() (sql string, args []interface{}, err error) { + return Lt(gt).toSql(true, false) +} + +// GtOrEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(GtOrEq{"id": 1}) == "id >= 1" +type GtOrEq Lt + +func (gtOrEq GtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(gtOrEq).toSql(true, true) +} + +type conj []Sqlizer + +func (c conj) join(sep, defaultExpr string) (sql string, args []interface{}, err error) { + if len(c) == 0 { + return defaultExpr, []interface{}{}, nil + } + var sqlParts []string + for _, sqlizer := range c { + partSQL, partArgs, err := nestedToSql(sqlizer) + if err != nil { + return "", nil, err + } + if partSQL != "" { + sqlParts = append(sqlParts, partSQL) + args = append(args, partArgs...) 
+ } + } + if len(sqlParts) > 0 { + sql = fmt.Sprintf("(%s)", strings.Join(sqlParts, sep)) + } + return +} + +// And conjunction Sqlizers +type And conj + +func (a And) ToSql() (string, []interface{}, error) { + return conj(a).join(" AND ", sqlTrue) +} + +// Or conjunction Sqlizers +type Or conj + +func (o Or) ToSql() (string, []interface{}, error) { + return conj(o).join(" OR ", sqlFalse) +} + +func getSortedKeys(exp map[string]interface{}) []string { + sortedKeys := make([]string, 0, len(exp)) + for k := range exp { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + return sortedKeys +} + +func isListType(val interface{}) bool { + if driver.IsValue(val) { + return false + } + valVal := reflect.ValueOf(val) + return valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice +} diff --git a/vendor/github.com/Masterminds/squirrel/insert.go b/vendor/github.com/Masterminds/squirrel/insert.go new file mode 100644 index 000000000..c23a57935 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/insert.go @@ -0,0 +1,298 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "errors" + "fmt" + "io" + "sort" + "strings" + + "github.com/lann/builder" +) + +type insertData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes []Sqlizer + StatementKeyword string + Options []string + Into string + Columns []string + Values [][]interface{} + Suffixes []Sqlizer + Select *SelectBuilder +} + +func (d *insertData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *insertData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *insertData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *insertData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Into) == 0 { + err = errors.New("insert statements must specify a table") + return + } + if len(d.Values) == 0 && d.Select == nil { + err = errors.New("insert statements must have at least one set of values or select clause") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, err = appendToSql(d.Prefixes, sql, " ", args) + if err != nil { + return + } + + sql.WriteString(" ") + } + + if d.StatementKeyword == "" { + sql.WriteString("INSERT ") + } else { + sql.WriteString(d.StatementKeyword) + sql.WriteString(" ") + } + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + sql.WriteString("INTO ") + sql.WriteString(d.Into) + sql.WriteString(" ") + + if len(d.Columns) > 0 { + sql.WriteString("(") + sql.WriteString(strings.Join(d.Columns, ",")) + sql.WriteString(") ") + } + + if d.Select != nil { + args, err = d.appendSelectToSQL(sql, args) + } else { + args, err = d.appendValuesToSQL(sql, args) + } + if err != nil { + return + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Suffixes, sql, " ", args) + if err != nil { + return + } + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +func (d *insertData) appendValuesToSQL(w io.Writer, args []interface{}) ([]interface{}, error) { + if len(d.Values) == 0 { + return args, errors.New("values for insert statements are not set") + } + + io.WriteString(w, 
"VALUES ") + + valuesStrings := make([]string, len(d.Values)) + for r, row := range d.Values { + valueStrings := make([]string, len(row)) + for v, val := range row { + if vs, ok := val.(Sqlizer); ok { + vsql, vargs, err := vs.ToSql() + if err != nil { + return nil, err + } + valueStrings[v] = vsql + args = append(args, vargs...) + } else { + valueStrings[v] = "?" + args = append(args, val) + } + } + valuesStrings[r] = fmt.Sprintf("(%s)", strings.Join(valueStrings, ",")) + } + + io.WriteString(w, strings.Join(valuesStrings, ",")) + + return args, nil +} + +func (d *insertData) appendSelectToSQL(w io.Writer, args []interface{}) ([]interface{}, error) { + if d.Select == nil { + return args, errors.New("select clause for insert statements are not set") + } + + selectClause, sArgs, err := d.Select.ToSql() + if err != nil { + return args, err + } + + io.WriteString(w, selectClause) + args = append(args, sArgs...) + + return args, nil +} + +// Builder + +// InsertBuilder builds SQL INSERT statements. +type InsertBuilder builder.Builder + +func init() { + builder.Register(InsertBuilder{}, insertData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b InsertBuilder) PlaceholderFormat(f PlaceholderFormat) InsertBuilder { + return builder.Set(b, "PlaceholderFormat", f).(InsertBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b InsertBuilder) RunWith(runner BaseRunner) InsertBuilder { + return setRunWith(b, runner).(InsertBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b InsertBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(insertData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b InsertBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(insertData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b InsertBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(insertData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b InsertBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b InsertBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(insertData) + return data.ToSql() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. +func (b InsertBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// Prefix adds an expression to the beginning of the query +func (b InsertBuilder) Prefix(sql string, args ...interface{}) InsertBuilder { + return b.PrefixExpr(Expr(sql, args...)) +} + +// PrefixExpr adds an expression to the very beginning of the query +func (b InsertBuilder) PrefixExpr(expr Sqlizer) InsertBuilder { + return builder.Append(b, "Prefixes", expr).(InsertBuilder) +} + +// Options adds keyword options before the INTO clause of the query. +func (b InsertBuilder) Options(options ...string) InsertBuilder { + return builder.Extend(b, "Options", options).(InsertBuilder) +} + +// Into sets the INTO clause of the query. 
+func (b InsertBuilder) Into(from string) InsertBuilder { + return builder.Set(b, "Into", from).(InsertBuilder) +} + +// Columns adds insert columns to the query. +func (b InsertBuilder) Columns(columns ...string) InsertBuilder { + return builder.Extend(b, "Columns", columns).(InsertBuilder) +} + +// Values adds a single row's values to the query. +func (b InsertBuilder) Values(values ...interface{}) InsertBuilder { + return builder.Append(b, "Values", values).(InsertBuilder) +} + +// Suffix adds an expression to the end of the query +func (b InsertBuilder) Suffix(sql string, args ...interface{}) InsertBuilder { + return b.SuffixExpr(Expr(sql, args...)) +} + +// SuffixExpr adds an expression to the end of the query +func (b InsertBuilder) SuffixExpr(expr Sqlizer) InsertBuilder { + return builder.Append(b, "Suffixes", expr).(InsertBuilder) +} + +// SetMap set columns and values for insert builder from a map of column name and value +// note that it will reset all previous columns and values was set if any +func (b InsertBuilder) SetMap(clauses map[string]interface{}) InsertBuilder { + // Keep the columns in a consistent order by sorting the column key string. + cols := make([]string, 0, len(clauses)) + for col := range clauses { + cols = append(cols, col) + } + sort.Strings(cols) + + vals := make([]interface{}, 0, len(clauses)) + for _, col := range cols { + vals = append(vals, clauses[col]) + } + + b = builder.Set(b, "Columns", cols).(InsertBuilder) + b = builder.Set(b, "Values", [][]interface{}{vals}).(InsertBuilder) + + return b +} + +// Select set Select clause for insert query +// If Values and Select are used, then Select has higher priority +func (b InsertBuilder) Select(sb SelectBuilder) InsertBuilder { + return builder.Set(b, "Select", &sb).(InsertBuilder) +} + +func (b InsertBuilder) statementKeyword(keyword string) InsertBuilder { + return builder.Set(b, "StatementKeyword", keyword).(InsertBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/insert_ctx.go b/vendor/github.com/Masterminds/squirrel/insert_ctx.go new file mode 100644 index 000000000..4541c2fed --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/insert_ctx.go @@ -0,0 +1,69 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" + + "github.com/lann/builder" +) + +func (d *insertData) ExecContext(ctx context.Context) (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(ExecerContext) + if !ok { + return nil, NoContextSupport + } + return ExecContextWith(ctx, ctxRunner, d) +} + +func (d *insertData) QueryContext(ctx context.Context) (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(QueryerContext) + if !ok { + return nil, NoContextSupport + } + return QueryContextWith(ctx, ctxRunner, d) +} + +func (d *insertData) QueryRowContext(ctx context.Context) RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRowerContext) + if !ok { + if _, ok := d.RunWith.(QueryerContext); !ok { + return &Row{err: RunnerNotQueryRunner} + } + return &Row{err: NoContextSupport} + } + return QueryRowContextWith(ctx, queryRower, d) +} + +// ExecContext builds and ExecContexts the query with the Runner set by RunWith. 
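+//
+// The Runner must also implement ExecerContext (a *sql.DB passed to RunWith
+// does, via WrapStdSqlCtx); otherwise NoContextSupport is returned. A hedged
+// usage sketch (editor's addition; db, ctx and all names are illustrative):
+//   res, err := Insert("users").Columns("name").Values("moe").
+//       RunWith(db).ExecContext(ctx)
+//   // executes "INSERT INTO users (name) VALUES (?)" with args ["moe"]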
+func (b InsertBuilder) ExecContext(ctx context.Context) (sql.Result, error) { + data := builder.GetStruct(b).(insertData) + return data.ExecContext(ctx) +} + +// QueryContext builds and QueryContexts the query with the Runner set by RunWith. +func (b InsertBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(insertData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b InsertBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(insertData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b InsertBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/part.go b/vendor/github.com/Masterminds/squirrel/part.go new file mode 100644 index 000000000..c58f68f1a --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/part.go @@ -0,0 +1,63 @@ +package squirrel + +import ( + "fmt" + "io" +) + +type part struct { + pred interface{} + args []interface{} +} + +func newPart(pred interface{}, args ...interface{}) Sqlizer { + return &part{pred, args} +} + +func (p part) ToSql() (sql string, args []interface{}, err error) { + switch pred := p.pred.(type) { + case nil: + // no-op + case Sqlizer: + sql, args, err = nestedToSql(pred) + case string: + sql = pred + args = p.args + default: + err = fmt.Errorf("expected string or Sqlizer, not %T", pred) + } + return +} + +func nestedToSql(s Sqlizer) (string, []interface{}, error) { + if raw, ok := s.(rawSqlizer); ok { + return raw.toSqlRaw() + } else { + return s.ToSql() + } +} + +func appendToSql(parts []Sqlizer, w io.Writer, sep string, args []interface{}) ([]interface{}, error) { + for i, p := range parts { + partSql, partArgs, err := nestedToSql(p) + if err != nil { + return nil, err + } else if len(partSql) == 0 { + continue + } + + if i > 0 { + _, err := io.WriteString(w, sep) + if err != nil { + return nil, err + } + } + + _, err = io.WriteString(w, partSql) + if err != nil { + return nil, err + } + args = append(args, partArgs...) + } + return args, nil +} diff --git a/vendor/github.com/Masterminds/squirrel/placeholder.go b/vendor/github.com/Masterminds/squirrel/placeholder.go new file mode 100644 index 000000000..8e97a6c62 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/placeholder.go @@ -0,0 +1,114 @@ +package squirrel + +import ( + "bytes" + "fmt" + "strings" +) + +// PlaceholderFormat is the interface that wraps the ReplacePlaceholders method. +// +// ReplacePlaceholders takes a SQL statement and replaces each question mark +// placeholder with a (possibly different) SQL placeholder. +type PlaceholderFormat interface { + ReplacePlaceholders(sql string) (string, error) +} + +type placeholderDebugger interface { + debugPlaceholder() string +} + +var ( + // Question is a PlaceholderFormat instance that leaves placeholders as + // question marks. + Question = questionFormat{} + + // Dollar is a PlaceholderFormat instance that replaces placeholders with + // dollar-prefixed positional placeholders (e.g. $1, $2, $3). + Dollar = dollarFormat{} + + // Colon is a PlaceholderFormat instance that replaces placeholders with + // colon-prefixed positional placeholders (e.g. :1, :2, :3). 
+ Colon = colonFormat{} + + // AtP is a PlaceholderFormat instance that replaces placeholders with + // "@p"-prefixed positional placeholders (e.g. @p1, @p2, @p3). + AtP = atpFormat{} +) + +type questionFormat struct{} + +func (questionFormat) ReplacePlaceholders(sql string) (string, error) { + return sql, nil +} + +func (questionFormat) debugPlaceholder() string { + return "?" +} + +type dollarFormat struct{} + +func (dollarFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, "$") +} + +func (dollarFormat) debugPlaceholder() string { + return "$" +} + +type colonFormat struct{} + +func (colonFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, ":") +} + +func (colonFormat) debugPlaceholder() string { + return ":" +} + +type atpFormat struct{} + +func (atpFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, "@p") +} + +func (atpFormat) debugPlaceholder() string { + return "@p" +} + +// Placeholders returns a string with count ? placeholders joined with commas. +func Placeholders(count int) string { + if count < 1 { + return "" + } + + return strings.Repeat(",?", count)[1:] +} + +func replacePositionalPlaceholders(sql, prefix string) (string, error) { + buf := &bytes.Buffer{} + i := 0 + for { + p := strings.Index(sql, "?") + if p == -1 { + break + } + + if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? + buf.WriteString(sql[:p]) + buf.WriteString("?") + if len(sql[p:]) == 1 { + break + } + sql = sql[p+2:] + } else { + i++ + buf.WriteString(sql[:p]) + fmt.Fprintf(buf, "%s%d", prefix, i) + sql = sql[p+1:] + } + } + + buf.WriteString(sql) + return buf.String(), nil +} diff --git a/vendor/github.com/Masterminds/squirrel/row.go b/vendor/github.com/Masterminds/squirrel/row.go new file mode 100644 index 000000000..74ffda92b --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/row.go @@ -0,0 +1,22 @@ +package squirrel + +// RowScanner is the interface that wraps the Scan method. +// +// Scan behaves like database/sql.Row.Scan. +type RowScanner interface { + Scan(...interface{}) error +} + +// Row wraps database/sql.Row to let squirrel return new errors on Scan. +type Row struct { + RowScanner + err error +} + +// Scan returns Row.err or calls RowScanner.Scan. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + return r.RowScanner.Scan(dest...) 
+} diff --git a/vendor/github.com/Masterminds/squirrel/select.go b/vendor/github.com/Masterminds/squirrel/select.go new file mode 100644 index 000000000..d55ce4c74 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/select.go @@ -0,0 +1,403 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +type selectData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes []Sqlizer + Options []string + Columns []Sqlizer + From Sqlizer + Joins []Sqlizer + WhereParts []Sqlizer + GroupBys []string + HavingParts []Sqlizer + OrderByParts []Sqlizer + Limit string + Offset string + Suffixes []Sqlizer +} + +func (d *selectData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *selectData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *selectData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *selectData) ToSql() (sqlStr string, args []interface{}, err error) { + sqlStr, args, err = d.toSqlRaw() + if err != nil { + return + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sqlStr) + return +} + +func (d *selectData) toSqlRaw() (sqlStr string, args []interface{}, err error) { + if len(d.Columns) == 0 { + err = fmt.Errorf("select statements must have at least one result column") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, err = appendToSql(d.Prefixes, sql, " ", args) + if err != nil { + return + } + + sql.WriteString(" ") + } + + sql.WriteString("SELECT ") + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + if len(d.Columns) > 0 { + args, err = appendToSql(d.Columns, sql, ", ", args) + if err != nil { + return + } + } + + if d.From != nil { + sql.WriteString(" FROM ") + args, err = appendToSql([]Sqlizer{d.From}, sql, "", args) + if err != nil { + return + } + } + + if len(d.Joins) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Joins, sql, " ", args) + if err != nil { + return + } + } + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.GroupBys) > 0 { + sql.WriteString(" GROUP BY ") + sql.WriteString(strings.Join(d.GroupBys, ", ")) + } + + if len(d.HavingParts) > 0 { + sql.WriteString(" HAVING ") + args, err = appendToSql(d.HavingParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderByParts) > 0 { + sql.WriteString(" ORDER BY ") + args, err = appendToSql(d.OrderByParts, sql, ", ", args) + if err != nil { + return + } + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + + args, err = appendToSql(d.Suffixes, sql, " ", args) + if err != nil { + return + } + } + + sqlStr = sql.String() + return +} + +// Builder + +// SelectBuilder builds SQL SELECT statements. +type SelectBuilder builder.Builder + +func init() { + builder.Register(SelectBuilder{}, selectData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. 
Question or Dollar) for the +// query. +func (b SelectBuilder) PlaceholderFormat(f PlaceholderFormat) SelectBuilder { + return builder.Set(b, "PlaceholderFormat", f).(SelectBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +// For most cases runner will be a database connection. +// +// Internally we use this to mock out the database connection for testing. +func (b SelectBuilder) RunWith(runner BaseRunner) SelectBuilder { + return setRunWith(b, runner).(SelectBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b SelectBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(selectData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b SelectBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(selectData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b SelectBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(selectData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b SelectBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b SelectBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(selectData) + return data.ToSql() +} + +func (b SelectBuilder) toSqlRaw() (string, []interface{}, error) { + data := builder.GetStruct(b).(selectData) + return data.toSqlRaw() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. +func (b SelectBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// Prefix adds an expression to the beginning of the query +func (b SelectBuilder) Prefix(sql string, args ...interface{}) SelectBuilder { + return b.PrefixExpr(Expr(sql, args...)) +} + +// PrefixExpr adds an expression to the very beginning of the query +func (b SelectBuilder) PrefixExpr(expr Sqlizer) SelectBuilder { + return builder.Append(b, "Prefixes", expr).(SelectBuilder) +} + +// Distinct adds a DISTINCT clause to the query. +func (b SelectBuilder) Distinct() SelectBuilder { + return b.Options("DISTINCT") +} + +// Options adds select option to the query +func (b SelectBuilder) Options(options ...string) SelectBuilder { + return builder.Extend(b, "Options", options).(SelectBuilder) +} + +// Columns adds result columns to the query. +func (b SelectBuilder) Columns(columns ...string) SelectBuilder { + parts := make([]interface{}, 0, len(columns)) + for _, str := range columns { + parts = append(parts, newPart(str)) + } + return builder.Extend(b, "Columns", parts).(SelectBuilder) +} + +// RemoveColumns remove all columns from query. +// Must add a new column with Column or Columns methods, otherwise +// return a error. +func (b SelectBuilder) RemoveColumns() SelectBuilder { + return builder.Delete(b, "Columns").(SelectBuilder) +} + +// Column adds a result column to the query. 
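+// Columns and Column may be freely mixed. A hedged sketch of a minimal
+// select (editor's addition; names are illustrative):
+//   Select("id", "name").From("users").Where(Eq{"id": 1}).ToSql()
+//   // => "SELECT id, name FROM users WHERE id = ?" with args [1]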
+// Unlike Columns, Column accepts args which will be bound to placeholders in +// the columns string, for example: +// Column("IF(col IN ("+squirrel.Placeholders(3)+"), 1, 0) as col", 1, 2, 3) +func (b SelectBuilder) Column(column interface{}, args ...interface{}) SelectBuilder { + return builder.Append(b, "Columns", newPart(column, args...)).(SelectBuilder) +} + +// From sets the FROM clause of the query. +func (b SelectBuilder) From(from string) SelectBuilder { + return builder.Set(b, "From", newPart(from)).(SelectBuilder) +} + +// FromSelect sets a subquery into the FROM clause of the query. +func (b SelectBuilder) FromSelect(from SelectBuilder, alias string) SelectBuilder { + // Prevent misnumbered parameters in nested selects (#183). + from = from.PlaceholderFormat(Question) + return builder.Set(b, "From", Alias(from, alias)).(SelectBuilder) +} + +// JoinClause adds a join clause to the query. +func (b SelectBuilder) JoinClause(pred interface{}, args ...interface{}) SelectBuilder { + return builder.Append(b, "Joins", newPart(pred, args...)).(SelectBuilder) +} + +// Join adds a JOIN clause to the query. +func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder { + return b.JoinClause("JOIN "+join, rest...) +} + +// LeftJoin adds a LEFT JOIN clause to the query. +func (b SelectBuilder) LeftJoin(join string, rest ...interface{}) SelectBuilder { + return b.JoinClause("LEFT JOIN "+join, rest...) +} + +// RightJoin adds a RIGHT JOIN clause to the query. +func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder { + return b.JoinClause("RIGHT JOIN "+join, rest...) +} + +// InnerJoin adds a INNER JOIN clause to the query. +func (b SelectBuilder) InnerJoin(join string, rest ...interface{}) SelectBuilder { + return b.JoinClause("INNER JOIN "+join, rest...) +} + +// CrossJoin adds a CROSS JOIN clause to the query. +func (b SelectBuilder) CrossJoin(join string, rest ...interface{}) SelectBuilder { + return b.JoinClause("CROSS JOIN "+join, rest...) +} + +// Where adds an expression to the WHERE clause of the query. +// +// Expressions are ANDed together in the generated SQL. +// +// Where accepts several types for its pred argument: +// +// nil OR "" - ignored. +// +// string - SQL expression. +// If the expression has SQL placeholders then a set of arguments must be passed +// as well, one for each placeholder. +// +// map[string]interface{} OR Eq - map of SQL expressions to values. Each key is +// transformed into an expression like " = ?", with the corresponding value +// bound to the placeholder. If the value is nil, the expression will be " +// IS NULL". If the value is an array or slice, the expression will be " IN +// (?,?,...)", with one placeholder for each item in the value. These expressions +// are ANDed together. +// +// Where will panic if pred isn't any of the above types. +func (b SelectBuilder) Where(pred interface{}, args ...interface{}) SelectBuilder { + if pred == nil || pred == "" { + return b + } + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(SelectBuilder) +} + +// GroupBy adds GROUP BY expressions to the query. +func (b SelectBuilder) GroupBy(groupBys ...string) SelectBuilder { + return builder.Extend(b, "GroupBys", groupBys).(SelectBuilder) +} + +// Having adds an expression to the HAVING clause of the query. +// +// See Where. 
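+//
+// A hedged sketch (editor's addition; names are illustrative):
+//   Select("name").From("users").GroupBy("name").Having(Gt{"COUNT(*)": 5})
+//   // => "... GROUP BY name HAVING COUNT(*) > ?" with args [5]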
+func (b SelectBuilder) Having(pred interface{}, rest ...interface{}) SelectBuilder { + return builder.Append(b, "HavingParts", newWherePart(pred, rest...)).(SelectBuilder) +} + +// OrderByClause adds ORDER BY clause to the query. +func (b SelectBuilder) OrderByClause(pred interface{}, args ...interface{}) SelectBuilder { + return builder.Append(b, "OrderByParts", newPart(pred, args...)).(SelectBuilder) +} + +// OrderBy adds ORDER BY expressions to the query. +func (b SelectBuilder) OrderBy(orderBys ...string) SelectBuilder { + for _, orderBy := range orderBys { + b = b.OrderByClause(orderBy) + } + + return b +} + +// Limit sets a LIMIT clause on the query. +func (b SelectBuilder) Limit(limit uint64) SelectBuilder { + return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(SelectBuilder) +} + +// Limit ALL allows to access all records with limit +func (b SelectBuilder) RemoveLimit() SelectBuilder { + return builder.Delete(b, "Limit").(SelectBuilder) +} + +// Offset sets a OFFSET clause on the query. +func (b SelectBuilder) Offset(offset uint64) SelectBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(SelectBuilder) +} + +// RemoveOffset removes OFFSET clause. +func (b SelectBuilder) RemoveOffset() SelectBuilder { + return builder.Delete(b, "Offset").(SelectBuilder) +} + +// Suffix adds an expression to the end of the query +func (b SelectBuilder) Suffix(sql string, args ...interface{}) SelectBuilder { + return b.SuffixExpr(Expr(sql, args...)) +} + +// SuffixExpr adds an expression to the end of the query +func (b SelectBuilder) SuffixExpr(expr Sqlizer) SelectBuilder { + return builder.Append(b, "Suffixes", expr).(SelectBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/select_ctx.go b/vendor/github.com/Masterminds/squirrel/select_ctx.go new file mode 100644 index 000000000..4c42c13f4 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/select_ctx.go @@ -0,0 +1,69 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" + + "github.com/lann/builder" +) + +func (d *selectData) ExecContext(ctx context.Context) (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(ExecerContext) + if !ok { + return nil, NoContextSupport + } + return ExecContextWith(ctx, ctxRunner, d) +} + +func (d *selectData) QueryContext(ctx context.Context) (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(QueryerContext) + if !ok { + return nil, NoContextSupport + } + return QueryContextWith(ctx, ctxRunner, d) +} + +func (d *selectData) QueryRowContext(ctx context.Context) RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRowerContext) + if !ok { + if _, ok := d.RunWith.(QueryerContext); !ok { + return &Row{err: RunnerNotQueryRunner} + } + return &Row{err: NoContextSupport} + } + return QueryRowContextWith(ctx, queryRower, d) +} + +// ExecContext builds and ExecContexts the query with the Runner set by RunWith. +func (b SelectBuilder) ExecContext(ctx context.Context) (sql.Result, error) { + data := builder.GetStruct(b).(selectData) + return data.ExecContext(ctx) +} + +// QueryContext builds and QueryContexts the query with the Runner set by RunWith. 
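+//
+// Note (editor's addition): QueryRowContext above distinguishes a Runner
+// that merely lacks QueryRowerContext (NoContextSupport) from one that
+// cannot query rows at all (RunnerNotQueryRunner).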
+func (b SelectBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(selectData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b SelectBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(selectData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b SelectBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/squirrel.go b/vendor/github.com/Masterminds/squirrel/squirrel.go new file mode 100644 index 000000000..46d456eb6 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/squirrel.go @@ -0,0 +1,183 @@ +// Package squirrel provides a fluent SQL generator. +// +// See https://github.com/Masterminds/squirrel for examples. +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +// Sqlizer is the interface that wraps the ToSql method. +// +// ToSql returns a SQL representation of the Sqlizer, along with a slice of args +// as passed to e.g. database/sql.Exec. It can also return an error. +type Sqlizer interface { + ToSql() (string, []interface{}, error) +} + +// rawSqlizer is expected to do what Sqlizer does, but without finalizing placeholders. +// This is useful for nested queries. +type rawSqlizer interface { + toSqlRaw() (string, []interface{}, error) +} + +// Execer is the interface that wraps the Exec method. +// +// Exec executes the given query as implemented by database/sql.Exec. +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Queryer is the interface that wraps the Query method. +// +// Query executes the given query as implemented by database/sql.Query. +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) +} + +// QueryRower is the interface that wraps the QueryRow method. +// +// QueryRow executes the given query as implemented by database/sql.QueryRow. +type QueryRower interface { + QueryRow(query string, args ...interface{}) RowScanner +} + +// BaseRunner groups the Execer and Queryer interfaces. +type BaseRunner interface { + Execer + Queryer +} + +// Runner groups the Execer, Queryer, and QueryRower interfaces. +type Runner interface { + Execer + Queryer + QueryRower +} + +// WrapStdSql wraps a type implementing the standard SQL interface with methods that +// squirrel expects. +func WrapStdSql(stdSql StdSql) Runner { + return &stdsqlRunner{stdSql} +} + +// StdSql encompasses the standard methods of the *sql.DB type, and other types that +// wrap these methods. +type StdSql interface { + Query(string, ...interface{}) (*sql.Rows, error) + QueryRow(string, ...interface{}) *sql.Row + Exec(string, ...interface{}) (sql.Result, error) +} + +type stdsqlRunner struct { + StdSql +} + +func (r *stdsqlRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.StdSql.QueryRow(query, args...) +} + +func setRunWith(b interface{}, runner BaseRunner) interface{} { + switch r := runner.(type) { + case StdSqlCtx: + runner = WrapStdSqlCtx(r) + case StdSql: + runner = WrapStdSql(r) + } + return builder.Set(b, "RunWith", runner) +} + +// RunnerNotSet is returned by methods that need a Runner if it isn't set. 
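+// Any RunWith call clears this; setRunWith above also upgrades a bare
+// *sql.DB, or anything else satisfying StdSql/StdSqlCtx, into a squirrel
+// Runner (editor's note).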
+var RunnerNotSet = fmt.Errorf("cannot run; no Runner set (RunWith)") + +// RunnerNotQueryRunner is returned by QueryRow if the RunWith value doesn't implement QueryRower. +var RunnerNotQueryRunner = fmt.Errorf("cannot QueryRow; Runner is not a QueryRower") + +// ExecWith Execs the SQL returned by s with db. +func ExecWith(db Execer, s Sqlizer) (res sql.Result, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.Exec(query, args...) +} + +// QueryWith Querys the SQL returned by s with db. +func QueryWith(db Queryer, s Sqlizer) (rows *sql.Rows, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.Query(query, args...) +} + +// QueryRowWith QueryRows the SQL returned by s with db. +func QueryRowWith(db QueryRower, s Sqlizer) RowScanner { + query, args, err := s.ToSql() + return &Row{RowScanner: db.QueryRow(query, args...), err: err} +} + +// DebugSqlizer calls ToSql on s and shows the approximate SQL to be executed +// +// If ToSql returns an error, the result of this method will look like: +// "[ToSql error: %s]" or "[DebugSqlizer error: %s]" +// +// IMPORTANT: As its name suggests, this function should only be used for +// debugging. While the string result *might* be valid SQL, this function does +// not try very hard to ensure it. Additionally, executing the output of this +// function with any untrusted user input is certainly insecure. +func DebugSqlizer(s Sqlizer) string { + sql, args, err := s.ToSql() + if err != nil { + return fmt.Sprintf("[ToSql error: %s]", err) + } + + var placeholder string + downCast, ok := s.(placeholderDebugger) + if !ok { + placeholder = "?" + } else { + placeholder = downCast.debugPlaceholder() + } + // TODO: dedupe this with placeholder.go + buf := &bytes.Buffer{} + i := 0 + for { + p := strings.Index(sql, placeholder) + if p == -1 { + break + } + if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? + buf.WriteString(sql[:p]) + buf.WriteString("?") + if len(sql[p:]) == 1 { + break + } + sql = sql[p+2:] + } else { + if i+1 > len(args) { + return fmt.Sprintf( + "[DebugSqlizer error: too many placeholders in %#v for %d args]", + sql, len(args)) + } + buf.WriteString(sql[:p]) + fmt.Fprintf(buf, "'%v'", args[i]) + // advance our sql string "cursor" beyond the arg we placed + sql = sql[p+1:] + i++ + } + } + if i < len(args) { + return fmt.Sprintf( + "[DebugSqlizer error: not enough placeholders in %#v for %d args]", + sql, len(args)) + } + // "append" any remaning sql that won't need interpolating + buf.WriteString(sql) + return buf.String() +} diff --git a/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go b/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go new file mode 100644 index 000000000..c20148ad3 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go @@ -0,0 +1,93 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" + "errors" +) + +// NoContextSupport is returned if a db doesn't support Context. +var NoContextSupport = errors.New("DB does not support Context") + +// ExecerContext is the interface that wraps the ExecContext method. +// +// Exec executes the given query as implemented by database/sql.ExecContext. +type ExecerContext interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// QueryerContext is the interface that wraps the QueryContext method. +// +// QueryContext executes the given query as implemented by database/sql.QueryContext. 
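+// *sql.DB, *sql.Tx and *sql.Conn all provide QueryContext with this
+// signature (editor's note).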
+type QueryerContext interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +// QueryRowerContext is the interface that wraps the QueryRowContext method. +// +// QueryRowContext executes the given query as implemented by database/sql.QueryRowContext. +type QueryRowerContext interface { + QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner +} + +// RunnerContext groups the Runner interface, along with the Context versions of each of +// its methods +type RunnerContext interface { + Runner + QueryerContext + QueryRowerContext + ExecerContext +} + +// WrapStdSqlCtx wraps a type implementing the standard SQL interface plus the context +// versions of the methods with methods that squirrel expects. +func WrapStdSqlCtx(stdSqlCtx StdSqlCtx) RunnerContext { + return &stdsqlCtxRunner{stdSqlCtx} +} + +// StdSqlCtx encompasses the standard methods of the *sql.DB type, along with the Context +// versions of those methods, and other types that wrap these methods. +type StdSqlCtx interface { + StdSql + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) +} + +type stdsqlCtxRunner struct { + StdSqlCtx +} + +func (r *stdsqlCtxRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.StdSqlCtx.QueryRow(query, args...) +} + +func (r *stdsqlCtxRunner) QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner { + return r.StdSqlCtx.QueryRowContext(ctx, query, args...) +} + +// ExecContextWith ExecContexts the SQL returned by s with db. +func ExecContextWith(ctx context.Context, db ExecerContext, s Sqlizer) (res sql.Result, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.ExecContext(ctx, query, args...) +} + +// QueryContextWith QueryContexts the SQL returned by s with db. +func QueryContextWith(ctx context.Context, db QueryerContext, s Sqlizer) (rows *sql.Rows, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.QueryContext(ctx, query, args...) +} + +// QueryRowContextWith QueryRowContexts the SQL returned by s with db. +func QueryRowContextWith(ctx context.Context, db QueryRowerContext, s Sqlizer) RowScanner { + query, args, err := s.ToSql() + return &Row{RowScanner: db.QueryRowContext(ctx, query, args...), err: err} +} diff --git a/vendor/github.com/Masterminds/squirrel/statement.go b/vendor/github.com/Masterminds/squirrel/statement.go new file mode 100644 index 000000000..9420c67f8 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/statement.go @@ -0,0 +1,104 @@ +package squirrel + +import "github.com/lann/builder" + +// StatementBuilderType is the type of StatementBuilder. +type StatementBuilderType builder.Builder + +// Select returns a SelectBuilder for this StatementBuilderType. +func (b StatementBuilderType) Select(columns ...string) SelectBuilder { + return SelectBuilder(b).Columns(columns...) +} + +// Insert returns a InsertBuilder for this StatementBuilderType. +func (b StatementBuilderType) Insert(into string) InsertBuilder { + return InsertBuilder(b).Into(into) +} + +// Replace returns a InsertBuilder for this StatementBuilderType with the +// statement keyword set to "REPLACE". 
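+//
+// REPLACE is MySQL/SQLite syntax. A hedged sketch (editor's addition;
+// names are illustrative):
+//   Replace("users").Columns("id", "name").Values(1, "moe").ToSql()
+//   // => "REPLACE INTO users (id,name) VALUES (?,?)" with args [1, "moe"]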
+func (b StatementBuilderType) Replace(into string) InsertBuilder { + return InsertBuilder(b).statementKeyword("REPLACE").Into(into) +} + +// Update returns a UpdateBuilder for this StatementBuilderType. +func (b StatementBuilderType) Update(table string) UpdateBuilder { + return UpdateBuilder(b).Table(table) +} + +// Delete returns a DeleteBuilder for this StatementBuilderType. +func (b StatementBuilderType) Delete(from string) DeleteBuilder { + return DeleteBuilder(b).From(from) +} + +// PlaceholderFormat sets the PlaceholderFormat field for any child builders. +func (b StatementBuilderType) PlaceholderFormat(f PlaceholderFormat) StatementBuilderType { + return builder.Set(b, "PlaceholderFormat", f).(StatementBuilderType) +} + +// RunWith sets the RunWith field for any child builders. +func (b StatementBuilderType) RunWith(runner BaseRunner) StatementBuilderType { + return setRunWith(b, runner).(StatementBuilderType) +} + +// Where adds WHERE expressions to the query. +// +// See SelectBuilder.Where for more information. +func (b StatementBuilderType) Where(pred interface{}, args ...interface{}) StatementBuilderType { + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(StatementBuilderType) +} + +// StatementBuilder is a parent builder for other builders, e.g. SelectBuilder. +var StatementBuilder = StatementBuilderType(builder.EmptyBuilder).PlaceholderFormat(Question) + +// Select returns a new SelectBuilder, optionally setting some result columns. +// +// See SelectBuilder.Columns. +func Select(columns ...string) SelectBuilder { + return StatementBuilder.Select(columns...) +} + +// Insert returns a new InsertBuilder with the given table name. +// +// See InsertBuilder.Into. +func Insert(into string) InsertBuilder { + return StatementBuilder.Insert(into) +} + +// Replace returns a new InsertBuilder with the statement keyword set to +// "REPLACE" and with the given table name. +// +// See InsertBuilder.Into. +func Replace(into string) InsertBuilder { + return StatementBuilder.Replace(into) +} + +// Update returns a new UpdateBuilder with the given table name. +// +// See UpdateBuilder.Table. +func Update(table string) UpdateBuilder { + return StatementBuilder.Update(table) +} + +// Delete returns a new DeleteBuilder with the given table name. +// +// See DeleteBuilder.Table. +func Delete(from string) DeleteBuilder { + return StatementBuilder.Delete(from) +} + +// Case returns a new CaseBuilder +// "what" represents case value +func Case(what ...interface{}) CaseBuilder { + b := CaseBuilder(builder.EmptyBuilder) + + switch len(what) { + case 0: + case 1: + b = b.what(what[0]) + default: + b = b.what(newPart(what[0], what[1:]...)) + + } + return b +} diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher.go b/vendor/github.com/Masterminds/squirrel/stmtcacher.go new file mode 100644 index 000000000..5bf267a13 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/stmtcacher.go @@ -0,0 +1,121 @@ +package squirrel + +import ( + "database/sql" + "fmt" + "sync" +) + +// Prepareer is the interface that wraps the Prepare method. +// +// Prepare executes the given query as implemented by database/sql.Prepare. +type Preparer interface { + Prepare(query string) (*sql.Stmt, error) +} + +// DBProxy groups the Execer, Queryer, QueryRower, and Preparer interfaces. +type DBProxy interface { + Execer + Queryer + QueryRower + Preparer +} + +// NOTE: NewStmtCache is defined in stmtcacher_ctx.go (Go >= 1.8) or stmtcacher_noctx.go (Go < 1.8). 
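+
+// A hedged sketch of wiring the cache in front of a *sql.DB (editor's
+// addition; db and the table name are illustrative):
+//
+//   cache := NewStmtCache(db)
+//   users := Select("*").From("users").RunWith(cache)
+//   // each distinct query string is prepared once and reused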
+ +// StmtCache wraps and delegates down to a Preparer type +// +// It also automatically prepares all statements sent to the underlying Preparer calls +// for Exec, Query and QueryRow and caches the returns *sql.Stmt using the provided +// query as the key. So that it can be automatically re-used. +type StmtCache struct { + prep Preparer + cache map[string]*sql.Stmt + mu sync.Mutex +} + +// Prepare delegates down to the underlying Preparer and caches the result +// using the provided query as a key +func (sc *StmtCache) Prepare(query string) (*sql.Stmt, error) { + sc.mu.Lock() + defer sc.mu.Unlock() + + stmt, ok := sc.cache[query] + if ok { + return stmt, nil + } + stmt, err := sc.prep.Prepare(query) + if err == nil { + sc.cache[query] = stmt + } + return stmt, err +} + +// Exec delegates down to the underlying Preparer using a prepared statement +func (sc *StmtCache) Exec(query string, args ...interface{}) (res sql.Result, err error) { + stmt, err := sc.Prepare(query) + if err != nil { + return + } + return stmt.Exec(args...) +} + +// Query delegates down to the underlying Preparer using a prepared statement +func (sc *StmtCache) Query(query string, args ...interface{}) (rows *sql.Rows, err error) { + stmt, err := sc.Prepare(query) + if err != nil { + return + } + return stmt.Query(args...) +} + +// QueryRow delegates down to the underlying Preparer using a prepared statement +func (sc *StmtCache) QueryRow(query string, args ...interface{}) RowScanner { + stmt, err := sc.Prepare(query) + if err != nil { + return &Row{err: err} + } + return stmt.QueryRow(args...) +} + +// Clear removes and closes all the currently cached prepared statements +func (sc *StmtCache) Clear() (err error) { + sc.mu.Lock() + defer sc.mu.Unlock() + + for key, stmt := range sc.cache { + delete(sc.cache, key) + + if stmt == nil { + continue + } + + if cerr := stmt.Close(); cerr != nil { + err = cerr + } + } + + if err != nil { + return fmt.Errorf("one or more Stmt.Close failed; last error: %v", err) + } + + return +} + +type DBProxyBeginner interface { + DBProxy + Begin() (*sql.Tx, error) +} + +type stmtCacheProxy struct { + DBProxy + db *sql.DB +} + +func NewStmtCacheProxy(db *sql.DB) DBProxyBeginner { + return &stmtCacheProxy{DBProxy: NewStmtCache(db), db: db} +} + +func (sp *stmtCacheProxy) Begin() (*sql.Tx, error) { + return sp.db.Begin() +} diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go b/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go new file mode 100644 index 000000000..53603cf4c --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go @@ -0,0 +1,86 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" +) + +// PrepareerContext is the interface that wraps the Prepare and PrepareContext methods. +// +// Prepare executes the given query as implemented by database/sql.Prepare. +// PrepareContext executes the given query as implemented by database/sql.PrepareContext. +type PreparerContext interface { + Preparer + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) +} + +// DBProxyContext groups the Execer, Queryer, QueryRower and PreparerContext interfaces. +type DBProxyContext interface { + Execer + Queryer + QueryRower + PreparerContext +} + +// NewStmtCache returns a *StmtCache wrapping a PreparerContext that caches Prepared Stmts. +// +// Stmts are cached based on the string value of their queries. 
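+// Entries are never evicted automatically; Clear (stmtcacher.go) closes
+// and drops all cached statements (editor's note).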
+func NewStmtCache(prep PreparerContext) *StmtCache { + return &StmtCache{prep: prep, cache: make(map[string]*sql.Stmt)} +} + +// NewStmtCacher is deprecated +// +// Use NewStmtCache instead +func NewStmtCacher(prep PreparerContext) DBProxyContext { + return NewStmtCache(prep) +} + +// PrepareContext delegates down to the underlying PreparerContext and caches the result +// using the provided query as a key +func (sc *StmtCache) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) { + ctxPrep, ok := sc.prep.(PreparerContext) + if !ok { + return nil, NoContextSupport + } + sc.mu.Lock() + defer sc.mu.Unlock() + stmt, ok := sc.cache[query] + if ok { + return stmt, nil + } + stmt, err := ctxPrep.PrepareContext(ctx, query) + if err == nil { + sc.cache[query] = stmt + } + return stmt, err +} + +// ExecContext delegates down to the underlying PreparerContext using a prepared statement +func (sc *StmtCache) ExecContext(ctx context.Context, query string, args ...interface{}) (res sql.Result, err error) { + stmt, err := sc.PrepareContext(ctx, query) + if err != nil { + return + } + return stmt.ExecContext(ctx, args...) +} + +// QueryContext delegates down to the underlying PreparerContext using a prepared statement +func (sc *StmtCache) QueryContext(ctx context.Context, query string, args ...interface{}) (rows *sql.Rows, err error) { + stmt, err := sc.PrepareContext(ctx, query) + if err != nil { + return + } + return stmt.QueryContext(ctx, args...) +} + +// QueryRowContext delegates down to the underlying PreparerContext using a prepared statement +func (sc *StmtCache) QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner { + stmt, err := sc.PrepareContext(ctx, query) + if err != nil { + return &Row{err: err} + } + return stmt.QueryRowContext(ctx, args...) +} diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go b/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go new file mode 100644 index 000000000..deac96777 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go @@ -0,0 +1,21 @@ +// +build !go1.8 + +package squirrel + +import ( + "database/sql" +) + +// NewStmtCacher returns a DBProxy wrapping prep that caches Prepared Stmts. +// +// Stmts are cached based on the string value of their queries. 
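+// This pre-go1.8 variant mirrors stmtcacher_ctx.go; the go1.8 build tags
+// ensure only one of the two files is compiled (editor's note).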
+func NewStmtCache(prep Preparer) *StmtCache {
+  return &StmtCache{prep: prep, cache: make(map[string]*sql.Stmt)}
+}
+
+// NewStmtCacher is deprecated
+//
+// Use NewStmtCache instead
+func NewStmtCacher(prep Preparer) DBProxy {
+  return NewStmtCache(prep)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/update.go b/vendor/github.com/Masterminds/squirrel/update.go
new file mode 100644
index 000000000..eb2a9c4dd
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/update.go
@@ -0,0 +1,288 @@
+package squirrel
+
+import (
+  "bytes"
+  "database/sql"
+  "fmt"
+  "sort"
+  "strings"
+
+  "github.com/lann/builder"
+)
+
+type updateData struct {
+  PlaceholderFormat PlaceholderFormat
+  RunWith           BaseRunner
+  Prefixes          []Sqlizer
+  Table             string
+  SetClauses        []setClause
+  From              Sqlizer
+  WhereParts        []Sqlizer
+  OrderBys          []string
+  Limit             string
+  Offset            string
+  Suffixes          []Sqlizer
+}
+
+type setClause struct {
+  column string
+  value  interface{}
+}
+
+func (d *updateData) Exec() (sql.Result, error) {
+  if d.RunWith == nil {
+    return nil, RunnerNotSet
+  }
+  return ExecWith(d.RunWith, d)
+}
+
+func (d *updateData) Query() (*sql.Rows, error) {
+  if d.RunWith == nil {
+    return nil, RunnerNotSet
+  }
+  return QueryWith(d.RunWith, d)
+}
+
+func (d *updateData) QueryRow() RowScanner {
+  if d.RunWith == nil {
+    return &Row{err: RunnerNotSet}
+  }
+  queryRower, ok := d.RunWith.(QueryRower)
+  if !ok {
+    return &Row{err: RunnerNotQueryRunner}
+  }
+  return QueryRowWith(queryRower, d)
+}
+
+func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) {
+  if len(d.Table) == 0 {
+    err = fmt.Errorf("update statements must specify a table")
+    return
+  }
+  if len(d.SetClauses) == 0 {
+    err = fmt.Errorf("update statements must have at least one Set clause")
+    return
+  }
+
+  sql := &bytes.Buffer{}
+
+  if len(d.Prefixes) > 0 {
+    args, err = appendToSql(d.Prefixes, sql, " ", args)
+    if err != nil {
+      return
+    }
+
+    sql.WriteString(" ")
+  }
+
+  sql.WriteString("UPDATE ")
+  sql.WriteString(d.Table)
+
+  sql.WriteString(" SET ")
+  setSqls := make([]string, len(d.SetClauses))
+  for i, setClause := range d.SetClauses {
+    var valSql string
+    if vs, ok := setClause.value.(Sqlizer); ok {
+      vsql, vargs, err := vs.ToSql()
+      if err != nil {
+        return "", nil, err
+      }
+      if _, ok := vs.(SelectBuilder); ok {
+        valSql = fmt.Sprintf("(%s)", vsql)
+      } else {
+        valSql = vsql
+      }
+      args = append(args, vargs...)
+    } else {
+      valSql = "?"
+      args = append(args, setClause.value)
+    }
+    setSqls[i] = fmt.Sprintf("%s = %s", setClause.column, valSql)
+  }
+  sql.WriteString(strings.Join(setSqls, ", "))
+
+  if d.From != nil {
+    sql.WriteString(" FROM ")
+    args, err = appendToSql([]Sqlizer{d.From}, sql, "", args)
+    if err != nil {
+      return
+    }
+  }
+
+  if len(d.WhereParts) > 0 {
+    sql.WriteString(" WHERE ")
+    args, err = appendToSql(d.WhereParts, sql, " AND ", args)
+    if err != nil {
+      return
+    }
+  }
+
+  if len(d.OrderBys) > 0 {
+    sql.WriteString(" ORDER BY ")
+    sql.WriteString(strings.Join(d.OrderBys, ", "))
+  }
+
+  if len(d.Limit) > 0 {
+    sql.WriteString(" LIMIT ")
+    sql.WriteString(d.Limit)
+  }
+
+  if len(d.Offset) > 0 {
+    sql.WriteString(" OFFSET ")
+    sql.WriteString(d.Offset)
+  }
+
+  if len(d.Suffixes) > 0 {
+    sql.WriteString(" ")
+    args, err = appendToSql(d.Suffixes, sql, " ", args)
+    if err != nil {
+      return
+    }
+  }
+
+  sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String())
+  return
+}
+
+// Builder
+
+// UpdateBuilder builds SQL UPDATE statements.
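+//
+// A hedged sketch (editor's addition; names are illustrative):
+//   Update("users").Set("name", "moe").Where(Eq{"id": 1}).ToSql()
+//   // => "UPDATE users SET name = ? WHERE id = ?" with args ["moe", 1]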
+type UpdateBuilder builder.Builder + +func init() { + builder.Register(UpdateBuilder{}, updateData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b UpdateBuilder) PlaceholderFormat(f PlaceholderFormat) UpdateBuilder { + return builder.Set(b, "PlaceholderFormat", f).(UpdateBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b UpdateBuilder) RunWith(runner BaseRunner) UpdateBuilder { + return setRunWith(b, runner).(UpdateBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b UpdateBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(updateData) + return data.Exec() +} + +func (b UpdateBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(updateData) + return data.Query() +} + +func (b UpdateBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(updateData) + return data.QueryRow() +} + +func (b UpdateBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b UpdateBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(updateData) + return data.ToSql() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. +func (b UpdateBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// Prefix adds an expression to the beginning of the query +func (b UpdateBuilder) Prefix(sql string, args ...interface{}) UpdateBuilder { + return b.PrefixExpr(Expr(sql, args...)) +} + +// PrefixExpr adds an expression to the very beginning of the query +func (b UpdateBuilder) PrefixExpr(expr Sqlizer) UpdateBuilder { + return builder.Append(b, "Prefixes", expr).(UpdateBuilder) +} + +// Table sets the table to be updated. +func (b UpdateBuilder) Table(table string) UpdateBuilder { + return builder.Set(b, "Table", table).(UpdateBuilder) +} + +// Set adds SET clauses to the query. +func (b UpdateBuilder) Set(column string, value interface{}) UpdateBuilder { + return builder.Append(b, "SetClauses", setClause{column: column, value: value}).(UpdateBuilder) +} + +// SetMap is a convenience method which calls .Set for each key/value pair in clauses. +func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateBuilder { + keys := make([]string, len(clauses)) + i := 0 + for key := range clauses { + keys[i] = key + i++ + } + sort.Strings(keys) + for _, key := range keys { + val, _ := clauses[key] + b = b.Set(key, val) + } + return b +} + +// From adds FROM clause to the query +// FROM is valid construct in postgresql only. +func (b UpdateBuilder) From(from string) UpdateBuilder { + return builder.Set(b, "From", newPart(from)).(UpdateBuilder) +} + +// FromSelect sets a subquery into the FROM clause of the query. +func (b UpdateBuilder) FromSelect(from SelectBuilder, alias string) UpdateBuilder { + // Prevent misnumbered parameters in nested selects (#183). + from = from.PlaceholderFormat(Question) + return builder.Set(b, "From", Alias(from, alias)).(UpdateBuilder) +} + +// Where adds WHERE expressions to the query. +// +// See SelectBuilder.Where for more information. 
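+//
+// SetMap above applies its clauses in sorted key order, so output is
+// deterministic (editor's note):
+//   Update("t").SetMap(map[string]interface{}{"a": 1, "b": 2})
+//   // => "UPDATE t SET a = ?, b = ?" with args [1, 2]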
+func (b UpdateBuilder) Where(pred interface{}, args ...interface{}) UpdateBuilder {
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(UpdateBuilder)
+}
+
+// OrderBy adds ORDER BY expressions to the query.
+func (b UpdateBuilder) OrderBy(orderBys ...string) UpdateBuilder {
+	return builder.Extend(b, "OrderBys", orderBys).(UpdateBuilder)
+}
+
+// Limit sets a LIMIT clause on the query.
+func (b UpdateBuilder) Limit(limit uint64) UpdateBuilder {
+	return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(UpdateBuilder)
+}
+
+// Offset sets an OFFSET clause on the query.
+func (b UpdateBuilder) Offset(offset uint64) UpdateBuilder {
+	return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(UpdateBuilder)
+}
+
+// Suffix adds an expression to the end of the query.
+func (b UpdateBuilder) Suffix(sql string, args ...interface{}) UpdateBuilder {
+	return b.SuffixExpr(Expr(sql, args...))
+}
+
+// SuffixExpr adds an expression to the end of the query.
+func (b UpdateBuilder) SuffixExpr(expr Sqlizer) UpdateBuilder {
+	return builder.Append(b, "Suffixes", expr).(UpdateBuilder)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/update_ctx.go b/vendor/github.com/Masterminds/squirrel/update_ctx.go
new file mode 100644
index 000000000..ad479f96f
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/update_ctx.go
@@ -0,0 +1,69 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/lann/builder"
+)
+
+func (d *updateData) ExecContext(ctx context.Context) (sql.Result, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(ExecerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return ExecContextWith(ctx, ctxRunner, d)
+}
+
+func (d *updateData) QueryContext(ctx context.Context) (*sql.Rows, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(QueryerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return QueryContextWith(ctx, ctxRunner, d)
+}
+
+func (d *updateData) QueryRowContext(ctx context.Context) RowScanner {
+	if d.RunWith == nil {
+		return &Row{err: RunnerNotSet}
+	}
+	queryRower, ok := d.RunWith.(QueryRowerContext)
+	if !ok {
+		if _, ok := d.RunWith.(QueryerContext); !ok {
+			return &Row{err: RunnerNotQueryRunner}
+		}
+		return &Row{err: NoContextSupport}
+	}
+	return QueryRowContextWith(ctx, queryRower, d)
+}
+
+// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
+func (b UpdateBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
+	data := builder.GetStruct(b).(updateData)
+	return data.ExecContext(ctx)
+}
+
+// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
+func (b UpdateBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) {
+	data := builder.GetStruct(b).(updateData)
+	return data.QueryContext(ctx)
+}
+
+// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith.
+func (b UpdateBuilder) QueryRowContext(ctx context.Context) RowScanner {
+	data := builder.GetStruct(b).(updateData)
+	return data.QueryRowContext(ctx)
+}
+
+// ScanContext is a shortcut for QueryRowContext().Scan.
+func (b UpdateBuilder) ScanContext(ctx context.Context, dest ...interface{}) error {
+	return b.QueryRowContext(ctx).Scan(dest...)
+} diff --git a/vendor/github.com/Masterminds/squirrel/where.go b/vendor/github.com/Masterminds/squirrel/where.go new file mode 100644 index 000000000..976b63ace --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/where.go @@ -0,0 +1,30 @@ +package squirrel + +import ( + "fmt" +) + +type wherePart part + +func newWherePart(pred interface{}, args ...interface{}) Sqlizer { + return &wherePart{pred: pred, args: args} +} + +func (p wherePart) ToSql() (sql string, args []interface{}, err error) { + switch pred := p.pred.(type) { + case nil: + // no-op + case rawSqlizer: + return pred.toSqlRaw() + case Sqlizer: + return pred.ToSql() + case map[string]interface{}: + return Eq(pred).ToSql() + case string: + sql = pred + args = p.args + default: + err = fmt.Errorf("expected string-keyed map or string, not %T", pred) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE new file mode 100644 index 000000000..49d21669a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go new file mode 100644 index 000000000..6c435d2b6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -0,0 +1,59 @@ +package osversion + +import ( + "fmt" + "sync" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +var ( + osv OSVersion + once sync.Once +) + +// Get gets the operating system version on Windows. +// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + once.Do(func() { + var err error + osv = OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + }) + return osv +} + +// Build gets the build-number on Windows +// The calling application must be manifested to get the correct version information. 
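+// For example, on a Windows Server 2022 (ltsc2022) host this returns build
+// 20348 (V21H2Server in windowsbuilds.go); this note is illustrative and not
+// part of the upstream comment.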
+func Build() uint16 { + return Get().Build +} + +// String returns the OSVersion formatted as a string. It implements the +// [fmt.Stringer] interface. +func (osv OSVersion) String() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} + +// ToString returns the OSVersion formatted as a string. +// +// Deprecated: use [OSVersion.String]. +func (osv OSVersion) ToString() string { + return osv.String() +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go new file mode 100644 index 000000000..f8d411ad7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go @@ -0,0 +1,35 @@ +package osversion + +// List of stable ABI compliant ltsc releases +// Note: List must be sorted in ascending order +var compatLTSCReleases = []uint16{ + V21H2Server, +} + +// CheckHostAndContainerCompat checks if given host and container +// OS versions are compatible. +// It includes support for stable ABI compliant versions as well. +// Every release after WS 2022 will support the previous ltsc +// container image. Stable ABI is in preview mode for windows 11 client. +// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility +func CheckHostAndContainerCompat(host, ctr OSVersion) bool { + // check major minor versions of host and guest + if host.MajorVersion != ctr.MajorVersion || + host.MinorVersion != ctr.MinorVersion { + return false + } + + // If host is < WS 2022, exact version match is required + if host.Build < V21H2Server { + return host.Build == ctr.Build + } + + var supportedLtscRelease uint16 + for i := len(compatLTSCReleases) - 1; i >= 0; i-- { + if host.Build >= compatLTSCReleases[i] { + supportedLtscRelease = compatLTSCReleases[i] + break + } + } + return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 000000000..446369591 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,84 @@ +package osversion + +// Windows Client and Server build numbers. +// +// See: +// https://learn.microsoft.com/en-us/windows/release-health/release-information +// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info +// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + // V1607 (version 1607, codename "Redstone 1") is an alias for [RS1]. + V1607 = RS1 + // LTSC2016 (Windows Server 2016) is an alias for [RS1]. + LTSC2016 = RS1 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + // V1703 (version 1703, codename "Redstone 2") is an alias for [RS2]. + V1703 = RS2 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + // V1709 (version 1709, codename "Redstone 3") is an alias for [RS3]. 
+ V1709 = RS3 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + // V1803 (version 1803, codename "Redstone 4") is an alias for [RS4]. + V1803 = RS4 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 + // V1809 (version 1809, codename "Redstone 5") is an alias for [RS5]. + V1809 = RS5 + // LTSC2019 (Windows Server 2019) is an alias for [RS5]. + LTSC2019 = RS5 + + // V19H1 (version 1903, codename 19H1) corresponds to Windows Server 1903 (semi-annual + // channel). + V19H1 = 18362 + // V1903 (version 1903) is an alias for [V19H1]. + V1903 = V19H1 + + // V19H2 (version 1909, codename 19H2) corresponds to Windows Server 1909 (semi-annual + // channel). + V19H2 = 18363 + // V1909 (version 1909) is an alias for [V19H2]. + V1909 = V19H2 + + // V20H1 (version 2004, codename 20H1) corresponds to Windows Server 2004 (semi-annual + // channel). + V20H1 = 19041 + // V2004 (version 2004) is an alias for [V20H1]. + V2004 = V20H1 + + // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). + V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + // LTSC2022 (Windows Server 2022) is an alias for [V21H2Server] + LTSC2022 = V21H2Server + + // V21H2Win11 corresponds to Windows 11 (original release). + V21H2Win11 = 22000 + + // V22H2Win10 corresponds to Windows 10 (2022 Update). + V22H2Win10 = 19045 + + // V22H2Win11 corresponds to Windows 11 (2022 Update). + V22H2Win11 = 22621 +) diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore new file mode 100644 index 000000000..8d69a9418 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.gitignore @@ -0,0 +1,15 @@ +bin/ +.idea/ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml new file mode 100644 index 000000000..17c4d0a71 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.travis.yml @@ -0,0 +1,18 @@ +dist: bionic +language: go +env: GO111MODULE=on GOFLAGS='-mod vendor' +install: true +email: false + +go: + - 1.10 + - 1.11 + - 1.12 + - 1.13 + - tip + +before_script: + - go install github.com/golangci/golangci-lint/cmd/golangci-lint +script: + - golangci-lint run # run a bunch of code checkers/linters in parallel + - go test -v -race ./... # Run all the tests with the race detector enabled diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md new file mode 100644 index 000000000..7ed268a1e --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md @@ -0,0 +1,63 @@ +#### Support +If you do have a contribution to the package, feel free to create a Pull Request or an Issue. 
+
+#### What to contribute
+If you don't know where to start, here are some features and tasks that still need work:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and of projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc.
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants must clearly describe what they do
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" into the ledger of our open collective by the core contributors, and the person who filed the expense will be reimbursed.
+
+
+## Credits
+
+
+### Contributors
+
+Thank you to all the people who have already contributed to govalidator!
+
+
+### Backers
+
+Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+
+### Sponsors
+
+Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
+
+
+
+
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE
new file mode 100644
index 000000000..2f9a31fad
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Alex Saskevich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
new file mode 100644
index 000000000..78f999e83
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/README.md
@@ -0,0 +1,616 @@
+govalidator
+===========
+[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
+[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
+
+A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+	go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package with `gopkg.in`:
+
+	go get gopkg.in/asaskevich/govalidator.v10
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you don't want to type the long `govalidator` name, you can alias the import like this:
+```go
+import (
+	valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields to have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between the `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors.
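+
+For instance, a minimal illustrative sketch of the nil-pointer behavior (the `Profile` type and its `Name *string` field are hypothetical, not from the original docs):
+```go
+type Profile struct {
+	Name *string `valid:"required"` // hypothetical field used only for illustration
+}
+
+govalidator.SetNilPtrAllowedByRequired(true)
+ok, _ := govalidator.ValidateStruct(Profile{}) // ok == true: a nil pointer satisfies `required`
+```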
+
+For example, to activate required-by-default from an init function:
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+	govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+	Name  string ``
+	Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+	Name  string `valid:"-"`
+	Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct3 struct {
+	Name  string `valid:"-"`
+	Email string `valid:"email,optional"`
+}
+```
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs, this is the object being validated. This makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool {
+	// ...
+}
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool {
+	// ...
+})
+```
+
+#### List of functions:
+```go
+func Abs(value float64) float64
+func BlackList(str, chars string) string
+func ByteLength(str string, params ...string) bool
+func CamelCaseToUnderscore(str string) string
+func Contains(str, substring string) bool
+func Count(array []interface{}, iterator ConditionIterator) int
+func Each(array []interface{}, iterator Iterator)
+func ErrorByField(e error, field string) string
+func ErrorsByField(e error) map[string]string
+func Filter(array []interface{}, iterator ConditionIterator) []interface{}
+func Find(array []interface{}, iterator ConditionIterator) interface{}
+func GetLine(s string, index int) (string, error)
+func GetLines(s string) []string
+func HasLowerCase(str string) bool
+func HasUpperCase(str string) bool
+func HasWhitespace(str string) bool
+func HasWhitespaceOnly(str string) bool
+func InRange(value interface{}, left interface{}, right interface{}) bool
+func InRangeFloat32(value, left, right float32) bool
+func InRangeFloat64(value, left, right float64) bool
+func InRangeInt(value, left, right interface{}) bool
+func IsASCII(str string) bool
+func IsAlpha(str string) bool
+func IsAlphanumeric(str string) bool
+func IsBase64(str string) bool
+func IsByteLength(str string, min, max int) bool
+func IsCIDR(str string) bool
+func IsCRC32(str string) bool
+func IsCRC32b(str string) bool
+func IsCreditCard(str string) bool
+func IsDNSName(str string) bool
+func IsDataURI(str string) bool
+func IsDialString(str string) bool
+func IsDivisibleBy(str, num string) bool
+func IsEmail(str string) bool
+func IsExistingEmail(email string) bool
+func IsFilePath(str string) (bool, int)
+func IsFloat(str string) bool
+func IsFullWidth(str string) bool
+func IsHalfWidth(str string) bool
+func IsHash(str string, algorithm string) bool
+func IsHexadecimal(str string) bool
+func IsHexcolor(str string) bool
+func IsHost(str string) bool
+func IsIP(str string) bool
+func IsIPv4(str string) bool
+func IsIPv6(str string) bool
+func IsISBN(str string, version int) bool
+func IsISBN10(str string) bool
+func IsISBN13(str string) bool
+func IsISO3166Alpha2(str string) bool
+func IsISO3166Alpha3(str string) bool
+func IsISO4217(str string) bool
+func IsISO693Alpha2(str string) bool
+func IsISO693Alpha3b(str string) bool
+func IsIn(str string, params ...string) bool
+func IsInRaw(str string, params ...string) bool
+func IsInt(str string) bool
+func IsJSON(str string) bool
+func IsLatitude(str string) bool
+func IsLongitude(str string) bool
+func IsLowerCase(str string) bool
+func IsMAC(str string) bool
+func IsMD4(str string) bool
+func IsMD5(str string) bool
+func IsMagnetURI(str string) bool
+func IsMongoID(str string) bool
+func IsMultibyte(str string) bool
+func IsNatural(value float64) bool
+func IsNegative(value float64) bool
+func IsNonNegative(value float64) bool
+func IsNonPositive(value float64) bool
+func IsNotNull(str string) bool
+func IsNull(str string) bool
+func IsNumeric(str string) bool
+func IsPort(str string) bool
+func IsPositive(value float64) bool
+func IsPrintableASCII(str string) bool
+func IsRFC3339(str string) bool
+func IsRFC3339WithoutZone(str string) bool
+func IsRGBcolor(str string) bool
+func IsRequestURI(rawurl string) bool
+func IsRequestURL(rawurl string) bool
+func IsRipeMD128(str string) bool
+func IsRipeMD160(str string) bool
+func IsRsaPub(str string, params ...string) bool
+func IsRsaPublicKey(str string, keylen int) bool
+func IsSHA1(str string) bool
+func IsSHA256(str string) bool
+func IsSHA384(str string) bool
+func IsSHA512(str string) bool
+func IsSSN(str string) bool
+func IsSemver(str string) bool
+func IsTiger128(str string) bool
+func IsTiger160(str string) bool
+func IsTiger192(str string) bool
+func IsTime(str string, format string) bool
+func IsType(v interface{}, params ...string) bool
+func IsURL(str string) bool
+func IsUTFDigit(str string) bool
+func IsUTFLetter(str string) bool
+func IsUTFLetterNumeric(str string) bool
+func IsUTFNumeric(str string) bool
+func IsUUID(str string) bool
+func IsUUIDv3(str string) bool
+func IsUUIDv4(str string) bool
+func IsUUIDv5(str string) bool
+func IsUnixTime(str string) bool
+func IsUpperCase(str string) bool
+func IsVariableWidth(str string) bool
+func IsWhole(value float64) bool
+func LeftTrim(str, chars string) string
+func Map(array []interface{}, iterator ResultIterator) []interface{}
+func Matches(str, pattern string) bool
+func MaxStringLength(str string, params ...string) bool
+func MinStringLength(str string, params ...string) bool
+func NormalizeEmail(str string) (string, error)
+func PadBoth(str string, padStr string, padLen int) string
+func PadLeft(str string, padStr string, padLen int) string
+func PadRight(str string, padStr string, padLen int) string
+func PrependPathToErrors(err error, path string) error
+func Range(str string, params ...string) bool
+func RemoveTags(s string) string
+func ReplacePattern(str, pattern, replace string) string
+func Reverse(s string) string
+func RightTrim(str, chars string) string
+func RuneLength(str string, params ...string) bool
+func SafeFileName(str string) string
+func SetFieldsRequiredByDefault(value bool)
+func SetNilPtrAllowedByRequired(value bool)
+func Sign(value float64) float64
+func StringLength(str string, params ...string) bool
+func StringMatches(s string, params ...string) bool
+func StripLow(str string, keepNewLines bool) string
+func ToBoolean(str string) (bool, error)
+func ToFloat(str string) (float64, error)
+func ToInt(value interface{}) (res int64, err error)
+func ToJSON(obj interface{}) (string, error)
+func ToString(obj interface{}) string
+func Trim(str, chars string) string
+func Truncate(str string, length int, ending string) string
+func TruncatingErrorf(str string, args ...interface{}) error
+func UnderscoreToCamelCase(s string) string
+func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error)
+func ValidateStruct(s interface{}) (bool, error)
+func WhiteList(str, chars string) string
+type ConditionIterator
+type CustomTypeValidator
+type Error
+func (e Error) Error() string
+type Errors
+func (es Errors) Error() string
+func (es Errors) Errors() []error
+type ISO3166Entry
+type ISO693Entry
+type InterfaceParamValidator
+type Iterator
+type ParamValidator
+type ResultIterator
+type UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string
+type Validator
+```
+
+#### Examples
+###### IsURL
+```go
+println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
+```
+###### IsType
+```go
+println(govalidator.IsType("Bob", "string"))
+println(govalidator.IsType(1, "int"))
+i := 1
+println(govalidator.IsType(&i, "*int"))
+```
+
+IsType can be used through the tag `type`, which is essential for map validation:
+```go
+type User struct {
+	Name string      `valid:"type(string)"`
+	Age  int         `valid:"type(int)"`
+	Meta interface{} `valid:"type(string)"`
+}
+result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"})
+if err != nil {
+	println("error: " + err.Error())
+}
+println(result)
+```
+###### ToString
+```go
+type User struct {
+	FirstName string
+	LastName  string
+}
+
+str := govalidator.ToString(&User{"John", "Juan"})
+println(str)
+```
+###### Each, Map, Filter, Count for slices
+Each iterates over the slice/array and calls Iterator for every item:
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.Iterator = func(value interface{}, index int) {
+	println(value.(int))
+}
+govalidator.Each(data, fn)
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
+	return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+	return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn)  // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use the `valid` tag on any field in your structure. All validators used with a field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+	return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
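+
+Continuing the `duck` example above, a minimal hypothetical usage sketch (the `Animal` type is illustrative, not from the original docs):
+```go
+type Animal struct {
+	Sound string `valid:"duck"` // hypothetical field validated by the custom "duck" tag
+}
+
+ok, err := govalidator.ValidateStruct(Animal{Sound: "duck"}) // ok == true, err == nil
+// with any other Sound, ok == false and err reports the failed "duck" validator
+```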
+
+Here is a list of available validators for struct fields (validator: function used):
+```go
+"email":              IsEmail,
+"url":                IsURL,
+"dialstring":         IsDialString,
+"requrl":             IsRequestURL,
+"requri":             IsRequestURI,
+"alpha":              IsAlpha,
+"utfletter":          IsUTFLetter,
+"alphanum":           IsAlphanumeric,
+"utfletternum":       IsUTFLetterNumeric,
+"numeric":            IsNumeric,
+"utfnumeric":         IsUTFNumeric,
+"utfdigit":           IsUTFDigit,
+"hexadecimal":        IsHexadecimal,
+"hexcolor":           IsHexcolor,
+"rgbcolor":           IsRGBcolor,
+"lowercase":          IsLowerCase,
+"uppercase":          IsUpperCase,
+"int":                IsInt,
+"float":              IsFloat,
+"null":               IsNull,
+"uuid":               IsUUID,
+"uuidv3":             IsUUIDv3,
+"uuidv4":             IsUUIDv4,
+"uuidv5":             IsUUIDv5,
+"creditcard":         IsCreditCard,
+"isbn10":             IsISBN10,
+"isbn13":             IsISBN13,
+"json":               IsJSON,
+"multibyte":          IsMultibyte,
+"ascii":              IsASCII,
+"printableascii":     IsPrintableASCII,
+"fullwidth":          IsFullWidth,
+"halfwidth":          IsHalfWidth,
+"variablewidth":      IsVariableWidth,
+"base64":             IsBase64,
+"datauri":            IsDataURI,
+"ip":                 IsIP,
+"port":               IsPort,
+"ipv4":               IsIPv4,
+"ipv6":               IsIPv6,
+"dns":                IsDNSName,
+"host":               IsHost,
+"mac":                IsMAC,
+"latitude":           IsLatitude,
+"longitude":          IsLongitude,
+"ssn":                IsSSN,
+"semver":             IsSemver,
+"rfc3339":            IsRFC3339,
+"rfc3339WithoutZone": IsRFC3339WithoutZone,
+"ISO3166Alpha2":      IsISO3166Alpha2,
+"ISO3166Alpha3":      IsISO3166Alpha3,
+```
+Validators with parameters
+
+```go
+"range(min|max)": Range,
+"length(min|max)": ByteLength,
+"runelength(min|max)": RuneLength,
+"stringlength(min|max)": StringLength,
+"matches(pattern)": StringMatches,
+"in(string1|string2|...|stringN)": IsIn,
+"rsapub(keylength)" : IsRsaPub,
+```
+Validators with parameters for any type
+
+```go
+"type(type)": IsType,
+```
+
+And here is a small example of usage:
+```go
+type Post struct {
+	Title    string `valid:"alphanum,required"`
+	Message  string `valid:"duck,ascii"`
+	Message2 string `valid:"animal(dog)"`
+	AuthorIP string `valid:"ipv4"`
+	Date     string `valid:"-"`
+}
+post := &Post{
+	Title:    "My Example Post",
+	Message:  "duck",
+	Message2: "dog",
+	AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+	return str == "duck"
+})
+
+// Add your own struct validation tags with parameter
+govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
+	species := params[0]
+	return str == species
+})
+govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+	println("error: " + err.Error())
+}
+println(result)
+```
+###### ValidateMap [#338](https://github.com/asaskevich/govalidator/pull/338)
+If you want to validate maps, you can use the map to be validated and a validation map that contains the same tags used in ValidateStruct; both maps have to be of the form `map[string]interface{}`.
+
+So here is a small example of usage:
+```go
+var mapTemplate = map[string]interface{}{
+	"name":       "required,alpha",
+	"family":     "required,alpha",
+	"email":      "required,email",
+	"cell-phone": "numeric",
+	"address": map[string]interface{}{
+		"line1":       "required,alphanum",
+		"line2":       "alphanum",
+		"postal-code": "numeric",
+	},
+}
+
+var inputMap = map[string]interface{}{
+	"name":   "Bob",
+	"family": "Smith",
+	"email":  "foo@bar.baz",
+	"address": map[string]interface{}{
+		"line1":       "",
+		"line2":       "",
+		"postal-code": "",
+	},
+}
+
+result, err := govalidator.ValidateMap(inputMap, mapTemplate)
+if err != nil {
+	println("error: " + err.Error())
+}
+println(result)
+```
+
+###### WhiteList
+```go
+// Remove all characters from the string except those in the range "a" to "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
+
+###### Custom validation functions
+Custom validation using your own domain-specific validators is also available; here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+	ID              CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+	Email           string          `valid:"email"`
+	CustomMinLength int             `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool {
+	switch v := context.(type) { // you can type switch on the context interface being validated
+	case StructWithCustomByteArray:
+		// you can check and validate against some other field in the context,
+		// return early or not validate against the context at all; your choice
+	case SomeOtherType:
+		// ...
+	default:
+		// expecting some other type? Throw/panic here or continue
+	}
+
+	switch v := i.(type) { // type switch on the struct field being validated
+	case CustomByteArray:
+		for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+			if e != 0 {
+				return true
+			}
+		}
+	}
+	return false
+})
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool {
+	switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+	case StructWithCustomByteArray:
+		return len(v.ID) >= v.CustomMinLength
+	}
+	return false
+})
+```
+
+###### Loop over Error()
+By default, .Error() returns all errors in a single string. To access each error individually, you can do this:
+```go
+	if err != nil {
+		errs := err.(govalidator.Errors).Errors()
+		for _, e := range errs {
+			fmt.Println(e.Error())
+		}
+	}
+```
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator; here's an example of how to use it:
+```go
+type Ticket struct {
+	Id        int64  `json:"id"`
+	FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know where to start, here are some features and tasks that still need work:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and of projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc.
+- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants must clearly describe what they do
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 000000000..5bace2654
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,58 @@
+package govalidator
+
+// Iterator is a function that accepts an element of a slice/array and its index
+type Iterator func(interface{}, int)
+
+// ResultIterator is a function that accepts an element of a slice/array and its index and returns any result
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is a function that accepts an element of a slice/array and its index and returns a boolean
+type ConditionIterator func(interface{}, int) bool
+
+// Each iterates over the slice and applies Iterator to every item
+func Each(array []interface{}, iterator Iterator) {
+	for index, data := range array {
+		iterator(data, index)
+	}
+}
+
+// Map iterates over the slice and applies ResultIterator to every item. Returns a new slice as a result.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+	var result = make([]interface{}, len(array))
+	for index, data := range array {
+		result[index] = iterator(data, index)
+	}
+	return result
+}
+
+// Find iterates over the slice and applies ConditionIterator to every item. Returns the first item that meets ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+	for index, data := range array {
+		if iterator(data, index) {
+			return data
+		}
+	}
+	return nil
+}
+
+// Filter iterates over the slice and applies ConditionIterator to every item. Returns a new slice.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+	var result = make([]interface{}, 0)
+	for index, data := range array {
+		if iterator(data, index) {
+			result = append(result, data)
+		}
+	}
+	return result
+}
+
+// Count iterates over the slice and applies ConditionIterator to every item. Returns the count of items that meet ConditionIterator.
+func Count(array []interface{}, iterator ConditionIterator) int {
+	count := 0
+	for index, data := range array {
+		if iterator(data, index) {
+			count++
+		}
+	}
+	return count
+}
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 000000000..cf1e5d569
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,64 @@
+package govalidator
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// ToString converts the input to a string.
+func ToString(obj interface{}) string {
+	return fmt.Sprintf("%v", obj)
+}
+
+// ToJSON converts the input to a valid JSON string
+func ToJSON(obj interface{}) (string, error) {
+	res, err := json.Marshal(obj)
+	if err != nil {
+		res = []byte("")
+	}
+	return string(res), err
+}
+
+// ToFloat converts the input string to a float, or 0.0 if the input is not a float.
+func ToFloat(str string) (float64, error) {
+	res, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		res = 0.0
+	}
+	return res, err
+}
+
+// ToInt converts the input string or any integer type to an int64, or 0 if the input is not an integer.
+func ToInt(value interface{}) (res int64, err error) {
+	val := reflect.ValueOf(value)
+
+	switch value.(type) {
+	case int, int8, int16, int32, int64:
+		res = val.Int()
+	case uint, uint8, uint16, uint32, uint64:
+		res = int64(val.Uint())
+	case string:
+		if IsInt(val.String()) {
+			res, err = strconv.ParseInt(val.String(), 0, 64)
+			if err != nil {
+				res = 0
+			}
+		} else {
+			err = fmt.Errorf("ToInt: invalid numeric string %q", val.String())
+			res = 0
+		}
+	default:
+		err = fmt.Errorf("ToInt: unsupported type %T", value)
+		res = 0
+	}
+
+	return
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+	return strconv.ParseBool(str)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go
new file mode 100644
index 000000000..55dce62dc
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/doc.go
@@ -0,0 +1,3 @@
+// Package govalidator provides validators and sanitizers for strings, structures and collections.
+package govalidator
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 000000000..1da2336f4
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,47 @@
+package govalidator
+
+import (
+	"sort"
+	"strings"
+)
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+	return es
+}
+
+func (es Errors) Error() string {
+	var errs []string
+	for _, e := range es {
+		errs = append(errs, e.Error())
+	}
+	sort.Strings(errs)
+	return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
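+// For example (illustrative, not from the upstream comment), a failed "email"
+// validator on field Email of a nested struct Inner yields Name "Email",
+// Validator "email", Path ["Inner"], and Error() "Inner.Email: <message>".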
+type Error struct {
+	Name                     string
+	Err                      error
+	CustomErrorMessageExists bool
+
+	// Validator indicates the name of the validator that failed
+	Validator string
+	Path      []string
+}
+
+func (e Error) Error() string {
+	if e.CustomErrorMessageExists {
+		return e.Err.Error()
+	}
+
+	errName := e.Name
+	if len(e.Path) > 0 {
+		errName = strings.Join(append(e.Path, e.Name), ".")
+	}
+
+	return errName + ": " + e.Err.Error()
+}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 000000000..7e6c652e1
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,97 @@
+package govalidator
+
+import (
+	"math"
+	"reflect"
+)
+
+// Abs returns the absolute value of the number
+func Abs(value float64) float64 {
+	return math.Abs(value)
+}
+
+// Sign returns the signum of the number: 1 if value > 0, -1 if value < 0, 0 otherwise
+func Sign(value float64) float64 {
+	if value > 0 {
+		return 1
+	} else if value < 0 {
+		return -1
+	} else {
+		return 0
+	}
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+	return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+	return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+	return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+	return value <= 0
+}
+
+// InRangeInt returns true if value lies between the left and right border
+func InRangeInt(value, left, right interface{}) bool {
+	value64, _ := ToInt(value)
+	left64, _ := ToInt(left)
+	right64, _ := ToInt(right)
+	if left64 > right64 {
+		left64, right64 = right64, left64
+	}
+	return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between the left and right border
+func InRangeFloat32(value, left, right float32) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between the left and right border
+func InRangeFloat64(value, left, right float64) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRange returns true if value lies between the left and right border. It is
+// generic over int, float32 and float64; all three arguments must be of the same type.
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+
+	reflectValue := reflect.TypeOf(value).Kind()
+	reflectLeft := reflect.TypeOf(left).Kind()
+	reflectRight := reflect.TypeOf(right).Kind()
+
+	if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
+		return InRangeInt(value.(int), left.(int), right.(int))
+	} else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
+		return InRangeFloat32(value.(float32), left.(float32), right.(float32))
+	} else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
+		return InRangeFloat64(value.(float64), left.(float64), right.(float64))
+	} else {
+		return false
+	}
+}
+
+// IsWhole returns true if value is a whole number
+func IsWhole(value float64) bool {
+	return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole)
+func IsNatural(value float64) bool {
+	return IsWhole(value) && IsPositive(value)
+}
diff --git
a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go new file mode 100644 index 000000000..e55451cff --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -0,0 +1,105 @@ +package govalidator + +import "regexp" + +// Basic regular expressions for validating strings +const ( + Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP 
string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" + IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxMagnetURI = regexp.MustCompile(MagnetURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = 
regexp.MustCompile(URL)
+	rxSSN               = regexp.MustCompile(SSN)
+	rxWinPath           = regexp.MustCompile(WinPath)
+	rxUnixPath          = regexp.MustCompile(UnixPath)
+	rxSemver            = regexp.MustCompile(Semver)
+	rxHasLowerCase      = regexp.MustCompile(hasLowerCase)
+	rxHasUpperCase      = regexp.MustCompile(hasUpperCase)
+	rxHasWhitespace     = regexp.MustCompile(hasWhitespace)
+	rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
+	rxIMEI              = regexp.MustCompile(IMEI)
+)
diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go
new file mode 100644
index 000000000..b57b666f5
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/types.go
@@ -0,0 +1,653 @@
+package govalidator
+
+import (
+	"reflect"
+	"regexp"
+	"sort"
+	"sync"
+)
+
+// Validator is a wrapper for a validator function that returns bool and accepts string.
+type Validator func(str string) bool
+
+// CustomTypeValidator is a wrapper for validator functions that return bool and accept any type.
+// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
+type CustomTypeValidator func(i interface{}, o interface{}) bool
+
+// ParamValidator is a wrapper for validator functions that accept additional parameters.
+type ParamValidator func(str string, params ...string) bool
+
+// InterfaceParamValidator is a wrapper for validator functions that accept any type plus additional parameters.
+type InterfaceParamValidator func(in interface{}, params ...string) bool
+
+type tagOptionsMap map[string]tagOption
+
+func (t tagOptionsMap) orderedKeys() []string {
+	var keys []string
+	for k := range t {
+		keys = append(keys, k)
+	}
+
+	sort.Slice(keys, func(a, b int) bool {
+		return t[keys[a]].order < t[keys[b]].order
+	})
+
+	return keys
+}
+
+type tagOption struct {
+	name               string
+	customErrorMessage string
+	order              int
+}
+
+// UnsupportedTypeError is a wrapper for reflect.Type
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+// InterfaceParamTagMap is a map of functions that accept variant parameters for an interface value
+var InterfaceParamTagMap = map[string]InterfaceParamValidator{
+	"type": IsType,
+}
+
+// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
+var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
+	"type": regexp.MustCompile(`^type\((.*)\)$`),
+}
+
+// ParamTagMap is a map of functions that accept variant parameters
+var ParamTagMap = map[string]ParamValidator{
+	"length":          ByteLength,
+	"range":           Range,
+	"runelength":      RuneLength,
+	"stringlength":    StringLength,
+	"matches":         StringMatches,
+	"in":              IsInRaw,
+	"rsapub":          IsRsaPub,
+	"minstringlength": MinStringLength,
+	"maxstringlength": MaxStringLength,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
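+// For example (an illustrative note, not part of the upstream comment), the
+// struct tag `valid:"length(1|10)"` is matched by the "length" regex; its two
+// capture groups ("1" and "10") become the params passed to ByteLength.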
+var ParamTagRegexMap = map[string]*regexp.Regexp{ + "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), + "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), + "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), + "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), + "in": regexp.MustCompile(`^in\((.*)\)`), + "matches": regexp.MustCompile(`^matches\((.+)\)$`), + "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), + "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), + "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), +} + +type customTypeTagMap struct { + validators map[string]CustomTypeValidator + + sync.RWMutex +} + +func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { + tm.RLock() + defer tm.RUnlock() + v, ok := tm.validators[name] + return v, ok +} + +func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { + tm.Lock() + defer tm.Unlock() + tm.validators[name] = ctv +} + +// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. +// Use this to validate compound or custom types that need to be handled as a whole, e.g. +// `type UUID [16]byte` (this would be handled as an array of bytes). +var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} + +// TagMap is a map of functions, that can be used as tags for ValidateStruct function. +var TagMap = map[string]Validator{ + "email": IsEmail, + "url": IsURL, + "dialstring": IsDialString, + "requrl": IsRequestURL, + "requri": IsRequestURI, + "alpha": IsAlpha, + "utfletter": IsUTFLetter, + "alphanum": IsAlphanumeric, + "utfletternum": IsUTFLetterNumeric, + "numeric": IsNumeric, + "utfnumeric": IsUTFNumeric, + "utfdigit": IsUTFDigit, + "hexadecimal": IsHexadecimal, + "hexcolor": IsHexcolor, + "rgbcolor": IsRGBcolor, + "lowercase": IsLowerCase, + "uppercase": IsUpperCase, + "int": IsInt, + "float": IsFloat, + "null": IsNull, + "notnull": IsNotNull, + "uuid": IsUUID, + "uuidv3": IsUUIDv3, + "uuidv4": IsUUIDv4, + "uuidv5": IsUUIDv5, + "creditcard": IsCreditCard, + "isbn10": IsISBN10, + "isbn13": IsISBN13, + "json": IsJSON, + "multibyte": IsMultibyte, + "ascii": IsASCII, + "printableascii": IsPrintableASCII, + "fullwidth": IsFullWidth, + "halfwidth": IsHalfWidth, + "variablewidth": IsVariableWidth, + "base64": IsBase64, + "datauri": IsDataURI, + "ip": IsIP, + "port": IsPort, + "ipv4": IsIPv4, + "ipv6": IsIPv6, + "dns": IsDNSName, + "host": IsHost, + "mac": IsMAC, + "latitude": IsLatitude, + "longitude": IsLongitude, + "ssn": IsSSN, + "semver": IsSemver, + "rfc3339": IsRFC3339, + "rfc3339WithoutZone": IsRFC3339WithoutZone, + "ISO3166Alpha2": IsISO3166Alpha2, + "ISO3166Alpha3": IsISO3166Alpha3, + "ISO4217": IsISO4217, + "IMEI": IsIMEI, +} + +// ISO3166Entry stores country codes +type ISO3166Entry struct { + EnglishShortName string + FrenchShortName string + Alpha2Code string + Alpha3Code string + Numeric string +} + +//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" +var ISO3166List = []ISO3166Entry{ + {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, + {"Albania", "Albanie (l')", "AL", "ALB", "008"}, + {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, + {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, + {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, + {"Andorra", "Andorre (l')", "AD", "AND", "020"}, + {"Angola", "Angola (l')", "AO", "AGO", "024"}, + 
{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, + {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, + {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, + {"Australia", "Australie (l')", "AU", "AUS", "036"}, + {"Austria", "Autriche (l')", "AT", "AUT", "040"}, + {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, + {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, + {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, + {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, + {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, + {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, + {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, + {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, + {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, + {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, + {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, + {"Bouvet Island", "Bouvet (l'ÃŽle)", "BV", "BVT", "074"}, + {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, + {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, + {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, + {"Solomon Islands", "Salomon (ÃŽles)", "SB", "SLB", "090"}, + {"Virgin Islands (British)", "Vierges britanniques (les ÃŽles)", "VG", "VGB", "092"}, + {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, + {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, + {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, + {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, + {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, + {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, + {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, + {"Canada", "Canada (le)", "CA", "CAN", "124"}, + {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, + {"Cayman Islands (the)", "Caïmans (les ÃŽles)", "KY", "CYM", "136"}, + {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, + {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, + {"Chad", "Tchad (le)", "TD", "TCD", "148"}, + {"Chile", "Chili (le)", "CL", "CHL", "152"}, + {"China", "Chine (la)", "CN", "CHN", "156"}, + {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, + {"Christmas Island", "Christmas (l'ÃŽle)", "CX", "CXR", "162"}, + {"Cocos (Keeling) Islands (the)", "Cocos (les ÃŽles)/ Keeling (les ÃŽles)", "CC", "CCK", "166"}, + {"Colombia", "Colombie (la)", "CO", "COL", "170"}, + {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, + {"Mayotte", "Mayotte", "YT", "MYT", "175"}, + {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, + {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, + {"Cook Islands (the)", "Cook (les ÃŽles)", "CK", "COK", "184"}, + {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, + {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, + {"Cuba", "Cuba", "CU", "CUB", "192"}, + {"Cyprus", "Chypre", "CY", "CYP", "196"}, + {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, + {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, + {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, + {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, + {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, + {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, + {"El Salvador", "El Salvador", "SV", "SLV", "222"}, + {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, + {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, 
+ {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, + {"Estonia", "Estonie (l')", "EE", "EST", "233"}, + {"Faroe Islands (the)", "Féroé (les ÃŽles)", "FO", "FRO", "234"}, + {"Falkland Islands (the) [Malvinas]", "Falkland (les ÃŽles)/Malouines (les ÃŽles)", "FK", "FLK", "238"}, + {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les ÃŽles Sandwich du Sud (la)", "GS", "SGS", "239"}, + {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, + {"Finland", "Finlande (la)", "FI", "FIN", "246"}, + {"Ã…land Islands", "Ã…land(les ÃŽles)", "AX", "ALA", "248"}, + {"France", "France (la)", "FR", "FRA", "250"}, + {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, + {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, + {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, + {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, + {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, + {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, + {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, + {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, + {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, + {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, + {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, + {"Kiribati", "Kiribati", "KI", "KIR", "296"}, + {"Greece", "Grèce (la)", "GR", "GRC", "300"}, + {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, + {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, + {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, + {"Guam", "Guam", "GU", "GUM", "316"}, + {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, + {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, + {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, + {"Haiti", "Haïti", "HT", "HTI", "332"}, + {"Heard Island and McDonald Islands", "Heard-et-ÃŽles MacDonald (l'ÃŽle)", "HM", "HMD", "334"}, + {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, + {"Honduras", "Honduras (le)", "HN", "HND", "340"}, + {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, + {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, + {"Iceland", "Islande (l')", "IS", "ISL", "352"}, + {"India", "Inde (l')", "IN", "IND", "356"}, + {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, + {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, + {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, + {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, + {"Israel", "Israël", "IL", "ISR", "376"}, + {"Italy", "Italie (l')", "IT", "ITA", "380"}, + {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, + {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, + {"Japan", "Japon (le)", "JP", "JPN", "392"}, + {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, + {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, + {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, + {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, + {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, + {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, + {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, + {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, + {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, + {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, + {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, + {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, + {"Libya", "Libye (la)", "LY", "LBY", "434"}, + {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, + 
{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, + {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, + {"Macao", "Macao", "MO", "MAC", "446"}, + {"Madagascar", "Madagascar", "MG", "MDG", "450"}, + {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, + {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, + {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, + {"Mali", "Mali (le)", "ML", "MLI", "466"}, + {"Malta", "Malte", "MT", "MLT", "470"}, + {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, + {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, + {"Mauritius", "Maurice", "MU", "MUS", "480"}, + {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, + {"Monaco", "Monaco", "MC", "MCO", "492"}, + {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, + {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, + {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, + {"Montserrat", "Montserrat", "MS", "MSR", "500"}, + {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, + {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, + {"Oman", "Oman", "OM", "OMN", "512"}, + {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, + {"Nauru", "Nauru", "NR", "NRU", "520"}, + {"Nepal", "Népal (le)", "NP", "NPL", "524"}, + {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, + {"Curaçao", "Curaçao", "CW", "CUW", "531"}, + {"Aruba", "Aruba", "AW", "ABW", "533"}, + {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, + {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, + {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, + {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, + {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, + {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, + {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, + {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, + {"Niue", "Niue", "NU", "NIU", "570"}, + {"Norfolk Island", "Norfolk (l'ÃŽle)", "NF", "NFK", "574"}, + {"Norway", "Norvège (la)", "NO", "NOR", "578"}, + {"Northern Mariana Islands (the)", "Mariannes du Nord (les ÃŽles)", "MP", "MNP", "580"}, + {"United States Minor Outlying Islands (the)", "ÃŽles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, + {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, + {"Marshall Islands (the)", "Marshall (ÃŽles)", "MH", "MHL", "584"}, + {"Palau", "Palaos (les)", "PW", "PLW", "585"}, + {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, + {"Panama", "Panama (le)", "PA", "PAN", "591"}, + {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, + {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, + {"Peru", "Pérou (le)", "PE", "PER", "604"}, + {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, + {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, + {"Poland", "Pologne (la)", "PL", "POL", "616"}, + {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, + {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, + {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, + {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, + {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, + {"Réunion", "Réunion (La)", "RE", "REU", "638"}, + {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, + {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, + {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, + {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, + {"Saint Helena, Ascension and Tristan da Cunha", 
"Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, + {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, + {"Anguilla", "Anguilla", "AI", "AIA", "660"}, + {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, + {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, + {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, + {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, + {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, + {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, + {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, + {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, + {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, + {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, + {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, + {"Singapore", "Singapour", "SG", "SGP", "702"}, + {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, + {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, + {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, + {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, + {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, + {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, + {"Spain", "Espagne (l')", "ES", "ESP", "724"}, + {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, + {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, + {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, + {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, + {"Svalbard and Jan Mayen", "Svalbard et l'ÃŽle Jan Mayen (le)", "SJ", "SJM", "744"}, + {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, + {"Sweden", "Suède (la)", "SE", "SWE", "752"}, + {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, + {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, + {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, + {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, + {"Togo", "Togo (le)", "TG", "TGO", "768"}, + {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, + {"Tonga", "Tonga (les)", "TO", "TON", "776"}, + {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, + {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, + {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, + {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, + {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, + {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les ÃŽles)", "TC", "TCA", "796"}, + {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, + {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, + {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, + {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, + {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, + {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, + {"Guernsey", "Guernesey", "GG", "GGY", "831"}, + {"Jersey", "Jersey", "JE", "JEY", "832"}, + {"Isle of Man", "ÃŽle de Man", "IM", "IMN", "833"}, + {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, + {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, + {"Virgin Islands (U.S.)", "Vierges des États-Unis (les ÃŽles)", "VI", "VIR", "850"}, + {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, + {"Uruguay", "Uruguay (l')", "UY", "URY", 
"858"}, + {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, + {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, + {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, + {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, + {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, + {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, +} + +// ISO4217List is the list of ISO currency codes +var ISO4217List = []string{ + "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", + "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", + "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", + "DJF", "DKK", "DOP", "DZD", + "EGP", "ERN", "ETB", "EUR", + "FJD", "FKP", + "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", + "HKD", "HNL", "HRK", "HTG", "HUF", + "IDR", "ILS", "INR", "IQD", "IRR", "ISK", + "JMD", "JOD", "JPY", + "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", + "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", + "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", + "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", + "OMR", + "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", + "QAR", + "RON", "RSD", "RUB", "RWF", + "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", + "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS", + "VEF", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", 
Alpha2Code: "my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, + {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, + {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: 
"Japanese"}, + {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, + {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, + {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "BokmÃ¥l, Norwegian; Norwegian BokmÃ¥l"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: 
"Pushto; Pashto"}, + {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, + {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, + {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go new file mode 100644 index 000000000..f4c30f824 --- /dev/null +++ 
b/vendor/github.com/asaskevich/govalidator/utils.go @@ -0,0 +1,270 @@ +package govalidator + +import ( + "errors" + "fmt" + "html" + "math" + "path" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Contains checks if the string contains the substring. +func Contains(str, substring string) bool { + return strings.Contains(str, substring) +} + +// Matches checks if string matches the pattern (pattern is regular expression) +// In case of error return false +func Matches(str, pattern string) bool { + match, _ := regexp.MatchString(pattern, str) + return match +} + +// LeftTrim trims characters from the left side of the input. +// If second argument is empty, it will remove leading spaces. +func LeftTrim(str, chars string) string { + if chars == "" { + return strings.TrimLeftFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("^[" + chars + "]+") + return r.ReplaceAllString(str, "") +} + +// RightTrim trims characters from the right side of the input. +// If second argument is empty, it will remove trailing spaces. +func RightTrim(str, chars string) string { + if chars == "" { + return strings.TrimRightFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("[" + chars + "]+$") + return r.ReplaceAllString(str, "") +} + +// Trim trims characters from both sides of the input. +// If second argument is empty, it will remove spaces. +func Trim(str, chars string) string { + return LeftTrim(RightTrim(str, chars), chars) +} + +// WhiteList removes characters that do not appear in the whitelist. +func WhiteList(str, chars string) string { + pattern := "[^" + chars + "]+" + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, "") +} + +// BlackList removes characters that appear in the blacklist. +func BlackList(str, chars string) string { + pattern := "[" + chars + "]+" + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, "") +} + +// StripLow removes characters with a numerical value < 32 and 127, mostly control characters. +// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). +func StripLow(str string, keepNewLines bool) string { + chars := "" + if keepNewLines { + chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" + } else { + chars = "\x00-\x1F\x7F" + } + return BlackList(str, chars) +} + +// ReplacePattern replaces regular expression pattern in string +func ReplacePattern(str, pattern, replace string) string { + r, _ := regexp.Compile(pattern) + return r.ReplaceAllString(str, replace) +} + +// Escape replaces <, >, & and " with HTML entities. +var Escape = html.EscapeString + +func addSegment(inrune, segment []rune) []rune { + if len(segment) == 0 { + return inrune + } + if len(inrune) != 0 { + inrune = append(inrune, '_') + } + inrune = append(inrune, segment...) + return inrune +} + +// UnderscoreToCamelCase converts from underscore separated form to camel case form. +// Ex.: my_func => MyFunc +func UnderscoreToCamelCase(s string) string { + return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) +} + +// CamelCaseToUnderscore converts from camel case form to underscore separated form. 
+// Ex.: MyFunc => my_func +func CamelCaseToUnderscore(str string) string { + var output []rune + var segment []rune + for _, r := range str { + + // not treat number as separate segment + if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { + output = addSegment(output, segment) + segment = nil + } + segment = append(segment, unicode.ToLower(r)) + } + output = addSegment(output, segment) + return string(output) +} + +// Reverse returns reversed string +func Reverse(s string) string { + r := []rune(s) + for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r) +} + +// GetLines splits string by "\n" and return array of lines +func GetLines(s string) []string { + return strings.Split(s, "\n") +} + +// GetLine returns specified line of multiline string +func GetLine(s string, index int) (string, error) { + lines := GetLines(s) + if index < 0 || index >= len(lines) { + return "", errors.New("line index out of bounds") + } + return lines[index], nil +} + +// RemoveTags removes all tags from HTML string +func RemoveTags(s string) string { + return ReplacePattern(s, "<[^>]*>", "") +} + +// SafeFileName returns safe string that can be used in file names +func SafeFileName(str string) string { + name := strings.ToLower(str) + name = path.Clean(path.Base(name)) + name = strings.Trim(name, " ") + separators, err := regexp.Compile(`[ &_=+:]`) + if err == nil { + name = separators.ReplaceAllString(name, "-") + } + legal, err := regexp.Compile(`[^[:alnum:]-.]`) + if err == nil { + name = legal.ReplaceAllString(name, "") + } + for strings.Contains(name, "--") { + name = strings.Replace(name, "--", "-", -1) + } + return name +} + +// NormalizeEmail canonicalize an email address. +// The local part of the email address is lowercased for all domains; the hostname is always lowercased and +// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). +// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and +// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are +// normalized to @gmail.com. +func NormalizeEmail(str string) (string, error) { + if !IsEmail(str) { + return "", fmt.Errorf("%s is not an email", str) + } + parts := strings.Split(str, "@") + parts[0] = strings.ToLower(parts[0]) + parts[1] = strings.ToLower(parts[1]) + if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { + parts[1] = "gmail.com" + parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] + } + return strings.Join(parts, "@"), nil +} + +// Truncate a string to the closest length without breaking words. 
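// Editor's illustrative sketch (not part of the vendored govalidator source):
// the sanitizers around this point in action. NormalizeEmail applies the Gmail
// rules spelled out in its comment above, CamelCaseToUnderscore matches its
// "MyFunc => my_func" example, and Truncate (defined next) cuts at the closest
// word boundary.
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	got, err := govalidator.NormalizeEmail("some.one+news@googlemail.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(got) // someone@gmail.com: dots and "+tag" dropped, domain folded

	fmt.Println(govalidator.CamelCaseToUnderscore("MyFunc"))                // my_func
	fmt.Println(govalidator.Truncate("This is a long sentence", 10, "...")) // This is a...
}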
+func Truncate(str string, length int, ending string) string { + var aftstr, befstr string + if len(str) > length { + words := strings.Fields(str) + before, present := 0, 0 + for i := range words { + befstr = aftstr + before = present + aftstr = aftstr + words[i] + " " + present = len(aftstr) + if present > length && i != 0 { + if (length - before) < (present - length) { + return Trim(befstr, " /\\.,\"'#!?&@+-") + ending + } + return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending + } + } + } + + return str +} + +// PadLeft pads left side of a string if size of string is less then indicated pad length +func PadLeft(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, false) +} + +// PadRight pads right side of a string if size of string is less then indicated pad length +func PadRight(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, false, true) +} + +// PadBoth pads both sides of a string if size of string is less then indicated pad length +func PadBoth(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, true) +} + +// PadString either left, right or both sides. +// Note that padding string can be unicode and more then one character +func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { + + // When padded length is less then the current string size + if padLen < utf8.RuneCountInString(str) { + return str + } + + padLen -= utf8.RuneCountInString(str) + + targetLen := padLen + + targetLenLeft := targetLen + targetLenRight := targetLen + if padLeft && padRight { + targetLenLeft = padLen / 2 + targetLenRight = padLen - targetLenLeft + } + + strToRepeatLen := utf8.RuneCountInString(padStr) + + repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) + repeatedString := strings.Repeat(padStr, repeatTimes) + + leftSide := "" + if padLeft { + leftSide = repeatedString[0:targetLenLeft] + } + + rightSide := "" + if padRight { + rightSide = repeatedString[0:targetLenRight] + } + + return leftSide + str + rightSide +} + +// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object +func TruncatingErrorf(str string, args ...interface{}) error { + n := strings.Count(str, "%s") + return fmt.Errorf(str, args[:n]...) +} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go new file mode 100644 index 000000000..298f9920d --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -0,0 +1,1530 @@ +// Package govalidator is package of validators and sanitizers for strings, structs and collections. +package govalidator + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +var ( + fieldsRequiredByDefault bool + nilPtrAllowedByRequired = false + notNumberRegexp = regexp.MustCompile("[^0-9]+") + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) + paramsRegexp = regexp.MustCompile(`\(.*\)$`) +) + +const maxURLRuneCount = 2083 +const minURLRuneCount = 3 +const RF3339WithoutZone = "2006-01-02T15:04:05" + +// SetFieldsRequiredByDefault causes validation to fail when struct fields +// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). 
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +// type exampleStruct struct { +// Name string `` +// Email string `valid:"email"` +// This, however, will only fail when Email is empty or an invalid email address: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email"` +// Lastly, this will only fail when Email is an invalid email address but not when it's empty: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email,optional"` +func SetFieldsRequiredByDefault(value bool) { + fieldsRequiredByDefault = value +} + +// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. +// The validation will still reject ptr fields in their zero value state. Example with this enabled: +// type exampleStruct struct { +// Name *string `valid:"required"` +// With `Name` set to "", this will be considered invalid input and will cause a validation error. +// With `Name` set to nil, this will be considered valid by validation. +// By default this is disabled. +func SetNilPtrAllowedByRequired(value bool) { + nilPtrAllowedByRequired = value +} + +// IsEmail check if the string is an email. +func IsEmail(str string) bool { + // TODO uppercase letters are not supported + return rxEmail.MatchString(str) +} + +// IsExistingEmail check if the string is an email of existing domain +func IsExistingEmail(email string) bool { + + if len(email) < 6 || len(email) > 254 { + return false + } + at := strings.LastIndex(email, "@") + if at <= 0 || at > len(email)-3 { + return false + } + user := email[:at] + host := email[at+1:] + if len(user) > 64 { + return false + } + if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { + return false + } + switch host { + case "localhost", "example.com": + return true + } + if _, err := net.LookupMX(host); err != nil { + if _, err := net.LookupIP(host); err != nil { + return false + } + } + + return true +} + +// IsURL check if the string is an URL. +func IsURL(str string) bool { + if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { + return false + } + strTemp := str + if strings.Contains(str, ":") && !strings.Contains(str, "://") { + // support no indicated urlscheme but with colon for port number + // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString + strTemp = "http://" + str + } + u, err := url.Parse(strTemp) + if err != nil { + return false + } + if strings.HasPrefix(u.Host, ".") { + return false + } + if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { + return false + } + return rxURL.MatchString(str) +} + +// IsRequestURL check if the string rawurl, assuming +// it was received in an HTTP request, is a valid +// URL confirm to RFC 3986 +func IsRequestURL(rawurl string) bool { + url, err := url.ParseRequestURI(rawurl) + if err != nil { + return false //Couldn't even parse the rawurl + } + if len(url.Scheme) == 0 { + return false //No Scheme found + } + return true +} + +// IsRequestURI check if the string rawurl, assuming +// it was received in an HTTP request, is an +// absolute URI or an absolute path. +func IsRequestURI(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + return err == nil +} + +// IsAlpha check if the string contains only letters (a-zA-Z). Empty string is valid. 
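// Editor's illustrative sketch (not part of the vendored govalidator source):
// the tag semantics described in the comments above, driven through
// ValidateStruct. The account struct is a made-up example type.
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type account struct {
	Name  string `valid:"-"`              // exempt from validation
	Email string `valid:"email,optional"` // validated only when non-empty
}

func main() {
	ok, err := govalidator.ValidateStruct(account{Name: "x", Email: "not-an-email"})
	fmt.Println(ok, err != nil) // false true: Email fails the "email" validator

	ok, _ = govalidator.ValidateStruct(account{Name: "x", Email: ""})
	fmt.Println(ok) // true: "optional" lets the empty value pass
}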
+func IsAlpha(str string) bool { + if IsNull(str) { + return true + } + return rxAlpha.MatchString(str) +} + +//IsUTFLetter check if the string contains only unicode letter characters. +//Similar to IsAlpha but for all languages. Empty string is valid. +func IsUTFLetter(str string) bool { + if IsNull(str) { + return true + } + + for _, c := range str { + if !unicode.IsLetter(c) { + return false + } + } + return true + +} + +// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid. +func IsAlphanumeric(str string) bool { + if IsNull(str) { + return true + } + return rxAlphanumeric.MatchString(str) +} + +// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid. +func IsUTFLetterNumeric(str string) bool { + if IsNull(str) { + return true + } + for _, c := range str { + if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok + return false + } + } + return true + +} + +// IsNumeric check if the string contains only numbers. Empty string is valid. +func IsNumeric(str string) bool { + if IsNull(str) { + return true + } + return rxNumeric.MatchString(str) +} + +// IsUTFNumeric check if the string contains only unicode numbers of any kind. +// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. +func IsUTFNumeric(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsNumber(c) { //numbers && minus sign are ok + return false + } + } + return true + +} + +// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid. +func IsUTFDigit(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsDigit(c) { //digits && minus sign are ok + return false + } + } + return true + +} + +// IsHexadecimal check if the string is a hexadecimal number. +func IsHexadecimal(str string) bool { + return rxHexadecimal.MatchString(str) +} + +// IsHexcolor check if the string is a hexadecimal color. +func IsHexcolor(str string) bool { + return rxHexcolor.MatchString(str) +} + +// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB). +func IsRGBcolor(str string) bool { + return rxRGBcolor.MatchString(str) +} + +// IsLowerCase check if the string is lowercase. Empty string is valid. +func IsLowerCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToLower(str) +} + +// IsUpperCase check if the string is uppercase. Empty string is valid. +func IsUpperCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToUpper(str) +} + +// HasLowerCase check if the string contains at least 1 lowercase. Empty string is valid. +func HasLowerCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasLowerCase.MatchString(str) +} + +// HasUpperCase check if the string contains as least 1 uppercase. Empty string is valid. +func HasUpperCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasUpperCase.MatchString(str) +} + +// IsInt check if the string is an integer. Empty string is valid.
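// Editor's illustrative sketch (not part of the vendored govalidator source):
// the "empty string is valid" convention used by the character-class checks
// above. Emptiness is a separate concern, handled by IsNull/IsNotNull
// (defined below) or a `valid:"required"` tag.
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsAlpha(""))    // true by design
	fmt.Println(govalidator.IsAlpha("abc")) // true
	fmt.Println(govalidator.IsAlpha("ab1")) // false
	fmt.Println(govalidator.IsNotNull(""))  // false: use this to require a value
}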
+func IsInt(str string) bool { + if IsNull(str) { + return true + } + return rxInt.MatchString(str) +} + +// IsFloat check if the string is a float. +func IsFloat(str string) bool { + return str != "" && rxFloat.MatchString(str) +} + +// IsDivisibleBy check if the string is a number that's divisible by another. +// If second argument is not valid integer or zero, it's return false. +// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). +func IsDivisibleBy(str, num string) bool { + f, _ := ToFloat(str) + p := int64(f) + q, _ := ToInt(num) + if q == 0 { + return false + } + return (p == 0) || (p%q == 0) +} + +// IsNull check if the string is null. +func IsNull(str string) bool { + return len(str) == 0 +} + +// IsNotNull check if the string is not null. +func IsNotNull(str string) bool { + return !IsNull(str) +} + +// HasWhitespaceOnly checks the string only contains whitespace +func HasWhitespaceOnly(str string) bool { + return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) +} + +// HasWhitespace checks if the string contains any whitespace +func HasWhitespace(str string) bool { + return len(str) > 0 && rxHasWhitespace.MatchString(str) +} + +// IsByteLength check if the string's length (in bytes) falls in a range. +func IsByteLength(str string, min, max int) bool { + return len(str) >= min && len(str) <= max +} + +// IsUUIDv3 check if the string is a UUID version 3. +func IsUUIDv3(str string) bool { + return rxUUID3.MatchString(str) +} + +// IsUUIDv4 check if the string is a UUID version 4. +func IsUUIDv4(str string) bool { + return rxUUID4.MatchString(str) +} + +// IsUUIDv5 check if the string is a UUID version 5. +func IsUUIDv5(str string) bool { + return rxUUID5.MatchString(str) +} + +// IsUUID check if the string is a UUID (version 3, 4 or 5). +func IsUUID(str string) bool { + return rxUUID.MatchString(str) +} + +// IsCreditCard check if the string is a credit card. +func IsCreditCard(str string) bool { + sanitized := notNumberRegexp.ReplaceAllString(str, "") + if !rxCreditCard.MatchString(sanitized) { + return false + } + var sum int64 + var digit string + var tmpNum int64 + var shouldDouble bool + for i := len(sanitized) - 1; i >= 0; i-- { + digit = sanitized[i:(i + 1)] + tmpNum, _ = ToInt(digit) + if shouldDouble { + tmpNum *= 2 + if tmpNum >= 10 { + sum += ((tmpNum % 10) + 1) + } else { + sum += tmpNum + } + } else { + sum += tmpNum + } + shouldDouble = !shouldDouble + } + + return sum%10 == 0 +} + +// IsISBN10 check if the string is an ISBN version 10. +func IsISBN10(str string) bool { + return IsISBN(str, 10) +} + +// IsISBN13 check if the string is an ISBN version 13. +func IsISBN13(str string) bool { + return IsISBN(str, 13) +} + +// IsISBN check if the string is an ISBN (version 10 or 13). +// If version value is not equal to 10 or 13, it will be check both variants. 
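// Editor's illustrative sketch (not part of the vendored govalidator source):
// IsCreditCard above strips non-digits, checks the number format, then runs
// the Luhn checksum — doubling every second digit from the right, folding
// results >= 10 via (n % 10) + 1, and requiring the total to be divisible
// by 10.
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsCreditCard("4242 4242 4242 4242")) // true: spaces are stripped first
	fmt.Println(govalidator.IsCreditCard("4242424242424243"))    // false: Luhn sum is off by one
}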
+func IsISBN(str string, version int) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + var checksum int32 + var i int32 + if version == 10 { + if !rxISBN10.MatchString(sanitized) { + return false + } + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(sanitized[i]-'0') + } + if sanitized[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(sanitized[9]-'0') + } + if checksum%11 == 0 { + return true + } + return false + } else if version == 13 { + if !rxISBN13.MatchString(sanitized) { + return false + } + factor := []int32{1, 3} + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(sanitized[i]-'0') + } + return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 + } + return IsISBN(str, 10) || IsISBN(str, 13) +} + +// IsJSON check if the string is valid JSON (note: uses json.Unmarshal). +func IsJSON(str string) bool { + var js json.RawMessage + return json.Unmarshal([]byte(str), &js) == nil +} + +// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid. +func IsMultibyte(str string) bool { + if IsNull(str) { + return true + } + return rxMultibyte.MatchString(str) +} + +// IsASCII check if the string contains ASCII chars only. Empty string is valid. +func IsASCII(str string) bool { + if IsNull(str) { + return true + } + return rxASCII.MatchString(str) +} + +// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid. +func IsPrintableASCII(str string) bool { + if IsNull(str) { + return true + } + return rxPrintableASCII.MatchString(str) +} + +// IsFullWidth check if the string contains any full-width chars. Empty string is valid. +func IsFullWidth(str string) bool { + if IsNull(str) { + return true + } + return rxFullWidth.MatchString(str) +} + +// IsHalfWidth check if the string contains any half-width chars. Empty string is valid. +func IsHalfWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) +} + +// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid. +func IsVariableWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) +} + +// IsBase64 check if a string is base64 encoded. +func IsBase64(str string) bool { + return rxBase64.MatchString(str) +} + +// IsFilePath check is a string is Win or Unix file path and returns it's type. 
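// Editor's illustrative sketch (not part of the vendored govalidator source):
// the two checksums IsISBN implements. ISBN-10 weights the first nine digits
// by position 1..9, adds 10x the check digit ('X' counts as 10), and requires
// sum % 11 == 0; ISBN-13 alternates weights 1 and 3 over the first twelve
// digits and compares the check digit to (10 - sum%10) % 10.
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsISBN10("0-306-40615-2"))     // true: 145 + 10*2 = 165, and 165 % 11 == 0
	fmt.Println(govalidator.IsISBN13("978-3-16-148410-0")) // true: weighted sum 100, check digit 0
	fmt.Println(govalidator.IsISBN("0306406152", 0))       // version != 10/13: both variants are tried
}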
+func IsFilePath(str string) (bool, int) { + if rxWinPath.MatchString(str) { + //check windows path limit see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false, Win + } + return true, Win + } else if rxUnixPath.MatchString(str) { + return true, Unix + } + return false, Unknown +} + +// IsDataURI checks if a string is base64 encoded data URI such as an image +func IsDataURI(str string) bool { + dataURI := strings.Split(str, ",") + if !rxDataURI.MatchString(dataURI[0]) { + return false + } + return IsBase64(dataURI[1]) +} + +// IsMagnetURI checks if a string is valid magnet URI +func IsMagnetURI(str string) bool { + return rxMagnetURI.MatchString(str) +} + +// IsISO3166Alpha2 checks if a string is valid two-letter country code +func IsISO3166Alpha2(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO3166Alpha3 checks if a string is valid three-letter country code +func IsISO3166Alpha3(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha3Code { + return true + } + } + return false +} + +// IsISO693Alpha2 checks if a string is valid two-letter language code +func IsISO693Alpha2(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO693Alpha3b checks if a string is valid three-letter language code +func IsISO693Alpha3b(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha3bCode { + return true + } + } + return false +} + +// IsDNSName will validate the given string as a DNS name +func IsDNSName(str string) bool { + if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { + // constraints already violated + return false + } + return !IsIP(str) && rxDNSName.MatchString(str) +} + +// IsHash checks if a string is a hash of type algorithm. +// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] +func IsHash(str string, algorithm string) bool { + var len string + algo := strings.ToLower(algorithm) + + if algo == "crc32" || algo == "crc32b" { + len = "8" + } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { + len = "32" + } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { + len = "40" + } else if algo == "tiger192" { + len = "48" + } else if algo == "sha256" { + len = "64" + } else if algo == "sha384" { + len = "96" + } else if algo == "sha512" { + len = "128" + } else { + return false + } + + return Matches(str, "^[a-f0-9]{"+len+"}$") +} + +// IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")` +func IsSHA512(str string) bool { + return IsHash(str, "sha512") +} + +// IsSHA384 checks is a string is a SHA384 hash. Alias for `IsHash(str, "sha384")` +func IsSHA384(str string) bool { + return IsHash(str, "sha384") +} + +// IsSHA256 checks is a string is a SHA256 hash. Alias for `IsHash(str, "sha256")` +func IsSHA256(str string) bool { + return IsHash(str, "sha256") +} + +// IsTiger192 checks is a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")` +func IsTiger192(str string) bool { + return IsHash(str, "tiger192") +} + +// IsTiger160 checks is a string is a Tiger160 hash. 
+// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")`
+func IsTiger160(str string) bool {
+	return IsHash(str, "tiger160")
+}
+
+// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")`
+func IsRipeMD160(str string) bool {
+	return IsHash(str, "ripemd160")
+}
+
+// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")`
+func IsSHA1(str string) bool {
+	return IsHash(str, "sha1")
+}
+
+// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")`
+func IsTiger128(str string) bool {
+	return IsHash(str, "tiger128")
+}
+
+// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")`
+func IsRipeMD128(str string) bool {
+	return IsHash(str, "ripemd128")
+}
+
+// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")`
+func IsCRC32(str string) bool {
+	return IsHash(str, "crc32")
+}
+
+// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")`
+func IsCRC32b(str string) bool {
+	return IsHash(str, "crc32b")
+}
+
+// IsMD5 checks if a string is an MD5 hash. Alias for `IsHash(str, "md5")`
+func IsMD5(str string) bool {
+	return IsHash(str, "md5")
+}
+
+// IsMD4 checks if a string is an MD4 hash. Alias for `IsHash(str, "md4")`
+func IsMD4(str string) bool {
+	return IsHash(str, "md4")
+}
+
+// IsDialString validates the given string for usage with the various Dial() functions
+func IsDialString(str string) bool {
+	if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
+		return true
+	}
+
+	return false
+}
+
+// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP`
+func IsIP(str string) bool {
+	return net.ParseIP(str) != nil
+}
+
+// IsPort checks if a string represents a valid port
+func IsPort(str string) bool {
+	if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
+		return true
+	}
+	return false
+}
+
+// IsIPv4 checks if the string is an IP version 4.
+func IsIPv4(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ".")
+}
+
+// IsIPv6 checks if the string is an IP version 6.
+func IsIPv6(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPV4 & IPV6)
+func IsCIDR(str string) bool {
+	_, _, err := net.ParseCIDR(str)
+	return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+	_, err := net.ParseMAC(str)
+	return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
+func IsHost(str string) bool {
+	return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+	return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+	return rxLatitude.MatchString(str)
+}
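The network validators above delegate to the standard library (`net.ParseIP`, `net.ParseCIDR`, `net.ParseMAC`, `net.SplitHostPort`), adding only small shape checks such as requiring a `.` for IPv4 or a port in 1..65535. A usage sketch, not part of the patch; it assumes the usual `govalidator` package name:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsIPv4("192.168.0.1"))      // true
	fmt.Println(govalidator.IsIPv6("2001:db8::1"))      // true
	fmt.Println(govalidator.IsCIDR("10.0.0.0/8"))       // true
	fmt.Println(govalidator.IsMAC("01:23:45:67:89:ab")) // true

	// IsDialString requires host:port where host is a DNS name or IP
	// and port parses to a number in 1..65535.
	fmt.Println(govalidator.IsDialString("example.com:443")) // true
	fmt.Println(govalidator.IsDialString("example.com"))     // false: no port
}
```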
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+	return rxLongitude.MatchString(str)
+}
+
+// IsIMEI checks if a string is a valid IMEI
+func IsIMEI(str string) bool {
+	return rxIMEI.MatchString(str)
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided length
+func IsRsaPublicKey(str string, keylen int) bool {
+	bb := bytes.NewBufferString(str)
+	pemBytes, err := ioutil.ReadAll(bb)
+	if err != nil {
+		return false
+	}
+	block, _ := pem.Decode(pemBytes)
+	if block != nil && block.Type != "PUBLIC KEY" {
+		return false
+	}
+	var der []byte
+
+	if block != nil {
+		der = block.Bytes
+	} else {
+		der, err = base64.StdEncoding.DecodeString(str)
+		if err != nil {
+			return false
+		}
+	}
+
+	key, err := x509.ParsePKIXPublicKey(der)
+	if err != nil {
+		return false
+	}
+	pubkey, ok := key.(*rsa.PublicKey)
+	if !ok {
+		return false
+	}
+	bitlen := len(pubkey.N.Bytes()) * 8
+	return bitlen == int(keylen)
+}
+
+func toJSONName(tag string) string {
+	if tag == "" {
+		return ""
+	}
+
+	// JSON name always comes first. If there's no options then split[0] is
+	// JSON name, if JSON name is not set, then split[0] is an empty string.
+	split := strings.SplitN(tag, ",", 2)
+
+	name := split[0]
+
+	// However it is possible that the field is skipped when
+	// (de-)serializing from/to JSON, in which case assume that there is no
+	// tag name to use
+	if name == "-" {
+		return ""
+	}
+	return name
+}
+
+// PrependPathToErrors prepends the given path segment to the path of
+// every Error contained in err.
+func PrependPathToErrors(err error, path string) error {
+	switch err2 := err.(type) {
+	case Error:
+		err2.Path = append([]string{path}, err2.Path...)
+		return err2
+	case Errors:
+		errors := err2.Errors()
+		for i, err3 := range errors {
+			errors[i] = PrependPathToErrors(err3, path)
+		}
+		return err2
+	}
+	return err
+}
+
+// ValidateMap uses a validation map for fields.
+// result will be equal to `false` if there are any errors.
+// s is the map containing the data to be validated.
+// m is the validation map in the form:
+// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}}
+func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) {
+	if s == nil {
+		return true, nil
+	}
+	result := true
+	var err error
+	var errs Errors
+	var index int
+	val := reflect.ValueOf(s)
+	for key, value := range s {
+		presentResult := true
+		validator, ok := m[key]
+		if !ok {
+			presentResult = false
+			var err error
+			err = fmt.Errorf("all map keys have to be present in the validation map; got %s", key)
+			err = PrependPathToErrors(err, key)
+			errs = append(errs, err)
+		}
+		valueField := reflect.ValueOf(value)
+		mapResult := true
+		typeResult := true
+		structResult := true
+		resultField := true
+		switch subValidator := validator.(type) {
+		case map[string]interface{}:
+			var err error
+			if v, ok := value.(map[string]interface{}); !ok {
+				mapResult = false
+				err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String())
+				err = PrependPathToErrors(err, key)
+				errs = append(errs, err)
+			} else {
+				mapResult, err = ValidateMap(v, subValidator)
+				if err != nil {
+					mapResult = false
+					err = PrependPathToErrors(err, key)
+					errs = append(errs, err)
+				}
+			}
+		case string:
+			if (valueField.Kind() == reflect.Struct ||
+				(valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+				subValidator != "-" {
+				var err error
+				structResult, err = ValidateStruct(valueField.Interface())
+				if err != nil {
+					err = PrependPathToErrors(err, key)
+					errs = append(errs, err)
+				}
+			}
+			resultField, err = typeCheck(valueField, reflect.StructField{
+				Name:      key,
+				PkgPath:   "",
+				Type:      val.Type(),
+				Tag:       reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)),
+				Offset:    0,
+				Index:     []int{index},
+				Anonymous: false,
+			}, val, nil)
+			if err != nil {
+				errs = append(errs, err)
+			}
+		case nil:
+			// already handled when checked before
+		default:
+			typeResult = false
+			err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String())
+			err = PrependPathToErrors(err, key)
+			errs = append(errs, err)
+		}
+		result = result && presentResult && typeResult && resultField && structResult && mapResult
+		index++
+	}
+	// check required keys
+	requiredResult := true
+	for key, value := range m {
+		if schema, ok := value.(string); ok {
+			tags := parseTagIntoMap(schema)
+			if required, ok := tags["required"]; ok {
+				if _, ok := s[key]; !ok {
+					requiredResult = false
+					if required.customErrorMessage != "" {
+						err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}}
+					} else {
+						err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}}
+					}
+					errs = append(errs, err)
+				}
+			}
+		}
+	}
+
+	if len(errs) > 0 {
+		err = errs
+	}
+	return result && requiredResult, err
+}
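ValidateMap walks the data map key by key: every data key must have a rule, nested maps recurse, string rules run through the same typeCheck machinery as struct tags, and any rule marked `required` must have a matching data key. A hedged sketch, not part of the patch; the field names and rules are hypothetical, and `alpha`/`ascii` are standard govalidator tag names:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	data := map[string]interface{}{
		"name": "John",
		"address": map[string]interface{}{
			"line1": "Main Street 1",
		},
	}
	rules := map[string]interface{}{
		"name": "required,alpha",
		"address": map[string]interface{}{
			"line1": "required,ascii",
		},
	}
	ok, err := govalidator.ValidateMap(data, rules)
	fmt.Println(ok, err) // true <nil>: every key has a rule and validates
}
```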
+// ValidateStruct uses tags for fields.
+// result will be equal to `false` if there are any errors.
+// todo currently there is no guarantee that errors will be returned in predictable order (tests may fail)
+func ValidateStruct(s interface{}) (bool, error) {
+	if s == nil {
+		return true, nil
+	}
+	result := true
+	var err error
+	val := reflect.ValueOf(s)
+	if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	// we only accept structs
+	if val.Kind() != reflect.Struct {
+		return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
+	}
+	var errs Errors
+	for i := 0; i < val.NumField(); i++ {
+		valueField := val.Field(i)
+		typeField := val.Type().Field(i)
+		if typeField.PkgPath != "" {
+			continue // Private field
+		}
+		structResult := true
+		if valueField.Kind() == reflect.Interface {
+			valueField = valueField.Elem()
+		}
+		if (valueField.Kind() == reflect.Struct ||
+			(valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+			typeField.Tag.Get(tagName) != "-" {
+			var err error
+			structResult, err = ValidateStruct(valueField.Interface())
+			if err != nil {
+				err = PrependPathToErrors(err, typeField.Name)
+				errs = append(errs, err)
+			}
+		}
+		resultField, err2 := typeCheck(valueField, typeField, val, nil)
+		if err2 != nil {
+
+			// Replace structure name with JSON name if there is a tag on the variable
+			jsonTag := toJSONName(typeField.Tag.Get("json"))
+			if jsonTag != "" {
+				switch jsonError := err2.(type) {
+				case Error:
+					jsonError.Name = jsonTag
+					err2 = jsonError
+				case Errors:
+					for i2, err3 := range jsonError {
+						switch customErr := err3.(type) {
+						case Error:
+							customErr.Name = jsonTag
+							jsonError[i2] = customErr
+						}
+					}
+
+					err2 = jsonError
+				}
+			}
+
+			errs = append(errs, err2)
+		}
+		result = result && resultField && structResult
+	}
+	if len(errs) > 0 {
+		err = errs
+	}
+	return result, err
+}
+
+// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
+func parseTagIntoMap(tag string) tagOptionsMap {
+	optionsMap := make(tagOptionsMap)
+	options := strings.Split(tag, ",")
+
+	for i, option := range options {
+		option = strings.TrimSpace(option)
+
+		validationOptions := strings.Split(option, "~")
+		if !isValidTag(validationOptions[0]) {
+			continue
+		}
+		if len(validationOptions) == 2 {
+			optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
+		} else {
+			optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
+		}
+	}
+	return optionsMap
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
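parseTagIntoMap is what makes the `~` custom-message syntax work: everything after `~` in a comma-separated option becomes the error text for that validator. A hedged ValidateStruct sketch, not part of the patch; the struct and messages are hypothetical, and a nil pointer field is used because in this version checkRequired only fires for unset nil-able fields:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// User demonstrates the tag syntax parsed by parseTagIntoMap: options are
// comma separated, and a custom error message follows "~".
type User struct {
	Name  *string `valid:"required~Name is mandatory"`
	Email string  `valid:"email"`
}

func main() {
	ok, err := govalidator.ValidateStruct(User{Email: "not-an-email"})
	fmt.Println(ok)  // false
	fmt.Println(err) // roughly: "Name is mandatory;not-an-email does not validate as email"
}
```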
+// IsSSN will validate the given string as a U.S. Social Security Number
+func IsSSN(str string) bool {
+	if str == "" || len(str) != 11 {
+		return false
+	}
+	return rxSSN.MatchString(str)
+}
+
+// IsSemver checks if a string is a valid semantic version
+func IsSemver(str string) bool {
+	return rxSemver.MatchString(str)
+}
+
+// IsType checks if the interface is of some type
+func IsType(v interface{}, params ...string) bool {
+	if len(params) == 1 {
+		typ := params[0]
+		return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1)
+	}
+	return false
+}
+
+// IsTime checks if a string is valid according to the given format
+func IsTime(str string, format string) bool {
+	_, err := time.Parse(format, str)
+	return err == nil
+}
+
+// IsUnixTime checks if a string is a valid unix timestamp value
+func IsUnixTime(str string) bool {
+	if _, err := strconv.Atoi(str); err == nil {
+		return true
+	}
+	return false
+}
+
+// IsRFC3339 checks if a string is a valid timestamp value according to RFC3339
+func IsRFC3339(str string) bool {
+	return IsTime(str, time.RFC3339)
+}
+
+// IsRFC3339WithoutZone checks if a string is a valid timestamp value according to RFC3339 which excludes the timezone.
+func IsRFC3339WithoutZone(str string) bool {
+	return IsTime(str, RF3339WithoutZone)
+}
+
+// IsISO4217 checks if a string is a valid ISO currency code
+func IsISO4217(str string) bool {
+	for _, currency := range ISO4217List {
+		if str == currency {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ByteLength checks the string's length
+func ByteLength(str string, params ...string) bool {
+	if len(params) == 2 {
+		min, _ := ToInt(params[0])
+		max, _ := ToInt(params[1])
+		return len(str) >= int(min) && len(str) <= int(max)
+	}
+
+	return false
+}
+
+// RuneLength checks the string's length
+// Alias for StringLength
+func RuneLength(str string, params ...string) bool {
+	return StringLength(str, params...)
+}
+
+// IsRsaPub checks whether a string is a valid RSA key
+// Alias for IsRsaPublicKey
+func IsRsaPub(str string, params ...string) bool {
+	if len(params) == 1 {
+		keylen, _ := ToInt(params[0])
+		return IsRsaPublicKey(str, int(keylen))
+	}
+
+	return false
+}
+
+// StringMatches checks if a string matches a given pattern.
+func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength check string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// MinStringLength check string's minimum length (including multi byte strings) +func MinStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + return strLength >= int(min) + } + + return false +} + +// MaxStringLength check string's maximum length (including multi byte strings) +func MaxStringLength(str string, params ...string) bool { + + if len(params) == 1 { + strLength := utf8.RuneCountInString(str) + max, _ := ToInt(params[0]) + return strLength <= int(max) + } + + return false +} + +// Range check string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +func IsInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) + } + + return false +} + +// IsIn check if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // Check if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if !isFieldSet(v) { + // an empty value is not validated, check only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var customTypeErrors Errors + optionsOrder := options.orderedKeys() + 
for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // Check for interface param validators + for key, value := range InterfaceParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := InterfaceParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + field := fmt.Sprint(v) + if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + } + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option check the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' 
{ + validator = validator[1:] + negate = true + } + + // Check for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+					err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
+					return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
+				}
+			}
+		}
+		return true, nil
+	case reflect.Map:
+		if v.Type().Key().Kind() != reflect.String {
+			return false, &UnsupportedTypeError{v.Type()}
+		}
+		var sv stringValues
+		sv = v.MapKeys()
+		sort.Sort(sv)
+		result := true
+		for i, k := range sv {
+			var resultItem bool
+			var err error
+			if v.MapIndex(k).Kind() != reflect.Struct {
+				resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
+				if err != nil {
+					return false, err
+				}
+			} else {
+				resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
+				if err != nil {
+					err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
+					return false, err
+				}
+			}
+			result = result && resultItem
+		}
+		return result, nil
+	case reflect.Slice, reflect.Array:
+		result := true
+		for i := 0; i < v.Len(); i++ {
+			var resultItem bool
+			var err error
+			if v.Index(i).Kind() != reflect.Struct {
+				resultItem, err = typeCheck(v.Index(i), t, o, options)
+				if err != nil {
+					return false, err
+				}
+			} else {
+				resultItem, err = ValidateStruct(v.Index(i).Interface())
+				if err != nil {
+					err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
+					return false, err
+				}
+			}
+			result = result && resultItem
+		}
+		return result, nil
+	case reflect.Interface:
+		// If the value is an interface then encode its element
+		if v.IsNil() {
+			return true, nil
+		}
+		return ValidateStruct(v.Interface())
+	case reflect.Ptr:
+		// If the value is a pointer then check its element
+		if v.IsNil() {
+			return true, nil
+		}
+		return typeCheck(v.Elem(), t, o, options)
+	case reflect.Struct:
+		return true, nil
+	default:
+		return false, &UnsupportedTypeError{v.Type()}
+	}
+}
+
+func stripParams(validatorString string) string {
+	return paramsRegexp.ReplaceAllString(validatorString, "")
+}
+
+// isFieldSet returns false for nil pointers, interfaces, maps, and slices. For all other values, it returns true.
+func isFieldSet(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Map, reflect.Slice, reflect.Interface, reflect.Ptr:
+		return !v.IsNil()
+	}
+
+	return true
+}
+
+// ErrorByField returns error for specified field of the struct
+// validated by ValidateStruct or empty string if there are no errors
+// or this field doesn't exist or doesn't have any errors.
+func ErrorByField(e error, field string) string {
+	if e == nil {
+		return ""
+	}
+	return ErrorsByField(e)[field]
+}
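ErrorByField and ErrorsByField flatten the nested Error/Errors values produced by ValidateStruct into per-field lookups. A hedged sketch, not part of the patch; the struct and values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type Server struct {
	IP   string `valid:"ipv4"`
	Name string `valid:"alpha"`
}

func main() {
	_, err := govalidator.ValidateStruct(Server{IP: "999.1.1.1", Name: "etcd1"})

	// Per-field lookup of the collected errors:
	fmt.Println(govalidator.ErrorByField(err, "IP"))   // roughly: "999.1.1.1 does not validate as ipv4"
	fmt.Println(govalidator.ErrorByField(err, "Name")) // roughly: "etcd1 does not validate as alpha"

	// Or the whole field -> message map at once:
	fmt.Println(govalidator.ErrorsByField(err))
}
```

+// ErrorsByField returns map of errors of the struct validated
+// by ValidateStruct or empty map if there are no errors.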
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e := e.(type) { + case Error: + m[e.Name] = e.Err.Error() + case Errors: + for _, item := range e.Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 000000000..bc5f7b086 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race -v ./... diff --git a/vendor/github.com/chai2010/gettext-go/.travis.yml b/vendor/github.com/chai2010/gettext-go/.travis.yml new file mode 100644 index 000000000..4eac3982b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - "1.14" + - tip diff --git a/vendor/github.com/chai2010/gettext-go/LICENSE b/vendor/github.com/chai2010/gettext-go/LICENSE new file mode 100644 index 000000000..8f3940825 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2013 ChaiShushan . All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/chai2010/gettext-go/README.md b/vendor/github.com/chai2010/gettext-go/README.md
new file mode 100644
index 000000000..9381bd152
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/README.md
@@ -0,0 +1,191 @@
+- *Donate BTC: 1Cbd6oGAUUyBi7X7MaR4np4nTmQZXVgkCW*
+- *Donate ETH: 0x623A3C3a72186A6336C79b18Ac1eD36e1c71A8a6*
+- *Paid Go-language QQ group: 1055927514*
+
+----
+
+# gettext-go: GNU gettext for Go ([Imported By Kubernetes](https://pkg.go.dev/github.com/chai2010/gettext-go@v0.1.0/gettext?tab=importedby))
+
+- PkgDoc: [http://godoc.org/github.com/chai2010/gettext-go](http://godoc.org/github.com/chai2010/gettext-go)
+- PkgDoc: [http://pkg.go.dev/github.com/chai2010/gettext-go](http://pkg.go.dev/github.com/chai2010/gettext-go)
+
+## Install
+
+1. `go get github.com/chai2010/gettext-go`
+2. `go run hello.go`
+
+godoc.org and go.dev have more information.
+
+## Examples
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/chai2010/gettext-go"
+)
+
+func main() {
+	gettext := gettext.New("hello", "./examples/locale").SetLanguage("zh_CN")
+	fmt.Println(gettext.Gettext("Hello, world!"))
+
+	// Output: 你好, 世界!
+}
+```
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/chai2010/gettext-go"
+)
+
+func main() {
+	gettext.SetLanguage("zh_CN")
+	gettext.BindLocale(gettext.New("hello", "locale"))
+
+	// gettext.BindLocale("hello", "locale")              // from locale dir
+	// gettext.BindLocale("hello", "locale.zip")          // from locale zip file
+	// gettext.BindLocale("hello", "locale.zip", zipData) // from embedded zip data
+
+	// translate source text
+	fmt.Println(gettext.Gettext("Hello, world!"))
+	// Output: 你好, 世界!
+
+	// if no msgctxt in PO file (only msgid and msgstr),
+	// specify context as "" by
+	fmt.Println(gettext.PGettext("", "Hello, world!"))
+	// Output: 你好, 世界!
+
+	// translate resource
+	fmt.Println(string(gettext.Getdata("poems.txt")))
+	// Output: ...
+} +``` + +Go file: [hello.go](https://github.com/chai2010/gettext-go/blob/master/examples/hello.go); PO file: [hello.po](https://github.com/chai2010/gettext-go/blob/master/examples/locale/default/LC_MESSAGES/hello.po); + +---- + +## API Changes (v0.1.0 vs v1.0.0) + +### Renamed package path + +| v0.1.0 (old) | v1.0.0 (new) | +| ----------------------------------------------- | --------------------------------------- | +| `github.com/chai2010/gettext-go/gettext` | `github.com/chai2010/gettext-go` | +| `github.com/chai2010/gettext-go/gettext/po` | `github.com/chai2010/gettext-go/po` | +| `github.com/chai2010/gettext-go/gettext/mo` | `github.com/chai2010/gettext-go/mo` | +| `github.com/chai2010/gettext-go/gettext/plural` | `github.com/chai2010/gettext-go/plural` | + +### Renamed functions + +| v0.1.0 (old) | v1.0.0 (new) | +| ---------------------------------- | --------------------------- | +| `gettext-go/gettext.*` | `gettext-go.*` | +| `gettext-go/gettext.DefaultLocal` | `gettext-go.DefaultLanguage`| +| `gettext-go/gettext.BindTextdomain`| `gettext-go.BindLocale` | +| `gettext-go/gettext.Textdomain` | `gettext-go.SetDomain` | +| `gettext-go/gettext.SetLocale` | `gettext-go.SetLanguage` | +| `gettext-go/gettext/po.Load` | `gettext-go/po.LoadFile` | +| `gettext-go/gettext/po.LoadData` | `gettext-go/po.Load` | +| `gettext-go/gettext/mo.Load` | `gettext-go/mo.LoadFile` | +| `gettext-go/gettext/mo.LoadData` | `gettext-go/mo.Load` | + +### Use empty string as the default context for `gettext.Gettext` + +```go +package main + +// v0.1.0 +// if the **context** missing, use `callerName(2)` as the context: + +// v1.0.0 +// if the **context** missing, use empty string as the context: + +func main() { + gettext.Gettext("hello") + // v0.1.0 => gettext.PGettext("main.main", "hello") + // v1.0.0 => gettext.PGettext("", "hello") + + gettext.DGettext("domain", "hello") + // v0.1.0 => gettext.DPGettext("domain", "main.main", "hello") + // v1.0.0 => gettext.DPGettext("domain", "", "hello") + + gettext.NGettext("domain", "hello", "hello2", n) + // v0.1.0 => gettext.PNGettext("domain", "main.main", "hello", "hello2", n) + // v1.0.0 => gettext.PNGettext("domain", "", "hello", "hello2", n) + + gettext.DNGettext("domain", "hello", "hello2", n) + // v0.1.0 => gettext.DPNGettext("domain", "main.main", "hello", "hello2", n) + // v1.0.0 => gettext.DPNGettext("domain", "", "hello", "hello2", n) +} +``` + +### `BindLocale` support `FileSystem` interface + +```go +// Use FileSystem: +// BindLocale(New("poedit", "name", OS("path/to/dir"))) // bind "poedit" domain +// BindLocale(New("poedit", "name", OS("path/to.zip"))) // bind "poedit" domain +``` + +## New API in v1.0.0 + +`Gettexter` interface: + +```go +type Gettexter interface { + FileSystem() FileSystem + + GetDomain() string + SetDomain(domain string) Gettexter + + GetLanguage() string + SetLanguage(lang string) Gettexter + + Gettext(msgid string) string + PGettext(msgctxt, msgid string) string + + NGettext(msgid, msgidPlural string, n int) string + PNGettext(msgctxt, msgid, msgidPlural string, n int) string + + DGettext(domain, msgid string) string + DPGettext(domain, msgctxt, msgid string) string + DNGettext(domain, msgid, msgidPlural string, n int) string + DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string + + Getdata(name string) []byte + DGetdata(domain, name string) []byte +} + +func New(domain, path string, data ...interface{}) Gettexter +``` + +`FileSystem` interface: + +```go +type FileSystem interface { + LocaleList() []string + 
LoadMessagesFile(domain, lang, ext string) ([]byte, error) + LoadResourceFile(domain, lang, name string) ([]byte, error) + String() string +} + +func NewFS(name string, x interface{}) FileSystem +func OS(root string) FileSystem +func ZipFS(r *zip.Reader, name string) FileSystem +func NilFS(name string) FileSystem +``` + +---- + +## BUGS + +Please report bugs to . + +Thanks! diff --git a/vendor/github.com/chai2010/gettext-go/doc.go b/vendor/github.com/chai2010/gettext-go/doc.go new file mode 100644 index 000000000..50dfea330 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/doc.go @@ -0,0 +1,67 @@ +// Copyright 2013 . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gettext implements a basic GNU's gettext library. + +Example: + import ( + "github.com/chai2010/gettext-go" + ) + + func main() { + gettext.SetLanguage("zh_CN") + + // gettext.BindLocale(gettext.New("hello", "locale")) // from locale dir + // gettext.BindLocale(gettext.New("hello", "locale.zip")) // from locale zip file + // gettext.BindLocale(gettext.New("hello", "locale.zip", zipData)) // from embedded zip data + + gettext.BindLocale(gettext.New("hello", "locale")) + + // translate source text + fmt.Println(gettext.Gettext("Hello, world!")) + // Output: 你好, 世界! + + // translate resource + fmt.Println(string(gettext.Getdata("poems.txt"))) + // Output: ... + } + +Translate directory struct("./examples/locale.zip"): + + Root: "path" or "file.zip/zipBaseName" + +-default # locale: $(LC_MESSAGES) or $(LANG) or "default" + | +-LC_MESSAGES # just for `gettext.Gettext` + | | +-hello.mo # $(Root)/$(lang)/LC_MESSAGES/$(domain).mo + | | +-hello.po # $(Root)/$(lang)/LC_MESSAGES/$(domain).po + | | \-hello.json # $(Root)/$(lang)/LC_MESSAGES/$(domain).json + | | + | \-LC_RESOURCE # just for `gettext.Getdata` + | +-hello # domain map a dir in resource translate + | +-favicon.ico # $(Root)/$(lang)/LC_RESOURCE/$(domain)/$(filename) + | \-poems.txt + | + \-zh_CN # simple chinese translate + +-LC_MESSAGES + | +-hello.po # try "$(domain).po" first + | +-hello.mo # try "$(domain).mo" second + | \-hello.json # try "$(domain).json" third + | + \-LC_RESOURCE + +-hello + +-favicon.ico # $(lang)/$(domain)/favicon.ico + \-poems.txt # $(lang)/$(domain)/poems.txt + +See: + http://en.wikipedia.org/wiki/Gettext + http://www.gnu.org/software/gettext/manual/html_node + http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html + http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html + http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html + http://www.poedit.net/ + +Please report bugs to . +Thanks! +*/ +package gettext diff --git a/vendor/github.com/chai2010/gettext-go/fs.go b/vendor/github.com/chai2010/gettext-go/fs.go new file mode 100644 index 000000000..4e66fae7c --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/fs.go @@ -0,0 +1,84 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "archive/zip" + "bytes" + "fmt" +) + +type FileSystem interface { + LocaleList() []string + LoadMessagesFile(domain, lang, ext string) ([]byte, error) + LoadResourceFile(domain, lang, name string) ([]byte, error) + String() string +} + +func NewFS(name string, x interface{}) FileSystem { + if x == nil { + if name != "" { + return OS(name) + } + return NilFS(name) + } + + switch x := x.(type) { + case []byte: + if len(x) == 0 { + return OS(name) + } + if r, err := zip.NewReader(bytes.NewReader(x), int64(len(x))); err == nil { + return ZipFS(r, name) + } + if fs, err := newJson(x, name); err == nil { + return fs + } + case string: + if len(x) == 0 { + return OS(name) + } + if r, err := zip.NewReader(bytes.NewReader([]byte(x)), int64(len(x))); err == nil { + return ZipFS(r, name) + } + if fs, err := newJson([]byte(x), name); err == nil { + return fs + } + case FileSystem: + return x + } + + return NilFS(name) +} + +func OS(root string) FileSystem { + return newOsFS(root) +} + +func ZipFS(r *zip.Reader, name string) FileSystem { + return newZipFS(r, name) +} + +func NilFS(name string) FileSystem { + return &nilFS{name} +} + +type nilFS struct { + name string +} + +func (p *nilFS) LocaleList() []string { + return nil +} + +func (p *nilFS) LoadMessagesFile(domain, lang, ext string) ([]byte, error) { + return nil, fmt.Errorf("not found") +} +func (p *nilFS) LoadResourceFile(domain, lang, name string) ([]byte, error) { + return nil, fmt.Errorf("not found") +} +func (p *nilFS) String() string { + return "gettext.nilfs(" + p.name + ")" +} diff --git a/vendor/github.com/chai2010/gettext-go/fs_json.go b/vendor/github.com/chai2010/gettext-go/fs_json.go new file mode 100644 index 000000000..c7138c995 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/fs_json.go @@ -0,0 +1,66 @@ +// Copyright 2020 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "encoding/json" + "fmt" + "sort" +) + +type jsonFS struct { + name string + x map[string]struct { + LC_MESSAGES map[string][]struct { + MsgContext string `json:"msgctxt"` // msgctxt context + MsgId string `json:"msgid"` // msgid untranslated-string + MsgIdPlural string `json:"msgid_plural"` // msgid_plural untranslated-string-plural + MsgStr []string `json:"msgstr"` // msgstr translated-string + } + LC_RESOURCE map[string]map[string]string + } +} + +func isJsonData() bool { + return false +} + +func newJson(jsonData []byte, name string) (*jsonFS, error) { + p := &jsonFS{name: name} + if err := json.Unmarshal(jsonData, &p.x); err != nil { + return nil, err + } + + return p, nil +} + +func (p *jsonFS) LocaleList() []string { + var ss []string + for lang := range p.x { + ss = append(ss, lang) + } + sort.Strings(ss) + return ss +} + +func (p *jsonFS) LoadMessagesFile(domain, lang, ext string) ([]byte, error) { + if v, ok := p.x[lang]; ok { + if v, ok := v.LC_MESSAGES[domain+ext]; ok { + return json.Marshal(v) + } + } + return nil, fmt.Errorf("not found") +} +func (p *jsonFS) LoadResourceFile(domain, lang, name string) ([]byte, error) { + if v, ok := p.x[lang]; ok { + if v, ok := v.LC_RESOURCE[domain]; ok { + return []byte(v[name]), nil + } + } + return nil, fmt.Errorf("not found") +} +func (p *jsonFS) String() string { + return "gettext.nilfs(" + p.name + ")" +} diff --git a/vendor/github.com/chai2010/gettext-go/fs_os.go b/vendor/github.com/chai2010/gettext-go/fs_os.go new file mode 100644 index 000000000..80d4f51ba --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/fs_os.go @@ -0,0 +1,91 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "archive/zip" + "bytes" + "fmt" + "io/ioutil" + "os" + "sort" + "strings" +) + +type osFS struct { + root string +} + +func newOsFS(root string) FileSystem { + // locale zip file + if fi, err := os.Stat(root); err == nil && !fi.IsDir() { + if strings.HasSuffix(strings.ToLower(root), ".zip") { + if x, err := ioutil.ReadFile(root); err == nil { + if r, err := zip.NewReader(bytes.NewReader(x), int64(len(x))); err == nil { + return ZipFS(r, root) + } + } + } + if strings.HasSuffix(strings.ToLower(root), ".json") { + if x, err := ioutil.ReadFile(root); err == nil { + if fs, err := newJson(x, root); err == nil { + return fs + } + } + } + } + + // locale dir + return &osFS{root: root} +} + +func (p *osFS) LocaleList() []string { + list, err := ioutil.ReadDir(p.root) + if err != nil { + return nil + } + ssMap := make(map[string]bool) + for _, dir := range list { + if dir.IsDir() { + ssMap[dir.Name()] = true + } + } + var locales = make([]string, 0, len(ssMap)) + for s := range ssMap { + locales = append(locales, s) + } + sort.Strings(locales) + return locales +} + +func (p *osFS) LoadMessagesFile(domain, locale, ext string) ([]byte, error) { + trName := p.makeMessagesFileName(domain, locale, ext) + rcData, err := ioutil.ReadFile(trName) + if err != nil { + return nil, err + } + return rcData, nil +} + +func (p *osFS) LoadResourceFile(domain, locale, name string) ([]byte, error) { + rcName := p.makeResourceFileName(domain, locale, name) + rcData, err := ioutil.ReadFile(rcName) + if err != nil { + return nil, err + } + return rcData, nil +} + +func (p *osFS) String() string { + return "gettext.localfs(" + p.root + ")" +} + +func (p *osFS) makeMessagesFileName(domain, lang, ext string) string { + return fmt.Sprintf("%s/%s/LC_MESSAGES/%s%s", p.root, lang, domain, ext) +} + +func (p *osFS) makeResourceFileName(domain, lang, name string) string { + return fmt.Sprintf("%s/%s/LC_RESOURCE/%s/%s", p.root, lang, domain, name) +} diff --git a/vendor/github.com/chai2010/gettext-go/fs_zip.go b/vendor/github.com/chai2010/gettext-go/fs_zip.go new file mode 100644 index 000000000..61eb8359d --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/fs_zip.go @@ -0,0 +1,142 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "archive/zip" + "fmt" + "io/ioutil" + "sort" + "strings" +) + +type zipFS struct { + root string + name string + r *zip.Reader +} + +func newZipFS(r *zip.Reader, name string) *zipFS { + fs := &zipFS{r: r, name: name} + fs.root = fs.zipRoot() + return fs +} + +func (p *zipFS) zipName() string { + name := p.name + if x := strings.LastIndexAny(name, `\/`); x != -1 { + name = name[x+1:] + } + name = strings.TrimSuffix(name, ".zip") + return name +} + +func (p *zipFS) zipRoot() string { + var somepath string + for _, f := range p.r.File { + if x := strings.Index(f.Name, "LC_MESSAGES"); x != -1 { + somepath = f.Name + } + if x := strings.Index(f.Name, "LC_RESOURCE"); x != -1 { + somepath = f.Name + } + } + if somepath == "" { + return p.zipName() + } + + ss := strings.Split(somepath, "/") + for i, s := range ss { + // $(root)/$(lang)/LC_MESSAGES + // $(root)/$(lang)/LC_RESOURCE + if (s == "LC_MESSAGES" || s == "LC_RESOURCE") && i >= 2 { + return strings.Join(ss[:i-1], "/") + } + } + + return p.zipName() +} + +func (p *zipFS) LocaleList() []string { + var locals []string + for s := range p.lsZip(p.r) { + locals = append(locals, s) + } + sort.Strings(locals) + return locals +} + +func (p *zipFS) LoadMessagesFile(domain, lang, ext string) ([]byte, error) { + trName := p.makeMessagesFileName(domain, lang, ext) + for _, f := range p.r.File { + if f.Name != trName { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + rcData, err := ioutil.ReadAll(rc) + rc.Close() + return rcData, err + } + return nil, fmt.Errorf("not found") +} + +func (p *zipFS) LoadResourceFile(domain, lang, name string) ([]byte, error) { + rcName := p.makeResourceFileName(domain, lang, name) + for _, f := range p.r.File { + if f.Name != rcName { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + rcData, err := ioutil.ReadAll(rc) + rc.Close() + return rcData, err + } + return nil, fmt.Errorf("not found") +} + +func (p *zipFS) String() string { + return "gettext.zipfs(" + p.name + ")" +} + +func (p *zipFS) makeMessagesFileName(domain, lang, ext string) string { + return fmt.Sprintf("%s/%s/LC_MESSAGES/%s%s", p.root, lang, domain, ext) +} + +func (p *zipFS) makeResourceFileName(domain, lang, name string) string { + return fmt.Sprintf("%s/%s/LC_RESOURCE/%s/%s", p.root, lang, domain, name) +} + +func (p *zipFS) lsZip(r *zip.Reader) map[string]bool { + ssMap := make(map[string]bool) + for _, f := range r.File { + if x := strings.Index(f.Name, "LC_MESSAGES"); x != -1 { + s := strings.TrimRight(f.Name[:x], `\/`) + if x = strings.LastIndexAny(s, `\/`); x != -1 { + s = s[x+1:] + } + if s != "" { + ssMap[s] = true + } + continue + } + if x := strings.Index(f.Name, "LC_RESOURCE"); x != -1 { + s := strings.TrimRight(f.Name[:x], `\/`) + if x = strings.LastIndexAny(s, `\/`); x != -1 { + s = s[x+1:] + } + if s != "" { + ssMap[s] = true + } + continue + } + } + return ssMap +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext.go b/vendor/github.com/chai2010/gettext-go/gettext.go new file mode 100644 index 000000000..7747188ab --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext.go @@ -0,0 +1,219 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package gettext
+
+var (
+	DefaultLanguage string = getDefaultLanguage() // use $(LC_MESSAGES) or $(LANG) or "default"
+)
+
+type Gettexter interface {
+	FileSystem() FileSystem
+
+	GetDomain() string
+	SetDomain(domain string) Gettexter
+
+	GetLanguage() string
+	SetLanguage(lang string) Gettexter
+
+	Gettext(msgid string) string
+	PGettext(msgctxt, msgid string) string
+
+	NGettext(msgid, msgidPlural string, n int) string
+	PNGettext(msgctxt, msgid, msgidPlural string, n int) string
+
+	DGettext(domain, msgid string) string
+	DPGettext(domain, msgctxt, msgid string) string
+	DNGettext(domain, msgid, msgidPlural string, n int) string
+	DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string
+
+	Getdata(name string) []byte
+	DGetdata(domain, name string) []byte
+}
+
+// New creates a Gettexter that uses the default language.
+func New(domain, path string, data ...interface{}) Gettexter {
+	return newLocale(domain, path, data...)
+}
+
+var defaultGettexter struct {
+	lang   string
+	domain string
+	Gettexter
+}
+
+func init() {
+	defaultGettexter.lang = getDefaultLanguage()
+	defaultGettexter.domain = "default"
+	defaultGettexter.Gettexter = newLocale("", "")
+}
+
+// BindLocale binds the Gettexter used by the package-level functions.
+//
+// Examples:
+//	BindLocale(New("poedit", "locale")) // bind "poedit" domain
+//
+// Use zip file:
+//	BindLocale(New("poedit", "locale.zip"))          // bind "poedit" domain
+//	BindLocale(New("poedit", "locale.zip", zipData)) // bind "poedit" domain
+//
+// Use FileSystem:
+//	BindLocale(New("poedit", "name", OS("path/to/dir"))) // bind "poedit" domain
+//	BindLocale(New("poedit", "name", OS("path/to.zip"))) // bind "poedit" domain
+//
+func BindLocale(g Gettexter) {
+	if g != nil {
+		defaultGettexter.Gettexter = g
+		defaultGettexter.SetLanguage(defaultGettexter.lang)
+	} else {
+		defaultGettexter.Gettexter = newLocale("", "")
+		defaultGettexter.SetLanguage(defaultGettexter.lang)
+	}
+}
+
+// SetLanguage sets and queries the program's current language.
+//
+// If lang is not an empty string, the new language is set.
+//
+// If lang is an empty string, nothing is changed.
+//
+// Returns the current language.
+//
+// Examples:
+//	SetLanguage("")      // get language: return DefaultLanguage
+//	SetLanguage("zh_CN") // set language: return zh_CN
+//	SetLanguage("")      // get language: return zh_CN
+func SetLanguage(lang string) string {
+	defaultGettexter.SetLanguage(lang)
+	return defaultGettexter.GetLanguage()
+}
+
+// SetDomain sets and retrieves the current message domain.
+//
+// If domain is not an empty string, the new domain is set.
+//
+// If domain is an empty string, nothing is changed.
+//
+// Returns the current domain.
+//
+// Examples:
+//	SetDomain("poedit") // set domain: poedit
+//	SetDomain("")       // get domain: return poedit
func SetDomain(domain string) string {
+	defaultGettexter.SetDomain(domain)
+	return defaultGettexter.GetDomain()
+}
+
+// Gettext attempts to translate a text string into the user's native language,
+// by looking up the translation in a message catalog.
+//
+// It uses an empty string as the msgctxt.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.Gettext("Hello") // msgctxt is ""
+//	}
+func Gettext(msgid string) string {
+	return defaultGettexter.Gettext(msgid)
+}
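SetLanguage and SetDomain double as getters: an empty argument reports the current setting instead of changing it. A hedged sketch of the package-level flow, not part of the patch; with no locale bound, lookups are expected to miss and fall back to returning the msgid itself:

```go
package main

import (
	"fmt"

	"github.com/chai2010/gettext-go"
)

func main() {
	// Query, then set, the process-wide language.
	fmt.Println(gettext.SetLanguage(""))      // the effective language (DefaultLanguage at startup)
	fmt.Println(gettext.SetLanguage("zh_CN")) // "zh_CN"

	// No catalog has been bound via BindLocale, so the msgid comes back unchanged.
	fmt.Println(gettext.Gettext("Hello, world!")) // "Hello, world!"
}
```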
+
+// Getdata attempts to translate a resource file into the user's native language,
+// by looking up the translation in a message catalog.
+//
+// Examples:
+//	func Foo() {
+//		SetDomain("hello")
+//		BindLocale(New("hello", "locale.zip", nilOrZipData))
+//		poems := gettext.Getdata("poems.txt")
+//	}
+func Getdata(name string) []byte {
+	return defaultGettexter.Getdata(name)
+}
+
+// NGettext attempts to translate a text string into the user's native language,
+// by looking up the appropriate plural form of the translation in a message
+// catalog.
+//
+// It uses an empty string as the msgctxt.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.NGettext("%d people", "%d peoples", 2)
+//	}
+func NGettext(msgid, msgidPlural string, n int) string {
+	return defaultGettexter.NGettext(msgid, msgidPlural, n)
+}
+
+// PGettext attempts to translate a text string into the user's native language,
+// by looking up the translation in a message catalog.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.PGettext("gettext-go.example", "Hello") // msgctxt is "gettext-go.example"
+//	}
+func PGettext(msgctxt, msgid string) string {
+	return defaultGettexter.PGettext(msgctxt, msgid)
+}
+
+// PNGettext attempts to translate a text string into the user's native language,
+// by looking up the appropriate plural form of the translation in a message
+// catalog.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.PNGettext("gettext-go.example", "%d people", "%d peoples", 2)
+//	}
+func PNGettext(msgctxt, msgid, msgidPlural string, n int) string {
+	return defaultGettexter.PNGettext(msgctxt, msgid, msgidPlural, n)
+}
+
+// DGettext is like Gettext(), but looks up the message in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DGettext("poedit", "Hello")
+//	}
+func DGettext(domain, msgid string) string {
+	return defaultGettexter.DGettext(domain, msgid)
+}
+
+// DNGettext is like NGettext(), but looks up the message in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DNGettext("poedit", "%d people", "%d peoples", 2)
+//	}
+func DNGettext(domain, msgid, msgidPlural string, n int) string {
+	return defaultGettexter.DNGettext(domain, msgid, msgidPlural, n)
+}
+
+// DPGettext is like PGettext(), but looks up the message in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DPGettext("poedit", "gettext-go.example", "Hello")
+//	}
+func DPGettext(domain, msgctxt, msgid string) string {
+	return defaultGettexter.DPGettext(domain, msgctxt, msgid)
+}
+
+// DPNGettext is like PNGettext(), but looks up the message in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DPNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2)
+//	}
+func DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string {
+	return defaultGettexter.DPNGettext(domain, msgctxt, msgid, msgidPlural, n)
+}
+
+// DGetdata is like Getdata(), but looks up the resource in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DGetdata("hello", "poems.txt")
+//	}
+func DGetdata(domain, name string) []byte {
+	return defaultGettexter.DGetdata(domain, name)
+}
diff --git a/vendor/github.com/chai2010/gettext-go/locale.go b/vendor/github.com/chai2010/gettext-go/locale.go
new file mode 100644
index 000000000..e7a2d4b37
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/locale.go
@@ -0,0 +1,205 @@
+// Copyright 2020 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package gettext + +import ( + "fmt" + "sync" +) + +type _Locale struct { + mutex sync.Mutex + fs FileSystem + lang string + domain string + trMap map[string]*translator + trCurrent *translator +} + +var _ Gettexter = (*_Locale)(nil) + +func newLocale(domain, path string, data ...interface{}) *_Locale { + if domain == "" { + domain = "default" + } + p := &_Locale{ + lang: DefaultLanguage, + domain: domain, + } + if len(data) > 0 { + p.fs = NewFS(path, data[0]) + } else { + p.fs = NewFS(path, nil) + } + + p.syncTrMap() + return p +} + +func (p *_Locale) makeTrMapKey(domain, _Locale string) string { + return domain + "_$$$_" + _Locale +} + +func (p *_Locale) FileSystem() FileSystem { + return p.fs +} + +func (p *_Locale) GetLanguage() string { + p.mutex.Lock() + defer p.mutex.Unlock() + + return p.lang +} +func (p *_Locale) SetLanguage(lang string) Gettexter { + p.mutex.Lock() + defer p.mutex.Unlock() + + if lang == "" { + lang = DefaultLanguage + } + if lang == p.lang { + return p + } + + p.lang = lang + p.syncTrMap() + return p +} + +func (p *_Locale) GetDomain() string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.domain +} + +func (p *_Locale) SetDomain(domain string) Gettexter { + p.mutex.Lock() + defer p.mutex.Unlock() + + if domain == "" || domain == p.domain { + return p + } + + p.domain = domain + p.syncTrMap() + return p +} + +func (p *_Locale) syncTrMap() { + p.trMap = make(map[string]*translator) + trMapKey := p.makeTrMapKey(p.domain, p.lang) + + if tr, ok := p.trMap[trMapKey]; ok { + p.trCurrent = tr + return + } + + // try load po file + if data, err := p.fs.LoadMessagesFile(p.domain, p.lang, ".po"); err == nil { + if tr, err := newPoTranslator(fmt.Sprintf("%s_%s.po", p.domain, p.lang), data); err == nil { + p.trMap[trMapKey] = tr + p.trCurrent = tr + return + } + } + + // try load mo file + if data, err := p.fs.LoadMessagesFile(p.domain, p.lang, ".mo"); err == nil { + if tr, err := newMoTranslator(fmt.Sprintf("%s_%s.mo", p.domain, p.lang), data); err == nil { + p.trMap[trMapKey] = tr + p.trCurrent = tr + return + } + } + + // try load json file + if data, err := p.fs.LoadMessagesFile(p.domain, p.lang, ".json"); err == nil { + if tr, err := newJsonTranslator(p.lang, fmt.Sprintf("%s_%s.json", p.domain, p.lang), data); err == nil { + p.trMap[trMapKey] = tr + p.trCurrent = tr + return + } + } + + // no po/mo file + p.trMap[trMapKey] = nilTranslator + p.trCurrent = nilTranslator + return +} + +func (p *_Locale) Gettext(msgid string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.trCurrent.PGettext("", msgid) +} + +func (p *_Locale) PGettext(msgctxt, msgid string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.trCurrent.PGettext(msgctxt, msgid) +} + +func (p *_Locale) NGettext(msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.trCurrent.PNGettext("", msgid, msgidPlural, n) +} + +func (p *_Locale) PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.trCurrent.PNGettext(msgctxt, msgid, msgidPlural, n) +} + +func (p *_Locale) DGettext(domain, msgid string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(domain, "", msgid, "", 0) +} + +func (p *_Locale) DNGettext(domain, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(domain, "", msgid, msgidPlural, n) +} + +func (p *_Locale) DPGettext(domain, msgctxt, msgid string) string { + p.mutex.Lock() + defer 
p.mutex.Unlock() + return p.gettext(domain, msgctxt, msgid, "", 0) +} + +func (p *_Locale) DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(domain, msgctxt, msgid, msgidPlural, n) +} + +func (p *_Locale) Getdata(name string) []byte { + return p.getdata(p.domain, name) +} + +func (p *_Locale) DGetdata(domain, name string) []byte { + return p.getdata(domain, name) +} + +func (p *_Locale) gettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + if f, ok := p.trMap[p.makeTrMapKey(domain, p.lang)]; ok { + return f.PNGettext(msgctxt, msgid, msgidPlural, n) + } + return msgid +} + +func (p *_Locale) getdata(domain, name string) []byte { + if data, err := p.fs.LoadResourceFile(domain, p.lang, name); err == nil { + return data + } + if p.lang != "default" { + if data, err := p.fs.LoadResourceFile(domain, "default", name); err == nil { + return data + } + } + return nil +} diff --git a/vendor/github.com/chai2010/gettext-go/mo/doc.go b/vendor/github.com/chai2010/gettext-go/mo/doc.go new file mode 100644 index 000000000..5fefc1893 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/doc.go @@ -0,0 +1,74 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mo provides support for reading and writing GNU MO file. + +Examples: + import ( + "github.com/chai2010/gettext-go/mo" + ) + + func main() { + moFile, err := mo.LoadFile("test.mo") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%v", moFile) + } + +GNU MO file struct: + + byte + +------------------------------------------+ + 0 | magic number = 0x950412de | + | | + 4 | file format revision = 0 | + | | + 8 | number of strings | == N + | | + 12 | offset of table with original strings | == O + | | + 16 | offset of table with translation strings | == T + | | + 20 | size of hashing table | == S + | | + 24 | offset of hashing table | == H + | | + . . + . (possibly more entries later) . + . . + | | + O | length & offset 0th string ----------------. + O + 8 | length & offset 1st string ------------------. + ... ... | | + O + ((N-1)*8)| length & offset (N-1)th string | | | + | | | | + T | length & offset 0th translation ---------------. + T + 8 | length & offset 1st translation -----------------. + ... ... | | | | + T + ((N-1)*8)| length & offset (N-1)th translation | | | | | + | | | | | | + H | start hash table | | | | | + ... ... | | | | + H + S * 4 | end hash table | | | | | + | | | | | | + | NUL terminated 0th string <----------------' | | | + | | | | | + | NUL terminated 1st string <------------------' | | + | | | | + ... ... | | + | | | | + | NUL terminated 0th translation <---------------' | + | | | + | NUL terminated 1st translation <-----------------' + | | + ... ... + | | + +------------------------------------------+ + +The GNU MO file specification is at +http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html. +*/ +package mo diff --git a/vendor/github.com/chai2010/gettext-go/mo/encoder.go b/vendor/github.com/chai2010/gettext-go/mo/encoder.go new file mode 100644 index 000000000..f953fd3cb --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/encoder.go @@ -0,0 +1,105 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mo + +import ( + "bytes" + "encoding/binary" + "sort" + "strings" +) + +type moHeader struct { + MagicNumber uint32 + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 +} + +type moStrPos struct { + Size uint32 // must keep fields order + Addr uint32 +} + +func encodeFile(f *File) []byte { + hdr := &moHeader{ + MagicNumber: MoMagicLittleEndian, + } + data := encodeData(hdr, f) + data = append(encodeHeader(hdr), data...) + return data +} + +// encode data and init moHeader +func encodeData(hdr *moHeader, f *File) []byte { + msgList := []Message{f.MimeHeader.toMessage()} + for _, v := range f.Messages { + if len(v.MsgId) == 0 { + continue + } + if len(v.MsgStr) == 0 && len(v.MsgStrPlural) == 0 { + continue + } + msgList = append(msgList, v) + } + sort.Slice(msgList, func(i, j int) bool { + return msgList[i].less(&msgList[j]) + }) + + var buf bytes.Buffer + var msgIdPosList = make([]moStrPos, len(msgList)) + var msgStrPosList = make([]moStrPos, len(msgList)) + for i, v := range msgList { + // write msgid + msgId := encodeMsgId(v) + msgIdPosList[i].Addr = uint32(buf.Len() + MoHeaderSize) + msgIdPosList[i].Size = uint32(len(msgId)) + buf.WriteString(msgId) + // write msgstr + msgStr := encodeMsgStr(v) + msgStrPosList[i].Addr = uint32(buf.Len() + MoHeaderSize) + msgStrPosList[i].Size = uint32(len(msgStr)) + buf.WriteString(msgStr) + } + + hdr.MsgIdOffset = uint32(buf.Len() + MoHeaderSize) + binary.Write(&buf, binary.LittleEndian, msgIdPosList) + hdr.MsgStrOffset = uint32(buf.Len() + MoHeaderSize) + binary.Write(&buf, binary.LittleEndian, msgStrPosList) + + hdr.MsgIdCount = uint32(len(msgList)) + return buf.Bytes() +} + +// must called after encodeData +func encodeHeader(hdr *moHeader) []byte { + var buf bytes.Buffer + binary.Write(&buf, binary.LittleEndian, hdr) + return buf.Bytes() +} + +func encodeMsgId(v Message) string { + if v.MsgContext != "" && v.MsgIdPlural != "" { + return v.MsgContext + EotSeparator + v.MsgId + NulSeparator + v.MsgIdPlural + } + if v.MsgContext != "" && v.MsgIdPlural == "" { + return v.MsgContext + EotSeparator + v.MsgId + } + if v.MsgContext == "" && v.MsgIdPlural != "" { + return v.MsgId + NulSeparator + v.MsgIdPlural + } + return v.MsgId +} + +func encodeMsgStr(v Message) string { + if v.MsgIdPlural != "" { + return strings.Join(v.MsgStrPlural, NulSeparator) + } + return v.MsgStr +} diff --git a/vendor/github.com/chai2010/gettext-go/mo/file.go b/vendor/github.com/chai2010/gettext-go/mo/file.go new file mode 100644 index 000000000..6f7ed161c --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/file.go @@ -0,0 +1,197 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "encoding/binary" + "fmt" + "io/ioutil" + "strings" +) + +const ( + MoHeaderSize = 28 + MoMagicLittleEndian = 0x950412de + MoMagicBigEndian = 0xde120495 + + EotSeparator = "\x04" // msgctxt and msgid separator + NulSeparator = "\x00" // msgid and msgstr separator +) + +// File represents an MO File. +// +// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html +type File struct { + MagicNumber uint32 + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 + MimeHeader Header + Messages []Message +} + +// Load loads mo file format data. 
+func Load(data []byte) (*File, error) {
+	return loadData(data)
+}
+
+// LoadFile loads a named mo file.
+func LoadFile(path string) (*File, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return loadData(data)
+}
+
+func loadData(data []byte) (*File, error) {
+	r := bytes.NewReader(data)
+
+	var magicNumber uint32
+	if err := binary.Read(r, binary.LittleEndian, &magicNumber); err != nil {
+		return nil, fmt.Errorf("gettext: %v", err)
+	}
+	var bo binary.ByteOrder
+	switch magicNumber {
+	case MoMagicLittleEndian:
+		bo = binary.LittleEndian
+	case MoMagicBigEndian:
+		bo = binary.BigEndian
+	default:
+		return nil, fmt.Errorf("gettext: %v", "invalid magic number")
+	}
+
+	var header struct {
+		MajorVersion uint16
+		MinorVersion uint16
+		MsgIdCount   uint32
+		MsgIdOffset  uint32
+		MsgStrOffset uint32
+		HashSize     uint32
+		HashOffset   uint32
+	}
+	if err := binary.Read(r, bo, &header); err != nil {
+		return nil, fmt.Errorf("gettext: %v", err)
+	}
+	if v := header.MajorVersion; v != 0 && v != 1 {
+		return nil, fmt.Errorf("gettext: %v", "invalid version number")
+	}
+	if v := header.MinorVersion; v != 0 && v != 1 {
+		return nil, fmt.Errorf("gettext: %v", "invalid version number")
+	}
+
+	msgIdStart := make([]uint32, header.MsgIdCount)
+	msgIdLen := make([]uint32, header.MsgIdCount)
+	if _, err := r.Seek(int64(header.MsgIdOffset), 0); err != nil {
+		return nil, fmt.Errorf("gettext: %v", err)
+	}
+	for i := 0; i < int(header.MsgIdCount); i++ {
+		if err := binary.Read(r, bo, &msgIdLen[i]); err != nil {
+			return nil, fmt.Errorf("gettext: %v", err)
+		}
+		if err := binary.Read(r, bo, &msgIdStart[i]); err != nil {
+			return nil, fmt.Errorf("gettext: %v", err)
+		}
+	}
+
+	msgStrStart := make([]int32, header.MsgIdCount)
+	msgStrLen := make([]int32, header.MsgIdCount)
+	if _, err := r.Seek(int64(header.MsgStrOffset), 0); err != nil {
+		return nil, fmt.Errorf("gettext: %v", err)
+	}
+	for i := 0; i < int(header.MsgIdCount); i++ {
+		if err := binary.Read(r, bo, &msgStrLen[i]); err != nil {
+			return nil, fmt.Errorf("gettext: %v", err)
+		}
+		if err := binary.Read(r, bo, &msgStrStart[i]); err != nil {
+			return nil, fmt.Errorf("gettext: %v", err)
+		}
+	}
+
+	file := &File{
+		MagicNumber:  magicNumber,
+		MajorVersion: header.MajorVersion,
+		MinorVersion: header.MinorVersion,
+		MsgIdCount:   header.MsgIdCount,
+		MsgIdOffset:  header.MsgIdOffset,
+		MsgStrOffset: header.MsgStrOffset,
+		HashSize:     header.HashSize,
+		HashOffset:   header.HashOffset,
+	}
+	for i := 0; i < int(header.MsgIdCount); i++ {
+		if _, err := r.Seek(int64(msgIdStart[i]), 0); err != nil {
+			return nil, fmt.Errorf("gettext: %v", err)
+		}
+		msgIdData := make([]byte, msgIdLen[i])
+		if _, err := r.Read(msgIdData); err != nil {
+			return nil, fmt.Errorf("gettext: %v", err)
+		}
+
+		if _, err := r.Seek(int64(msgStrStart[i]), 0); err != nil {
+			return nil, fmt.Errorf("gettext: %v", err)
+		}
+		msgStrData := make([]byte, msgStrLen[i])
+		if _, err := r.Read(msgStrData); err != nil {
+			return nil, fmt.Errorf("gettext: %v", err)
+		}
+
+		if len(msgIdData) == 0 {
+			var msg = Message{
+				MsgId:  string(msgIdData),
+				MsgStr: string(msgStrData),
+			}
+			file.MimeHeader.fromMessage(&msg)
+		} else {
+			var msg = Message{
+				MsgId:  string(msgIdData),
+				MsgStr: string(msgStrData),
+			}
+			// Is this a context message?
+			if idx := strings.Index(msg.MsgId, EotSeparator); idx != -1 {
+				msg.MsgContext, msg.MsgId = msg.MsgId[:idx], msg.MsgId[idx+1:]
+			}
+			// Is this a plural message?
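+			// (Stored msgid layout: "msgctxt\x04msgid\x00msgid_plural";
+			// see EotSeparator and NulSeparator above.)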
+			if idx := strings.Index(msg.MsgId, NulSeparator); idx != -1 {
+				msg.MsgId, msg.MsgIdPlural = msg.MsgId[:idx], msg.MsgId[idx+1:]
+				msg.MsgStrPlural = strings.Split(msg.MsgStr, NulSeparator)
+				msg.MsgStr = ""
+			}
+			file.Messages = append(file.Messages, msg)
+		}
+	}
+
+	return file, nil
+}
+
+// Save saves a mo file.
+func (f *File) Save(name string) error {
+	return ioutil.WriteFile(name, f.Data(), 0666)
+}
+
+// Data returns the mo file format data.
+func (f *File) Data() []byte {
+	return encodeFile(f)
+}
+
+// String returns the po format file string.
+func (f *File) String() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "# version: %d.%d\n", f.MajorVersion, f.MinorVersion)
+	fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String())
+	fmt.Fprintf(&buf, "\n")
+
+	for _, v := range f.Messages {
+		fmt.Fprintf(&buf, `msgid "%v"`+"\n", v.MsgId)
+		fmt.Fprintf(&buf, `msgstr "%s"`+"\n", v.MsgStr)
+		fmt.Fprintf(&buf, "\n")
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/chai2010/gettext-go/mo/header.go b/vendor/github.com/chai2010/gettext-go/mo/header.go
new file mode 100644
index 000000000..d8c7a5e3a
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/mo/header.go
@@ -0,0 +1,109 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// Header is the MO file header entry. The initial comments "SOME DESCRIPTIVE
+// TITLE", "YEAR" and "FIRST AUTHOR , YEAR" ought to be replaced by sensible
+// information.
+//
+// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry
+type Header struct {
+	ProjectIdVersion        string // Project-Id-Version: PACKAGE VERSION
+	ReportMsgidBugsTo       string // Report-Msgid-Bugs-To: FIRST AUTHOR
+	POTCreationDate         string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE
+	PORevisionDate          string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
+	LastTranslator          string // Last-Translator: FIRST AUTHOR
+	LanguageTeam            string // Language-Team: golang-china
+	Language                string // Language: zh_CN
+	MimeVersion             string // MIME-Version: 1.0
+	ContentType             string // Content-Type: text/plain; charset=UTF-8
+	ContentTransferEncoding string // Content-Transfer-Encoding: 8bit
+	PluralForms             string // Plural-Forms: nplurals=2; plural=n == 1 ? 
0 : 1; + XGenerator string // X-Generator: Poedit 1.5.5 + UnknowFields map[string]string +} + +func (p *Header) fromMessage(msg *Message) { + if msg.MsgId != "" || msg.MsgStr == "" { + return + } + lines := strings.Split(msg.MsgStr, "\n") + for i := 0; i < len(lines); i++ { + idx := strings.Index(lines[i], ":") + if idx < 0 { + continue + } + key := strings.TrimSpace(lines[i][:idx]) + val := strings.TrimSpace(lines[i][idx+1:]) + switch strings.ToUpper(key) { + case strings.ToUpper("Project-Id-Version"): + p.ProjectIdVersion = val + case strings.ToUpper("Report-Msgid-Bugs-To"): + p.ReportMsgidBugsTo = val + case strings.ToUpper("POT-Creation-Date"): + p.POTCreationDate = val + case strings.ToUpper("PO-Revision-Date"): + p.PORevisionDate = val + case strings.ToUpper("Last-Translator"): + p.LastTranslator = val + case strings.ToUpper("Language-Team"): + p.LanguageTeam = val + case strings.ToUpper("Language"): + p.Language = val + case strings.ToUpper("MIME-Version"): + p.MimeVersion = val + case strings.ToUpper("Content-Type"): + p.ContentType = val + case strings.ToUpper("Content-Transfer-Encoding"): + p.ContentTransferEncoding = val + case strings.ToUpper("Plural-Forms"): + p.PluralForms = val + case strings.ToUpper("X-Generator"): + p.XGenerator = val + default: + if p.UnknowFields == nil { + p.UnknowFields = make(map[string]string) + } + p.UnknowFields[key] = val + } + } +} + +func (p *Header) toMessage() Message { + return Message{ + MsgStr: p.String(), + } +} + +// String returns the po format header string. +func (p Header) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, `msgid ""`+"\n") + fmt.Fprintf(&buf, `msgstr ""`+"\n") + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language) + if p.MimeVersion != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion) + } + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding) + if p.XGenerator != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator) + } + for k, v := range p.UnknowFields { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/mo/message.go b/vendor/github.com/chai2010/gettext-go/mo/message.go new file mode 100644 index 000000000..b67bde0b7 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/message.go @@ -0,0 +1,52 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "fmt" +) + +// A MO file is made up of many entries, +// each entry holding the relation between an original untranslated string +// and its corresponding translation. 
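+// Context and plural forms are packed into the stored msgid string using the
+// EOT ("\x04") and NUL ("\x00") separators defined in file.go.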
+//
+// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
+type Message struct {
+	MsgContext   string   // msgctxt context
+	MsgId        string   // msgid untranslated-string
+	MsgIdPlural  string   // msgid_plural untranslated-string-plural
+	MsgStr       string   // msgstr translated-string
+	MsgStrPlural []string // msgstr[0] translated-string-case-0
+}
+
+// String returns the po format entry string.
+func (p Message) String() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId))
+	if p.MsgIdPlural != "" {
+		fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural))
+	}
+	if p.MsgStr != "" {
+		fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr))
+	}
+	for i := 0; i < len(p.MsgStrPlural); i++ {
+		fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i]))
+	}
+	return buf.String()
+}
+
+func (p *Message) less(q *Message) bool {
+	if a, b := p.MsgContext, q.MsgContext; a != b {
+		return a < b
+	}
+	if a, b := p.MsgId, q.MsgId; a != b {
+		return a < b
+	}
+	if a, b := p.MsgIdPlural, q.MsgIdPlural; a != b {
+		return a < b
+	}
+	return false
+}
diff --git a/vendor/github.com/chai2010/gettext-go/mo/util.go b/vendor/github.com/chai2010/gettext-go/mo/util.go
new file mode 100644
index 000000000..380451105
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/mo/util.go
@@ -0,0 +1,110 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"bytes"
+	"strings"
+)
+
+func decodePoString(text string) string {
+	lines := strings.Split(text, "\n")
+	for i := 0; i < len(lines); i++ {
+		left := strings.Index(lines[i], `"`)
+		right := strings.LastIndex(lines[i], `"`)
+		if left < 0 || right < 0 || left == right {
+			lines[i] = ""
+			continue
+		}
+		line := lines[i][left+1 : right]
+		data := make([]byte, 0, len(line))
+		for i := 0; i < len(line); i++ {
+			if line[i] != '\\' {
+				data = append(data, line[i])
+				continue
+			}
+			if i+1 >= len(line) {
+				break
+			}
+			switch line[i+1] {
+			case 'n': // \\n -> \n
+				data = append(data, '\n')
+				i++
+			case 't': // \\t -> \t
+				data = append(data, '\t')
+				i++
+			case '\\': // \\\\ -> \\
+				data = append(data, '\\')
+				i++
+			}
+		}
+		lines[i] = string(data)
+	}
+	return strings.Join(lines, "")
+}
+
+func encodePoString(text string) string {
+	var buf bytes.Buffer
+	lines := strings.Split(text, "\n")
+	for i := 0; i < len(lines); i++ {
+		if lines[i] == "" {
+			if i != len(lines)-1 {
+				buf.WriteString(`"\n"` + "\n")
+			}
+			continue
+		}
+		buf.WriteRune('"')
+		for _, r := range lines[i] {
+			switch r {
+			case '\\':
+				buf.WriteString(`\\`)
+			case '"':
+				buf.WriteString(`\"`)
+			case '\n':
+				buf.WriteString(`\n`)
+			case '\t':
+				buf.WriteString(`\t`)
+			default:
+				buf.WriteRune(r)
+			}
+		}
+		buf.WriteString(`\n"` + "\n")
+	}
+	return buf.String()
+}
+
+func encodeCommentPoString(text string) string {
+	var buf bytes.Buffer
+	lines := strings.Split(text, "\n")
+	if len(lines) > 1 {
+		buf.WriteString(`""` + "\n")
+	}
+	for i := 0; i < len(lines); i++ {
+		if len(lines) > 0 {
+			buf.WriteString("#| ")
+		}
+		buf.WriteRune('"')
+		for _, r := range lines[i] {
+			switch r {
+			case '\\':
+				buf.WriteString(`\\`)
+			case '"':
+				buf.WriteString(`\"`)
+			case '\n':
+				buf.WriteString(`\n`)
+			case '\t':
+				buf.WriteString(`\t`)
+			default:
+				buf.WriteRune(r)
+			}
+		}
+		if i < len(lines)-1 {
+			buf.WriteString(`\n"` + "\n")
+		} else {
+			buf.WriteString(`"`)
+		}
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/chai2010/gettext-go/plural/doc.go b/vendor/github.com/chai2010/gettext-go/plural/doc.go
new file mode 100644
index 000000000..31cb8fae9
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/plural/doc.go
@@ -0,0 +1,36 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package plural provides standard plural formulas.
+
+Examples:
+	import (
+		"github.com/chai2010/gettext-go/plural"
+	)
+
+	func main() {
+		enFormula := plural.Formula("en_US")
+		xxFormula := plural.Formula("zh_CN")
+
+		fmt.Printf("%s: %d\n", "en", enFormula(0))
+		fmt.Printf("%s: %d\n", "en", enFormula(1))
+		fmt.Printf("%s: %d\n", "en", enFormula(2))
+		fmt.Printf("%s: %d\n", "??", xxFormula(0))
+		fmt.Printf("%s: %d\n", "??", xxFormula(1))
+		fmt.Printf("%s: %d\n", "??", xxFormula(2))
+		fmt.Printf("%s: %d\n", "??", xxFormula(9))
+		// Output:
+		// en: 1
+		// en: 0
+		// en: 1
+		// ??: 0
+		// ??: 0
+		// ??: 0
+		// ??: 0
+	}
+
+See http://www.gnu.org/software/gettext/manual/html_node/Plural-forms.html
*/
+package plural
diff --git a/vendor/github.com/chai2010/gettext-go/plural/formula.go b/vendor/github.com/chai2010/gettext-go/plural/formula.go
new file mode 100644
index 000000000..679a1cd50
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/plural/formula.go
@@ -0,0 +1,181 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package plural
+
+import (
+	"strings"
+)
+
+// Formula provides the language's standard plural formula.
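+// For example, Formula("ru")(21) returns 0, because under the Russian rule
+// 21%10 == 1 and 21%100 != 11; languages missing from FormsTable fall back
+// to the "??" rule.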
+func Formula(lang string) func(n int) int { + if idx := index(lang); idx != -1 { + return formulaTable[fmtForms(FormsTable[idx].Value)] + } + if idx := index("??"); idx != -1 { + return formulaTable[fmtForms(FormsTable[idx].Value)] + } + return func(n int) int { + return n + } +} + +func index(lang string) int { + for i := 0; i < len(FormsTable); i++ { + if strings.HasPrefix(lang, FormsTable[i].Lang) { + return i + } + } + return -1 +} + +func fmtForms(forms string) string { + forms = strings.TrimSpace(forms) + forms = strings.Replace(forms, " ", "", -1) + return forms +} + +var formulaTable = map[string]func(n int) int{ + fmtForms("nplurals=n; plural=n-1;"): func(n int) int { + if n > 0 { + return n - 1 + } + return 0 + }, + fmtForms("nplurals=1; plural=0;"): func(n int) int { + return 0 + }, + fmtForms("nplurals=2; plural=(n != 1);"): func(n int) int { + if n <= 1 { + return 0 + } + return 1 + }, + fmtForms("nplurals=2; plural=(n > 1);"): func(n int) int { + if n <= 1 { + return 0 + } + return 1 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n != 0 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n == 2 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n == 0 || (n%100 > 0 && n%100 < 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n >= 2 && n <= 4 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 
1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n >= 2 && n <= 4 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n == 1 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"): func(n int) int { + if n%100 == 1 { + return 0 + } + if n%100 == 2 { + return 1 + } + if n%100 == 3 || n%100 == 4 { + return 2 + } + return 3 + }, +} diff --git a/vendor/github.com/chai2010/gettext-go/plural/table.go b/vendor/github.com/chai2010/gettext-go/plural/table.go new file mode 100644 index 000000000..cdc50d211 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/plural/table.go @@ -0,0 +1,55 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +// FormsTable are standard hard-coded plural rules. +// The application developers and the translators need to understand them. +// +// See GNU's gettext library source code: gettext/gettext-tools/src/plural-table.c +var FormsTable = []struct { + Lang string + Language string + Value string +}{ + {"??", "Unknown", "nplurals=1; plural=0;"}, + {"ja", "Japanese", "nplurals=1; plural=0;"}, + {"vi", "Vietnamese", "nplurals=1; plural=0;"}, + {"ko", "Korean", "nplurals=1; plural=0;"}, + {"en", "English", "nplurals=2; plural=(n != 1);"}, + {"de", "German", "nplurals=2; plural=(n != 1);"}, + {"nl", "Dutch", "nplurals=2; plural=(n != 1);"}, + {"sv", "Swedish", "nplurals=2; plural=(n != 1);"}, + {"da", "Danish", "nplurals=2; plural=(n != 1);"}, + {"no", "Norwegian", "nplurals=2; plural=(n != 1);"}, + {"nb", "Norwegian Bokmal", "nplurals=2; plural=(n != 1);"}, + {"nn", "Norwegian Nynorsk", "nplurals=2; plural=(n != 1);"}, + {"fo", "Faroese", "nplurals=2; plural=(n != 1);"}, + {"es", "Spanish", "nplurals=2; plural=(n != 1);"}, + {"pt", "Portuguese", "nplurals=2; plural=(n != 1);"}, + {"it", "Italian", "nplurals=2; plural=(n != 1);"}, + {"bg", "Bulgarian", "nplurals=2; plural=(n != 1);"}, + {"el", "Greek", "nplurals=2; plural=(n != 1);"}, + {"fi", "Finnish", "nplurals=2; plural=(n != 1);"}, + {"et", "Estonian", "nplurals=2; plural=(n != 1);"}, + {"he", "Hebrew", "nplurals=2; plural=(n != 1);"}, + {"eo", "Esperanto", "nplurals=2; plural=(n != 1);"}, + {"hu", "Hungarian", "nplurals=2; plural=(n != 1);"}, + {"tr", "Turkish", "nplurals=2; plural=(n != 1);"}, + {"pt_BR", "Brazilian", "nplurals=2; plural=(n > 1);"}, + {"fr", "French", "nplurals=2; plural=(n > 1);"}, + {"lv", "Latvian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"}, + {"ga", "Irish", "nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"}, + {"ro", "Romanian", "nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"}, + {"lt", "Lithuanian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"ru", "Russian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"uk", "Ukrainian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"be", "Belarusian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"sr", "Serbian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 
0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"hr", "Croatian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"cs", "Czech", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"}, + {"sk", "Slovak", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"}, + {"pl", "Polish", "nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"sl", "Slovenian", "nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"}, +} diff --git a/vendor/github.com/chai2010/gettext-go/po/comment.go b/vendor/github.com/chai2010/gettext-go/po/comment.go new file mode 100644 index 000000000..d4abe7c10 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/comment.go @@ -0,0 +1,270 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// Comment represents every message's comments. +type Comment struct { + StartLine int // comment start line + TranslatorComment string // # translator-comments // TrimSpace + ExtractedComment string // #. extracted-comments + ReferenceFile []string // #: src/msgcmp.c:338 src/po-lex.c:699 + ReferenceLine []int // #: src/msgcmp.c:338 src/po-lex.c:699 + Flags []string // #, fuzzy,c-format,range:0..10 + PrevMsgContext string // #| msgctxt previous-context + PrevMsgId string // #| msgid previous-untranslated-string +} + +func (p *Comment) less(q *Comment) bool { + if p.StartLine != 0 || q.StartLine != 0 { + return p.StartLine < q.StartLine + } + if a, b := len(p.ReferenceFile), len(q.ReferenceFile); a != b { + return a < b + } + for i := 0; i < len(p.ReferenceFile); i++ { + if a, b := p.ReferenceFile[i], q.ReferenceFile[i]; a != b { + return a < b + } + if a, b := p.ReferenceLine[i], q.ReferenceLine[i]; a != b { + return a < b + } + } + return false +} + +func (p *Comment) readPoComment(r *lineReader) (err error) { + *p = Comment{} + if err = r.skipBlankLine(); err != nil { + return err + } + defer func(oldPos int) { + newPos := r.currentPos() + if newPos != oldPos && err == io.EOF { + err = nil + } + }(r.currentPos()) + + p.StartLine = r.currentPos() + 1 + for { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if len(s) == 0 || s[0] != '#' { + return + } + + if err = p.readTranslatorComment(r); err != nil { + return + } + if err = p.readExtractedComment(r); err != nil { + return + } + if err = p.readReferenceComment(r); err != nil { + return + } + if err = p.readFlagsComment(r); err != nil { + return + } + if err = p.readPrevMsgContext(r); err != nil { + return + } + if err = p.readPrevMsgId(r); err != nil { + return + } + } +} + +func (p *Comment) readTranslatorComment(r *lineReader) (err error) { + const prefix = "# " // .,:| + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < 1 || s[0] != '#' { + r.unreadLine() + return nil + } + if len(s) >= 2 { + switch s[1] { + case '.', ',', ':', '|': + r.unreadLine() + return nil + } + } + if p.TranslatorComment != "" { + p.TranslatorComment += "\n" + } + p.TranslatorComment += strings.TrimSpace(s[1:]) + } +} + +func (p *Comment) readExtractedComment(r *lineReader) (err error) { + const prefix = "#." 
+ for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + if p.ExtractedComment != "" { + p.ExtractedComment += "\n" + } + p.ExtractedComment += strings.TrimSpace(s[len(prefix):]) + } +} + +func (p *Comment) readReferenceComment(r *lineReader) (err error) { + const prefix = "#:" + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + ss := strings.Split(strings.TrimSpace(s[len(prefix):]), " ") + for i := 0; i < len(ss); i++ { + idx := strings.Index(ss[i], ":") + if idx <= 0 { + continue + } + name := strings.TrimSpace(ss[i][:idx]) + line, _ := strconv.Atoi(strings.TrimSpace(ss[i][idx+1:])) + p.ReferenceFile = append(p.ReferenceFile, name) + p.ReferenceLine = append(p.ReferenceLine, line) + } + } +} + +func (p *Comment) readFlagsComment(r *lineReader) (err error) { + const prefix = "#," + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + ss := strings.Split(strings.TrimSpace(s[len(prefix):]), ",") + for i := 0; i < len(ss); i++ { + p.Flags = append(p.Flags, strings.TrimSpace(ss[i])) + } + } +} + +func (p *Comment) readPrevMsgContext(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !rePrevMsgContextComments.MatchString(s) { + return + } + p.PrevMsgContext, err = p.readString(r) + return +} + +func (p *Comment) readPrevMsgId(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !rePrevMsgIdComments.MatchString(s) { + return + } + p.PrevMsgId, err = p.readString(r) + return +} + +func (p *Comment) readString(r *lineReader) (msg string, err error) { + var s string + if s, _, err = r.readLine(); err != nil { + return + } + msg += decodePoString(s) + for { + if s, _, err = r.readLine(); err != nil { + return + } + if !reStringLineComments.MatchString(s) { + r.unreadLine() + break + } + msg += decodePoString(s) + } + return +} + +// GetFuzzy gets the fuzzy flag. +func (p *Comment) GetFuzzy() bool { + for _, s := range p.Flags { + if s == "fuzzy" { + return true + } + } + return false +} + +// SetFuzzy sets the fuzzy flag. +func (p *Comment) SetFuzzy(fuzzy bool) { + // +} + +// String returns the po format comment string. +func (p Comment) String() string { + var buf bytes.Buffer + if p.TranslatorComment != "" { + ss := strings.Split(p.TranslatorComment, "\n") + for i := 0; i < len(ss); i++ { + fmt.Fprintf(&buf, "# %s\n", ss[i]) + } + } + if p.ExtractedComment != "" { + ss := strings.Split(p.ExtractedComment, "\n") + for i := 0; i < len(ss); i++ { + fmt.Fprintf(&buf, "#. 
%s\n", ss[i])
+		}
+	}
+	if a, b := len(p.ReferenceFile), len(p.ReferenceLine); a != 0 && a == b {
+		fmt.Fprintf(&buf, "#:")
+		for i := 0; i < len(p.ReferenceFile); i++ {
+			fmt.Fprintf(&buf, " %s:%d", p.ReferenceFile[i], p.ReferenceLine[i])
+		}
+		fmt.Fprintf(&buf, "\n")
+	}
+	if len(p.Flags) != 0 {
+		fmt.Fprintf(&buf, "#, %s", p.Flags[0])
+		for i := 1; i < len(p.Flags); i++ {
+			fmt.Fprintf(&buf, ", %s", p.Flags[i])
+		}
+		fmt.Fprintf(&buf, "\n")
+	}
+	if p.PrevMsgContext != "" {
+		s := encodeCommentPoString(p.PrevMsgContext)
+		fmt.Fprintf(&buf, "#| msgctxt %s\n", s)
+	}
+	if p.PrevMsgId != "" {
+		s := encodeCommentPoString(p.PrevMsgId)
+		fmt.Fprintf(&buf, "#| msgid %s\n", s)
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/chai2010/gettext-go/po/doc.go b/vendor/github.com/chai2010/gettext-go/po/doc.go
new file mode 100644
index 000000000..6cfa2a24b
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/po/doc.go
@@ -0,0 +1,24 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package po provides support for reading and writing GNU PO files.
+
+Examples:
+	import (
+		"github.com/chai2010/gettext-go/po"
+	)
+
+	func main() {
+		poFile, err := po.LoadFile("test.po")
+		if err != nil {
+			log.Fatal(err)
+		}
+		fmt.Printf("%v", poFile)
+	}
+
+The GNU PO file specification is at
+http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html.
+*/
+package po
diff --git a/vendor/github.com/chai2010/gettext-go/po/file.go b/vendor/github.com/chai2010/gettext-go/po/file.go
new file mode 100644
index 000000000..4a122eeb8
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/po/file.go
@@ -0,0 +1,81 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package po
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sort"
+)
+
+// File represents a PO file.
+//
+// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
+type File struct {
+	MimeHeader Header
+	Messages   []Message
+}
+
+// Load loads po file format data.
+func Load(data []byte) (*File, error) {
+	return loadData(data)
+}
+
+// LoadFile loads a named po file.
+func LoadFile(path string) (*File, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return loadData(data)
+}
+
+func loadData(data []byte) (*File, error) {
+	r := newLineReader(string(data))
+	var file File
+	for {
+		var msg Message
+		if err := msg.readPoEntry(r); err != nil {
+			if err == io.EOF {
+				return &file, nil
+			}
+			return nil, err
+		}
+		if msg.MsgId == "" {
+			file.MimeHeader.parseHeader(&msg)
+			continue
+		}
+		file.Messages = append(file.Messages, msg)
+	}
+}
+
+// Save saves a po file.
+func (f *File) Save(name string) error {
+	return ioutil.WriteFile(name, []byte(f.String()), 0666)
+}
+
+// Data returns the po file format data.
+func (f *File) Data() []byte {
+	// sort the messages by their ReferenceFile/ReferenceLine fields
+	var messages []Message
+	messages = append(messages, f.Messages...)
+	sort.Slice(messages, func(i, j int) bool {
+		return messages[i].less(&messages[j])
+	})
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String())
+	for i := 0; i < len(messages); i++ {
+		fmt.Fprintf(&buf, "%s\n", messages[i].String())
+	}
+	return buf.Bytes()
+}
+
+// String returns the po format file string.
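+// The output contains the MIME header followed by the message entries,
+// sorted by their source-reference comments (see Data above).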
+func (f *File) String() string { + return string(f.Data()) +} diff --git a/vendor/github.com/chai2010/gettext-go/po/header.go b/vendor/github.com/chai2010/gettext-go/po/header.go new file mode 100644 index 000000000..a9b5b6671 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/header.go @@ -0,0 +1,106 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "strings" +) + +// Header is the initial comments "SOME DESCRIPTIVE TITLE", "YEAR" +// and "FIRST AUTHOR , YEAR" ought to be replaced by sensible information. +// +// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry +type Header struct { + Comment // Header Comments + ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION + ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR + POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE + PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE + LastTranslator string // Last-Translator: FIRST AUTHOR + LanguageTeam string // Language-Team: golang-china + Language string // Language: zh_CN + MimeVersion string // MIME-Version: 1.0 + ContentType string // Content-Type: text/plain; charset=UTF-8 + ContentTransferEncoding string // Content-Transfer-Encoding: 8bit + PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1; + XGenerator string // X-Generator: Poedit 1.5.5 + UnknowFields map[string]string +} + +func (p *Header) parseHeader(msg *Message) { + if msg.MsgId != "" || msg.MsgStr == "" { + return + } + lines := strings.Split(msg.MsgStr, "\n") + for i := 0; i < len(lines); i++ { + idx := strings.Index(lines[i], ":") + if idx < 0 { + continue + } + key := strings.TrimSpace(lines[i][:idx]) + val := strings.TrimSpace(lines[i][idx+1:]) + switch strings.ToUpper(key) { + case strings.ToUpper("Project-Id-Version"): + p.ProjectIdVersion = val + case strings.ToUpper("Report-Msgid-Bugs-To"): + p.ReportMsgidBugsTo = val + case strings.ToUpper("POT-Creation-Date"): + p.POTCreationDate = val + case strings.ToUpper("PO-Revision-Date"): + p.PORevisionDate = val + case strings.ToUpper("Last-Translator"): + p.LastTranslator = val + case strings.ToUpper("Language-Team"): + p.LanguageTeam = val + case strings.ToUpper("Language"): + p.Language = val + case strings.ToUpper("MIME-Version"): + p.MimeVersion = val + case strings.ToUpper("Content-Type"): + p.ContentType = val + case strings.ToUpper("Content-Transfer-Encoding"): + p.ContentTransferEncoding = val + case strings.ToUpper("Plural-Forms"): + p.PluralForms = val + case strings.ToUpper("X-Generator"): + p.XGenerator = val + default: + if p.UnknowFields == nil { + p.UnknowFields = make(map[string]string) + } + p.UnknowFields[key] = val + } + } + p.Comment = msg.Comment +} + +// String returns the po format header string. 
+func (p Header) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s", p.Comment.String()) + fmt.Fprintf(&buf, `msgid ""`+"\n") + fmt.Fprintf(&buf, `msgstr ""`+"\n") + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language) + if p.MimeVersion != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion) + } + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding) + if p.XGenerator != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator) + } + for k, v := range p.UnknowFields { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/po/line_reader.go b/vendor/github.com/chai2010/gettext-go/po/line_reader.go new file mode 100644 index 000000000..8597273a2 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/line_reader.go @@ -0,0 +1,62 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "io" + "strings" +) + +type lineReader struct { + lines []string + pos int +} + +func newLineReader(data string) *lineReader { + data = strings.Replace(data, "\r", "", -1) + lines := strings.Split(data, "\n") + return &lineReader{lines: lines} +} + +func (r *lineReader) skipBlankLine() error { + for ; r.pos < len(r.lines); r.pos++ { + if strings.TrimSpace(r.lines[r.pos]) != "" { + break + } + } + if r.pos >= len(r.lines) { + return io.EOF + } + return nil +} + +func (r *lineReader) currentPos() int { + return r.pos +} + +func (r *lineReader) currentLine() (s string, pos int, err error) { + if r.pos >= len(r.lines) { + err = io.EOF + return + } + s, pos = r.lines[r.pos], r.pos + return +} + +func (r *lineReader) readLine() (s string, pos int, err error) { + if r.pos >= len(r.lines) { + err = io.EOF + return + } + s, pos = r.lines[r.pos], r.pos + r.pos++ + return +} + +func (r *lineReader) unreadLine() { + if r.pos >= 0 { + r.pos-- + } +} diff --git a/vendor/github.com/chai2010/gettext-go/po/message.go b/vendor/github.com/chai2010/gettext-go/po/message.go new file mode 100644 index 000000000..39936dcc7 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/message.go @@ -0,0 +1,193 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// A PO file is made up of many entries, +// each entry holding the relation between an original untranslated string +// and its corresponding translation. 
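+// For example, a typical entry looks like (illustrative sketch):
+//
+//	#: src/main.c:42
+//	#, c-format
+//	msgid "%d file"
+//	msgid_plural "%d files"
+//	msgstr[0] "%d file"
+//	msgstr[1] "%d files"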
+//
+// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
+type Message struct {
+	Comment               // Comments
+	MsgContext   string   // msgctxt context
+	MsgId        string   // msgid untranslated-string
+	MsgIdPlural  string   // msgid_plural untranslated-string-plural
+	MsgStr       string   // msgstr translated-string
+	MsgStrPlural []string // msgstr[0] translated-string-case-0
+}
+
+func (p *Message) less(q *Message) bool {
+	if p.Comment.less(&q.Comment) {
+		return true
+	}
+	if a, b := p.MsgContext, q.MsgContext; a != b {
+		return a < b
+	}
+	if a, b := p.MsgId, q.MsgId; a != b {
+		return a < b
+	}
+	if a, b := p.MsgIdPlural, q.MsgIdPlural; a != b {
+		return a < b
+	}
+	return false
+}
+
+func (p *Message) readPoEntry(r *lineReader) (err error) {
+	*p = Message{}
+	if err = r.skipBlankLine(); err != nil {
+		return
+	}
+	defer func(oldPos int) {
+		newPos := r.currentPos()
+		if newPos != oldPos && err == io.EOF {
+			err = nil
+		}
+	}(r.currentPos())
+
+	if err = p.Comment.readPoComment(r); err != nil {
+		return
+	}
+	for {
+		var s string
+		if s, _, err = r.currentLine(); err != nil {
+			return
+		}
+
+		if p.isInvalidLine(s) {
+			err = fmt.Errorf("gettext: line %d, %v", r.currentPos(), "invalid line")
+			return
+		}
+		if reComment.MatchString(s) || reBlankLine.MatchString(s) {
+			return
+		}
+
+		if err = p.readMsgContext(r); err != nil {
+			return
+		}
+		if err = p.readMsgId(r); err != nil {
+			return
+		}
+		if err = p.readMsgIdPlural(r); err != nil {
+			return
+		}
+		if err = p.readMsgStrOrPlural(r); err != nil {
+			return
+		}
+	}
+}
+
+func (p *Message) readMsgContext(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !reMsgContext.MatchString(s) {
+		return
+	}
+	p.MsgContext, err = p.readString(r)
+	return
+}
+
+func (p *Message) readMsgId(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !reMsgId.MatchString(s) {
+		return
+	}
+	p.MsgId, err = p.readString(r)
+	return
+}
+
+func (p *Message) readMsgIdPlural(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !reMsgIdPlural.MatchString(s) {
+		return
+	}
+	p.MsgIdPlural, err = p.readString(r)
+	return
+}
+
+func (p *Message) readMsgStrOrPlural(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !reMsgStr.MatchString(s) && !reMsgStrPlural.MatchString(s) {
+		return
+	}
+	if reMsgStrPlural.MatchString(s) {
+		left, right := strings.Index(s, `[`), strings.LastIndex(s, `]`)
+		idx, _ := strconv.Atoi(s[left+1 : right])
+		s, err = p.readString(r)
+		if n := len(p.MsgStrPlural); (idx + 1) > n {
+			p.MsgStrPlural = append(p.MsgStrPlural, make([]string, (idx+1)-n)...)
+		}
+		p.MsgStrPlural[idx] = s
+	} else {
+		p.MsgStr, err = p.readString(r)
+	}
+	return
+}
+
+func (p *Message) readString(r *lineReader) (msg string, err error) {
+	var s string
+	if s, _, err = r.readLine(); err != nil {
+		return
+	}
+	msg += decodePoString(s)
+	for {
+		if s, _, err = r.readLine(); err != nil {
+			return
+		}
+		if !reStringLine.MatchString(s) {
+			r.unreadLine()
+			break
+		}
+		msg += decodePoString(s)
+	}
+	return
+}
+
+// String returns the po format entry string.
+func (p Message) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s", p.Comment.String()) + if p.MsgContext != "" { + fmt.Fprintf(&buf, "msgctxt %s", encodePoString(p.MsgContext)) + } + fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId)) + if p.MsgIdPlural != "" { + fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural)) + } + if len(p.MsgStrPlural) == 0 { + if p.MsgStr != "" { + fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr)) + } else { + fmt.Fprintf(&buf, "msgstr %s", `""`+"\n") + } + } else { + for i := 0; i < len(p.MsgStrPlural); i++ { + if p.MsgStrPlural[i] != "" { + fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i])) + } else { + fmt.Fprintf(&buf, "msgstr[%d] %s", i, `""`+"\n") + } + } + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/po/re.go b/vendor/github.com/chai2010/gettext-go/po/re.go new file mode 100644 index 000000000..67c240a57 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/re.go @@ -0,0 +1,58 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "regexp" +) + +var ( + reComment = regexp.MustCompile(`^#`) // # + reExtractedComments = regexp.MustCompile(`^#\.`) // #. + reReferenceComments = regexp.MustCompile(`^#:`) // #: + reFlagsComments = regexp.MustCompile(`^#,`) // #, fuzzy,c-format + rePrevMsgContextComments = regexp.MustCompile(`^#\|\s+msgctxt`) // #| msgctxt + rePrevMsgIdComments = regexp.MustCompile(`^#\|\s+msgid`) // #| msgid + reStringLineComments = regexp.MustCompile(`^#\|\s+".*"\s*$`) // #| "message" + + reMsgContext = regexp.MustCompile(`^msgctxt\s+".*"\s*$`) // msgctxt + reMsgId = regexp.MustCompile(`^msgid\s+".*"\s*$`) // msgid + reMsgIdPlural = regexp.MustCompile(`^msgid_plural\s+".*"\s*$`) // msgid_plural + reMsgStr = regexp.MustCompile(`^msgstr\s*".*"\s*$`) // msgstr + reMsgStrPlural = regexp.MustCompile(`^msgstr\s*(\[\d+\])\s*".*"\s*$`) // msgstr[0] + reStringLine = regexp.MustCompile(`^\s*".*"\s*$`) // "message" + reBlankLine = regexp.MustCompile(`^\s*$`) // +) + +func (p *Message) isInvalidLine(s string) bool { + if reComment.MatchString(s) { + return false + } + if reBlankLine.MatchString(s) { + return false + } + + if reMsgContext.MatchString(s) { + return false + } + if reMsgId.MatchString(s) { + return false + } + if reMsgIdPlural.MatchString(s) { + return false + } + if reMsgStr.MatchString(s) { + return false + } + if reMsgStrPlural.MatchString(s) { + return false + } + + if reStringLine.MatchString(s) { + return false + } + + return true +} diff --git a/vendor/github.com/chai2010/gettext-go/po/util.go b/vendor/github.com/chai2010/gettext-go/po/util.go new file mode 100644 index 000000000..d8b3b0e25 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/util.go @@ -0,0 +1,114 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
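+//
+// This file implements the PO string quoting rules used by the parser and
+// printer: decodePoString strips each line's surrounding quotes and expands
+// the \n, \t and \\ escapes; encodePoString and encodeCommentPoString are
+// the matching printers.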
+
+package po
+
+import (
+	"bytes"
+	"strings"
+)
+
+func decodePoString(text string) string {
+	lines := strings.Split(text, "\n")
+	for i := 0; i < len(lines); i++ {
+		left := strings.Index(lines[i], `"`)
+		right := strings.LastIndex(lines[i], `"`)
+		if left < 0 || right < 0 || left == right {
+			lines[i] = ""
+			continue
+		}
+		line := lines[i][left+1 : right]
+		data := make([]byte, 0, len(line))
+		for i := 0; i < len(line); i++ {
+			if line[i] != '\\' {
+				data = append(data, line[i])
+				continue
+			}
+			if i+1 >= len(line) {
+				break
+			}
+			switch line[i+1] {
+			case 'n': // \\n -> \n
+				data = append(data, '\n')
+				i++
+			case 't': // \\t -> \t
+				data = append(data, '\t')
+				i++
+			case '\\': // \\\\ -> \\
+				data = append(data, '\\')
+				i++
+			}
+		}
+		lines[i] = string(data)
+	}
+	return strings.Join(lines, "")
+}
+
+func encodePoString(text string) string {
+	var buf bytes.Buffer
+	lines := strings.Split(text, "\n")
+	for i := 0; i < len(lines); i++ {
+		if lines[i] == "" {
+			if i != len(lines)-1 {
+				buf.WriteString(`"\n"` + "\n")
+			}
+			continue
+		}
+		buf.WriteRune('"')
+		for _, r := range lines[i] {
+			switch r {
+			case '\\':
+				buf.WriteString(`\\`)
+			case '"':
+				buf.WriteString(`\"`)
+			case '\n':
+				buf.WriteString(`\n`)
+			case '\t':
+				buf.WriteString(`\t`)
+			default:
+				buf.WriteRune(r)
+			}
+		}
+		if i < len(lines)-1 {
+			buf.WriteString(`\n"` + "\n")
+		} else {
+			buf.WriteString(`"` + "\n")
+		}
+	}
+	return buf.String()
+}
+
+func encodeCommentPoString(text string) string {
+	var buf bytes.Buffer
+	lines := strings.Split(text, "\n")
+	if len(lines) > 1 {
+		buf.WriteString(`""` + "\n")
+	}
+	for i := 0; i < len(lines); i++ {
+		if len(lines) > 0 {
+			buf.WriteString("#| ")
+		}
+		buf.WriteRune('"')
+		for _, r := range lines[i] {
+			switch r {
+			case '\\':
+				buf.WriteString(`\\`)
+			case '"':
+				buf.WriteString(`\"`)
+			case '\n':
+				buf.WriteString(`\n`)
+			case '\t':
+				buf.WriteString(`\t`)
+			default:
+				buf.WriteRune(r)
+			}
+		}
+		if i < len(lines)-1 {
+			buf.WriteString(`\n"` + "\n")
+		} else {
+			buf.WriteString(`"`)
+		}
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/chai2010/gettext-go/tr.go b/vendor/github.com/chai2010/gettext-go/tr.go
new file mode 100644
index 000000000..5b9d08f42
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/tr.go
@@ -0,0 +1,175 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
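+//
+// Editorial note (summary of the code below, not upstream text): a translator
+// maps "msgctxt\x04msgid" keys (built by makeMapKey) to mo.Message values and
+// selects among plural forms with the formula derived from the catalog's
+// Language header (see newMoTranslator, newPoTranslator, newJsonTranslator).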
+ +package gettext + +import ( + "encoding/json" + + "github.com/chai2010/gettext-go/mo" + "github.com/chai2010/gettext-go/plural" + "github.com/chai2010/gettext-go/po" +) + +var nilTranslator = &translator{ + MessageMap: make(map[string]mo.Message), + PluralFormula: plural.Formula("??"), +} + +type translator struct { + MessageMap map[string]mo.Message + PluralFormula func(n int) int +} + +func newMoTranslator(name string, data []byte) (*translator, error) { + var ( + f *mo.File + err error + ) + if len(data) != 0 { + f, err = mo.Load(data) + } else { + f, err = mo.LoadFile(name) + } + if err != nil { + return nil, err + } + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + } + for _, v := range f.Messages { + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = v + } + if lang := f.MimeHeader.Language; lang != "" { + tr.PluralFormula = plural.Formula(lang) + } else { + tr.PluralFormula = plural.Formula("??") + } + return tr, nil +} + +func newPoTranslator(name string, data []byte) (*translator, error) { + var ( + f *po.File + err error + ) + if len(data) != 0 { + f, err = po.Load(data) + } else { + f, err = po.LoadFile(name) + } + if err != nil { + return nil, err + } + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + } + for _, v := range f.Messages { + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = mo.Message{ + MsgContext: v.MsgContext, + MsgId: v.MsgId, + MsgIdPlural: v.MsgIdPlural, + MsgStr: v.MsgStr, + MsgStrPlural: v.MsgStrPlural, + } + } + if lang := f.MimeHeader.Language; lang != "" { + tr.PluralFormula = plural.Formula(lang) + } else { + tr.PluralFormula = plural.Formula("??") + } + return tr, nil +} + +func newJsonTranslator(lang, name string, jsonData []byte) (*translator, error) { + var msgList []struct { + MsgContext string `json:"msgctxt"` // msgctxt context + MsgId string `json:"msgid"` // msgid untranslated-string + MsgIdPlural string `json:"msgid_plural"` // msgid_plural untranslated-string-plural + MsgStr []string `json:"msgstr"` // msgstr translated-string + } + if err := json.Unmarshal(jsonData, &msgList); err != nil { + return nil, err + } + + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + PluralFormula: plural.Formula(lang), + } + + for _, v := range msgList { + var v_MsgStr string + var v_MsgStrPlural = v.MsgStr + + if len(v.MsgStr) != 0 { + v_MsgStr = v.MsgStr[0] + } + + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = mo.Message{ + MsgContext: v.MsgContext, + MsgId: v.MsgId, + MsgIdPlural: v.MsgIdPlural, + MsgStr: v_MsgStr, + MsgStrPlural: v_MsgStrPlural, + } + } + return tr, nil +} + +func (p *translator) PGettext(msgctxt, msgid string) string { + return p.findMsgStr(msgctxt, msgid) +} + +func (p *translator) PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + n = p.PluralFormula(n) + if ss := p.findMsgStrPlural(msgctxt, msgid, msgidPlural); len(ss) != 0 { + if n >= len(ss) { + n = len(ss) - 1 + } + if ss[n] != "" { + return ss[n] + } + } + if msgidPlural != "" && n > 0 { + return msgidPlural + } + return msgid +} + +func (p *translator) findMsgStr(msgctxt, msgid string) string { + key := p.makeMapKey(msgctxt, msgid) + if v, ok := p.MessageMap[key]; ok { + if v.MsgStr != "" { + return v.MsgStr + } + } + return msgid +} + +func (p *translator) findMsgStrPlural(msgctxt, msgid, msgidPlural string) []string { + key := p.makeMapKey(msgctxt, msgid) + if v, ok := p.MessageMap[key]; ok { + if len(v.MsgIdPlural) != 0 { + if len(v.MsgStrPlural) != 0 { + return v.MsgStrPlural + } else { + 
return nil + } + } else { + if len(v.MsgStr) != 0 { + return []string{v.MsgStr} + } else { + return nil + } + } + } + return nil +} + +func (p *translator) makeMapKey(msgctxt, msgid string) string { + if msgctxt != "" { + return msgctxt + mo.EotSeparator + msgid + } + return msgid +} diff --git a/vendor/github.com/chai2010/gettext-go/util.go b/vendor/github.com/chai2010/gettext-go/util.go new file mode 100644 index 000000000..b8269a605 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/util.go @@ -0,0 +1,34 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "os" + "strings" +) + +func getDefaultLanguage() string { + if v := os.Getenv("LC_MESSAGES"); v != "" { + return simplifiedLanguage(v) + } + if v := os.Getenv("LANG"); v != "" { + return simplifiedLanguage(v) + } + return "default" +} + +func simplifiedLanguage(lang string) string { + // en_US/en_US.UTF-8/zh_CN/zh_TW/el_GR@euro/... + if idx := strings.Index(lang, ":"); idx != -1 { + lang = lang[:idx] + } + if idx := strings.Index(lang, "@"); idx != -1 { + lang = lang[:idx] + } + if idx := strings.Index(lang, "."); idx != -1 { + lang = lang[:idx] + } + return strings.TrimSpace(lang) +} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 000000000..584149b6e --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 000000000..8915f0277 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go new file mode 100644 index 000000000..ceceb21f5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -0,0 +1,323 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compression + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "os" + "strconv" + "sync" + + "github.com/containerd/containerd/log" + "github.com/klauspost/compress/zstd" + exec "golang.org/x/sys/execabs" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Gzip is gzip compression algorithm. + Gzip + // Zstd is zstd compression algorithm. + Zstd +) + +const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" + +var ( + initPigz sync.Once + unpigzPath string +) + +var ( + bufioReader32KPool = &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, + } +) + +// DecompressReadCloser include the stream after decompress and the compress method detected. +type DecompressReadCloser interface { + io.ReadCloser + // GetCompression returns the compress method which is used before decompressing + GetCompression() Compression +} + +type readCloserWrapper struct { + io.Reader + compression Compression + closer func() error +} + +func (r *readCloserWrapper) Close() error { + if r.closer != nil { + return r.closer() + } + return nil +} + +func (r *readCloserWrapper) GetCompression() Compression { + return r.compression +} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (w *writeCloserWrapper) Close() error { + if w.closer != nil { + w.closer() + } + return nil +} + +type bufferedReader struct { + buf *bufio.Reader +} + +func newBufferedReader(r io.Reader) *bufferedReader { + buf := bufioReader32KPool.Get().(*bufio.Reader) + buf.Reset(r) + return &bufferedReader{buf} +} + +func (r *bufferedReader) Read(p []byte) (n int, err error) { + if r.buf == nil { + return 0, io.EOF + } + n, err = r.buf.Read(p) + if err == io.EOF { + r.buf.Reset(nil) + bufioReader32KPool.Put(r.buf) + r.buf = nil + } + return +} + +func (r *bufferedReader) Peek(n int) ([]byte, error) { + if r.buf == nil { + return nil, io.EOF + } + return r.buf.Peek(n) +} + +const ( + zstdMagicSkippableStart = 0x184D2A50 + zstdMagicSkippableMask = 0xFFFFFFF0 +) + +var ( + gzipMagic = []byte{0x1F, 0x8B, 0x08} + zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} +) + +type matcher = func([]byte) bool + +func magicNumberMatcher(m []byte) matcher { + return func(source []byte) bool { + return bytes.HasPrefix(source, m) + } +} + +// zstdMatcher detects zstd compression algorithm. +// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. +// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. +func zstdMatcher() matcher { + return func(source []byte) bool { + if bytes.HasPrefix(source, zstdMagic) { + // Zstandard frame + return true + } + // skippable frame + if len(source) < 8 { + return false + } + // magic number from 0x184D2A50 to 0x184D2A5F. 
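+		// e.g. the leading bytes 0x53 0x2A 0x4D 0x18 decode little-endian to
+		// 0x184D2A53; masked with 0xFFFFFFF0 that is 0x184D2A50, so the frame
+		// is treated as skippable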
+ if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { + return true + } + return false + } +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, fn := range map[Compression]matcher{ + Gzip: magicNumberMatcher(gzipMagic), + Zstd: zstdMatcher(), + } { + if fn(source) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { + buf := newBufferedReader(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue docker/docker#18170 + return nil, err + } + + switch compression := DetectCompression(bs); compression { + case Uncompressed: + return &readCloserWrapper{ + Reader: buf, + compression: compression, + }, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + gzReader, err := gzipDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + + return &readCloserWrapper{ + Reader: gzReader, + compression: compression, + closer: func() error { + cancel() + return gzReader.Close() + }, + }, nil + case Zstd: + zstdReader, err := zstd.NewReader(buf) + if err != nil { + return nil, err + } + return &readCloserWrapper{ + Reader: zstdReader, + compression: compression, + closer: func() error { + zstdReader.Close() + return nil + }, + }, nil + + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + switch compression { + case Uncompressed: + return &writeCloserWrapper{dest, nil}, nil + case Gzip: + return gzip.NewWriter(dest), nil + case Zstd: + return zstd.NewWriter(dest) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. 
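+//
+// A minimal round-trip sketch (illustrative only; error handling elided and a
+// []byte payload assumed):
+//
+//	var buf bytes.Buffer
+//	w, _ := CompressStream(&buf, Gzip)
+//	w.Write(payload)
+//	w.Close()
+//	rc, _ := DecompressStream(&buf)
+//	out, _ := io.ReadAll(rc)
+//	rc.Close()
+//
+//	c := Gzip
+//	name := "layer.tar." + c.Extension() // "layer.tar.gz"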
+func (compression *Compression) Extension() string { + switch *compression { + case Gzip: + return "gz" + case Zstd: + return "zst" + } + return "" +} + +func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + initPigz.Do(func() { + if unpigzPath = detectPigz(); unpigzPath != "" { + log.L.Debug("using pigz for decompression") + } + }) + + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { + reader, writer := io.Pipe() + + cmd.Stdin = in + cmd.Stdout = writer + + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + if err := cmd.Start(); err != nil { + return nil, err + } + + go func() { + if err := cmd.Wait(); err != nil { + writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + writer.Close() + } + }() + + return reader, nil +} + +func detectPigz() string { + path, err := exec.LookPath("unpigz") + if err != nil { + log.L.WithError(err).Debug("unpigz not found, falling back to go gzip") + return "" + } + + // Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable + value := os.Getenv(disablePigzEnv) + if value == "" { + return path + } + + disable, err := strconv.ParseBool(value) + if err != nil { + log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value) + return path + } + + if disable { + return "" + } + + return path +} diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go new file mode 100644 index 000000000..3516494ac --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go @@ -0,0 +1,28 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compression + +import ( + "bytes" +) + +func FuzzDecompressStream(data []byte) int { + _, _ = DecompressStream(bytes.NewReader(data)) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/content/adaptor.go b/vendor/github.com/containerd/containerd/content/adaptor.go new file mode 100644 index 000000000..88bad2610 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/adaptor.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package content + +import ( + "strings" + + "github.com/containerd/containerd/filters" +) + +// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. +func AdaptInfo(info Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "digest": + return info.Digest.String(), true + case "size": + // TODO: support size based filtering + case "labels": + return checkMap(fieldpath[1:], info.Labels) + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, ".")] + return value, ok +} diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go new file mode 100644 index 000000000..8eb1a1692 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -0,0 +1,201 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + "io" + "time" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Store combines the methods of content-oriented interfaces into a set that +// are commonly provided by complete implementations. +// +// Overall content lifecycle: +// - Ingester is used to initiate a write operation (aka ingestion) +// - IngestManager is used to manage (e.g. list, abort) active ingestions +// - Once an ingestion is complete (see Writer.Commit), Provider is used to +// query a single piece of content by its digest +// - Manager is used to manage (e.g. list, delete) previously committed content +// +// Note that until ingestion is complete, its content is not visible through +// Provider or Manager. Once ingestion is complete, it is no longer exposed +// through IngestManager. +type Store interface { + Manager + Provider + IngestManager + Ingester +} + +// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer +type ReaderAt interface { + io.ReaderAt + io.Closer + Size() int64 +} + +// Provider provides a reader interface for specific content +type Provider interface { + // ReaderAt only requires desc.Digest to be set. + // Other fields in the descriptor may be used internally for resolving + // the location of the actual data. + ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) +} + +// Ingester writes content +type Ingester interface { + // Writer initiates a writing operation (aka ingestion). A single ingestion + // is uniquely identified by its ref, provided using a WithRef option. + // Writer can be called multiple times with the same ref to access the same + // ingestion. + // Once all the data is written, use Writer.Commit to complete the ingestion. 
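+	//
+	// A typical ingest, as a sketch (error handling elided; assumes an
+	// Ingester ing and a payload blob):
+	//
+	//	w, _ := ing.Writer(ctx, WithRef("my-ref"))
+	//	w.Write(blob)
+	//	w.Commit(ctx, int64(len(blob)), digest.FromBytes(blob))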
+ Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) +} + +// IngestManager provides methods for managing ingestions. An ingestion is a +// not-yet-complete writing operation initiated using Ingester and identified +// by a ref string. +type IngestManager interface { + // Status returns the status of the provided ref. + Status(ctx context.Context, ref string) (Status, error) + + // ListStatuses returns the status of any active ingestions whose ref match + // the provided regular expression. If empty, all active ingestions will be + // returned. + ListStatuses(ctx context.Context, filters ...string) ([]Status, error) + + // Abort completely cancels the ingest operation targeted by ref. + Abort(ctx context.Context, ref string) error +} + +// Info holds content specific information +type Info struct { + Digest digest.Digest + Size int64 + CreatedAt time.Time + UpdatedAt time.Time + Labels map[string]string +} + +// Status of a content operation (i.e. an ingestion) +type Status struct { + Ref string + Offset int64 + Total int64 + Expected digest.Digest + StartedAt time.Time + UpdatedAt time.Time +} + +// WalkFunc defines the callback for a blob walk. +type WalkFunc func(Info) error + +// InfoProvider provides info for content inspection. +type InfoProvider interface { + // Info will return metadata about content available in the content store. + // + // If the content is not present, ErrNotFound will be returned. + Info(ctx context.Context, dgst digest.Digest) (Info, error) +} + +// Manager provides methods for inspecting, listing and removing content. +type Manager interface { + InfoProvider + + // Update updates mutable information related to content. + // If one or more fieldpaths are provided, only those + // fields will be updated. + // Mutable fields: + // labels.* + Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) + + // Walk will call fn for each item in the content store which + // match the provided filters. If no filters are given all + // items will be walked. + Walk(ctx context.Context, fn WalkFunc, filters ...string) error + + // Delete removes the content from the store. + Delete(ctx context.Context, dgst digest.Digest) error +} + +// Writer handles writing of content into a content store +type Writer interface { + // Close closes the writer, if the writer has not been + // committed this allows resuming or aborting. + // Calling Close on a closed writer will not error. + io.WriteCloser + + // Digest may return empty digest or panics until committed. + Digest() digest.Digest + + // Commit commits the blob (but no roll-back is guaranteed on an error). + // size and expected can be zero-value when unknown. + // Commit always closes the writer, even on error. + // ErrAlreadyExists aborts the writer. + Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error + + // Status returns the current state of write + Status() (Status, error) + + // Truncate updates the size of the target blob + Truncate(size int64) error +} + +// Opt is used to alter the mutable properties of content +type Opt func(*Info) error + +// WithLabels allows labels to be set on content +func WithLabels(labels map[string]string) Opt { + return func(info *Info) error { + info.Labels = labels + return nil + } +} + +// WriterOpts is internally used by WriterOpt. +type WriterOpts struct { + Ref string + Desc ocispec.Descriptor +} + +// WriterOpt is used for passing options to Ingester.Writer. 
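+//
+// For example (sketch, assuming an Ingester ing and a descriptor desc), an
+// ingestion that pre-declares its target descriptor could be opened as:
+//
+//	w, err := ing.Writer(ctx, WithRef("upload-1"), WithDescriptor(desc))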
+type WriterOpt func(*WriterOpts) error + +// WithDescriptor specifies an OCI descriptor. +// Writer may optionally use the descriptor internally for resolving +// the location of the actual data. +// Write does not require any field of desc to be set. +// If the data size is unknown, desc.Size should be set to 0. +// Some implementations may also accept negative values as "unknown". +func WithDescriptor(desc ocispec.Descriptor) WriterOpt { + return func(opts *WriterOpts) error { + opts.Desc = desc + return nil + } +} + +// WithRef specifies a ref string. +func WithRef(ref string) WriterOpt { + return func(opts *WriterOpts) error { + opts.Ref = ref + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go new file mode 100644 index 000000000..5404109a6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -0,0 +1,334 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/randutil" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// maxResets is the no.of times the Copy() method can tolerate a reset of the body +const maxResets = 5 + +var ErrReset = errors.New("writer has been reset") + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +type reader interface { + Reader() io.Reader +} + +// NewReader returns a io.Reader from a ReaderAt +func NewReader(ra ReaderAt) io.Reader { + if rd, ok := ra.(reader); ok { + return rd.Reader() + } + return io.NewSectionReader(ra, 0, ra.Size()) +} + +// ReadBlob retrieves the entire contents of the blob from the provider. +// +// Avoid using this for large blobs, such as layers. +func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { + if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest { + return desc.Data, nil + } + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + defer ra.Close() + + p := make([]byte, ra.Size()) + + n, err := ra.ReadAt(p, 0) + if err == io.EOF { + if int64(n) != ra.Size() { + err = io.ErrUnexpectedEOF + } else { + err = nil + } + } + return p, err +} + +// WriteBlob writes data with the expected digest into the content store. If +// expected already exists, the method returns immediately and the reader will +// not be consumed. +// +// This is useful when the digest and size are known beforehand. +// +// Copy is buffered, so no need to wrap reader in buffered io. 
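+//
+// Typical use, as a sketch (assumes an Ingester cs, a reader r, and a
+// descriptor desc whose Size and Digest are already known):
+//
+//	err := WriteBlob(ctx, cs, desc.Digest.String(), r, desc)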
+func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error { + cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed to open writer: %w", err) + } + + return nil // already present + } + defer cw.Close() + + return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) +} + +// OpenWriter opens a new writer for the given reference, retrying if the writer +// is locked until the reference is available or returns an error. +func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) { + var ( + cw Writer + err error + retry = 16 + ) + for { + cw, err = cs.Writer(ctx, opts...) + if err != nil { + if !errdefs.IsUnavailable(err) { + return nil, err + } + + // TODO: Check status to determine if the writer is active, + // continue waiting while active, otherwise return lock + // error or abort. Requires asserting for an ingest manager + + select { + case <-time.After(time.Millisecond * time.Duration(randutil.Intn(retry))): + if retry < 2048 { + retry = retry << 1 + } + continue + case <-ctx.Done(): + // Propagate lock error + return nil, err + } + + } + break + } + + return cw, err +} + +// Copy copies data with the expected digest from the reader into the +// provided content store writer. This copy commits the writer. +// +// This is useful when the digest and size are known beforehand. When +// the size or digest is unknown, these values may be empty. +// +// Copy is buffered, so no need to wrap reader in buffered io. +func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r := or + if ws.Offset > 0 { + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + } + + for i := 0; i < maxResets; i++ { + if i >= 1 { + log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset") + } + copied, err := copyWithBuffer(cw, r) + if errors.Is(err, ErrReset) { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + if size != 0 && copied < size-ws.Offset { + // Short writes would return its own error, this indicates a read failure + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + } + if err := cw.Commit(ctx, size, expected, opts...); err != nil { + if errors.Is(err, ErrReset) { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + continue + } + if !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + } + } + return nil + } + + log.G(ctx).WithField("digest", expected).Errorf("failed to copy after %d retries", maxResets) + return fmt.Errorf("failed to copy after %d retries", maxResets) +} + +// CopyReaderAt copies to a writer from a given reader at for the given +// number of bytes. This copy does not commit the writer. 
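+//
+// For example (sketch, assuming a Provider provider and a descriptor desc),
+// re-ingesting an existing blob:
+//
+//	ra, _ := provider.ReaderAt(ctx, desc)
+//	defer ra.Close()
+//	err := CopyReaderAt(cw, ra, desc.Size)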
+func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { + ws, err := cw.Status() + if err != nil { + return err + } + + copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) + if err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + if copied < n { + // Short writes would return its own error, this indicates a read failure + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + } + return nil +} + +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, fmt.Errorf("failed to get status: %w", err) + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + } + + return copyWithBuffer(cw, r) +} + +// seekReader attempts to seek the reader to the given offset, either by +// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding +// up to the given offset. +func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { + // attempt to resolve r as a seeker and setup the offset. + seeker, ok := r.(io.Seeker) + if ok { + nn, err := seeker.Seek(offset, io.SeekStart) + if nn != offset { + if err == nil { + err = fmt.Errorf("unexpected seek location without seek error") + } + return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err) + } + + if err != nil { + return nil, err + } + + return r, nil + } + + // ok, let's try io.ReaderAt! + readerAt, ok := r.(io.ReaderAt) + if ok && size > offset { + sr := io.NewSectionReader(readerAt, offset, size) + return sr, nil + } + + // well then, let's just discard up to the offset + n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset)) + if err != nil { + return nil, fmt.Errorf("failed to discard to offset: %w", err) + } + if n != offset { + return nil, errors.New("unable to discard to offset") + } + + return r, nil +} + +// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer +// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have +// a full buffer before we do a write operation to dst to reduce overheads associated +// with the write operations of small buffers. +func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { + // If the reader has a WriteTo method, use it to do the copy. + // Avoids an allocation and a copy. + if wt, ok := src.(io.WriterTo); ok { + return wt.WriteTo(dst) + } + // Similarly, if the writer has a ReadFrom method, use it to do the copy. + if rt, ok := dst.(io.ReaderFrom); ok { + return rt.ReadFrom(src) + } + bufRef := bufPool.Get().(*[]byte) + defer bufPool.Put(bufRef) + buf := *bufRef + for { + nr, er := io.ReadAtLeast(src, buf, len(buf)) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + // If an EOF happens after reading fewer than the requested bytes, + // ReadAtLeast returns ErrUnexpectedEOF. 
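+			// Either way the final short read has already been written to dst
+			// above, so both sentinels mark a normal end of stream rather
+			// than a failure.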
+ if er != io.EOF && er != io.ErrUnexpectedEOF { + err = er + } + break + } + } + return +} diff --git a/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go b/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go new file mode 100644 index 000000000..a523f28d9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go @@ -0,0 +1,76 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "bufio" + "bytes" + "context" + _ "crypto/sha256" + "io" + "testing" + + "github.com/opencontainers/go-digest" + + "github.com/containerd/containerd/content" +) + +func FuzzContentStoreWriter(data []byte) int { + t := &testing.T{} + ctx := context.Background() + ctx, _, cs, cleanup := contentStoreEnv(t) + defer cleanup() + + cw, err := cs.Writer(ctx, content.WithRef("myref")) + if err != nil { + return 0 + } + if err := cw.Close(); err != nil { + return 0 + } + + // reopen, so we can test things + cw, err = cs.Writer(ctx, content.WithRef("myref")) + if err != nil { + return 0 + } + + err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data)))) + if err != nil { + return 0 + } + expected := digest.FromBytes(data) + + if err = cw.Commit(ctx, int64(len(data)), expected); err != nil { + return 0 + } + return 1 +} + +func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error { + nn, err := io.Copy(dst, src) + if err != nil { + return err + } + + if nn != size { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go new file mode 100644 index 000000000..1e59f39b3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/locks.go @@ -0,0 +1,62 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "fmt" + "sync" + "time" + + "github.com/containerd/containerd/errdefs" +) + +// Handles locking references + +type lock struct { + since time.Time +} + +var ( + // locks lets us lock in process + locks = make(map[string]*lock) + locksMu sync.Mutex +) + +func tryLock(ref string) error { + locksMu.Lock() + defer locksMu.Unlock() + + if v, ok := locks[ref]; ok { + // Returning the duration may help developers distinguish dead locks (long duration) from + // lock contentions (short duration). 
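+		// e.g. "locked for 2h0m0s" points at a leaked lock, while
+		// "locked for 15ms" is ordinary contention.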
+ now := time.Now() + return fmt.Errorf( + "ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since, + errdefs.ErrUnavailable, + ) + } + + locks[ref] = &lock{time.Now()} + return nil +} + +func unlock(ref string) { + locksMu.Lock() + defer locksMu.Unlock() + + delete(locks, ref) +} diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go new file mode 100644 index 000000000..899e85c0b --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/readerat.go @@ -0,0 +1,72 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "fmt" + "io" + "os" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" +) + +// readerat implements io.ReaderAt in a completely stateless manner by opening +// the referenced file for each call to ReadAt. +type sizeReaderAt struct { + size int64 + fp *os.File +} + +// OpenReader creates ReaderAt from a file +func OpenReader(p string) (content.ReaderAt, error) { + fi, err := os.Stat(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) + } + + fp, err := os.Open(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) + } + + return sizeReaderAt{size: fi.Size(), fp: fp}, nil +} + +func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { + return ra.fp.ReadAt(p, offset) +} + +func (ra sizeReaderAt) Size() int64 { + return ra.size +} + +func (ra sizeReaderAt) Close() error { + return ra.fp.Close() +} + +func (ra sizeReaderAt) Reader() io.Reader { + return io.LimitReader(ra.fp, ra.size) +} diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go new file mode 100644 index 000000000..baae3565b --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -0,0 +1,704 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package local + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/randutil" + "github.com/sirupsen/logrus" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +// LabelStore is used to store mutable labels for digests +type LabelStore interface { + // Get returns all the labels for the given digest + Get(digest.Digest) (map[string]string, error) + + // Set sets all the labels for a given digest + Set(digest.Digest, map[string]string) error + + // Update replaces the given labels for a digest, + // a key with an empty value removes a label. + Update(digest.Digest, map[string]string) (map[string]string, error) +} + +// Store is digest-keyed store for content. All data written into the store is +// stored under a verifiable digest. +// +// Store can generally support multi-reader, single-writer ingest of data, +// including resumable ingest. +type store struct { + root string + ls LabelStore +} + +// NewStore returns a local content store +func NewStore(root string) (content.Store, error) { + return NewLabeledStore(root, nil) +} + +// NewLabeledStore returns a new content store using the provided label store +// +// Note: content stores which are used underneath a metadata store may not +// require labels and should use `NewStore`. `NewLabeledStore` is primarily +// useful for tests or standalone implementations. +func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { + if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil { + return nil, err + } + + return &store{ + root: root, + ls: ls, + }, nil +} + +func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + p, err := s.blobPath(dgst) + if err != nil { + return content.Info{}, fmt.Errorf("calculating blob info path: %w", err) + } + + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) + } + + return content.Info{}, err + } + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return content.Info{}, err + } + } + return s.info(dgst, fi, labels), nil +} + +func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { + return content.Info{ + Digest: dgst, + Size: fi.Size(), + CreatedAt: fi.ModTime(), + UpdatedAt: getATime(fi), + Labels: labels, + } +} + +// ReaderAt returns an io.ReaderAt for the blob. +func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + p, err := s.blobPath(desc.Digest) + if err != nil { + return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err) + } + + reader, err := OpenReader(p) + if err != nil { + return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err) + } + + return reader, nil +} + +// Delete removes a blob by its digest. +// +// While this is safe to do concurrently, safe exist-removal logic must hold +// some global lock on the store. 
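+//
+// The blob lives at a digest-derived path (see blobPath below), e.g.:
+//
+//	<root>/blobs/sha256/<hex-encoded digest>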
+func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { + bp, err := s.blobPath(dgst) + if err != nil { + return fmt.Errorf("calculating blob path for delete: %w", err) + } + + if err := os.RemoveAll(bp); err != nil { + if !os.IsNotExist(err) { + return err + } + + return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) + } + + return nil +} + +func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + if s.ls == nil { + return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition) + } + + p, err := s.blobPath(info.Digest) + if err != nil { + return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err) + } + + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound) + } + + return content.Info{}, err + } + + var ( + all bool + labels map[string]string + ) + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if labels == nil { + labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + all = true + labels = info.Labels + default: + return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument) + } + } + } else { + all = true + labels = info.Labels + } + + if all { + err = s.ls.Set(info.Digest, labels) + } else { + labels, err = s.ls.Update(info.Digest, labels) + } + if err != nil { + return content.Info{}, err + } + + info = s.info(info.Digest, fi, labels) + info.UpdatedAt = time.Now() + + if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { + log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) + } + + return info, nil +} + +func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { + root := filepath.Join(s.root, "blobs") + + filter, err := filters.ParseAll(fs...) + if err != nil { + return err + } + + var alg digest.Algorithm + return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if !fi.IsDir() && !alg.Available() { + return nil + } + + // TODO(stevvooe): There are few more cases with subdirs that should be + // handled in case the layout gets corrupted. This isn't strict enough + // and may spew bad data. + + if path == root { + return nil + } + if filepath.Dir(path) == root { + alg = digest.Algorithm(filepath.Base(path)) + + if !alg.Available() { + alg = "" + return filepath.SkipDir + } + + // descending into a hash directory + return nil + } + + dgst := digest.NewDigestFromEncoded(alg, filepath.Base(path)) + if err := dgst.Validate(); err != nil { + // log error but don't report + log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") + // if we see this, it could mean some sort of corruption of the + // store or extra paths not expected previously. 
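+		// (a stray entry such as blobs/sha256/tmp fails validation and lands
+		// here, for instance)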
+ } + + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return err + } + } + + info := s.info(dgst, fi, labels) + if !filter.Match(content.AdaptInfo(info)) { + return nil + } + return fn(info) + }) +} + +func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { + return s.status(s.ingestRoot(ref)) +} + +func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { + fp, err := os.Open(filepath.Join(s.root, "ingest")) + if err != nil { + return nil, err + } + + defer fp.Close() + + fis, err := fp.Readdir(-1) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) + if err != nil { + return nil, err + } + + var active []content.Status + for _, fi := range fis { + p := filepath.Join(s.root, "ingest", fi.Name()) + stat, err := s.status(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + // TODO(stevvooe): This is a common error if uploads are being + // completed while making this listing. Need to consider taking a + // lock on the whole store to coordinate this aspect. + // + // Another option is to cleanup downloads asynchronously and + // coordinate this method with the cleanup process. + // + // For now, we just skip them, as they really don't exist. + continue + } + + if filter.Match(adaptStatus(stat)) { + active = append(active, stat) + } + } + + return active, nil +} + +// WalkStatusRefs is used to walk all status references +// Failed status reads will be logged and ignored, if +// this function is called while references are being altered, +// these error messages may be produced. +func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error { + fp, err := os.Open(filepath.Join(s.root, "ingest")) + if err != nil { + return err + } + + defer fp.Close() + + fis, err := fp.Readdir(-1) + if err != nil { + return err + } + + for _, fi := range fis { + rf := filepath.Join(s.root, "ingest", fi.Name(), "ref") + + ref, err := readFileString(rf) + if err != nil { + log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref") + continue + } + + if err := fn(ref); err != nil { + return err + } + } + + return nil +} + +// status works like stat above except uses the path to the ingest. +func (s *store) status(ingestPath string) (content.Status, error) { + dp := filepath.Join(ingestPath, "data") + fi, err := os.Stat(dp) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) + } + return content.Status{}, err + } + + ref, err := readFileString(filepath.Join(ingestPath, "ref")) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) + } + return content.Status{}, err + } + + startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) + if err != nil { + return content.Status{}, fmt.Errorf("could not read startedat: %w", err) + } + + updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) + if err != nil { + return content.Status{}, fmt.Errorf("could not read updatedat: %w", err) + } + + // because we don't write updatedat on every write, the mod time may + // actually be more up to date. 
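+	// e.g. a writer that has appended data since "updatedat" was last written
+	// leaves the file stale, so the newer of the two timestamps is reported.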
+	if fi.ModTime().After(updatedAt) {
+		updatedAt = fi.ModTime()
+	}
+
+	return content.Status{
+		Ref:       ref,
+		Offset:    fi.Size(),
+		Total:     s.total(ingestPath),
+		UpdatedAt: updatedAt,
+		StartedAt: startedAt,
+	}, nil
+}
+
+func adaptStatus(status content.Status) filters.Adaptor {
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		if len(fieldpath) == 0 {
+			return "", false
+		}
+		switch fieldpath[0] {
+		case "ref":
+			return status.Ref, true
+		}
+
+		return "", false
+	})
+}
+
+// total attempts to resolve the total expected size for the write.
+func (s *store) total(ingestPath string) int64 {
+	totalS, err := readFileString(filepath.Join(ingestPath, "total"))
+	if err != nil {
+		return 0
+	}
+
+	total, err := strconv.ParseInt(totalS, 10, 64)
+	if err != nil {
+		// represents a corrupted file, should probably remove.
+		return 0
+	}
+
+	return total
+}
+
+// Writer begins or resumes the active writer identified by ref. If the writer
+// is already in use, an error is returned. Only one writer may be in use per
+// ref at a time.
+//
+// The argument `ref` is used to uniquely identify a long-lived writer transaction.
+func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+	var wOpts content.WriterOpts
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil, err
+		}
+	}
+	// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
+	// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
+	if wOpts.Ref == "" {
+		return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
+	}
+	var lockErr error
+	for count := uint64(0); count < 10; count++ {
+		if err := tryLock(wOpts.Ref); err != nil {
+			if !errdefs.IsUnavailable(err) {
+				return nil, err
+			}
+
+			lockErr = err
+		} else {
+			lockErr = nil
+			break
+		}
+		time.Sleep(time.Millisecond * time.Duration(randutil.Intn(1<<count)))
+	}
+
+	if lockErr != nil {
+		return nil, lockErr
+	}
+
+	w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
+	if err != nil {
+		unlock(wOpts.Ref)
+		return nil, err
+	}
+
+	return w, nil // lock is now held by w.
+}
+
+func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
+	path, _, data := s.ingestPaths(ref)
+	status, err := s.status(path)
+	if err != nil {
+		return status, fmt.Errorf("failed reading status of resume write: %w", err)
+	}
+	if ref != status.Ref {
+		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
+		// layout corruption or a hash collision for the ref key.
+		return status, fmt.Errorf("ref key does not match: %v != %v", ref, status.Ref)
+	}
+
+	if total > 0 && status.Total > 0 && total != status.Total {
+		return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total)
+	}
+
+	//nolint:dupword
+	// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
+	fp, err := os.Open(data)
+	if err != nil {
+		return status, err
+	}
+
+	p := bufPool.Get().(*[]byte)
+	status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
+	bufPool.Put(p)
+	fp.Close()
+	return status, err
+}
+
+// writer provides the main implementation of the Writer method. The caller
+// must hold the lock correctly and release on error if there is a problem.
+func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
+	// TODO(stevvooe): Need to actually store expected here. We have
+	// code in the service that shouldn't be dealing with this.
+	if expected != "" {
+		p, err := s.blobPath(expected)
+		if err != nil {
+			return nil, fmt.Errorf("calculating expected blob path for writer: %w", err)
+		}
+		if _, err := os.Stat(p); err == nil {
+			return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists)
+		}
+	}
+
+	path, refp, data := s.ingestPaths(ref)
+
+	var (
+		digester  = digest.Canonical.Digester()
+		offset    int64
+		startedAt time.Time
+		updatedAt time.Time
+	)
+
+	foundValidIngest := false
+	// ensure that the ingest path has been created.
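+	// If Mkdir fails with "exists", a previous ingest for this ref may still
+	// be on disk; resumeStatus below re-hashes its partial data so the write
+	// can continue from the prior offset.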
+ if err := os.Mkdir(path, 0755); err != nil { + if !os.IsExist(err) { + return nil, err + } + status, err := s.resumeStatus(ref, total, digester) + if err == nil { + foundValidIngest = true + updatedAt = status.UpdatedAt + startedAt = status.StartedAt + total = status.Total + offset = status.Offset + } else { + logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error()) + } + } + + if !foundValidIngest { + startedAt = time.Now() + updatedAt = startedAt + + // the ingest is new, we need to setup the target location. + // write the ref to a file for later use + if err := os.WriteFile(refp, []byte(ref), 0666); err != nil { + return nil, err + } + + if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil { + return nil, err + } + + if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil { + return nil, err + } + + if total > 0 { + if err := os.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { + return nil, err + } + } + } + + fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return nil, fmt.Errorf("failed to open data file: %w", err) + } + + if _, err := fp.Seek(offset, io.SeekStart); err != nil { + fp.Close() + return nil, fmt.Errorf("could not seek to current write offset: %w", err) + } + + return &writer{ + s: s, + fp: fp, + ref: ref, + path: path, + offset: offset, + total: total, + digester: digester, + startedAt: startedAt, + updatedAt: updatedAt, + }, nil +} + +// Abort an active transaction keyed by ref. If the ingest is active, it will +// be cancelled. Any resources associated with the ingest will be cleaned. +func (s *store) Abort(ctx context.Context, ref string) error { + root := s.ingestRoot(ref) + if err := os.RemoveAll(root); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound) + } + + return err + } + + return nil +} + +func (s *store) blobPath(dgst digest.Digest) (string, error) { + if err := dgst.Validate(); err != nil { + return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument) + } + + return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Encoded()), nil +} + +func (s *store) ingestRoot(ref string) string { + // we take a digest of the ref to keep the ingest paths constant length. + // Note that this is not the current or potential digest of incoming content. + dgst := digest.FromString(ref) + return filepath.Join(s.root, "ingest", dgst.Encoded()) +} + +// ingestPaths are returned. The paths are the following: +// +// - root: entire ingest directory +// - ref: name of the starting ref, must be unique +// - data: file where data is written +func (s *store) ingestPaths(ref string) (string, string, string) { + var ( + fp = s.ingestRoot(ref) + rp = filepath.Join(fp, "ref") + dp = filepath.Join(fp, "data") + ) + + return fp, rp, dp +} + +func readFileString(path string) (string, error) { + p, err := os.ReadFile(path) + return string(p), err +} + +// readFileTimestamp reads a file with just a timestamp present. 
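+// The value is expected in the format produced by time.Time.MarshalText
+// (RFC 3339 with nanoseconds), e.g.:
+//
+//	2006-01-02T15:04:05.999999999Z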
+func readFileTimestamp(p string) (time.Time, error) { + b, err := os.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) + } + return time.Time{}, err + } + + var t time.Time + if err := t.UnmarshalText(b); err != nil { + return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err) + } + + return t, nil +} + +func writeTimestampFile(p string, t time.Time) error { + b, err := t.MarshalText() + if err != nil { + return err + } + return writeToCompletion(p, b, 0666) +} + +func writeToCompletion(path string, data []byte, mode os.FileMode) error { + tmp := fmt.Sprintf("%s.tmp", path) + f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) + if err != nil { + return fmt.Errorf("create tmp file: %w", err) + } + _, err = f.Write(data) + f.Close() + if err != nil { + return fmt.Errorf("write tmp file: %w", err) + } + err = os.Rename(tmp, path) + if err != nil { + return fmt.Errorf("rename tmp file: %w", err) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_bsd.go b/vendor/github.com/containerd/containerd/content/local/store_bsd.go new file mode 100644 index 000000000..7dcc19232 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_bsd.go @@ -0,0 +1,33 @@ +//go:build darwin || freebsd || netbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(st.Atimespec.Unix()) + } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_openbsd.go b/vendor/github.com/containerd/containerd/content/local/store_openbsd.go new file mode 100644 index 000000000..45dfa9997 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_openbsd.go @@ -0,0 +1,33 @@ +//go:build openbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
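The `startedat`/`updatedat` files written above hold a single `time.Time` in its text-marshalled form (RFC 3339 with nanoseconds), and `writeToCompletion` makes each update atomic with a tmp-file-plus-rename. A round-trip of the timestamp format, for illustration:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

func main() {
	dir, err := os.MkdirTemp("", "ingest-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	p := filepath.Join(dir, "startedat")

	// MarshalText emits RFC 3339 with nanoseconds,
	// e.g. 2025-02-28T09:26:28.123456789+05:30
	b, _ := time.Now().MarshalText()
	if err := os.WriteFile(p, b, 0666); err != nil {
		panic(err)
	}

	raw, _ := os.ReadFile(p)
	var t time.Time
	if err := t.UnmarshalText(raw); err != nil {
		panic(err)
	}
	fmt.Println(string(raw), "->", t.UTC())
}
```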
+*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(st.Atim.Unix()) + } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go new file mode 100644 index 000000000..cb01c91c7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go @@ -0,0 +1,33 @@ +//go:build linux || solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(st.Atim.Unix()) + } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go new file mode 100644 index 000000000..bce849979 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_windows.go @@ -0,0 +1,26 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/test_helper.go b/vendor/github.com/containerd/containerd/content/local/test_helper.go new file mode 100644 index 000000000..42de01cae --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/test_helper.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package local + +import ( + "context" + "testing" + + "github.com/containerd/containerd/content" +) + +func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) { + tmpdir := t.TempDir() + + cs, err := NewStore(tmpdir) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.Background()) + return ctx, tmpdir, cs, func() { + cancel() + } +} diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go new file mode 100644 index 000000000..b187e524c --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/writer.go @@ -0,0 +1,208 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/opencontainers/go-digest" +) + +// writer represents a write transaction against the blob store. +type writer struct { + s *store + fp *os.File // opened data file + path string // path to writer dir + ref string // ref key + offset int64 + total int64 + digester digest.Digester + startedAt time.Time + updatedAt time.Time +} + +func (w *writer) Status() (content.Status, error) { + return content.Status{ + Ref: w.ref, + Offset: w.offset, + Total: w.total, + StartedAt: w.startedAt, + UpdatedAt: w.updatedAt, + }, nil +} + +// Digest returns the current digest of the content, up to the current write. +// +// Cannot be called concurrently with `Write`. +func (w *writer) Digest() digest.Digest { + return w.digester.Digest() +} + +// Write p to the transaction. +// +// Note that writes are unbuffered to the backing file. When writing, it is +// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer. 
+func (w *writer) Write(p []byte) (n int, err error) {
+	n, err = w.fp.Write(p)
+	w.digester.Hash().Write(p[:n])
+	w.offset += int64(len(p))
+	w.updatedAt = time.Now()
+	return n, err
+}
+
+func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	// Ensure even on error the writer is fully closed
+	defer unlock(w.ref)
+
+	var base content.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return err
+		}
+	}
+
+	fp := w.fp
+	w.fp = nil
+
+	if fp == nil {
+		return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition)
+	}
+
+	if err := fp.Sync(); err != nil {
+		fp.Close()
+		return fmt.Errorf("sync failed: %w", err)
+	}
+
+	fi, err := fp.Stat()
+	closeErr := fp.Close()
+	if err != nil {
+		return fmt.Errorf("stat on ingest file failed: %w", err)
+	}
+	if closeErr != nil {
+		return fmt.Errorf("failed to close ingest file: %w", closeErr)
+	}
+
+	if size > 0 && size != fi.Size() {
+		return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition)
+	}
+
+	dgst := w.digester.Digest()
+	if expected != "" && expected != dgst {
+		return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition)
+	}
+
+	var (
+		ingest    = filepath.Join(w.path, "data")
+		target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst
+	)
+
+	// make sure parent directories of blob exist
+	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+		return err
+	}
+
+	if _, err := os.Stat(target); err == nil {
+		// collision with the target file!
+		if err := os.RemoveAll(w.path); err != nil {
+			log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
+		}
+		return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists)
+	}
+
+	if err := os.Rename(ingest, target); err != nil {
+		return err
+	}
+
+	// Ingest has now been made available in the content store, attempt to complete
+	// setting metadata but errors should only be logged and not returned since
+	// the content store cannot be cleanly rolled back.
+
+	commitTime := time.Now()
+	if err := os.Chtimes(target, commitTime, commitTime); err != nil {
+		log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time")
+	}
+
+	// clean up!!
+	if err := os.RemoveAll(w.path); err != nil {
+		log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
+	}
+
+	if w.s.ls != nil && base.Labels != nil {
+		if err := w.s.ls.Set(dgst, base.Labels); err != nil {
+			log.G(ctx).WithField("digest", dgst).Error("failed to set labels")
+		}
+	}
+
+	// change to readonly, more important for read, but provides _some_
+	// protection from this point on. We use the existing perms with a mask
+	// only allowing reads honoring the umask on creation.
+	//
+	// This removes write and exec, only allowing read per the creation umask.
+	//
+	// NOTE: Windows does not support this operation
+	if runtime.GOOS != "windows" {
+		if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
+			log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly")
+		}
+	}
+
+	return nil
+}
+
+// Close the writer, flushing any unwritten data and leaving the progress
+// intact.
+//
+// If one needs to resume the transaction, a new writer can be obtained from
+// `Ingester.Writer` using the same key. The write can then be continued
+// from where it was left off.
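End to end, the writer above is used as: open with a ref, stream bytes, then `Commit` with the expected size and digest. A sketch in the shape of a test, assuming the exported `local.NewStore` constructor from earlier in this package and `content.WithRef` from the content package:

```go
package local_test // hypothetical external test sketch

import (
	"context"
	"testing"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	"github.com/opencontainers/go-digest"
)

func TestIngestLifecycle(t *testing.T) {
	cs, err := local.NewStore(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()

	blob := []byte("hello world")
	expected := digest.FromBytes(blob)

	// Writer takes the per-ref lock; Commit verifies size and digest,
	// renames the ingest file into blobs/, and releases the lock.
	w, err := cs.Writer(ctx, content.WithRef("test-ref"))
	if err != nil {
		t.Fatal(err)
	}
	if _, err := w.Write(blob); err != nil {
		t.Fatal(err)
	}
	if err := w.Commit(ctx, int64(len(blob)), expected); err != nil {
		t.Fatal(err)
	}
	_ = w.Close()
}
```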
+// +// To abandon a transaction completely, first call close then `IngestManager.Abort` to +// clean up the associated resources. +func (w *writer) Close() (err error) { + if w.fp != nil { + w.fp.Sync() + err = w.fp.Close() + writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) + w.fp = nil + unlock(w.ref) + return + } + + return nil +} + +func (w *writer) Truncate(size int64) error { + if size != 0 { + return errors.New("Truncate: unsupported size") + } + w.offset = 0 + w.digester.Hash().Reset() + if _, err := w.fp.Seek(0, io.SeekStart); err != nil { + return err + } + return w.fp.Truncate(0) +} diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 000000000..876225597 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,92 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with fmt.Errorf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +// +// The functions ToGRPC and FromGRPC can be used to map server-side and +// client-side errors to the correct types. +package errdefs + +import ( + "context" + "errors" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these errors classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// For the most part, we just try to provide local grpc errors. Most conditions +// map very well to those defined by grpc. +var ( + ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. 
+	ErrInvalidArgument    = errors.New("invalid argument")
+	ErrNotFound           = errors.New("not found")
+	ErrAlreadyExists      = errors.New("already exists")
+	ErrFailedPrecondition = errors.New("failed precondition")
+	ErrUnavailable        = errors.New("unavailable")
+	ErrNotImplemented     = errors.New("not implemented") // represents not supported and unimplemented
+)
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+	return errors.Is(err, ErrInvalidArgument)
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+	return errors.Is(err, ErrAlreadyExists)
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to
+// the lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+	return errors.Is(err, ErrFailedPrecondition)
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+	return errors.Is(err, ErrUnavailable)
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+	return errors.Is(err, ErrNotImplemented)
+}
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
+func IsCanceled(err error) bool {
+	return errors.Is(err, context.Canceled)
+}
+
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool {
+	return errors.Is(err, context.DeadlineExceeded)
+}
diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go
new file mode 100644
index 000000000..7a9b33e05
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go
@@ -0,0 +1,147 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package errdefs
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// ToGRPC will attempt to map the backend containerd error into a grpc error,
+// using the original error message as a description.
+//
+// Further information may be extracted from certain errors depending on their
+// type.
+//
+// If the error is unmapped, the original error will be returned to be handled
+// by the regular grpc error handling stack.
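The error classes above are meant to be wrapped with `%w` so callers can classify failures without string matching; the `Is*` helpers are thin `errors.Is` checks. For example:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

// lookup annotates the class with context while keeping it matchable via %w.
func lookup(name string) error {
	return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	err := lookup("example")
	fmt.Println(errdefs.IsNotFound(err))             // true
	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
}
```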
+func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. +// +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = fmt.Errorf("%s: %w", msg, cls) + } else { + err = cls + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go new file mode 100644 index 000000000..5a9c559c1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
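`ToGRPC` and `FromGRPC` above are designed to round-trip: a classified error mapped to a grpc status on the server side regains its class when it arrives back at a client. A short sketch (the blob digest is made up):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// server side: classify, then map to a grpc status code
	err := fmt.Errorf("blob sha256:abc: %w", errdefs.ErrNotFound)
	grpcErr := errdefs.ToGRPC(err) // carries codes.NotFound

	// client side: recover the class so errors.Is / IsNotFound work again
	back := errdefs.FromGRPC(grpcErr)
	fmt.Println(errdefs.IsNotFound(back)) // true
}
```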
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package filters
+
+// Adaptor specifies the mapping of fieldpaths to a type. For the given field
+// path, the value and whether it is present should be returned. The mapping of
+// the fieldpath to a field is deferred to the adaptor implementation, but
+// should generally follow protobuf field path/mask semantics.
+type Adaptor interface {
+	Field(fieldpath []string) (value string, present bool)
+}
+
+// AdapterFunc allows implementation specific matching of fieldpaths
+type AdapterFunc func(fieldpath []string) (string, bool)
+
+// Field returns the field name and true if it exists
+func (fn AdapterFunc) Field(fieldpath []string) (string, bool) {
+	return fn(fieldpath)
+}
diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go
new file mode 100644
index 000000000..e13f2625c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/filter.go
@@ -0,0 +1,178 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package filters defines a syntax and parser that can be used for the
+// filtration of items across the containerd API. The core is built on the
+// concept of protobuf field paths, with quoting. Several operators allow the
+// user to flexibly select items based on field presence, equality, inequality
+// and regular expressions. Flexible adaptors support working with any type.
+//
+// The syntax is fairly familiar, if you've used container ecosystem
+// projects. At the core, we base it on the concept of protobuf field
+// paths, augmenting with the ability to quote portions of the field path
+// to match arbitrary labels. These "selectors" come in the following
+// syntax:
+//
+// ```
+// <fieldpath>[<operator><value>]
+// ```
+//
+// A basic example is as follows:
+//
+// ```
+// name==foo
+// ```
+//
+// This would match all objects that have a field `name` with the value
+// `foo`. If we only want to test if the field is present, we can omit the
+// operator. This is most useful for matching labels in containerd. The
+// following will match objects that have the field "labels" and have the
+// label "foo" defined:
+//
+// ```
+// labels.foo
+// ```
+//
+// We also allow for quoting of parts of the field path to allow matching
+// of arbitrary items:
+//
+// ```
+// labels."very complex label"==something
+// ```
+//
+// We also define `!=` and `~=` as operators. The `!=` will match all
+// objects that don't match the value for a field and `~=` will compile the
+// target value as a regular expression and match the field value against that.
+//
+// Selectors can be combined using a comma, such that the resulting
+// selector will require all selectors are matched for the object to match.
+// The following example will match objects that are named `foo` and have
+// the label `bar`:
+//
+// ```
+// name==foo,labels.bar
+// ```
+package filters
+
+import (
+	"regexp"
+
+	"github.com/containerd/containerd/log"
+)
+
+// Filter matches specific resources based on the provided filter
+type Filter interface {
+	Match(adaptor Adaptor) bool
+}
+
+// FilterFunc is a function that handles matching with an adaptor
+type FilterFunc func(Adaptor) bool
+
+// Match matches the FilterFunc returning true if the object matches the filter
+func (fn FilterFunc) Match(adaptor Adaptor) bool {
+	return fn(adaptor)
+}
+
+// Always is a filter that always returns true for any type of object
+var Always FilterFunc = func(adaptor Adaptor) bool {
+	return true
+}
+
+// Any allows multiple filters to be matched against the object
+type Any []Filter
+
+// Match returns true if any of the provided filters are true
+func (m Any) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if m.Match(adaptor) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// All allows multiple filters to be matched against the object
+type All []Filter
+
+// Match only returns true if all filters match the object
+func (m All) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if !m.Match(adaptor) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type operator int
+
+const (
+	operatorPresent = iota
+	operatorEqual
+	operatorNotEqual
+	operatorMatches
+)
+
+func (op operator) String() string {
+	switch op {
+	case operatorPresent:
+		return "?"
+	case operatorEqual:
+		return "=="
+	case operatorNotEqual:
+		return "!="
+	case operatorMatches:
+		return "~="
+	}
+
+	return "unknown"
+}
+
+type selector struct {
+	fieldpath []string
+	operator  operator
+	value     string
+	re        *regexp.Regexp
+}
+
+func (m selector) Match(adaptor Adaptor) bool {
+	value, present := adaptor.Field(m.fieldpath)
+
+	switch m.operator {
+	case operatorPresent:
+		return present
+	case operatorEqual:
+		return present && value == m.value
+	case operatorNotEqual:
+		return value != m.value
+	case operatorMatches:
+		if m.re == nil {
+			r, err := regexp.Compile(m.value)
+			if err != nil {
+				log.L.Errorf("error compiling regexp %q", m.value)
+				return false
+			}
+
+			m.re = r
+		}
+
+		return m.re.MatchString(value)
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go
new file mode 100644
index 000000000..32767909b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/parser.go
@@ -0,0 +1,290 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package filters
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/containerd/containerd/errdefs"
+)
+
+/*
+Parse the strings into a filter that may be used with an adaptor.
+
+The filter is made up of zero or more selectors.
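Putting the pieces together: `Parse` (the entry point defined just below) produces a `Filter`, and an `AdapterFunc` exposes an object's fields to it. A small usage sketch, with a throwaway anonymous struct standing in for a real API object:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// match objects named "foo" that carry the label "tier" with value "web"
	f, err := filters.Parse("name==foo,labels.tier==web")
	if err != nil {
		panic(err)
	}

	obj := struct {
		name   string
		labels map[string]string
	}{name: "foo", labels: map[string]string{"tier": "web"}}

	ok := f.Match(filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}
		switch fieldpath[0] {
		case "name":
			return obj.name, true
		case "labels":
			if len(fieldpath) < 2 {
				return "", false
			}
			v, ok := obj.labels[fieldpath[1]]
			return v, ok
		}
		return "", false
	}))

	fmt.Println(ok) // true
}
```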
+
+The format is a comma separated list of expressions, in the form of
+`<fieldpath><operator><value>`, known as selectors. All selectors must match the
+target object for the filter to be true.
+
+We define the operators "==" for equality, "!=" for not equal and "~=" for a
+regular expression. If the operator and value are not present, the matcher will
+test for the presence of a value, as defined by the target object.
+
+The formal grammar is as follows:
+
+selectors := selector ("," selector)*
+selector := fieldpath (operator value)
+fieldpath := field ('.' field)*
+field := quoted | [A-Za-z] [A-Za-z0-9_]+
+operator := "==" | "!=" | "~="
+value := quoted | [^\s,]+
+quoted := <go string syntax>
+*/
+func Parse(s string) (Filter, error) {
+	// special case empty to match all
+	if s == "" {
+		return Always, nil
+	}
+
+	p := parser{input: s}
+	return p.parse()
+}
+
+// ParseAll parses each filter in ss and returns a filter that will return true
+// if any filter matches the expression.
+//
+// If no filters are provided, the filter will match anything.
+func ParseAll(ss ...string) (Filter, error) {
+	if len(ss) == 0 {
+		return Always, nil
+	}
+
+	var fs []Filter
+	for _, s := range ss {
+		f, err := Parse(s)
+		if err != nil {
+			return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument)
+		}
+
+		fs = append(fs, f)
+	}
+
+	return Any(fs), nil
+}
+
+type parser struct {
+	input   string
+	scanner scanner
+}
+
+func (p *parser) parse() (Filter, error) {
+	p.scanner.init(p.input)
+
+	ss, err := p.selectors()
+	if err != nil {
+		return nil, fmt.Errorf("filters: %w", err)
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selectors() (Filter, error) {
+	s, err := p.selector()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := All{s}
+
+loop:
+	for {
+		tok := p.scanner.peek()
+		switch tok {
+		case ',':
+			pos, tok, _ := p.scanner.scan()
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a separator")
+			}
+
+			s, err := p.selector()
+			if err != nil {
+				return nil, err
+			}
+
+			ss = append(ss, s)
+		case tokenEOF:
+			break loop
+		default:
+			return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
+		}
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selector() (selector, error) {
+	fieldpath, err := p.fieldpath()
+	if err != nil {
+		return selector{}, err
+	}
+
+	switch p.scanner.peek() {
+	case ',', tokenSeparator, tokenEOF:
+		return selector{
+			fieldpath: fieldpath,
+			operator:  operatorPresent,
+		}, nil
+	}
+
+	op, err := p.operator()
+	if err != nil {
+		return selector{}, err
+	}
+
+	var allowAltQuotes bool
+	if op == operatorMatches {
+		allowAltQuotes = true
+	}
+
+	value, err := p.value(allowAltQuotes)
+	if err != nil {
+		if err == io.EOF {
+			return selector{}, io.ErrUnexpectedEOF
+		}
+		return selector{}, err
+	}
+
+	return selector{
+		fieldpath: fieldpath,
+		value:     value,
+		operator:  op,
+	}, nil
+}
+
+func (p *parser) fieldpath() ([]string, error) {
+	f, err := p.field()
+	if err != nil {
+		return nil, err
+	}
+
+	fs := []string{f}
+loop:
+	for {
+		tok := p.scanner.peek() // lookahead to consume field separator
+
+		switch tok {
+		case '.':
+			pos, tok, _ := p.scanner.scan() // consume separator
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a field separator (`.`)")
+			}
+
+			f, err := p.field()
+			if err != nil {
+				return nil, err
+			}
+
+			fs = append(fs, f)
+		default:
+			// let the layer above handle the other bad cases.
+ break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerr(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerr(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerr(pos int, format string, args ...interface{}) error { + return fmt.Errorf("parse error: %w", parseError{ + input: p.input, + pos: pos, + msg: fmt.Sprintf(format, args...), + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go new file mode 100644 index 000000000..5c800ef84 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -0,0 +1,252 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "errors" + "unicode/utf8" +) + +// NOTE(stevvooe): Most of this code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. + +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. +// It returns four values: +// +// 1. value, the decoded Unicode code point or byte value; +// 2. 
multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3. tail, the remainder of the string after the character; and +// 4. an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. +func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. 
+ buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go new file mode 100644 index 000000000..6a485467b --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
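The point of the modified quoting rules above is that `~=` patterns can be wrapped in `/` or `|` instead of double quotes, so a regular expression may itself contain slashes without escaping. For instance (a sketch; the image name is made up):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// pipe-quoted regex: the embedded '/' needs no escaping
	f, err := filters.Parse(`name~=|^docker[.]io/library/etcd:.+$|`)
	if err != nil {
		panic(err)
	}

	ok := f.Match(filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 1 && fieldpath[0] == "name" {
			return "docker.io/library/etcd:v3.5.0", true
		}
		return "", false
	}))
	fmt.Println(ok) // true
}
```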
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/images/annotations.go b/vendor/github.com/containerd/containerd/images/annotations.go new file mode 100644 index 000000000..47d92104c --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/annotations.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +const ( + // AnnotationImageName is an annotation on a Descriptor in an index.json + // containing the `Name` value as used by an `Image` struct + AnnotationImageName = "io.containerd.image.name" +) diff --git a/vendor/github.com/containerd/containerd/images/diffid.go b/vendor/github.com/containerd/containerd/images/diffid.go new file mode 100644 index 000000000..56193cc28 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/diffid.go @@ -0,0 +1,81 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package images + +import ( + "context" + "io" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/labels" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +// GetDiffID gets the diff ID of the layer blob descriptor. +func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) { + switch desc.MediaType { + case + // If the layer is already uncompressed, we can just return its digest + MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayer, + MediaTypeDockerSchema2LayerForeign, + ocispec.MediaTypeImageLayerNonDistributable: + return desc.Digest, nil + } + info, err := cs.Info(ctx, desc.Digest) + if err != nil { + return "", err + } + v, ok := info.Labels[labels.LabelUncompressed] + if ok { + // Fast path: if the image is already unpacked, we can use the label value + return digest.Parse(v) + } + // if the image is not unpacked, we may not have the label + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return "", err + } + defer ra.Close() + r := content.NewReader(ra) + uR, err := compression.DecompressStream(r) + if err != nil { + return "", err + } + defer uR.Close() + digester := digest.Canonical.Digester() + hashW := digester.Hash() + if _, err := io.Copy(hashW, uR); err != nil { + return "", err + } + if err := ra.Close(); err != nil { + return "", err + } + digest := digester.Digest() + // memorize the computed value + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[labels.LabelUncompressed] = digest.String() + if _, err := cs.Update(ctx, info, "labels"); err != nil { + logrus.WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest) + } + return digest, nil +} diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go new file mode 100644 index 000000000..077d88e78 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -0,0 +1,322 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "errors" + "fmt" + "sort" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +var ( + // ErrSkipDesc is used to skip processing of a descriptor and + // its descendants. + ErrSkipDesc = errors.New("skip descriptor") + + // ErrStopHandler is used to signify that the descriptor + // has been handled and should not be handled further. + // This applies only to a single descriptor in a handler + // chain and does not apply to descendant descriptors. 
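`GetDiffID` above boils down to: if the layer blob is compressed, its diff ID is the digest of the decompressed stream, cached afterwards in the uncompressed-content label. A stdlib-only sketch of the core computation for a gzip layer (the file path is hypothetical):

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"

	"github.com/opencontainers/go-digest"
)

// diffID returns the digest of the *uncompressed* bytes of a
// gzip-compressed layer archive, which is what a diff ID is.
func diffID(path string) (digest.Digest, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	zr, err := gzip.NewReader(f)
	if err != nil {
		return "", err
	}
	defer zr.Close()

	digester := digest.Canonical.Digester()
	if _, err := io.Copy(digester.Hash(), zr); err != nil {
		return "", err
	}
	return digester.Digest(), nil
}

func main() {
	d, err := diffID("layer.tar.gz") // hypothetical layer archive
	fmt.Println(d, err)
}
```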
+	ErrStopHandler = errors.New("stop handler")
+
+	// ErrEmptyWalk is used when the WalkNotEmpty handlers return no
+	// children (e.g.: they were filtered out).
+	ErrEmptyWalk = errors.New("image might be filtered out")
+)
+
+// Handler handles image manifests
+type Handler interface {
+	Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
+}
+
+// HandlerFunc function implementing the Handler interface
+type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
+
+// Handle image manifests
+func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+	return fn(ctx, desc)
+}
+
+// Handlers returns a handler that will run the handlers in sequence.
+//
+// A handler may return `ErrStopHandler` to stop calling additional handlers
+func Handlers(handlers ...Handler) HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+		var children []ocispec.Descriptor
+		for _, handler := range handlers {
+			ch, err := handler.Handle(ctx, desc)
+			if err != nil {
+				if errors.Is(err, ErrStopHandler) {
+					break
+				}
+				return nil, err
+			}
+
+			children = append(children, ch...)
+		}
+
+		return children, nil
+	}
+}
+
+// Walk the resources of an image and call the handler for each. If the handler
+// decodes the sub-resources for each image, they will be visited as well.
+//
+// This differs from dispatch in that each sibling resource is considered
+// synchronously.
+func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+	for _, desc := range descs {
+
+		children, err := handler.Handle(ctx, desc)
+		if err != nil {
+			if errors.Is(err, ErrSkipDesc) {
+				continue // don't traverse the children.
+			}
+			return err
+		}
+
+		if len(children) > 0 {
+			if err := Walk(ctx, handler, children...); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// WalkNotEmpty works the same way Walk does, with the exception that it ensures that
+// some children are still found by Walking the descriptors (for example, not all of
+// them have been filtered out by one of the handlers). If there are no children,
+// then an ErrEmptyWalk error is returned.
+func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+	isEmpty := true
+	var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := handler.Handle(ctx, desc)
+		if err != nil {
+			return children, err
+		}
+
+		if len(children) > 0 {
+			isEmpty = false
+		}
+
+		return children, nil
+	}
+
+	err := Walk(ctx, notEmptyHandler, descs...)
+	if err != nil {
+		return err
+	}
+
+	if isEmpty {
+		return ErrEmptyWalk
+	}
+
+	return nil
+}
+
+// Dispatch runs the provided handler for content specified by the descriptors.
+// If the handler decodes subresources, they will be visited, as well.
+//
+// Handlers for siblings are run in parallel on the provided descriptors. A
+// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
+// any children.
+//
+// A concurrency limiter can be passed in to limit the number of concurrent
+// handlers running. When limiter is nil, there is no limit.
+//
+// Typically, this function will be used with `FetchHandler`, often composed
+// with other handlers.
+//
+// If any handler returns an error, the dispatch session will be canceled.
+func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { + eg, ctx2 := errgroup.WithContext(ctx) + for _, desc := range descs { + desc := desc + + if limiter != nil { + if err := limiter.Acquire(ctx, 1); err != nil { + return err + } + } + + eg.Go(func() error { + desc := desc + + children, err := handler.Handle(ctx2, desc) + if limiter != nil { + limiter.Release(1) + } + if err != nil { + if errors.Is(err, ErrSkipDesc) { + return nil // don't traverse the children. + } + return err + } + + if len(children) > 0 { + return Dispatch(ctx2, handler, limiter, children...) + } + + return nil + }) + } + + return eg.Wait() +} + +// ChildrenHandler decodes well-known manifest types and returns their children. +// +// This is useful for supporting recursive fetch and other use cases where you +// want to do a full walk of resources. +// +// One can also replace this with another implementation to allow descending of +// arbitrary types. +func ChildrenHandler(provider content.Provider) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + return Children(ctx, provider, desc) + } +} + +// SetChildrenLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { + return SetChildrenMappedLabels(manager, f, nil) +} + +// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +// The label map allows the caller to control the labels per child descriptor. +// For returned labels, the index of the child will be appended to the end +// except for the first index when the returned label does not end with '.'. +func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc { + if labelMap == nil { + labelMap = ChildGCLabels + } + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + if len(children) > 0 { + var ( + info = content.Info{ + Digest: desc.Digest, + Labels: map[string]string{}, + } + fields = []string{} + keys = map[string]uint{} + ) + for _, ch := range children { + labelKeys := labelMap(ch) + for _, key := range labelKeys { + idx := keys[key] + keys[key] = idx + 1 + if idx > 0 || key[len(key)-1] == '.' { + key = fmt.Sprintf("%s%d", key, idx) + } + + info.Labels[key] = ch.Digest.String() + fields = append(fields, "labels."+key) + } + } + + _, err := manager.Update(ctx, info, fields...) + if err != nil { + return nil, err + } + } + + return children, err + } +} + +// FilterPlatforms is a handler wrapper which limits the descriptors returned +// based on matching the specified platform matcher. 
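The combinators above compose: `ChildrenHandler` enumerates a descriptor's children, `FilterPlatforms` (defined next) prunes them, and a recording handler can be chained in front with `Handlers`. A sketch that collects every descriptor reachable for the host platform, mirroring the pattern `Image.Size` uses below:

```go
package main

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// collectPlatformDescs walks the manifest tree rooted at target and returns
// every descriptor reachable for the default (host) platform.
func collectPlatformDescs(ctx context.Context, provider content.Provider, target ocispec.Descriptor) ([]ocispec.Descriptor, error) {
	var out []ocispec.Descriptor

	// records each visited node; returns no children of its own
	collect := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		out = append(out, desc)
		return nil, nil
	})

	// record first, then enumerate children filtered to the host platform
	handler := images.Handlers(
		collect,
		images.FilterPlatforms(images.ChildrenHandler(provider), platforms.Default()),
	)

	return out, images.Walk(ctx, handler, target)
}
```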
+func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + var descs []ocispec.Descriptor + + if m == nil { + descs = children + } else { + for _, d := range children { + if d.Platform == nil || m.Match(*d.Platform) { + descs = append(descs, d) + } + } + } + + return descs, nil + } +} + +// LimitManifests is a handler wrapper which filters the manifest descriptors +// returned using the provided platform. +// The results will be ordered according to the comparison operator and +// use the ordering in the manifests for equal matches. +// A limit of 0 or less is considered no limit. +// A not found error is returned if no manifest is matched. +func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + switch desc.MediaType { + case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: + sort.SliceStable(children, func(i, j int) bool { + if children[i].Platform == nil { + return false + } + if children[j].Platform == nil { + return true + } + return m.Less(*children[i].Platform, *children[j].Platform) + }) + + if n > 0 { + if len(children) == 0 { + return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound) + } + if len(children) > n { + children = children[:n] + } + } + default: + // only limit manifests from an index + } + return children, nil + } +} diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go new file mode 100644 index 000000000..2d2e36a9a --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -0,0 +1,440 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Image provides the model for how containerd views container images. +type Image struct { + // Name of the image. + // + // To be pulled, it must be a reference compatible with resolvers. + // + // This field is required. + Name string + + // Labels provide runtime decoration for the image record. + // + // There is no default behavior for how these labels are propagated. They + // only decorate the static metadata object. + // + // This field is optional. + Labels map[string]string + + // Target describes the root content for this image. Typically, this is + // a manifest, index or manifest list. 
+	Target ocispec.Descriptor
+
+	CreatedAt, UpdatedAt time.Time
+}
+
+// DeleteOptions provide options on image delete
+type DeleteOptions struct {
+	Synchronous bool
+}
+
+// DeleteOpt allows configuring a delete operation
+type DeleteOpt func(context.Context, *DeleteOptions) error
+
+// SynchronousDelete is used to indicate that an image deletion and removal of
+// the image resources should occur synchronously before returning a result.
+func SynchronousDelete() DeleteOpt {
+	return func(ctx context.Context, o *DeleteOptions) error {
+		o.Synchronous = true
+		return nil
+	}
+}
+
+// Store and interact with images
+type Store interface {
+	Get(ctx context.Context, name string) (Image, error)
+	List(ctx context.Context, filters ...string) ([]Image, error)
+	Create(ctx context.Context, image Image) (Image, error)
+
+	// Update will replace the data in the store with the provided image. If
+	// one or more fieldpaths are provided, only those fields will be updated.
+	Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)
+
+	Delete(ctx context.Context, name string, opts ...DeleteOpt) error
+}
+
+// TODO(stevvooe): Many of these functions make strong platform assumptions,
+// which are untrue in a lot of cases. More refactoring must be done here to
+// make this work in all cases.
+
+// Config resolves the image configuration descriptor.
+//
+// The caller can then use the descriptor to resolve and process the
+// configuration of the image.
+func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
+	return Config(ctx, provider, image.Target, platform)
+}
+
+// RootFS returns the unpacked diffids that make up an image's rootfs.
+//
+// These are used to verify that a set of layers unpacked to the expected
+// values.
+func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) {
+	desc, err := image.Config(ctx, provider, platform)
+	if err != nil {
+		return nil, err
+	}
+	return RootFS(ctx, provider, desc)
+}
+
+// Size returns the total size of an image's packed resources.
+func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) {
+	var size int64
+	return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if desc.Size < 0 {
+			return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
+		}
+		size += desc.Size
+		return nil, nil
+	}), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target)
+}
+
+type platformManifest struct {
+	p *ocispec.Platform
+	m *ocispec.Manifest
+}
+
+// Manifest resolves a manifest from the image for the given platform.
+//
+// When a manifest descriptor inside of a manifest index does not have
+// a platform defined, the platform from the image config is considered.
+//
+// If the descriptor points to a non-index manifest, then the manifest is
+// unmarshalled and returned without considering the platform inside of the
+// config.
+//
+// TODO(stevvooe): This violates the current platform agnostic approach to this
+// package by returning a specific manifest type. We'll need to refactor this
+// to return a manifest descriptor or decide that we want to bring the API in
+// this direction because this abstraction is not needed.
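+//
+// Illustrative sketch only (assumes a content.Provider named store and an
+// Image value named img): resolve the manifest matching the host platform:
+//
+//	m, err := Manifest(ctx, store, img.Target, platforms.Default())
+//	if err != nil {
+//		return err
+//	}
+//	_ = m.Layers // layer descriptors for the matched platform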
+func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) {
+	var (
+		limit    = 1
+		m        []platformManifest
+		wasIndex bool
+	)
+
+	if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		switch desc.MediaType {
+		case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+			p, err := content.ReadBlob(ctx, provider, desc)
+			if err != nil {
+				return nil, err
+			}
+
+			if err := validateMediaType(p, desc.MediaType); err != nil {
+				return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
+			}
+
+			var manifest ocispec.Manifest
+			if err := json.Unmarshal(p, &manifest); err != nil {
+				return nil, err
+			}
+
+			if desc.Digest != image.Digest && platform != nil {
+				if desc.Platform != nil && !platform.Match(*desc.Platform) {
+					return nil, nil
+				}
+
+				if desc.Platform == nil {
+					p, err := content.ReadBlob(ctx, provider, manifest.Config)
+					if err != nil {
+						return nil, err
+					}
+
+					var image ocispec.Image
+					if err := json.Unmarshal(p, &image); err != nil {
+						return nil, err
+					}
+
+					if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) {
+						return nil, nil
+					}
+
+				}
+			}
+
+			m = append(m, platformManifest{
+				p: desc.Platform,
+				m: &manifest,
+			})
+
+			return nil, nil
+		case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+			p, err := content.ReadBlob(ctx, provider, desc)
+			if err != nil {
+				return nil, err
+			}
+
+			if err := validateMediaType(p, desc.MediaType); err != nil {
+				return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
+			}
+
+			var idx ocispec.Index
+			if err := json.Unmarshal(p, &idx); err != nil {
+				return nil, err
+			}
+
+			if platform == nil {
+				return idx.Manifests, nil
+			}
+
+			var descs []ocispec.Descriptor
+			for _, d := range idx.Manifests {
+				if d.Platform == nil || platform.Match(*d.Platform) {
+					descs = append(descs, d)
+				}
+			}
+
+			sort.SliceStable(descs, func(i, j int) bool {
+				if descs[i].Platform == nil {
+					return false
+				}
+				if descs[j].Platform == nil {
+					return true
+				}
+				return platform.Less(*descs[i].Platform, *descs[j].Platform)
+			})
+
+			wasIndex = true
+
+			if len(descs) > limit {
+				return descs[:limit], nil
+			}
+			return descs, nil
+		}
+		return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound)
+	}), image); err != nil {
+		return ocispec.Manifest{}, err
+	}
+
+	if len(m) == 0 {
+		err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound)
+		if wasIndex {
+			err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound)
+		}
+		return ocispec.Manifest{}, err
+	}
+	return *m[0].m, nil
+}
+
+// Config resolves the image configuration descriptor using a content provider
+// to resolve child resources on the image.
+//
+// The caller can then use the descriptor to resolve and process the
+// configuration of the image.
+func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
+	manifest, err := Manifest(ctx, provider, image, platform)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	return manifest.Config, err
+}
+
+// Platforms returns one or more platforms supported by the image.
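+//
+// Illustrative sketch only (assumes a content.Provider named store and an
+// Image value named img): print every platform referenced by an image index:
+//
+//	ps, err := Platforms(ctx, store, img.Target)
+//	if err != nil {
+//		return err
+//	}
+//	for _, p := range ps {
+//		fmt.Println(platforms.Format(p))
+//	}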
+func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) {
+	var platformSpecs []ocispec.Platform
+	return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if desc.Platform != nil {
+			platformSpecs = append(platformSpecs, *desc.Platform)
+			return nil, ErrSkipDesc
+		}
+
+		switch desc.MediaType {
+		case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+			p, err := content.ReadBlob(ctx, provider, desc)
+			if err != nil {
+				return nil, err
+			}
+
+			var image ocispec.Image
+			if err := json.Unmarshal(p, &image); err != nil {
+				return nil, err
+			}
+
+			platformSpecs = append(platformSpecs,
+				platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture}))
+		}
+		return nil, nil
+	}), ChildrenHandler(provider)), image)
+}
+
+// Check returns nil if all components of an image are available in the
+// provider for the specified platform.
+//
+// If available is true, the caller can assume that required represents the
+// complete set of content required for the image.
+//
+// missing will have the components that are part of required but not available
+// in the provider.
+//
+// If there is a problem resolving content, an error will be returned.
+func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) {
+	mfst, err := Manifest(ctx, provider, image, platform)
+	if err != nil {
+		if errdefs.IsNotFound(err) {
+			return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
+		}
+
+		return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err)
+	}
+
+	// TODO(stevvooe): It is possible that referenced components could have
+	// children, but this is rare. For now, we ignore this and only verify
+	// that manifest components are present.
+	required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...)
+
+	for _, desc := range required {
+		ra, err := provider.ReaderAt(ctx, desc)
+		if err != nil {
+			if errdefs.IsNotFound(err) {
+				missing = append(missing, desc)
+				continue
+			} else {
+				return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err)
+			}
+		}
+		ra.Close()
+		present = append(present, desc)
+
+	}
+
+	return true, required, present, missing, nil
+}
+
+// Children returns the immediate children of content described by the descriptor.
+func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	var descs []ocispec.Descriptor
+	switch desc.MediaType {
+	case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		p, err := content.ReadBlob(ctx, provider, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := validateMediaType(p, desc.MediaType); err != nil {
+			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
+		}
+
+		// TODO(stevvooe): We just assume oci manifest, for now. There may be
+		// subtle differences from the docker version.
+		var manifest ocispec.Manifest
+		if err := json.Unmarshal(p, &manifest); err != nil {
+			return nil, err
+		}
+
+		descs = append(descs, manifest.Config)
+		descs = append(descs, manifest.Layers...)
+	case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		p, err := content.ReadBlob(ctx, provider, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := validateMediaType(p, desc.MediaType); err != nil {
+			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
+		}
+
+		var index ocispec.Index
+		if err := json.Unmarshal(p, &index); err != nil {
+			return nil, err
+		}
+
+		descs = append(descs, index.Manifests...)
+	default:
+		if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) {
+			// childless data types.
+			return nil, nil
+		}
+		log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType)
+	}
+
+	return descs, nil
+}
+
+// unknownDocument represents a manifest, manifest list, or index that has not
+// yet been validated.
+type unknownDocument struct {
+	MediaType string          `json:"mediaType,omitempty"`
+	Config    json.RawMessage `json:"config,omitempty"`
+	Layers    json.RawMessage `json:"layers,omitempty"`
+	Manifests json.RawMessage `json:"manifests,omitempty"`
+	FSLayers  json.RawMessage `json:"fsLayers,omitempty"` // schema 1
+}
+
+// validateMediaType returns an error if the byte slice is invalid JSON or if
+// the media type identifies the blob as one format but it contains elements of
+// another format.
+func validateMediaType(b []byte, mt string) error {
+	var doc unknownDocument
+	if err := json.Unmarshal(b, &doc); err != nil {
+		return err
+	}
+	if len(doc.FSLayers) != 0 {
+		return fmt.Errorf("media-type: schema 1 not supported")
+	}
+	switch mt {
+	case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		if len(doc.Manifests) != 0 ||
+			doc.MediaType == MediaTypeDockerSchema2ManifestList ||
+			doc.MediaType == ocispec.MediaTypeImageIndex {
+			return fmt.Errorf("media-type: expected manifest but found index (%s)", mt)
+		}
+	case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		if len(doc.Config) != 0 || len(doc.Layers) != 0 ||
+			doc.MediaType == MediaTypeDockerSchema2Manifest ||
+			doc.MediaType == ocispec.MediaTypeImageManifest {
+			return fmt.Errorf("media-type: expected index but found manifest (%s)", mt)
+		}
+	}
+	return nil
+}
+
+// RootFS returns the unpacked diffids that make up an image's rootfs.
+//
+// These are used to verify that a set of layers unpacked to the expected
+// values.
+func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) {
+	p, err := content.ReadBlob(ctx, provider, configDesc)
+	if err != nil {
+		return nil, err
+	}
+
+	var config ocispec.Image
+	if err := json.Unmarshal(p, &config); err != nil {
+		return nil, err
+	}
+	return config.RootFS.DiffIDs, nil
+}
diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go
new file mode 100644
index 000000000..843adcadc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/importexport.go
@@ -0,0 +1,37 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Importer is the interface for image importer. +type Importer interface { + // Import imports an image from a tar stream. + Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) +} + +// Exporter is the interface for image exporter. +type Exporter interface { + // Export exports an image to a tar stream. + Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error +} diff --git a/vendor/github.com/containerd/containerd/images/labels.go b/vendor/github.com/containerd/containerd/images/labels.go new file mode 100644 index 000000000..06dfed572 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/labels.go @@ -0,0 +1,21 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +const ( + ConvertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1" +) diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go new file mode 100644 index 000000000..067963bab --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -0,0 +1,215 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/containerd/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// mediatype definitions for image components handled in containerd. +// +// oci components are generally referenced directly, although we may centralize +// here for clarity. 
+const ( + MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" + MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" + MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" + MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" + MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + + // Checkpoint/Restore Media Types + + MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" + MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" + MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" + MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" + MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" + MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" + MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" + MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" + + // MediaTypeDockerSchema1Manifest is the legacy Docker schema1 manifest + MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + + // Encrypted media types + + MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" + MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" +) + +// DiffCompression returns the compression as defined by the layer diff media +// type. For Docker media types without compression, "unknown" is returned to +// indicate that the media type may be compressed. If the media type is not +// recognized as a layer diff, then it returns errdefs.ErrNotImplemented +func DiffCompression(ctx context.Context, mediaType string) (string, error) { + base, ext := parseMediaTypes(mediaType) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + // These media types may have been compressed but failed to + // use the correct media type. The decompression function + // should detect and handle this case. + return "unknown", nil + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + return "gzip", nil + case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: + if len(ext) > 0 { + switch ext[len(ext)-1] { + case "gzip": + return "gzip", nil + case "zstd": + return "zstd", nil + } + } + return "", nil + default: + return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented) + } +} + +// parseMediaTypes splits the media type into the base type and +// an array of sorted extensions +func parseMediaTypes(mt string) (mediaType string, suffixes []string) { + if mt == "" { + return "", []string{} + } + mediaType, ext, ok := strings.Cut(mt, "+") + if !ok { + return mediaType, []string{} + } + + // Splitting the extensions following the mediatype "(+)gzip+encrypted". 
+ // We expect this to be a limited list, so add an arbitrary limit (50). + // + // Note that DiffCompression is only using the last element, so perhaps we + // should split on the last "+" only. + suffixes = strings.SplitN(ext, "+", 50) + sort.Strings(suffixes) + return mediaType, suffixes +} + +// IsNonDistributable returns true if the media type is non-distributable. +func IsNonDistributable(mt string) bool { + return strings.HasPrefix(mt, "application/vnd.oci.image.layer.nondistributable.") || + strings.HasPrefix(mt, "application/vnd.docker.image.rootfs.foreign.") +} + +// IsLayerType returns true if the media type is a layer +func IsLayerType(mt string) bool { + if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { + return true + } + + // Parse Docker media types, strip off any + suffixes first + switch base, _ := parseMediaTypes(mt); base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: + return true + } + return false +} + +// IsDockerType returns true if the media type has "application/vnd.docker." prefix +func IsDockerType(mt string) bool { + return strings.HasPrefix(mt, "application/vnd.docker.") +} + +// IsManifestType returns true if the media type is an OCI-compatible manifest. +// No support for schema1 manifest. +func IsManifestType(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + return true + default: + return false + } +} + +// IsIndexType returns true if the media type is an OCI-compatible index. +func IsIndexType(mt string) bool { + switch mt { + case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: + return true + default: + return false + } +} + +// IsConfigType returns true if the media type is an OCI-compatible image config. +// No support for containerd checkpoint configs. +func IsConfigType(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + return true + default: + return false + } +} + +// IsKnownConfig returns true if the media type is a known config type, +// including containerd checkpoint configs +func IsKnownConfig(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: + return true + } + return false +} + +// ChildGCLabels returns the label for a given descriptor to reference it +func ChildGCLabels(desc ocispec.Descriptor) []string { + mt := desc.MediaType + if IsKnownConfig(mt) { + return []string{"containerd.io/gc.ref.content.config"} + } + + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + return []string{"containerd.io/gc.ref.content.m."} + } + + if IsLayerType(mt) { + return []string{"containerd.io/gc.ref.content.l."} + } + + return []string{"containerd.io/gc.ref.content."} +} + +// ChildGCLabelsFilterLayers returns the labels for a given descriptor to +// reference it, skipping layer media types +func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string { + if IsLayerType(desc.MediaType) { + return nil + } + return ChildGCLabels(desc) +} diff --git a/vendor/github.com/containerd/containerd/labels/labels.go b/vendor/github.com/containerd/containerd/labels/labels.go new file mode 100644 index 000000000..0f9bab5c5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/labels/labels.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package labels
+
+// LabelUncompressed is added to compressed layer contents.
+// The value is the digest of the uncompressed content.
+const LabelUncompressed = "containerd.io/uncompressed"
+
+// LabelSharedNamespace is added to a namespace to allow that namespace's
+// contents to be shared.
+const LabelSharedNamespace = "containerd.io/namespace.shareable"
+
+// LabelDistributionSource is added to content to indicate its origin.
+// e.g., "containerd.io/distribution.source.docker.io=library/redis"
+const LabelDistributionSource = "containerd.io/distribution.source"
diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go
new file mode 100644
index 000000000..f83b5dde2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/labels/validate.go
@@ -0,0 +1,41 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package labels
+
+import (
+	"fmt"
+
+	"github.com/containerd/containerd/errdefs"
+)
+
+const (
+	maxSize = 4096
+	// maximum length of key portion of error message if len of key + len of value > maxSize
+	keyMaxLen = 64
+)
+
+// Validate that a label's key and value together are at most 4096 bytes.
+func Validate(k, v string) error {
+	total := len(k) + len(v)
+	if total > maxSize {
+		if len(k) > keyMaxLen {
+			k = k[:keyMaxLen]
+		}
+		return fmt.Errorf("label key and value length (%d bytes) greater than maximum size (%d bytes), key: %s: %w", total, maxSize, k, errdefs.ErrInvalidArgument)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/log/context_deprecated.go b/vendor/github.com/containerd/containerd/log/context_deprecated.go
new file mode 100644
index 000000000..9e9e8b491
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/log/context_deprecated.go
@@ -0,0 +1,149 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package log + +import ( + "context" + + "github.com/containerd/log" +) + +// G is a shorthand for [GetLogger]. +// +// Deprecated: use [log.G]. +var G = log.G + +// L is an alias for the standard logger. +// +// Deprecated: use [log.L]. +var L = log.L + +// Fields type to pass to "WithFields". +// +// Deprecated: use [log.Fields]. +type Fields = log.Fields + +// Entry is a logging entry. +// +// Deprecated: use [log.Entry]. +type Entry = log.Entry + +// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using +// zeros to ensure the formatted time is always the same number of +// characters. +// +// Deprecated: use [log.RFC3339NanoFixed]. +const RFC3339NanoFixed = log.RFC3339NanoFixed + +// Level is a logging level. +// +// Deprecated: use [log.Level]. +type Level = log.Level + +// Supported log levels. +const ( + // TraceLevel level. + // + // Deprecated: use [log.TraceLevel]. + TraceLevel Level = log.TraceLevel + + // DebugLevel level. + // + // Deprecated: use [log.DebugLevel]. + DebugLevel Level = log.DebugLevel + + // InfoLevel level. + // + // Deprecated: use [log.InfoLevel]. + InfoLevel Level = log.InfoLevel + + // WarnLevel level. + // + // Deprecated: use [log.WarnLevel]. + WarnLevel Level = log.WarnLevel + + // ErrorLevel level + // + // Deprecated: use [log.ErrorLevel]. + ErrorLevel Level = log.ErrorLevel + + // FatalLevel level. + // + // Deprecated: use [log.FatalLevel]. + FatalLevel Level = log.FatalLevel + + // PanicLevel level. + // + // Deprecated: use [log.PanicLevel]. + PanicLevel Level = log.PanicLevel +) + +// SetLevel sets log level globally. It returns an error if the given +// level is not supported. +// +// Deprecated: use [log.SetLevel]. +func SetLevel(level string) error { + return log.SetLevel(level) +} + +// GetLevel returns the current log level. +// +// Deprecated: use [log.GetLevel]. +func GetLevel() log.Level { + return log.GetLevel() +} + +// OutputFormat specifies a log output format. +// +// Deprecated: use [log.OutputFormat]. +type OutputFormat = log.OutputFormat + +// Supported log output formats. +const ( + // TextFormat represents the text logging format. + // + // Deprecated: use [log.TextFormat]. + TextFormat log.OutputFormat = "text" + + // JSONFormat represents the JSON logging format. + // + // Deprecated: use [log.JSONFormat]. + JSONFormat log.OutputFormat = "json" +) + +// SetFormat sets the log output format. +// +// Deprecated: use [log.SetFormat]. +func SetFormat(format OutputFormat) error { + return log.SetFormat(format) +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +// +// Deprecated: use [log.WithLogger]. +func WithLogger(ctx context.Context, logger *log.Entry) context.Context { + return log.WithLogger(ctx, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +// +// Deprecated: use [log.GetLogger]. +func GetLogger(ctx context.Context) *log.Entry { + return log.GetLogger(ctx) +} diff --git a/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go b/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go new file mode 100644 index 000000000..f4b657d7d --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go @@ -0,0 +1,48 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package randutil provides utilities for [crypto/rand].
+package randutil
+
+import (
+	"crypto/rand"
+	"math"
+	"math/big"
+)
+
+// Int63n is similar to [math/rand.Int63n] but uses [crypto/rand.Reader] under the hood.
+func Int63n(n int64) int64 {
+	b, err := rand.Int(rand.Reader, big.NewInt(n))
+	if err != nil {
+		panic(err)
+	}
+	return b.Int64()
+}
+
+// Int63 is similar to [math/rand.Int63] but uses [crypto/rand.Reader] under the hood.
+func Int63() int64 {
+	return Int63n(math.MaxInt64)
+}
+
+// Intn is similar to [math/rand.Intn] but uses [crypto/rand.Reader] under the hood.
+func Intn(n int) int {
+	return int(Int63n(int64(n)))
+}
+
+// Int is similar to [math/rand.Int] but uses [crypto/rand.Reader] under the hood.
+func Int() int {
+	return int(Int63())
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go
new file mode 100644
index 000000000..3913ef663
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/compare.go
@@ -0,0 +1,203 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"strconv"
+	"strings"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// MatchComparer is able to match and compare platforms to
+// filter and sort platforms.
+type MatchComparer interface {
+	Matcher
+
+	Less(specs.Platform, specs.Platform) bool
+}
+
+// platformVector returns an (ordered) vector of appropriate specs.Platform
+// objects to try matching for the given platform object (see platforms.Only).
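+//
+// For example (illustrative sketch, package-internal): linux/arm/v7 expands,
+// in order, to linux/arm/v7, linux/arm/v6 and linux/arm/v5:
+//
+//	for _, p := range platformVector(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"}) {
+//		fmt.Println(Format(p))
+//	}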
+func platformVector(platform specs.Platform) []specs.Platform {
+	vector := []specs.Platform{platform}
+
+	switch platform.Architecture {
+	case "amd64":
+		if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 {
+			for amd64Version--; amd64Version >= 1; amd64Version-- {
+				vector = append(vector, specs.Platform{
+					Architecture: platform.Architecture,
+					OS:           platform.OS,
+					OSVersion:    platform.OSVersion,
+					OSFeatures:   platform.OSFeatures,
+					Variant:      "v" + strconv.Itoa(amd64Version),
+				})
+			}
+		}
+		vector = append(vector, specs.Platform{
+			Architecture: "386",
+			OS:           platform.OS,
+			OSVersion:    platform.OSVersion,
+			OSFeatures:   platform.OSFeatures,
+		})
+	case "arm":
+		if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 {
+			for armVersion--; armVersion >= 5; armVersion-- {
+				vector = append(vector, specs.Platform{
+					Architecture: platform.Architecture,
+					OS:           platform.OS,
+					OSVersion:    platform.OSVersion,
+					OSFeatures:   platform.OSFeatures,
+					Variant:      "v" + strconv.Itoa(armVersion),
+				})
+			}
+		}
+	case "arm64":
+		variant := platform.Variant
+		if variant == "" {
+			variant = "v8"
+		}
+		vector = append(vector, platformVector(specs.Platform{
+			Architecture: "arm",
+			OS:           platform.OS,
+			OSVersion:    platform.OSVersion,
+			OSFeatures:   platform.OSFeatures,
+			Variant:      variant,
+		})...)
+	}
+
+	return vector
+}
+
+// Only returns a match comparer for a single platform
+// using default resolution logic for the platform.
+//
+// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
+// For arm/v7, will also match arm/v6 and arm/v5
+// For arm/v6, will also match arm/v5
+// For amd64, will also match 386
+func Only(platform specs.Platform) MatchComparer {
+	return Ordered(platformVector(Normalize(platform))...)
+}
+
+// OnlyStrict returns a match comparer for a single platform.
+//
+// Unlike Only, OnlyStrict does not match sub platforms.
+// So, "arm/vN" will not match "arm/vM" where M < N,
+// and "amd64" will not also match "386".
+//
+// OnlyStrict matches non-canonical forms.
+// So, "arm64" matches "arm64/v8".
+func OnlyStrict(platform specs.Platform) MatchComparer {
+	return Ordered(Normalize(platform))
+}
+
+// Ordered returns a platform MatchComparer which matches any of the platforms
+// but orders them in the order they are provided.
+func Ordered(platforms ...specs.Platform) MatchComparer {
+	matchers := make([]Matcher, len(platforms))
+	for i := range platforms {
+		matchers[i] = NewMatcher(platforms[i])
+	}
+	return orderedPlatformComparer{
+		matchers: matchers,
+	}
+}
+
+// Any returns a platform MatchComparer which matches any of the platforms
+// with no preference for ordering.
+func Any(platforms ...specs.Platform) MatchComparer {
+	matchers := make([]Matcher, len(platforms))
+	for i := range platforms {
+		matchers[i] = NewMatcher(platforms[i])
+	}
+	return anyPlatformComparer{
+		matchers: matchers,
+	}
+}
+
+// All is a platform MatchComparer which matches all platforms
+// with preference for ordering.
+var All MatchComparer = allPlatformComparer{}
+
+type orderedPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
+	for _, m := range c.matchers {
+		p1m := m.Match(p1)
+		p2m := m.Match(p2)
+		if p1m && !p2m {
+			return true
+		}
+		if p1m || p2m {
+			return false
+		}
+	}
+	return false
+}
+
+type anyPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c anyPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
+	var p1m, p2m bool
+	for _, m := range c.matchers {
+		if !p1m && m.Match(p1) {
+			p1m = true
+		}
+		if !p2m && m.Match(p2) {
+			p2m = true
+		}
+		if p1m && p2m {
+			return false
+		}
+	}
+	// If one matches and the other does not, sort the match first.
+	return p1m && !p2m
}
+
+type allPlatformComparer struct{}
+
+func (allPlatformComparer) Match(specs.Platform) bool {
+	return true
+}
+
+func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
+	return false
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
new file mode 100644
index 000000000..8c600fc96
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
@@ -0,0 +1,43 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+	"sync"
+
+	"github.com/containerd/containerd/log"
+)
+
+// cpuVariantValue presents the ARM instruction set architecture, e.g.: v7, v8.
+// Don't use this value directly; call cpuVariant() instead.
+var cpuVariantValue string
+
+var cpuVariantOnce sync.Once
+
+func cpuVariant() string {
+	cpuVariantOnce.Do(func() {
+		if isArmArch(runtime.GOARCH) {
+			var err error
+			cpuVariantValue, err = getCPUVariant()
+			if err != nil {
+				log.L.Errorf("Error getCPUVariant for OS %s: %v", runtime.GOOS, err)
+			}
+		}
+	})
+	return cpuVariantValue
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go
new file mode 100644
index 000000000..722d86c35
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go
@@ -0,0 +1,161 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	"golang.org/x/sys/unix"
+)
+
+// getMachineArch retrieves the machine architecture through a system call
+func getMachineArch() (string, error) {
+	var uname unix.Utsname
+	err := unix.Uname(&uname)
+	if err != nil {
+		return "", err
+	}
+
+	arch := string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)])
+
+	return arch, nil
+}
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+
+	cpuinfo, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return "", err
+	}
+	defer cpuinfo.Close()
+
+	// Parse /proc/cpuinfo line by line. For SMP SoCs, parsing
+	// the first core is enough.
+	scanner := bufio.NewScanner(cpuinfo)
+	for scanner.Scan() {
+		newline := scanner.Text()
+		list := strings.Split(newline, ":")
+
+		if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+			return strings.TrimSpace(list[1]), nil
+		}
+	}
+
+	// Check whether the scanner encountered errors
+	err = scanner.Err()
+	if err != nil {
+		return "", err
+	}
+
+	return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errdefs.ErrNotFound)
+}
+
+// getCPUVariantFromArch gets the CPU variant from an arch string obtained
+// through a system call
+func getCPUVariantFromArch(arch string) (string, error) {
+
+	var variant string
+
+	arch = strings.ToLower(arch)
+
+	if arch == "aarch64" {
+		variant = "8"
+	} else if arch[0:4] == "armv" && len(arch) >= 5 {
+		// Valid arch format is in the form armvXx
		switch arch[3:5] {
+		case "v8":
+			variant = "8"
+		case "v7":
+			variant = "7"
+		case "v6":
+			variant = "6"
+		case "v5":
+			variant = "5"
+		case "v4":
+			variant = "4"
+		case "v3":
+			variant = "3"
+		default:
+			variant = "unknown"
+		}
+	} else {
+		return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errdefs.ErrInvalidArgument)
+	}
+	return variant, nil
+}
+
+// getCPUVariant returns the CPU variant for ARM.
+// We first try reading the "Cpu architecture" field from /proc/cpuinfo;
+// if we can't find it, we fall back to a system call. This covers running
+// ARM in an emulated environment on an x86 host, where this field is not
+// present in /proc/cpuinfo.
+func getCPUVariant() (string, error) { + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + if errdefs.IsNotFound(err) { + //Let's try getting CPU variant from machine architecture + arch, err := getMachineArch() + if err != nil { + return "", fmt.Errorf("failure getting machine architecture: %v", err) + } + + variant, err = getCPUVariantFromArch(arch) + if err != nil { + return "", fmt.Errorf("failure getting CPU variant from machine architecture: %v", err) + } + } else { + return "", fmt.Errorf("failure getting CPU variant: %v", err) + } + } + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant, nil +} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go new file mode 100644 index 000000000..fa5f19c42 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go @@ -0,0 +1,59 @@ +//go:build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" + + "github.com/containerd/containerd/errdefs" +) + +func getCPUVariant() (string, error) { + + var variant string + + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + } else if runtime.GOOS == "freebsd" { + // FreeBSD supports ARMv6 and ARMv7 as well as ARMv4 and ARMv5 (though deprecated) + // detecting those variants is currently unimplemented + switch runtime.GOARCH { + case "arm64": + variant = "v8" + default: + variant = "unknown" + } + + } else { + return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errdefs.ErrNotImplemented) + + } + + return variant, nil +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go new file mode 100644 index 000000000..2e26fd3b4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+	"strings"
+)
+
+// These functions are generated from https://golang.org/src/go/build/syslist.go.
+//
+// We use switch statements because they are slightly faster than map lookups
+// and use a little less memory.
+
+// isKnownOS returns true if we know about the operating system.
+//
+// The OS value should be normalized before calling this function.
+func isKnownOS(os string) bool {
+	switch os {
+	case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+		return true
+	}
+	return false
+}
+
+// isArmArch returns true if the architecture is ARM.
+//
+// The arch value should be normalized before being passed to this function.
+func isArmArch(arch string) bool {
+	switch arch {
+	case "arm", "arm64":
+		return true
+	}
+	return false
+}
+
+// isKnownArch returns true if we know about the architecture.
+//
+// The arch value should be normalized before being passed to this function.
+func isKnownArch(arch string) bool {
+	switch arch {
+	case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
+		return true
+	}
+	return false
+}
+
+// normalizeOS normalizes the operating system, defaulting to runtime.GOOS.
+func normalizeOS(os string) string {
+	if os == "" {
+		return runtime.GOOS
+	}
+	os = strings.ToLower(os)
+
+	switch os {
+	case "macos":
+		os = "darwin"
+	}
+	return os
+}
+
+// normalizeArch normalizes the architecture.
+func normalizeArch(arch, variant string) (string, string) {
+	arch, variant = strings.ToLower(arch), strings.ToLower(variant)
+	switch arch {
+	case "i386":
+		arch = "386"
+		variant = ""
+	case "x86_64", "x86-64", "amd64":
+		arch = "amd64"
+		if variant == "v1" {
+			variant = ""
+		}
+	case "aarch64", "arm64":
+		arch = "arm64"
+		switch variant {
+		case "8", "v8":
+			variant = ""
+		}
+	case "armhf":
+		arch = "arm"
+		variant = "v7"
+	case "armel":
+		arch = "arm"
+		variant = "v6"
+	case "arm":
+		switch variant {
+		case "", "7":
+			variant = "v7"
+		case "5", "6", "8":
+			variant = "v" + variant
+		}
+	}
+
+	return arch, variant
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go
new file mode 100644
index 000000000..cfa3ff34a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/defaults.go
@@ -0,0 +1,27 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultStrict returns strict form of Default. +func DefaultStrict() MatchComparer { + return OnlyStrict(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go new file mode 100644 index 000000000..72355ca85 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go @@ -0,0 +1,44 @@ +//go:build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go new file mode 100644 index 000000000..d3fe89e07 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. 
+ Variant: cpuVariant(), + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 000000000..44acc47eb --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,40 @@ +//go:build !windows && !darwin && !freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go new file mode 100644 index 000000000..d10fa9012 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -0,0 +1,119 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" + "strconv" + "strings" + + "github.com/Microsoft/hcsshim/osversion" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +type windowsmatcher struct { + specs.Platform + osVersionPrefix string + defaultMatcher Matcher +} + +// Match matches platform with the same windows major, minor +// and build version. 
+func (m windowsmatcher) Match(p specs.Platform) bool { + match := m.defaultMatcher.Match(p) + + if match && m.OS == "windows" { + // HPC containers do not have OS version filled + if p.OSVersion == "" { + return true + } + + hostOsVersion := GetOsVersion(m.osVersionPrefix) + ctrOsVersion := GetOsVersion(p.OSVersion) + return osversion.CheckHostAndContainerCompat(hostOsVersion, ctrOsVersion) + } + + return match +} + +func GetOsVersion(osVersionPrefix string) osversion.OSVersion { + parts := strings.Split(osVersionPrefix, ".") + if len(parts) < 3 { + return osversion.OSVersion{} + } + + majorVersion, _ := strconv.Atoi(parts[0]) + minorVersion, _ := strconv.Atoi(parts[1]) + buildNumber, _ := strconv.Atoi(parts[2]) + + return osversion.OSVersion{ + MajorVersion: uint8(majorVersion), + MinorVersion: uint8(minorVersion), + Build: uint16(buildNumber), + } +} + +// Less sorts matched platforms in front of other platforms. +// For matched platforms, it puts platforms with larger revision +// number in front. +func (m windowsmatcher) Less(p1, p2 specs.Platform) bool { + m1, m2 := m.Match(p1), m.Match(p2) + if m1 && m2 { + r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) + return r1 > r2 + } + return m1 && !m2 +} + +func revision(v string) int { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return 0 + } + r, err := strconv.Atoi(parts[3]) + if err != nil { + return 0 + } + return r +} + +func prefix(v string) string { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return v + } + return strings.Join(parts[0:3], ".") +} + +// Default returns the current platform's default platform specification. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go new file mode 100644 index 000000000..56613b076 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -0,0 +1,276 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. +// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. With a specifier, users +// only need to specify the parts of the platform that are relevant to their +// context, providing an operating system or architecture or both. +// +// How do I use this package? +// +// The vast majority of use cases should simply use the match function with +// user input. The first step is to parse a specifier into a matcher: +// +// m, err := Parse("linux") +// if err != nil { ... } +// +// Once you have a matcher, use it to match against the platform declared by a +// component, typically from an image or runtime. 
Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+//	if ok := m.Match(Default()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes or used as a filter
+// when fetching and selecting images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// # Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, these will be
+// images and runtimes that should declare specifically which platforms they
+// support. This looks roughly as follows:
+//
+//	type Platform struct {
+//		Architecture string
+//		OS           string
+//		Variant      string
+//	}
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set the variant under certain
+// conditions, which are outlined below.
+//
+// # Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a
+// default runtime that matches this, the user can simply provide the
+// component that matters. For example, if an image provides amd64 and arm64
+// support, the operating system, `linux`, can be inferred, so they only have
+// to provide `arm64` or `amd64`. Similar behavior is implemented for
+// operating systems, where the architecture may be known but a runtime may
+// support images from different operating systems.
+//
+// # Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+//	Value    Normalized
+//	aarch64  arm64
+//	armhf    arm
+//	armel    arm/v6
+//	i386     386
+//	x86_64   amd64
+//	x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// # ARM Support
+//
+// To qualify the ARM architecture, the Variant field is used to specify the
+// arm version. The most common arm version, v7, is represented without the
+// variant unless it is explicitly provided. This is treated as equivalent to
+// armhf. A previous architecture, armel, will be normalized to arm/v6.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+	"fmt"
+	"path"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+
+	"github.com/containerd/containerd/errdefs"
+)
+
+var (
+	specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+)
+
+// Platform is a type alias for convenience, so there is no need to import image-spec package everywhere.
+type Platform = specs.Platform
+
+// Matcher matches platform specifications, provided by an image or runtime.
+type Matcher interface {
+	Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification.
The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+	return newDefaultMatcher(platform)
+}
+
+type matcher struct {
+	specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+	return Format(m.Platform)
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument)
+	}
+
+	parts := strings.Split(specifier, "/")
+
+	for _, part := range parts {
+		if !specifierRe.MatchString(part) {
+			return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument)
+		}
+	}
+
+	var p specs.Platform
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS, then look
+		// it up. If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
+		p.OS = normalizeOS(parts[0])
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" && cpuVariant() != "v7" {
+				p.Variant = cpuVariant()
+			}
+
+			if p.OS == "windows" {
+				p.OSVersion = GetWindowsOsVersion()
+			}
+
+			return p, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return p, nil
+		}
+
+		return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument)
+	case 2:
+		// In this case, we treat it as a regular os/arch pair. We don't care
+		// about whether or not we know of the platform.
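+		// e.g. "linux/amd64" or "windows/arm64" (illustrative examples).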
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+
+		if p.OS == "windows" {
+			p.OSVersion = GetWindowsOsVersion()
+		}
+
+		return p, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+		if p.Architecture == "arm64" && p.Variant == "" {
+			p.Variant = "v8"
+		}
+
+		if p.OS == "windows" {
+			p.OSVersion = GetWindowsOsVersion()
+		}
+
+		return p, nil
+	}
+
+	return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
+func MustParse(specifier string) specs.Platform {
+	p, err := Parse(specifier)
+	if err != nil {
+		panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+	}
+	return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+func Normalize(platform specs.Platform) specs.Platform {
+	platform.OS = normalizeOS(platform.OS)
+	platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
+
+	return platform
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/containerd/platforms/platforms_other.go
new file mode 100644
index 000000000..59beeb3d1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/platforms_other.go
@@ -0,0 +1,34 @@
+//go:build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// newDefaultMatcher returns the default Matcher for containerd
+func newDefaultMatcher(platform specs.Platform) Matcher {
+	return &matcher{
+		Platform: Normalize(platform),
+	}
+}
+
+func GetWindowsOsVersion() string {
+	return ""
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go
new file mode 100644
index 000000000..733d18dde
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go
@@ -0,0 +1,42 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" +) + +// NewMatcher returns a Windows matcher that will match on osVersionPrefix if +// the platform is Windows otherwise use the default matcher +func newDefaultMatcher(platform specs.Platform) Matcher { + prefix := prefix(platform.OSVersion) + return windowsmatcher{ + Platform: platform, + osVersionPrefix: prefix, + defaultMatcher: &matcher{ + Platform: Normalize(platform), + }, + } +} + +func GetWindowsOsVersion() string { + major, minor, build := windows.RtlGetNtVersionNumbers() + return fmt.Sprintf("%d.%d.%d", major, minor, build) +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/helpers.go b/vendor/github.com/containerd/containerd/reference/docker/helpers.go new file mode 100644 index 000000000..386025104 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/helpers.go @@ -0,0 +1,58 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/normalize.go b/vendor/github.com/containerd/containerd/reference/docker/normalize.go new file mode 100644 index 000000000..b299bf6c0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/normalize.go @@ -0,0 +1,196 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package docker
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	legacyDefaultDomain = "index.docker.io"
+	defaultDomain       = "docker.io"
+	officialRepoName    = "library"
+	defaultTag          = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+	Named
+	Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remoteName)
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. If the reference contains both
+// a tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name into domain and remote-name
+// strings. If no valid domain is found, the default domain is used. The
+// repository name must already be validated.
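+//
+// For example (illustrative, not part of the upstream comment):
+//
+//	splitDockerDomain("ubuntu")          -> ("docker.io", "library/ubuntu")
+//	splitDockerDomain("quay.io/foo/bar") -> ("quay.io", "foo/bar")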
+func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go new file mode 100644 index 000000000..4dc00474e --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -0,0 +1,453 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package docker provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+//	reference                       := name [ ":" tag ] [ "@" digest ]
+//	name                            := [domain '/'] path-component ['/' path-component]*
+//	domain                          := host [':' port-number]
+//	host                            := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A
+//	domain-name                     := domain-component ['.' domain-component]*
+//	domain-component                := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+//	port-number                     := /[0-9]+/
+//	path-component                  := alpha-numeric [separator alpha-numeric]*
+//	alpha-numeric                   := /[a-z0-9]+/
+//	separator                       := /[_.]|__|[-]*/
+//
+//	tag                             := /[\w][\w.-]{0,127}/
+//
+//	digest                          := digest-algorithm ":" digest-hex
+//	digest-algorithm                := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+//	digest-algorithm-separator      := /[+.-_]/
+//	digest-algorithm-component      := /[A-Za-z][A-Za-z0-9]*/
+//	digest-hex                      := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+//	identifier                      := /[a-f0-9]{64}/
+//	short-identifier                := /[a-f0-9]{6,64}/
package docker

+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+)
+
+const (
+	// NameTotalLengthMax is the maximum total number of characters in a repository name.
+	NameTotalLengthMax = 255
+)
+
+var (
+	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+	// ErrNameNotCanonical is returned when a name is not canonical.
+	ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
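+// Together with UnmarshalText below, this lets Field round-trip through
+// encoding/json and similar encoders (an illustrative note, not part of the
+// upstream comment).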
+func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. 
The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
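+// For example (illustrative, not part of the upstream comment), trimming
+// "docker.io/library/ubuntu:latest" yields "docker.io/library/ubuntu".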
+func TrimNamed(ref Named) Named { + repo := repository{} + if r, ok := ref.(namedRepository); ok { + repo.domain, repo.path = r.Domain(), r.Path() + } else { + repo.domain, repo.path = splitDomain(ref.Name()) + } + return repo +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/regexp.go b/vendor/github.com/containerd/containerd/reference/docker/regexp.go new file mode 100644 index 000000000..4be3c575e --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/regexp.go @@ -0,0 +1,191 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import "regexp" + +var ( + // alphaNumeric defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumeric = `[a-z0-9]+` + + // separator defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. Repeated dashes and underscores are intentionally treated + // differently. In order to support valid hostnames as name components, + // supporting repeated dash was added. 
Additionally double underscore is + // now allowed as a separator to loosen the restriction for previously + // supported names. + separator = `(?:[._]|__|[-]*)` + + // nameComponent restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponent = expression( + alphaNumeric, + optional(repeated(separator, alphaNumeric))) + + // domainNameComponent restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp. + domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` + + // ipv6address are enclosed between square brackets and may be represented + // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format + // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as + // IPv4-Mapped are deliberately excluded. + ipv6address = expression( + literal(`[`), `(?:[a-fA-F0-9:]+)`, literal(`]`), + ) + + // domainName defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. This includes IPv4 addresses on decimal format. + domainName = expression( + domainNameComponent, + optional(repeated(literal(`.`), domainNameComponent)), + ) + + // host defines the structure of potential domains based on the URI + // Host subcomponent on rfc3986. It may be a subset of DNS domain name, + // or an IPv4 address in decimal format, or an IPv6 address between square + // brackets (excluding zone identifiers as defined by rfc6874 or special + // addresses such as IPv4-Mapped). + host = `(?:` + domainName + `|` + ipv6address + `)` + + // allowed by the URI Host subcomponent on rfc3986 to ensure backwards + // compatibility with Docker image names. + domain = expression( + host, + optional(literal(`:`), `[0-9]+`)) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = regexp.MustCompile(domain) + + tag = `[\w][\w.-]{0,127}` + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = regexp.MustCompile(tag) + + anchoredTag = anchored(tag) + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = regexp.MustCompile(anchoredTag) + + digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + // DigestRegexp matches valid digests. + DigestRegexp = regexp.MustCompile(digestPat) + + anchoredDigest = anchored(digestPat) + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = regexp.MustCompile(anchoredDigest) + + namePat = expression( + optional(domain, literal(`/`)), + nameComponent, + optional(repeated(literal(`/`), nameComponent))) + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. 
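+	// For example (illustrative): both "ubuntu" and "docker.io/library/ubuntu"
+	// contain a match for this pattern.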
+ NameRegexp = regexp.MustCompile(namePat) + + anchoredName = anchored( + optional(capture(domain), literal(`/`)), + capture(nameComponent, + optional(repeated(literal(`/`), nameComponent)))) + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = regexp.MustCompile(anchoredName) + + referencePat = anchored(capture(namePat), + optional(literal(":"), capture(tag)), + optional(literal("@"), capture(digestPat))) + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = regexp.MustCompile(referencePat) + + identifier = `([a-f0-9]{64})` + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = regexp.MustCompile(identifier) + + shortIdentifier = `([a-f0-9]{6,64})` + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier) + + anchoredIdentifier = anchored(identifier) + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = regexp.MustCompile(anchoredIdentifier) +) + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) string { + re := regexp.MustCompile(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re.String() +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...string) string { + var s string + for _, re := range res { + s += re + } + + return s +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...string) string { + return group(expression(res...)) + `?` +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...string) string { + return group(expression(res...)) + `+` +} + +// group wraps the regexp in a non-capturing group. +func group(res ...string) string { + return `(?:` + expression(res...) + `)` +} + +// capture wraps the expression in a capturing group. +func capture(res ...string) string { + return `(` + expression(res...) + `)` +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...string) string { + return `^` + expression(res...) + `$` +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/sort.go b/vendor/github.com/containerd/containerd/reference/docker/sort.go new file mode 100644 index 000000000..984e37528 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/sort.go @@ -0,0 +1,73 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package docker
+
+import (
+	"sort"
+)
+
+// Sort sorts string references, preferring higher-information references.
+// The precedence is as follows:
+// 1. Name + Tag + Digest
+// 2. Name + Tag
+// 3. Name + Digest
+// 4. Name
+// 5. Digest
+// 6. Parse error
+func Sort(references []string) []string {
+	var prefs []Reference
+	var bad []string
+
+	for _, ref := range references {
+		pref, err := ParseAnyReference(ref)
+		if err != nil {
+			bad = append(bad, ref)
+		} else {
+			prefs = append(prefs, pref)
+		}
+	}
+	sort.Slice(prefs, func(a, b int) bool {
+		ar := refRank(prefs[a])
+		br := refRank(prefs[b])
+		if ar == br {
+			return prefs[a].String() < prefs[b].String()
+		}
+		return ar < br
+	})
+	sort.Strings(bad)
+	var refs []string
+	for _, pref := range prefs {
+		refs = append(refs, pref.String())
+	}
+	return append(refs, bad...)
+}
+
+func refRank(ref Reference) uint8 {
+	if _, ok := ref.(Named); ok {
+		if _, ok = ref.(Tagged); ok {
+			if _, ok = ref.(Digested); ok {
+				return 1
+			}
+			return 2
+		}
+		if _, ok = ref.(Digested); ok {
+			return 3
+		}
+		return 4
+	}
+	return 5
+}
diff --git a/vendor/github.com/containerd/containerd/reference/reference.go b/vendor/github.com/containerd/containerd/reference/reference.go
new file mode 100644
index 000000000..a4bf6da60
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/reference/reference.go
@@ -0,0 +1,166 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"path"
+	"regexp"
+	"strings"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrInvalid is returned when there is an invalid reference
+	ErrInvalid = errors.New("invalid reference")
+	// ErrObjectRequired is returned when the object is required
+	ErrObjectRequired = errors.New("object required")
+	// ErrHostnameRequired is returned when the hostname is required
+	ErrHostnameRequired = errors.New("hostname required")
+)
+
+// Spec defines the main components of a reference specification.
+//
+// A reference specification is a schema-less URI parsed into common
+// components. The two main components, locator and object, are required to be
+// supported by remotes. It represents a superset of the naming defined in
+// docker's reference schema. It aims to be compatible but not prescriptive.
+//
+// While the interpretation of the components, locator and object, are up to
+// the remote, we define a few common parts, accessible via helper methods.
+//
+// The first is the hostname, which is part of the locator. This doesn't need
+// to map to a physical resource, but it must parse as a hostname. We refer to
+// this as the namespace.
+//
+// The other component made accessible by helper method is the digest. This is
+// part of the object identifier, always prefixed with an '@'. If present, the
+// remote may use the digest portion directly or resolve it against a prefix.
+// If the object does not include the `@` symbol, the return value for `Digest`
+// will be empty.
+type Spec struct {
+	// Locator is the host and path portion of the specification. The host
+	// portion may refer to an actual host or just a namespace of related
+	// images.
+	//
+	// Typically, the locator may be used to resolve the remote to fetch
+	// specific resources.
+	Locator string
+
+	// Object contains the identifier for the remote resource. Classically,
+	// this is a tag but can refer to anything in a remote. By convention, any
+	// portion that may be a partial or whole digest will be preceded by an
+	// `@`. Anything preceding the `@` will be referred to as the "tag".
+	//
+	// In practice, we will see this broken down into the following formats:
+	//
+	// 1. <tag>
+	// 2. <tag>@<digest spec>
+	// 3. @<digest spec>
+	//
+	// We define the tag to be anything except '@' and ':'. <digest spec> may
+	// be a full valid digest or shortened version, possibly with elided
+	// algorithm.
+	Object string
+}
+
+var splitRe = regexp.MustCompile(`[:@]`)
+
+// Parse parses the string into a structured ref.
+func Parse(s string) (Spec, error) {
+	if strings.Contains(s, "://") {
+		return Spec{}, ErrInvalid
+	}
+
+	u, err := url.Parse("dummy://" + s)
+	if err != nil {
+		return Spec{}, err
+	}
+
+	if u.Scheme != "dummy" {
+		return Spec{}, ErrInvalid
+	}
+
+	if u.Host == "" {
+		return Spec{}, ErrHostnameRequired
+	}
+
+	var object string
+
+	if idx := splitRe.FindStringIndex(u.Path); idx != nil {
+		// This allows us to retain the @ to signify digests or shortened digests in
+		// the object.
+		object = u.Path[idx[0]:]
+		if object[:1] == ":" {
+			object = object[1:]
+		}
+		u.Path = u.Path[:idx[0]]
+	}
+
+	return Spec{
+		Locator: path.Join(u.Host, u.Path),
+		Object:  object,
+	}, nil
+}
+
+// Hostname returns the hostname portion of the locator.
+//
+// Remotes are not required to directly access the resources at this host. This
+// method is provided for convenience.
+func (r Spec) Hostname() string {
+	i := strings.Index(r.Locator, "/")
+
+	if i < 0 {
+		return r.Locator
+	}
+	return r.Locator[:i]
+}
+
+// Digest returns the digest portion of the reference spec. This may be a
+// partial or invalid digest, which may be used to lookup a complete digest.
+func (r Spec) Digest() digest.Digest {
+	_, dgst := SplitObject(r.Object)
+	return dgst
+}
+
+// String returns the normalized string for the ref.
+func (r Spec) String() string {
+	if r.Object == "" {
+		return r.Locator
+	}
+	if r.Object[:1] == "@" {
+		return fmt.Sprintf("%v%v", r.Locator, r.Object)
+	}
+
+	return fmt.Sprintf("%v:%v", r.Locator, r.Object)
+}
+
+// SplitObject provides two parts of the object spec, delimited by an `@`
+// symbol.
+//
+// Either may be empty and it is the caller's job to validate them
+// appropriately.
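+//
+// For example (illustrative, not part of the upstream comment):
+// SplitObject("v1.0@sha256:abc") returns ("v1.0@", "sha256:abc"); note that
+// the separator stays with the tag portion.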
+func SplitObject(obj string) (tag string, dgst digest.Digest) { + parts := strings.SplitAfterN(obj, "@", 2) + if len(parts) < 2 { + return parts[0], "" + } + return parts[0], digest.Digest(parts[1]) +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go new file mode 100644 index 000000000..64c6a38f9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go @@ -0,0 +1,225 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package auth + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/containerd/containerd/log" + remoteserrors "github.com/containerd/containerd/remotes/errors" + "github.com/containerd/containerd/version" +) + +var ( + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +// GenerateTokenOptions generates options for fetching a token based on a challenge +func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) { + realm, ok := c.Parameters["realm"] + if !ok { + return TokenOptions{}, errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return TokenOptions{}, fmt.Errorf("invalid token auth challenge realm: %w", err) + } + + to := TokenOptions{ + Realm: realmURL.String(), + Service: c.Parameters["service"], + Username: username, + Secret: secret, + } + + scope, ok := c.Parameters["scope"] + if ok { + to.Scopes = append(to.Scopes, strings.Split(scope, " ")...) + } else { + log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") + } + + return to, nil +} + +// TokenOptions are options for requesting a token +type TokenOptions struct { + Realm string + Service string + Scopes []string + Username string + Secret string + + // FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token. + // + // For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request. + // https://docs.docker.com/registry/spec/auth/token/#requesting-a-token + // + // For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request. 
+ // https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token + FetchRefreshToken bool +} + +// OAuthTokenResponse is response from fetching token with a OAuth POST request +type OAuthTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +// FetchTokenWithOAuth fetches a token using a POST request +func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) { + form := url.Values{} + if len(to.Scopes) > 0 { + form.Set("scope", strings.Join(to.Scopes, " ")) + } + form.Set("service", to.Service) + form.Set("client_id", clientID) + + if to.Username == "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", to.Secret) + } else { + form.Set("grant_type", "password") + form.Set("username", to.Username) + form.Set("password", to.Secret) + } + if to.FetchRefreshToken { + form.Set("access_type", "offline") + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, to.Realm, strings.NewReader(form.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) + } + if len(req.Header.Get("User-Agent")) == 0 { + req.Header.Set("User-Agent", "containerd/"+version.Version) + } + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, remoteserrors.NewUnexpectedStatusErr(resp) + } + + decoder := json.NewDecoder(resp.Body) + + var tr OAuthTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, fmt.Errorf("unable to decode token response: %w", err) + } + + if tr.AccessToken == "" { + return nil, ErrNoToken + } + + return &tr, nil +} + +// FetchTokenResponse is response from fetching token with GET request +type FetchTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +// FetchToken fetches a token using a GET request +func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, to.Realm, nil) + if err != nil { + return nil, err + } + + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) 
+	}
+	if len(req.Header.Get("User-Agent")) == 0 {
+		req.Header.Set("User-Agent", "containerd/"+version.Version)
+	}
+
+	reqParams := req.URL.Query()
+
+	if to.Service != "" {
+		reqParams.Add("service", to.Service)
+	}
+
+	for _, scope := range to.Scopes {
+		reqParams.Add("scope", scope)
+	}
+
+	if to.Secret != "" {
+		req.SetBasicAuth(to.Username, to.Secret)
+	}
+
+	if to.FetchRefreshToken {
+		reqParams.Add("offline_token", "true")
+	}
+
+	req.URL.RawQuery = reqParams.Encode()
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		return nil, remoteserrors.NewUnexpectedStatusErr(resp)
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	var tr FetchTokenResponse
+	if err = decoder.Decode(&tr); err != nil {
+		return nil, fmt.Errorf("unable to decode token response: %w", err)
+	}
+
+	// `access_token` is equivalent to `token` and if both are specified
+	// the choice is undefined. Canonicalize `access_token` by sticking
+	// things in `token`.
+	if tr.AccessToken != "" {
+		tr.Token = tr.AccessToken
+	}
+
+	if tr.Token == "" {
+		return nil, ErrNoToken
+	}
+
+	return &tr, nil
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go
new file mode 100644
index 000000000..e4529a776
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go
@@ -0,0 +1,200 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package auth
+
+import (
+	"net/http"
+	"sort"
+	"strings"
+)
+
+// AuthenticationScheme defines scheme of the authentication method
+type AuthenticationScheme byte
+
+const (
+	// BasicAuth is scheme for Basic HTTP Authentication RFC 7617
+	BasicAuth AuthenticationScheme = 1 << iota
+	// DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616
+	DigestAuth
+	// BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750
+	BearerAuth
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// scheme is the auth-scheme according to RFC 2617
+	Scheme AuthenticationScheme
+
+	// parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+type byScheme []Challenge
+
+func (bs byScheme) Len() int      { return len(bs) }
+func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
+
+// Sort in priority order: token > digest > basic
+func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme }
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?"
| "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// ParseAuthHeader parses challenges from WWW-Authenticate header +func ParseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + var s AuthenticationScheme + switch v { + case "basic": + s = BasicAuth + case "digest": + s = DigestAuth + case "bearer": + s = BearerAuth + default: + continue + } + challenges = append(challenges, Challenge{Scheme: s, Parameters: p}) + } + sort.Stable(byScheme(challenges)) + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + for { + var pkey string + pkey, s = expectToken(skipSpace(s)) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + return + } + s = s[1:] + } +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go new file mode 100644 index 000000000..9b3663cd1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -0,0 +1,353 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes/docker/auth" + remoteerrors "github.com/containerd/containerd/remotes/errors" +) + +type dockerAuthorizer struct { + credentials func(string) (string, string, error) + + client *http.Client + header http.Header + mu sync.RWMutex + + // indexed by host name + handlers map[string]*authHandler + + onFetchRefreshToken OnFetchRefreshToken +} + +type authorizerConfig struct { + credentials func(string) (string, string, error) + client *http.Client + header http.Header + onFetchRefreshToken OnFetchRefreshToken +} + +// AuthorizerOpt configures an authorizer +type AuthorizerOpt func(*authorizerConfig) + +// WithAuthClient provides the HTTP client for the authorizer +func WithAuthClient(client *http.Client) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.client = client + } +} + +// WithAuthCreds provides a credential function to the authorizer +func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.credentials = creds + } +} + +// WithAuthHeader provides HTTP headers for authorization +func WithAuthHeader(hdr http.Header) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.header = hdr + } +} + +// OnFetchRefreshToken is called on fetching request token. +type OnFetchRefreshToken func(ctx context.Context, refreshToken string, req *http.Request) + +// WithFetchRefreshToken enables fetching "refresh token" (aka "identity token", "offline token"). +func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.onFetchRefreshToken = f + } +} + +// NewDockerAuthorizer creates an authorizer using Docker's registry +// authentication spec. +// See https://docs.docker.com/registry/spec/auth/ +func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { + var ao authorizerConfig + for _, opt := range opts { + opt(&ao) + } + + if ao.client == nil { + ao.client = http.DefaultClient + } + + return &dockerAuthorizer{ + credentials: ao.credentials, + client: ao.client, + header: ao.header, + handlers: make(map[string]*authHandler), + onFetchRefreshToken: ao.onFetchRefreshToken, + } +} + +// Authorize handles auth request. 
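+//
+// For illustration only (a hedged usage sketch, not upstream
+// documentation; the credential values are assumptions):
+//
+//	a := NewDockerAuthorizer(
+//		WithAuthCreds(func(host string) (string, string, error) {
+//			return "user", "secret", nil // hypothetical static credentials
+//		}),
+//	)
+//	// After a 401, feed the responses to AddResponses, then retry the
+//	// request once Authorize has set the Authorization header.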
+func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error {
+ // skip if there is no auth handler
+ ah := a.getAuthHandler(req.URL.Host)
+ if ah == nil {
+ return nil
+ }
+
+ auth, refreshToken, err := ah.authorize(ctx)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Authorization", auth)
+
+ if refreshToken != "" {
+ a.mu.RLock()
+ onFetchRefreshToken := a.onFetchRefreshToken
+ a.mu.RUnlock()
+ if onFetchRefreshToken != nil {
+ onFetchRefreshToken(ctx, refreshToken, req)
+ }
+ }
+ return nil
+}
+
+func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ return a.handlers[host]
+}
+
+func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error {
+ last := responses[len(responses)-1]
+ host := last.Request.URL.Host
+
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ for _, c := range auth.ParseAuthHeader(last.Header) {
+ if c.Scheme == auth.BearerAuth {
+ if err := invalidAuthorization(c, responses); err != nil {
+ delete(a.handlers, host)
+ return err
+ }
+
+ // reuse the existing handler
+ //
+ // assume that one registry will return common challenge
+ // information, including realm and service, and that the
+ // resource scope is the only part that differs between
+ // requests.
+ if _, ok := a.handlers[host]; ok {
+ return nil
+ }
+
+ var username, secret string
+ if a.credentials != nil {
+ var err error
+ username, secret, err = a.credentials(host)
+ if err != nil {
+ return err
+ }
+ }
+
+ common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c)
+ if err != nil {
+ return err
+ }
+ common.FetchRefreshToken = a.onFetchRefreshToken != nil
+
+ a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common)
+ return nil
+ } else if c.Scheme == auth.BasicAuth && a.credentials != nil {
+ username, secret, err := a.credentials(host)
+ if err != nil {
+ return err
+ }
+
+ if username == "" || secret == "" {
+ return fmt.Errorf("%w: no basic auth credentials", ErrInvalidAuthorization)
+ }
+
+ a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, auth.TokenOptions{
+ Username: username,
+ Secret: secret,
+ })
+ return nil
+ }
+ }
+ return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented)
+}
+
+// authResult is used to rate-limit token fetches: concurrent requests
+// for the same scope wait on a single in-flight fetch.
+type authResult struct {
+ sync.WaitGroup
+ token string
+ refreshToken string
+ err error
+}
+
+// authHandler is used to handle auth requests per registry server.
+type authHandler struct { + sync.Mutex + + header http.Header + + client *http.Client + + // only support basic and bearer schemes + scheme auth.AuthenticationScheme + + // common contains common challenge answer + common auth.TokenOptions + + // scopedTokens caches token indexed by scopes, which used in + // bearer auth case + scopedTokens map[string]*authResult +} + +func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler { + return &authHandler{ + header: hdr, + client: client, + scheme: scheme, + common: opts, + scopedTokens: map[string]*authResult{}, + } +} + +func (ah *authHandler) authorize(ctx context.Context) (string, string, error) { + switch ah.scheme { + case auth.BasicAuth: + return ah.doBasicAuth(ctx) + case auth.BearerAuth: + return ah.doBearerAuth(ctx) + default: + return "", "", fmt.Errorf("failed to find supported auth scheme: %s: %w", string(ah.scheme), errdefs.ErrNotImplemented) + } +} + +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, string, error) { + username, secret := ah.common.Username, ah.common.Secret + + if username == "" || secret == "" { + return "", "", fmt.Errorf("failed to handle basic auth because missing username or secret") + } + + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) + return fmt.Sprintf("Basic %s", auth), "", nil +} + +func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken string, err error) { + // copy common tokenOptions + to := ah.common + + to.Scopes = GetTokenScopes(ctx, to.Scopes) + + // Docs: https://docs.docker.com/registry/spec/auth/scope + scoped := strings.Join(to.Scopes, " ") + + ah.Lock() + if r, exist := ah.scopedTokens[scoped]; exist { + ah.Unlock() + r.Wait() + return r.token, r.refreshToken, r.err + } + + // only one fetch token job + r := new(authResult) + r.Add(1) + ah.scopedTokens[scoped] = r + ah.Unlock() + + defer func() { + token = fmt.Sprintf("Bearer %s", token) + r.token, r.refreshToken, r.err = token, refreshToken, err + r.Done() + }() + + // fetch token for the resource scope + if to.Secret != "" { + defer func() { + if err != nil { + err = fmt.Errorf("failed to fetch oauth token: %w", err) + } + }() + // credential information is provided, use oauth POST endpoint + // TODO: Allow setting client_id + resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to) + if err != nil { + var errStatus remoteerrors.ErrUnexpectedStatus + if errors.As(err, &errStatus) { + // Registries without support for POST may return 404 for POST /v2/token. + // As of September 2017, GCR is known to return 404. + // As of February 2018, JFrog Artifactory is known to return 401. + // As of January 2022, ACR is known to return 400. 
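+ // (illustrative note: the check below falls back to the GET token
+ // endpoint for exactly these status codes)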
+ if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 || errStatus.StatusCode == 400 {
+ resp, err := auth.FetchToken(ctx, ah.client, ah.header, to)
+ if err != nil {
+ return "", "", err
+ }
+ return resp.Token, resp.RefreshToken, nil
+ }
+ log.G(ctx).WithFields(log.Fields{
+ "status": errStatus.Status,
+ "body": string(errStatus.Body),
+ }).Debugf("token request failed")
+ }
+ return "", "", err
+ }
+ return resp.AccessToken, resp.RefreshToken, nil
+ }
+ // do request anonymously
+ resp, err := auth.FetchToken(ctx, ah.client, ah.header, to)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err)
+ }
+ return resp.Token, resp.RefreshToken, nil
+}
+
+func invalidAuthorization(c auth.Challenge, responses []*http.Response) error {
+ errStr := c.Parameters["error"]
+ if errStr == "" {
+ return nil
+ }
+
+ n := len(responses)
+ if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) {
+ return nil
+ }
+
+ return fmt.Errorf("server message: %s: %w", errStr, ErrInvalidAuthorization)
+}
+
+func sameRequest(r1, r2 *http.Request) bool {
+ if r1.Method != r2.Method {
+ return false
+ }
+ if *r1.URL != *r2.URL {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/converter.go
new file mode 100644
index 000000000..d7dca0d36
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/converter.go
@@ -0,0 +1,87 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/images"
+ "github.com/containerd/containerd/log"
+ "github.com/containerd/containerd/remotes"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// LegacyConfigMediaType should be replaced by OCI image spec.
+//
+// More detail: docker/distribution#1622
+const LegacyConfigMediaType = "application/octet-stream"
+
+// ConvertManifest changes application/octet-stream to the schema2 config media type if needed.
+//
+// NOTE:
+// 1. the original manifest will be deleted by the next gc round.
+// 2. manifest lists are not covered.
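+//
+// For illustration only (a hedged sketch, not upstream documentation;
+// ctx, store and desc are assumed to exist):
+//
+//	newDesc, err := ConvertManifest(ctx, store, desc)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = newDesc // digest and size change when the config media type is rewritten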
+func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) { + if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest || + desc.MediaType == ocispec.MediaTypeImageManifest) { + + log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType) + return desc, nil + } + + // read manifest data + mb, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to read index data: %w", err) + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(mb, &manifest); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal data into manifest: %w", err) + } + + // check config media type + if manifest.Config.MediaType != LegacyConfigMediaType { + return desc, nil + } + + manifest.Config.MediaType = images.MediaTypeDockerSchema2Config + data, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err) + } + + // update manifest with gc labels + desc.Digest = digest.Canonical.FromBytes(data) + desc.Size = int64(len(data)) + + labels := map[string]string{} + for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to update content: %w", err) + } + return desc, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go b/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go new file mode 100644 index 000000000..908205392 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go @@ -0,0 +1,55 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "os" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + "github.com/containerd/containerd/content/local" + "github.com/containerd/containerd/log" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +func FuzzConvertManifest(data []byte) int { + ctx := context.Background() + + // Do not log the message below + // level=warning msg="do nothing for media type: ..." 
+ log.G(ctx).Logger.SetLevel(logrus.PanicLevel)
+
+ f := fuzz.NewConsumer(data)
+ desc := ocispec.Descriptor{}
+ err := f.GenerateStruct(&desc)
+ if err != nil {
+ return 0
+ }
+ tmpdir, err := os.MkdirTemp("", "fuzzing-")
+ if err != nil {
+ return 0
+ }
+ cs, err := local.NewStore(tmpdir)
+ if err != nil {
+ return 0
+ }
+ _, _ = ConvertManifest(ctx, cs, desc)
+ return 1
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/errcode.go b/vendor/github.com/containerd/containerd/remotes/docker/errcode.go
new file mode 100644
index 000000000..8c623bcbe
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/errcode.go
@@ -0,0 +1,283 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error, allowing
+// users of each to just call ErrorCode to get the real ID of each
+type ErrorCoder interface {
+ ErrorCode() ErrorCode
+}
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+ return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+ // NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+ return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+ d, ok := errorCodeToDescriptors[ec]
+
+ if !ok {
+ return ErrorCodeUnknown.Descriptor()
+ }
+
+ return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+ return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+ return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+ return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+ desc, ok := idToDescriptors[string(text)]
+
+ if !ok {
+ desc = ErrorCodeUnknown.Descriptor()
+ }
+
+ *ec = desc.Code
+
+ return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
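+//
+// For illustration only (a hedged sketch, not upstream documentation):
+//
+//	err := ErrorCodeUnknown.WithMessage("backend unreachable")
+//	fmt.Println(err) // prints "unknown: backend unreachable"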
+func (ec ErrorCode) WithMessage(message string) Error {
+ return Error{
+ Code: ec,
+ Message: message,
+ }
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// sets the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+ Code ErrorCode `json:"code"`
+ Message string `json:"message"`
+ Detail interface{} `json:"detail,omitempty"`
+
+ // TODO(duglin): See if we need an "args" property so we can do the
+ // variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+ return e.Code
+}
+
+// Error returns a human-readable representation of the error.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: e.Message,
+ Detail: detail,
+ }
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: fmt.Sprintf(e.Code.Message(), args...),
+ Detail: e.Detail,
+ }
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+ // Code is the error code that this descriptor describes.
+ Code ErrorCode
+
+ // Value provides a unique, string key, often capitalized with
+ // underscores, to identify the error code. This value is used as the
+ // keyed value when serializing api errors.
+ Value string
+
+ // Message is a short, human readable description of the error condition
+ // included in API responses.
+ Message string
+
+ // Description provides a complete account of the error's purpose, suitable
+ // for use in documentation.
+ Description string
+
+ // HTTPStatusCode provides the http status code that is associated with
+ // this error condition.
+ HTTPStatusCode int
+}
+
+// ParseErrorCode returns the value by the string error code.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+ ed, ok := idToDescriptors[value]
+ if ok {
+ return ed.Code
+ }
+
+ return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+ switch len(errs) {
+ case 0:
+ return ""
+ case 1:
+ return errs[0].Error()
+ default:
+ msg := "errors:\n"
+ for _, err := range errs {
+ msg += err.Error() + "\n"
+ }
+ return msg
+ }
+}
+
+// Len returns the current number of errors.
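+//
+// For illustration (a hedged sketch, not upstream documentation), an
+// Errors value marshals to the registry's JSON error envelope:
+//
+//	body, _ := json.Marshal(Errors{ErrorCodeUnsupported})
+//	// {"errors":[{"code":"UNSUPPORTED","message":"The operation is unsupported."}]}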
+func (errs Errors) Len() int {
+ return len(errs)
+}
+
+// MarshalJSON converts slice of error, ErrorCode or Error into a
+// slice of Error - then serializes
+func (errs Errors) MarshalJSON() ([]byte, error) {
+ var tmpErrs struct {
+ Errors []Error `json:"errors,omitempty"`
+ }
+
+ for _, daErr := range errs {
+ var err Error
+
+ switch daErr := daErr.(type) {
+ case ErrorCode:
+ err = daErr.WithDetail(nil)
+ case Error:
+ err = daErr
+ default:
+ err = ErrorCodeUnknown.WithDetail(daErr)
+
+ }
+
+ // If the Error struct was set up and they forgot to set the
+ // Message field (meaning it's "") then grab it from the ErrCode
+ msg := err.Message
+ if msg == "" {
+ msg = err.Code.Message()
+ }
+
+ tmpErrs.Errors = append(tmpErrs.Errors, Error{
+ Code: err.Code,
+ Message: msg,
+ Detail: err.Detail,
+ })
+ }
+
+ return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into slice of
+// Error or ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+ var tmpErrs struct {
+ Errors []Error
+ }
+
+ if err := json.Unmarshal(data, &tmpErrs); err != nil {
+ return err
+ }
+
+ var newErrs Errors
+ for _, daErr := range tmpErrs.Errors {
+ // If Message is empty or exactly matches the Code's message string
+ // then just use the Code, no need for a full Error struct
+ if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+ // Errors w/o details get converted to ErrorCode
+ newErrs = append(newErrs, daErr.Code)
+ } else {
+ // Errors w/ details are untouched
+ newErrs = append(newErrs, Error{
+ Code: daErr.Code,
+ Message: daErr.Message,
+ Detail: daErr.Detail,
+ })
+ }
+ }
+
+ *errs = newErrs
+ return nil
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go b/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go
new file mode 100644
index 000000000..b2bd4d82b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go
@@ -0,0 +1,154 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package docker
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+ "sync"
+)
+
+var (
+ errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
+ idToDescriptors = map[string]ErrorDescriptor{}
+ groupToDescriptors = map[string][]ErrorDescriptor{}
+)
+
+var (
+ // ErrorCodeUnknown is a generic error that can be used as a last
+ // resort if there is no situation-specific error message that can be used
+ ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
+ Value: "UNKNOWN",
+ Message: "unknown error",
+ Description: `Generic error returned when the error does not have an
+ API classification.`,
+ HTTPStatusCode: http.StatusInternalServerError,
+ })
+
+ // ErrorCodeUnsupported is returned when an operation is not supported.
+ ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. + ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, 
group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go new file mode 100644 index 000000000..ecf245933 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -0,0 +1,313 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type dockerFetcher struct { + *dockerBase +} + +func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound) + } + + ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) + if err != nil { + return nil, err + } + + return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { + // firstly try fetch via external urls + for _, us := range desc.URLs { + u, err := url.Parse(us) + if err != nil { + log.G(ctx).WithError(err).Debugf("failed to parse %q", us) + continue + } + if u.Scheme != "http" && u.Scheme != "https" { + log.G(ctx).Debug("non-http(s) alternative url is unsupported") + continue + } + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).Info("request") + + // Try this first, parse it + host := RegistryHost{ + Client: http.DefaultClient, + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPull, + } + req := r.request(host, http.MethodGet) + // Strip namespace from base + req.path = u.Path + if u.RawQuery != "" { + req.path = req.path + "?" + u.RawQuery + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try one of the other urls. 
+ } + + return nil, err + } + + return rc, nil + } + + // Try manifests endpoints for manifests types + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + images.MediaTypeDockerSchema1Manifest, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + + var firstErr error + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) + if err := req.addNamespace(r.refspec.Hostname()); err != nil { + return nil, err + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + continue // try another host + } + + return rc, nil + } + + return nil, firstErr + } + + // Finally use blobs endpoints + var firstErr error + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) + if err := req.addNamespace(r.refspec.Hostname()); err != nil { + return nil, err + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + continue // try another host + } + + return rc, nil + } + + if errdefs.IsNotFound(firstErr) { + firstErr = fmt.Errorf("could not fetch content descriptor %v (%v) from remote: %w", + desc.Digest, desc.MediaType, errdefs.ErrNotFound, + ) + } + + return nil, firstErr + + }) +} + +func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, ps ...string) (*request, int64, error) { + headReq := r.request(host, http.MethodHead, ps...) + if err := headReq.addNamespace(r.refspec.Hostname()); err != nil { + return nil, 0, err + } + + headResp, err := headReq.doWithRetries(ctx, nil) + if err != nil { + return nil, 0, err + } + if headResp.Body != nil { + headResp.Body.Close() + } + if headResp.StatusCode > 299 { + return nil, 0, fmt.Errorf("unexpected HEAD status code %v: %s", headReq.String(), headResp.Status) + } + + getReq := r.request(host, http.MethodGet, ps...) 
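+ // (illustrative note: the HEAD above verified existence and size;
+ // the GET built here is returned unopened and streamed later by the
+ // caller's read seeker)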
+ if err := getReq.addNamespace(r.refspec.Hostname()); err != nil { + return nil, 0, err + } + return getReq, headResp.ContentLength, nil +} + +func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) { + var desc ocispec.Descriptor + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", dgst)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, desc, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound) + } + + ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) + if err != nil { + return nil, desc, err + } + + var ( + getReq *request + sz int64 + firstErr error + ) + + for _, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, "blobs", dgst.String()) + if err == nil { + break + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + } + + if getReq == nil { + // Fall back to the "manifests" endpoint + for _, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, "manifests", dgst.String()) + if err == nil { + break + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + } + } + + if getReq == nil { + if errdefs.IsNotFound(firstErr) { + firstErr = fmt.Errorf("could not fetch content %v from remote: %w", dgst, errdefs.ErrNotFound) + } + if firstErr == nil { + firstErr = fmt.Errorf("could not fetch content %v from remote: (unknown)", dgst) + } + return nil, desc, firstErr + } + + seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) { + return r.open(ctx, getReq, "", offset) + }) + if err != nil { + return nil, desc, err + } + + desc = ocispec.Descriptor{ + MediaType: "application/octet-stream", + Digest: dgst, + Size: sz, + } + return seeker, desc, nil +} + +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) { + if mediatype == "" { + req.header.Set("Accept", "*/*") + } else { + req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + } + + if offset > 0 { + // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints + // will return the header without supporting the range. The content + // range must always be checked. + req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } + defer func() { + if retErr != nil { + resp.Body.Close() + } + }() + + if resp.StatusCode > 299 { + // TODO(stevvooe): When doing a offset specific request, we should + // really distinguish between a 206 and a 200. In the case of 200, we + // can discard the bytes, hiding the seek behavior from the + // implementation. 
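+ //
+ // For example, a request sent with "Range: bytes=100-" may still be
+ // answered with a plain 200 and the full body; the Content-Range
+ // check further below guards against servers that ignore the offset.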
+ + if resp.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound) + } + var registryErr Errors + if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { + return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status) + } + return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) + } + if offset > 0 { + cr := resp.Header.Get("content-range") + if cr != "" { + if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { + return nil, fmt.Errorf("unhandled content range in response: %v", cr) + + } + } else { + // TODO: Should any cases where use of content range + // without the proper header be considered? + // 206 responses? + + // Discard up to offset + // Could use buffer pool here but this case should be rare + n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset)) + if err != nil { + return nil, fmt.Errorf("failed to discard to offset: %w", err) + } + if n != offset { + return nil, errors.New("unable to discard to offset") + } + + } + } + + return resp.Body, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go new file mode 100644 index 000000000..b98886c59 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go @@ -0,0 +1,81 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + + refDocker "github.com/containerd/containerd/reference/docker" +) + +func FuzzFetcher(data []byte) int { + dataLen := len(data) + if dataLen == 0 { + return -1 + } + + s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen)) + rw.Header().Set("content-length", fmt.Sprintf("%d", dataLen)) + rw.Write(data) + })) + defer s.Close() + + u, err := url.Parse(s.URL) + if err != nil { + return 0 + } + + f := dockerFetcher{&dockerBase{ + repository: "nonempty", + }} + host := RegistryHost{ + Client: s.Client(), + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + } + + ctx := context.Background() + req := f.request(host, http.MethodGet) + rc, err := f.open(ctx, req, "", 0) + if err != nil { + return 0 + } + b, err := io.ReadAll(rc) + if err != nil { + return 0 + } + + expected := data + if len(b) != len(expected) { + panic("len of request is not equal to len of expected but should be") + } + return 1 +} + +func FuzzParseDockerRef(data []byte) int { + _, _ = refDocker.ParseDockerRef(string(data)) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/remotes/docker/handler.go new file mode 100644 index 000000000..27638ccc0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/handler.go @@ -0,0 +1,149 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/reference" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// AppendDistributionSourceLabel updates the label of blob with distribution source. +func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return nil, err + } + + source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/") + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + info, err := manager.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + + key := distributionSourceLabelKey(source) + + originLabel := "" + if info.Labels != nil { + originLabel = info.Labels[key] + } + value := appendDistributionSourceLabel(originLabel, repo) + + // The repo name has been limited under 256 and the distribution + // label might hit the limitation of label size, when blob data + // is used as the very, very common layer. 
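+ //
+ // For illustration (inferred from the code below, not upstream
+ // docs): pulling "docker.io/library/busybox" would yield the key
+ // "containerd.io/distribution.source.docker.io" with the value
+ // "library/busybox".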
+ if err := labels.Validate(key, value); err != nil {
+ log.G(ctx).Warnf("skip to append distribution label: %s", err)
+ return nil, nil
+ }
+
+ info = content.Info{
+ Digest: desc.Digest,
+ Labels: map[string]string{
+ key: value,
+ },
+ }
+ _, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key))
+ return nil, err
+ }, nil
+}
+
+func appendDistributionSourceLabel(originLabel, repo string) string {
+ repos := []string{}
+ if originLabel != "" {
+ repos = strings.Split(originLabel, ",")
+ }
+ repos = append(repos, repo)
+
+ // use the empty string to represent duplicate items
+ for i := 1; i < len(repos); i++ {
+ tmp, j := repos[i], i-1
+ for ; j >= 0 && repos[j] >= tmp; j-- {
+ if repos[j] == tmp {
+ tmp = ""
+ }
+ repos[j+1] = repos[j]
+ }
+ repos[j+1] = tmp
+ }
+
+ i := 0
+ for ; i < len(repos) && repos[i] == ""; i++ {
+ }
+
+ return strings.Join(repos[i:], ",")
+}
+
+func distributionSourceLabelKey(source string) string {
+ return fmt.Sprintf("%s.%s", labels.LabelDistributionSource, source)
+}
+
+// selectRepositoryMountCandidate will select the repo which has the longest
+// common prefix components as the candidate.
+func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string {
+ u, err := url.Parse("dummy://" + refspec.Locator)
+ if err != nil {
+ // NOTE: basically, it won't error here
+ return ""
+ }
+
+ source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/")
+ repoLabel, ok := sources[distributionSourceLabelKey(source)]
+ if !ok || repoLabel == "" {
+ return ""
+ }
+
+ n, match := 0, ""
+ components := strings.Split(target, "/")
+ for _, repo := range strings.Split(repoLabel, ",") {
+ // the target repo is not a candidate
+ if repo == target {
+ continue
+ }
+
+ if l := commonPrefixComponents(components, repo); l >= n {
+ n, match = l, repo
+ }
+ }
+ return match
+}
+
+func commonPrefixComponents(components []string, target string) int {
+ targetComponents := strings.Split(target, "/")
+
+ i := 0
+ for ; i < len(components) && i < len(targetComponents); i++ {
+ if components[i] != targetComponents[i] {
+ break
+ }
+ }
+ return i
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
new file mode 100644
index 000000000..824359339
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
@@ -0,0 +1,178 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/ + +package docker + +import ( + "bytes" + "fmt" + "io" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" +) + +const maxRetry = 3 + +type httpReadSeeker struct { + size int64 + offset int64 + rc io.ReadCloser + open func(offset int64) (io.ReadCloser, error) + closed bool + + errsWithNoProgress int +} + +func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) { + return &httpReadSeeker{ + size: size, + open: open, + }, nil +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.closed { + return 0, io.EOF + } + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.offset += int64(n) + if n > 0 || err == nil { + hrs.errsWithNoProgress = 0 + } + if err == io.ErrUnexpectedEOF { + // connection closed unexpectedly. try reconnecting. + if n == 0 { + hrs.errsWithNoProgress++ + if hrs.errsWithNoProgress > maxRetry { + return // too many retries for this offset with no progress + } + } + if hrs.rc != nil { + if clsErr := hrs.rc.Close(); clsErr != nil { + log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser") + } + hrs.rc = nil + } + if _, err2 := hrs.reader(); err2 == nil { + return n, nil + } + } else if err == io.EOF { + // The CRI's imagePullProgressTimeout relies on responseBody.Close to + // update the process monitor's status. If the err is io.EOF, close + // the connection since there is no more available data. + if hrs.rc != nil { + if clsErr := hrs.rc.Close(); clsErr != nil { + log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser after io.EOF") + } + hrs.rc = nil + } + } + return +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.closed { + return nil + } + hrs.closed = true + if hrs.rc != nil { + return hrs.rc.Close() + } + + return nil +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.closed { + return 0, fmt.Errorf("Fetcher.Seek: closed: %w", errdefs.ErrUnavailable) + } + + abs := hrs.offset + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs += offset + case io.SeekEnd: + if hrs.size == -1 { + return 0, fmt.Errorf("Fetcher.Seek: unknown size, cannot seek from end: %w", errdefs.ErrUnavailable) + } + abs = hrs.size + offset + default: + return 0, fmt.Errorf("Fetcher.Seek: invalid whence: %w", errdefs.ErrInvalidArgument) + } + + if abs < 0 { + return 0, fmt.Errorf("Fetcher.Seek: negative offset: %w", errdefs.ErrInvalidArgument) + } + + if abs != hrs.offset { + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Error("Fetcher.Seek: failed to close ReadCloser") + } + + hrs.rc = nil + } + + hrs.offset = abs + } + + return hrs.offset, nil +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.rc != nil { + return hrs.rc, nil + } + + if hrs.size == -1 || hrs.offset < hrs.size { + // only try to reopen the body request if we are seeking to a value + // less than the actual size. + if hrs.open == nil { + return nil, fmt.Errorf("cannot open: %w", errdefs.ErrNotImplemented) + } + + rc, err := hrs.open(hrs.offset) + if err != nil { + return nil, fmt.Errorf("httpReadSeeker: failed open: %w", err) + } + + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Error("httpReadSeeker: failed to close ReadCloser") + } + } + hrs.rc = rc + } else { + // There is an edge case here where offset == size of the content. 
If + // we seek, we will probably get an error for content that cannot be + // sought (?). In that case, we should err on committing the content, + // as the length is already satisfied but we just return the empty + // reader instead. + + hrs.rc = io.NopCloser(bytes.NewReader([]byte{})) + } + + return hrs.rc, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go new file mode 100644 index 000000000..678e17e12 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -0,0 +1,545 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + remoteserrors "github.com/containerd/containerd/remotes/errors" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type dockerPusher struct { + *dockerBase + object string + + // TODO: namespace tracker + tracker StatusTracker +} + +// Writer implements Ingester API of content store. This allows the client +// to receive ErrUnavailable when there is already an on-going upload. +// Note that the tracker MUST implement StatusTrackLocker interface to avoid +// race condition on StatusTracker. +func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + if wOpts.Ref == "" { + return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) + } + return p.push(ctx, wOpts.Desc, wOpts.Ref, true) +} + +func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return p.push(ctx, desc, remotes.MakeRefKey(ctx, desc), false) +} + +func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref string, unavailableOnFail bool) (content.Writer, error) { + if l, ok := p.tracker.(StatusTrackLocker); ok { + l.Lock(ref) + defer l.Unlock(ref) + } + ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true) + if err != nil { + return nil, err + } + status, err := p.tracker.GetStatus(ref) + if err == nil { + if status.Committed && status.Offset == status.Total { + return nil, fmt.Errorf("ref %v: %w", ref, errdefs.ErrAlreadyExists) + } + if unavailableOnFail && status.ErrClosed == nil { + // Another push of this ref is happening elsewhere. The rest of function + // will continue only when `errdefs.IsNotFound(err) == true` (i.e. there + // is no actively-tracked ref already). 
+ return nil, fmt.Errorf("push is on-going: %w", errdefs.ErrUnavailable) + } + // TODO: Handle incomplete status + } else if !errdefs.IsNotFound(err) { + return nil, fmt.Errorf("failed to get status: %w", err) + } + + hosts := p.filterHosts(HostCapabilityPush) + if len(hosts) == 0 { + return nil, fmt.Errorf("no push hosts: %w", errdefs.ErrNotFound) + } + + var ( + isManifest bool + existCheck []string + host = hosts[0] + ) + + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + isManifest = true + existCheck = getManifestPath(p.object, desc.Digest) + default: + existCheck = []string{"blobs", desc.Digest.String()} + } + + req := p.request(host, http.MethodHead, existCheck...) + req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", ")) + + log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if !errors.Is(err, ErrInvalidAuthorization) { + return nil, err + } + log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push") + } else { + if resp.StatusCode == http.StatusOK { + var exists bool + if isManifest && existCheck[1] != desc.Digest.String() { + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + if dgstHeader == desc.Digest { + exists = true + } + } else { + exists = true + } + + if exists { + p.tracker.SetStatus(ref, Status{ + Committed: true, + PushStatus: PushStatus{ + Exists: true, + }, + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Offset: desc.Size, + // TODO: Set updated time? + }, + }) + resp.Body.Close() + return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) + } + } else if resp.StatusCode != http.StatusNotFound { + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + resp.Body.Close() + return nil, err + } + resp.Body.Close() + } + + if isManifest { + putPath := getManifestPath(p.object, desc.Digest) + req = p.request(host, http.MethodPut, putPath...) + req.header.Add("Content-Type", desc.MediaType) + } else { + // Start upload request + req = p.request(host, http.MethodPost, "blobs", "uploads/") + + mountedFrom := "" + var resp *http.Response + if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { + preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) + pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo) + + // NOTE: the fromRepo might be private repo and + // auth service still can grant token without error. + // but the post request will fail because of 401. + // + // for the private repo, we should remove mount-from + // query and send the request again. 
+ resp, err = preq.doWithRetries(pctx, nil) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusUnauthorized: + log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) + + resp.Body.Close() + resp = nil + case http.StatusCreated: + mountedFrom = path.Join(p.refspec.Hostname(), fromRepo) + } + } + + if resp == nil { + resp, err = req.doWithRetries(ctx, nil) + if err != nil { + if errors.Is(err, ErrInvalidAuthorization) { + return nil, fmt.Errorf("push access denied, repository does not exist or may require authorization: %w", err) + } + return nil, err + } + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted, http.StatusNoContent: + case http.StatusCreated: + p.tracker.SetStatus(ref, Status{ + Committed: true, + PushStatus: PushStatus{ + MountedFrom: mountedFrom, + }, + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Offset: desc.Size, + }, + }) + return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) + default: + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + return nil, err + } + + var ( + location = resp.Header.Get("Location") + lurl *url.URL + lhost = host + ) + // Support paths without host in location + if strings.HasPrefix(location, "/") { + lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) + if err != nil { + return nil, fmt.Errorf("unable to parse location %v: %w", location, err) + } + } else { + if !strings.Contains(location, "://") { + location = lhost.Scheme + "://" + location + } + lurl, err = url.Parse(location) + if err != nil { + return nil, fmt.Errorf("unable to parse location %v: %w", location, err) + } + + if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { + lhost.Scheme = lurl.Scheme + lhost.Host = lurl.Host + + // Check if different than what was requested, accounting for fallback in the transport layer + requested := resp.Request.URL + if requested.Host != lhost.Host || requested.Scheme != lhost.Scheme { + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination, authorizer removed") + } + } + } + q := lurl.Query() + q.Add("digest", desc.Digest.String()) + + req = p.request(lhost, http.MethodPut) + req.header.Set("Content-Type", "application/octet-stream") + req.path = lurl.Path + "?" 
+ q.Encode() + } + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Expected: desc.Digest, + StartedAt: time.Now(), + }, + }) + + // TODO: Support chunked upload + + pushw := newPushWriter(p.dockerBase, ref, desc.Digest, p.tracker, isManifest) + + req.body = func() (io.ReadCloser, error) { + pr, pw := io.Pipe() + pushw.setPipe(pw) + return io.NopCloser(pr), nil + } + req.size = desc.Size + + go func() { + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + pushw.setError(err) + pushw.Close() + return + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent: + default: + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + pushw.setError(err) + pushw.Close() + } + pushw.setResponse(resp) + }() + + return pushw, nil +} + +func getManifestPath(object string, dgst digest.Digest) []string { + if i := strings.IndexByte(object, '@'); i >= 0 { + if object[i+1:] != dgst.String() { + // use digest, not tag + object = "" + } else { + // strip @ for registry path to make tag + object = object[:i] + } + + } + + if object == "" { + return []string{"manifests", dgst.String()} + } + + return []string{"manifests", object} +} + +type pushWriter struct { + base *dockerBase + ref string + + pipe *io.PipeWriter + + pipeC chan *io.PipeWriter + respC chan *http.Response + closeOnce sync.Once + errC chan error + + isManifest bool + + expected digest.Digest + tracker StatusTracker +} + +func newPushWriter(db *dockerBase, ref string, expected digest.Digest, tracker StatusTracker, isManifest bool) *pushWriter { + // Initialize and create response + return &pushWriter{ + base: db, + ref: ref, + expected: expected, + tracker: tracker, + pipeC: make(chan *io.PipeWriter, 1), + respC: make(chan *http.Response, 1), + errC: make(chan error, 1), + isManifest: isManifest, + } +} + +func (pw *pushWriter) setPipe(p *io.PipeWriter) { + pw.pipeC <- p +} + +func (pw *pushWriter) setError(err error) { + pw.errC <- err +} +func (pw *pushWriter) setResponse(resp *http.Response) { + pw.respC <- resp +} + +func (pw *pushWriter) Write(p []byte) (n int, err error) { + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return n, err + } + + if pw.pipe == nil { + p, ok := <-pw.pipeC + if !ok { + return 0, io.ErrClosedPipe + } + pw.pipe = p + } else { + select { + case p, ok := <-pw.pipeC: + if !ok { + return 0, io.ErrClosedPipe + } + pw.pipe.CloseWithError(content.ErrReset) + pw.pipe = p + + // If content has already been written, the bytes + // cannot be written and the caller must reset + status.Offset = 0 + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return 0, content.ErrReset + default: + } + } + + n, err = pw.pipe.Write(p) + if errors.Is(err, io.ErrClosedPipe) { + // if the pipe is closed, we might have the original error on the error + // channel - so we should try and get it + select { + case err2 := <-pw.errC: + err = err2 + default: + } + } + status.Offset += int64(n) + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return +} + +func (pw *pushWriter) Close() error { + // Ensure pipeC is closed but handle `Close()` being + // called multiple times without panicking + pw.closeOnce.Do(func() { + close(pw.pipeC) + }) + if pw.pipe != nil { + status, err := pw.tracker.GetStatus(pw.ref) + if err == nil && !status.Committed { + // Closing an 
incomplete writer. Record this as an error so that following write can retry it. + status.ErrClosed = errors.New("closed incomplete writer") + pw.tracker.SetStatus(pw.ref, status) + } + return pw.pipe.Close() + } + return nil +} + +func (pw *pushWriter) Status() (content.Status, error) { + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return content.Status{}, err + } + return status.Status, nil + +} + +func (pw *pushWriter) Digest() digest.Digest { + // TODO: Get rid of this function? + return pw.expected +} + +func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + // Check whether read has already thrown an error + if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) { + return fmt.Errorf("pipe error before commit: %w", err) + } + + if err := pw.pipe.Close(); err != nil { + return err + } + // TODO: timeout waiting for response + var resp *http.Response + select { + case err := <-pw.errC: + return err + case resp = <-pw.respC: + defer resp.Body.Close() + case p, ok := <-pw.pipeC: + // check whether the pipe has changed in the commit, because sometimes Write + // can complete successfully, but the pipe may have changed. In that case, the + // content needs to be reset. + if !ok { + return io.ErrClosedPipe + } + pw.pipe.CloseWithError(content.ErrReset) + pw.pipe = p + + // If content has already been written, the bytes + // cannot be written again and the caller must reset + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return err + } + status.Offset = 0 + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return content.ErrReset + } + + // 201 is specified return status, some registries return + // 200, 202 or 204. + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: + default: + return remoteserrors.NewUnexpectedStatusErr(resp) + } + + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + + if size > 0 && size != status.Offset { + return fmt.Errorf("unexpected size %d, expected %d", status.Offset, size) + } + + if expected == "" { + expected = status.Expected + } + + actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err != nil { + return fmt.Errorf("invalid content digest in response: %w", err) + } + + if actual != expected { + return fmt.Errorf("got digest %s, expected %s", actual, expected) + } + + status.Committed = true + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + + return nil +} + +func (pw *pushWriter) Truncate(size int64) error { + // TODO: if blob close request and start new request at offset + // TODO: always error on manifest + return errors.New("cannot truncate remote upload") +} + +func requestWithMountFrom(req *request, mount, from string) *request { + creq := *req + + sep := "?" + if strings.Contains(creq.path, sep) { + sep = "&" + } + + creq.path = creq.path + sep + "mount=" + mount + "&from=" + from + + return &creq +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/remotes/docker/registry.go new file mode 100644 index 000000000..98cafcd06 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/registry.go @@ -0,0 +1,244 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "errors" + "net" + "net/http" +) + +// HostCapabilities represent the capabilities of the registry +// host. This also represents the set of operations for which +// the registry host may be trusted to perform. +// +// For example pushing is a capability which should only be +// performed on an upstream source, not a mirror. +// Resolving (the process of converting a name into a digest) +// must be considered a trusted operation and only done by +// a host which is trusted (or more preferably by secure process +// which can prove the provenance of the mapping). A public +// mirror should never be trusted to do a resolve action. +// +// | Registry Type | Pull | Resolve | Push | +// |------------------|------|---------|------| +// | Public Registry | yes | yes | yes | +// | Private Registry | yes | yes | yes | +// | Public Mirror | yes | no | no | +// | Private Mirror | yes | yes | no | +type HostCapabilities uint8 + +const ( + // HostCapabilityPull represents the capability to fetch manifests + // and blobs by digest + HostCapabilityPull HostCapabilities = 1 << iota + + // HostCapabilityResolve represents the capability to fetch manifests + // by name + HostCapabilityResolve + + // HostCapabilityPush represents the capability to push blobs and + // manifests + HostCapabilityPush + + // Reserved for future capabilities (i.e. search, catalog, remove) +) + +// Has checks whether the capabilities list has the provide capability +func (c HostCapabilities) Has(t HostCapabilities) bool { + return c&t == t +} + +// RegistryHost represents a complete configuration for a registry +// host, representing the capabilities, authorizations, connection +// configuration, and location. +type RegistryHost struct { + Client *http.Client + Authorizer Authorizer + Host string + Scheme string + Path string + Capabilities HostCapabilities + Header http.Header +} + +func (h RegistryHost) isProxy(refhost string) bool { + if refhost != h.Host { + if refhost != "docker.io" || h.Host != "registry-1.docker.io" { + return true + } + } + return false +} + +// RegistryHosts fetches the registry hosts for a given namespace, +// provided by the host component of an distribution image reference. +type RegistryHosts func(string) ([]RegistryHost, error) + +// Registries joins multiple registry configuration functions, using the same +// order as provided within the arguments. When an empty registry configuration +// is returned with a nil error, the next function will be called. +// NOTE: This function will not join configurations, as soon as a non-empty +// configuration is returned from a configuration function, it will be returned +// to the caller. 
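+//
+// A hedged usage sketch (mirrorHosts is a hypothetical RegistryHosts that
+// only knows about a private mirror):
+//
+//	hosts := Registries(mirrorHosts, ConfigureDefaultRegistries())
+//
+// Here hosts consults mirrorHosts first and falls back to the defaults only
+// when mirrorHosts returns an empty configuration and a nil error.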
+func Registries(registries ...RegistryHosts) RegistryHosts { + return func(host string) ([]RegistryHost, error) { + for _, registry := range registries { + config, err := registry(host) + if err != nil { + return config, err + } + if len(config) > 0 { + return config, nil + } + } + return nil, nil + } +} + +type registryOpts struct { + authorizer Authorizer + plainHTTP func(string) (bool, error) + host func(string) (string, error) + client *http.Client +} + +// RegistryOpt defines a registry default option +type RegistryOpt func(*registryOpts) + +// WithPlainHTTP configures registries to use plaintext http scheme +// for the provided host match function. +func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.plainHTTP = f + } +} + +// WithAuthorizer configures the default authorizer for a registry +func WithAuthorizer(a Authorizer) RegistryOpt { + return func(opts *registryOpts) { + opts.authorizer = a + } +} + +// WithHostTranslator defines the default translator to use for registry hosts +func WithHostTranslator(h func(string) (string, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.host = h + } +} + +// WithClient configures the default http client for a registry +func WithClient(c *http.Client) RegistryOpt { + return func(opts *registryOpts) { + opts.client = c + } +} + +// ConfigureDefaultRegistries is used to create a default configuration for +// registries. For more advanced configurations or per-domain setups, +// the RegistryHosts interface should be used directly. +// NOTE: This function will always return a non-empty value or error +func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { + var opts registryOpts + for _, opt := range ropts { + opt(&opts) + } + + return func(host string) ([]RegistryHost, error) { + config := RegistryHost{ + Client: opts.client, + Authorizer: opts.authorizer, + Host: host, + Scheme: "https", + Path: "/v2", + Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, + } + + if config.Client == nil { + config.Client = http.DefaultClient + } + + if opts.plainHTTP != nil { + match, err := opts.plainHTTP(host) + if err != nil { + return nil, err + } + if match { + config.Scheme = "http" + } + } + + if opts.host != nil { + var err error + config.Host, err = opts.host(config.Host) + if err != nil { + return nil, err + } + } else if host == "docker.io" { + config.Host = "registry-1.docker.io" + } + + return []RegistryHost{config}, nil + } +} + +// MatchAllHosts is a host match function which is always true. +func MatchAllHosts(string) (bool, error) { + return true, nil +} + +// MatchLocalhost is a host match function which returns true for +// localhost. +// +// Note: this does not handle matching of ip addresses in octal, +// decimal or hex form. +func MatchLocalhost(host string) (bool, error) { + switch { + case host == "::1": + return true, nil + case host == "[::1]": + return true, nil + } + h, p, err := net.SplitHostPort(host) + + // addrError helps distinguish between errors of form + // "no colon in address" and "too many colons in address". + // The former is fine as the host string need not have a + // port. Latter needs to be handled. 
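+ //
+ // For illustration: "localhost:5000" splits cleanly into h="localhost"
+ // and p="5000"; a bare "localhost" fails with the missing-port error
+ // matched below and is still accepted; "::1" had to be special-cased
+ // above because SplitHostPort rejects it as having too many colons.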
+ addrError := &net.AddrError{ + Err: "missing port in address", + Addr: host, + } + if err != nil { + if err.Error() != addrError.Error() { + return false, err + } + // host string without any port specified + h = host + } else if len(p) == 0 { + return false, errors.New("invalid host name format") + } + + // use ipv4 dotted decimal for further checking + if h == "localhost" { + h = "127.0.0.1" + } + ip := net.ParseIP(h) + + return ip.IsLoopback(), nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go new file mode 100644 index 000000000..cca4ca6a2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -0,0 +1,729 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "path" + "strings" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + remoteerrors "github.com/containerd/containerd/remotes/errors" + "github.com/containerd/containerd/tracing" + "github.com/containerd/containerd/version" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // ErrInvalidAuthorization is used when credentials are passed to a server but + // those credentials are rejected. + ErrInvalidAuthorization = errors.New("authorization failed") + + // MaxManifestSize represents the largest size accepted from a registry + // during resolution. Larger manifests may be accepted using a + // resolution method other than the registry. + // + // NOTE: The max supported layers by some runtimes is 128 and individual + // layers will not contribute more than 256 bytes, making a + // reasonable limit for a large image manifests of 32K bytes. + // 4M bytes represents a much larger upper bound for images which may + // contain large annotations or be non-images. A proper manifest + // design puts large metadata in subobjects, as is consistent the + // intent of the manifest design. + MaxManifestSize int64 = 4 * 1048 * 1048 +) + +// Authorizer is used to authorize HTTP requests based on 401 HTTP responses. +// An Authorizer is responsible for caching tokens or credentials used by +// requests. +type Authorizer interface { + // Authorize sets the appropriate `Authorization` header on the given + // request. + // + // If no authorization is found for the request, the request remains + // unmodified. 
It may also add an `Authorization` header as + // "bearer " + // "basic " + // + // It may return remotes/errors.ErrUnexpectedStatus, which for example, + // can be used by the caller to find out the status code returned by the registry. + Authorize(context.Context, *http.Request) error + + // AddResponses adds a 401 response for the authorizer to consider when + // authorizing requests. The last response should be unauthorized and + // the previous requests are used to consider redirects and retries + // that may have led to the 401. + // + // If response is not handled, returns `ErrNotImplemented` + AddResponses(context.Context, []*http.Response) error +} + +// ResolverOptions are used to configured a new Docker register resolver +type ResolverOptions struct { + // Hosts returns registry host configurations for a namespace. + Hosts RegistryHosts + + // Headers are the HTTP request header fields sent by the resolver + Headers http.Header + + // Tracker is used to track uploads to the registry. This is used + // since the registry does not have upload tracking and the existing + // mechanism for getting blob upload status is expensive. + Tracker StatusTracker + + // Authorizer is used to authorize registry requests + // + // Deprecated: use Hosts. + Authorizer Authorizer + + // Credentials provides username and secret given a host. + // If username is empty but a secret is given, that secret + // is interpreted as a long lived token. + // + // Deprecated: use Hosts. + Credentials func(string) (string, string, error) + + // Host provides the hostname given a namespace. + // + // Deprecated: use Hosts. + Host func(string) (string, error) + + // PlainHTTP specifies to use plain http and not https + // + // Deprecated: use Hosts. + PlainHTTP bool + + // Client is the http client to used when making registry requests + // + // Deprecated: use Hosts. + Client *http.Client +} + +// DefaultHost is the default host function. +func DefaultHost(ns string) (string, error) { + if ns == "docker.io" { + return "registry-1.docker.io", nil + } + return ns, nil +} + +type dockerResolver struct { + hosts RegistryHosts + header http.Header + resolveHeader http.Header + tracker StatusTracker +} + +// NewResolver returns a new resolver to a Docker registry +func NewResolver(options ResolverOptions) remotes.Resolver { + if options.Tracker == nil { + options.Tracker = NewInMemoryTracker() + } + + if options.Headers == nil { + options.Headers = make(http.Header) + } else { + // make a copy of the headers to avoid race due to concurrent map write + options.Headers = options.Headers.Clone() + } + if _, ok := options.Headers["User-Agent"]; !ok { + options.Headers.Set("User-Agent", "containerd/"+version.Version) + } + + resolveHeader := http.Header{} + if _, ok := options.Headers["Accept"]; !ok { + // set headers for all the types we support for resolution. 
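+ //
+ // For illustration, the joined value set below is
+ //
+ //	application/vnd.docker.distribution.manifest.v2+json,
+ //	application/vnd.docker.distribution.manifest.list.v2+json,
+ //	application/vnd.oci.image.manifest.v1+json,
+ //	application/vnd.oci.image.index.v1+json, */*
+ //
+ // sent as a single comma-separated Accept header line.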
+ resolveHeader.Set("Accept", strings.Join([]string{ + images.MediaTypeDockerSchema2Manifest, + images.MediaTypeDockerSchema2ManifestList, + ocispec.MediaTypeImageManifest, + ocispec.MediaTypeImageIndex, "*/*", + }, ", ")) + } else { + resolveHeader["Accept"] = options.Headers["Accept"] + delete(options.Headers, "Accept") + } + + if options.Hosts == nil { + opts := []RegistryOpt{} + if options.Host != nil { + opts = append(opts, WithHostTranslator(options.Host)) + } + + if options.Authorizer == nil { + options.Authorizer = NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthHeader(options.Headers), + WithAuthCreds(options.Credentials)) + } + opts = append(opts, WithAuthorizer(options.Authorizer)) + + if options.Client != nil { + opts = append(opts, WithClient(options.Client)) + } + if options.PlainHTTP { + opts = append(opts, WithPlainHTTP(MatchAllHosts)) + } else { + opts = append(opts, WithPlainHTTP(MatchLocalhost)) + } + options.Hosts = ConfigureDefaultRegistries(opts...) + } + return &dockerResolver{ + hosts: options.Hosts, + header: options.Headers, + resolveHeader: resolveHeader, + tracker: options.Tracker, + } +} + +func getManifestMediaType(resp *http.Response) string { + // Strip encoding data (manifests should always be ascii JSON) + contentType := resp.Header.Get("Content-Type") + if sp := strings.IndexByte(contentType, ';'); sp != -1 { + contentType = contentType[0:sp] + } + + // As of Apr 30 2019 the registry.access.redhat.com registry does not specify + // the content type of any data but uses schema1 manifests. + if contentType == "text/plain" { + contentType = images.MediaTypeDockerSchema1Manifest + } + return contentType +} + +type countingReader struct { + reader io.Reader + bytesRead int64 +} + +func (r *countingReader) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + r.bytesRead += int64(n) + return n, err +} + +var _ remotes.Resolver = &dockerResolver{} + +func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return "", ocispec.Descriptor{}, err + } + refspec := base.refspec + if refspec.Object == "" { + return "", ocispec.Descriptor{}, reference.ErrObjectRequired + } + + var ( + firstErr error + paths [][]string + dgst = refspec.Digest() + caps = HostCapabilityPull + ) + + if dgst != "" { + if err := dgst.Validate(); err != nil { + // need to fail here, since we can't actually resolve the invalid + // digest. + return "", ocispec.Descriptor{}, err + } + + // turns out, we have a valid digest, make a url. + paths = append(paths, []string{"manifests", dgst.String()}) + + // fallback to blobs on not found. + paths = append(paths, []string{"blobs", dgst.String()}) + } else { + // Add + paths = append(paths, []string{"manifests", refspec.Object}) + caps |= HostCapabilityResolve + } + + hosts := base.filterHosts(caps) + if len(hosts) == 0 { + return "", ocispec.Descriptor{}, fmt.Errorf("no resolve hosts: %w", errdefs.ErrNotFound) + } + + ctx, err = ContextWithRepositoryScope(ctx, refspec, false) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + for _, u := range paths { + for _, host := range hosts { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) + + req := base.request(host, http.MethodHead, u...) 
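+ // A HEAD request is tried first: for a tag path like "manifests/<tag>"
+ // the registry can answer with just a Docker-Content-Digest header,
+ // avoiding a manifest download. The GET fallback further below covers
+ // registries that omit that header or the content length.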
+ if err := req.addNamespace(base.refspec.Hostname()); err != nil { + return "", ocispec.Descriptor{}, err + } + + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } + + log.G(ctx).Debug("resolving") + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if errors.Is(err, ErrInvalidAuthorization) { + err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err) + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + log.G(ctx).WithError(err).Info("trying next host") + continue // try another host + } + resp.Body.Close() // don't care about body contents. + + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + log.G(ctx).Info("trying next host - response was http.StatusNotFound") + continue + } + if resp.StatusCode > 399 { + // Set firstErr when encountering the first non-404 status code. + if firstErr == nil { + firstErr = remoteerrors.NewUnexpectedStatusErr(resp) + } + continue // try another host + } + return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp) + } + size := resp.ContentLength + contentType := getManifestMediaType(resp) + + // if no digest was provided, then only a resolve + // trusted registry was contacted, in this case use + // the digest header (or content from GET) + if dgst == "" { + // this is the only point at which we trust the registry. we use the + // content headers to assemble a descriptor for the name. when this becomes + // more robust, we mostly get this information from a secure trust store. + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + + if dgstHeader != "" && size != -1 { + if err := dgstHeader.Validate(); err != nil { + return "", ocispec.Descriptor{}, fmt.Errorf("%q in header not a valid digest: %w", dgstHeader, err) + } + dgst = dgstHeader + } + } + if dgst == "" || size == -1 { + log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") + + req = base.request(host, http.MethodGet, u...) + if err := req.addNamespace(base.refspec.Hostname()); err != nil { + return "", ocispec.Descriptor{}, err + } + + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + bodyReader := countingReader{reader: resp.Body} + + contentType = getManifestMediaType(resp) + err = func() error { + defer resp.Body.Close() + if dgst != "" { + _, err = io.Copy(io.Discard, &bodyReader) + return err + } + + if contentType == images.MediaTypeDockerSchema1Manifest { + b, err := schema1.ReadStripSignature(&bodyReader) + if err != nil { + return err + } + + dgst = digest.FromBytes(b) + return nil + } + + dgst, err = digest.FromReader(&bodyReader) + return err + }() + if err != nil { + return "", ocispec.Descriptor{}, err + } + size = bodyReader.bytesRead + } + // Prevent resolving to excessively large manifests + if size > MaxManifestSize { + if firstErr == nil { + firstErr = fmt.Errorf("rejecting %d byte manifest for %s: %w", size, ref, errdefs.ErrNotFound) + } + continue + } + + desc := ocispec.Descriptor{ + Digest: dgst, + MediaType: contentType, + Size: size, + } + + log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") + return ref, desc, nil + } + } + + // If above loop terminates without return, then there was an error. + // "firstErr" contains the first non-404 error. 
That is, "firstErr == nil" + // means that either no registries were given or each registry returned 404. + + if firstErr == nil { + firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound) + } + + return "", ocispec.Descriptor{}, firstErr +} + +func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return nil, err + } + + return dockerFetcher{ + dockerBase: base, + }, nil +} + +func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return nil, err + } + + return dockerPusher{ + dockerBase: base, + object: base.refspec.Object, + tracker: r.tracker, + }, nil +} + +func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + return r.base(refspec) +} + +type dockerBase struct { + refspec reference.Spec + repository string + hosts []RegistryHost + header http.Header +} + +func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { + host := refspec.Hostname() + hosts, err := r.hosts(host) + if err != nil { + return nil, err + } + return &dockerBase{ + refspec: refspec, + repository: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, + }, nil +} + +func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { + for _, host := range r.hosts { + if host.Capabilities.Has(caps) { + hosts = append(hosts, host) + } + } + return +} + +func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { + header := r.header.Clone() + if header == nil { + header = http.Header{} + } + + for key, value := range host.Header { + header[key] = append(header[key], value...) + } + parts := append([]string{"/", host.Path, r.repository}, ps...) + p := path.Join(parts...) + // Join strips trailing slash, re-add ending "/" if included + if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { + p = p + "/" + } + return &request{ + method: method, + path: p, + header: header, + host: host, + } +} + +func (r *request) authorize(ctx context.Context, req *http.Request) error { + // Check if has header for host + if r.host.Authorizer != nil { + if err := r.host.Authorizer.Authorize(ctx, req); err != nil { + return err + } + } + + return nil +} + +func (r *request) addNamespace(ns string) (err error) { + if !r.host.isProxy(ns) { + return nil + } + var q url.Values + // Parse query + if i := strings.IndexByte(r.path, '?'); i > 0 { + r.path = r.path[:i+1] + q, err = url.ParseQuery(r.path[i+1:]) + if err != nil { + return + } + } else { + r.path = r.path + "?" 
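+ // For illustration only (actual paths depend on the host configuration):
+ // a proxied request for "library/nginx" on a docker.io mirror ends up as
+ //
+ //	/v2/library/nginx/manifests/latest?ns=docker.io
+ //
+ // letting the mirror identify the upstream namespace being requested.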
+ q = url.Values{} + } + q.Add("ns", ns) + + r.path = r.path + q.Encode() + + return +} + +type request struct { + method string + path string + header http.Header + host RegistryHost + body func() (io.ReadCloser, error) + size int64 +} + +func (r *request) do(ctx context.Context) (*http.Response, error) { + u := r.host.Scheme + "://" + r.host.Host + r.path + req, err := http.NewRequestWithContext(ctx, r.method, u, nil) + if err != nil { + return nil, err + } + if r.header == nil { + req.Header = http.Header{} + } else { + req.Header = r.header.Clone() // headers need to be copied to avoid concurrent map access + } + if r.body != nil { + body, err := r.body() + if err != nil { + return nil, err + } + req.Body = body + req.GetBody = r.body + if r.size > 0 { + req.ContentLength = r.size + } + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).WithFields(requestFields(req)).Debug("do request") + if err := r.authorize(ctx, req); err != nil { + return nil, fmt.Errorf("failed to authorize: %w", err) + } + + client := &http.Client{} + if r.host.Client != nil { + *client = *r.host.Client + } + if client.CheckRedirect == nil { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + if err := r.authorize(ctx, req); err != nil { + return fmt.Errorf("failed to authorize redirect: %w", err) + } + return nil + } + } + + tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "HTTPRequest")) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to do request: %w", err) + } + log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") + return resp, nil +} + +func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { + resp, err := r.do(ctx) + if err != nil { + return nil, err + } + + responses = append(responses, resp) + retry, err := r.retryRequest(ctx, responses) + if err != nil { + resp.Body.Close() + return nil, err + } + if retry { + resp.Body.Close() + return r.doWithRetries(ctx, responses) + } + return resp, err +} + +func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { + if len(responses) > 5 { + return false, nil + } + last := responses[len(responses)-1] + switch last.StatusCode { + case http.StatusUnauthorized: + log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") + if r.host.Authorizer != nil { + if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil { + return true, nil + } else if !errdefs.IsNotImplemented(err) { + return false, err + } + } + + return false, nil + case http.StatusMethodNotAllowed: + // Support registries which have not properly implemented the HEAD method for + // manifests endpoint + if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { + r.method = http.MethodGet + return true, nil + } + case http.StatusRequestTimeout, http.StatusTooManyRequests: + return true, nil + } + + // TODO: Handle 50x errors accounting for attempt history + return false, nil +} + +func (r *request) String() string { + return r.host.Scheme + "://" + r.host.Host + r.path +} + +func requestFields(req *http.Request) log.Fields { + fields := map[string]interface{}{ + "request.method": req.Method, + } + for k, vals := range req.Header { + k = strings.ToLower(k) + if k == "authorization" { + continue + } + for i, v := range vals { + field := 
"request.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return fields +} + +func responseFields(resp *http.Response) log.Fields { + fields := map[string]interface{}{ + "response.status": resp.Status, + } + for k, vals := range resp.Header { + k = strings.ToLower(k) + for i, v := range vals { + field := "response.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return fields +} + +// IsLocalhost checks if the registry host is local. +func IsLocalhost(host string) bool { + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + + if host == "localhost" { + return true + } + + ip := net.ParseIP(host) + return ip.IsLoopback() +} + +// HTTPFallback is an http.RoundTripper which allows fallback from https to http +// for registry endpoints with configurations for both http and TLS, such as +// defaulted localhost endpoints. +type HTTPFallback struct { + http.RoundTripper +} + +func (f HTTPFallback) RoundTrip(r *http.Request) (*http.Response, error) { + resp, err := f.RoundTripper.RoundTrip(r) + var tlsErr tls.RecordHeaderError + if errors.As(err, &tlsErr) && string(tlsErr.RecordHeader[:]) == "HTTP/" { + // server gave HTTP response to HTTPS client + plainHTTPUrl := *r.URL + plainHTTPUrl.Scheme = "http" + + plainHTTPRequest := *r + plainHTTPRequest.URL = &plainHTTPUrl + + return f.RoundTripper.RoundTrip(&plainHTTPRequest) + } + + return resp, err +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go new file mode 100644 index 000000000..8c9e520cd --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -0,0 +1,608 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package schema1 provides a converter to fetch an image formatted in Docker Image Manifest v2, Schema 1. +// +// Deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1. 
+package schema1 + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/errgroup" +) + +const ( + manifestSizeLimit = 8e6 // 8MB + labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer" +) + +type blobState struct { + diffID digest.Digest + empty bool +} + +// Converter converts schema1 manifests to schema2 on fetch +type Converter struct { + contentStore content.Store + fetcher remotes.Fetcher + + pulledManifest *manifest + + mu sync.Mutex + blobMap map[digest.Digest]blobState + layerBlobs map[digest.Digest]ocispec.Descriptor +} + +// NewConverter returns a new converter +func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter { + return &Converter{ + contentStore: contentStore, + fetcher: fetcher, + blobMap: map[digest.Digest]blobState{}, + layerBlobs: map[digest.Digest]ocispec.Descriptor{}, + } +} + +// Handle fetching descriptors for a docker media type +func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema1Manifest: + if err := c.fetchManifest(ctx, desc); err != nil { + return nil, err + } + + m := c.pulledManifest + if len(m.FSLayers) != len(m.History) { + return nil, errors.New("invalid schema 1 manifest, history and layer mismatch") + } + descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers)) + + for i := range m.FSLayers { + if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok { + empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility)) + if err != nil { + return nil, err + } + + // Do no attempt to download a known empty blob + if !empty { + descs = append([]ocispec.Descriptor{ + { + MediaType: images.MediaTypeDockerSchema2LayerGzip, + Digest: c.pulledManifest.FSLayers[i].BlobSum, + Size: -1, + }, + }, descs...) + } + c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{ + empty: empty, + } + } + } + return descs, nil + case images.MediaTypeDockerSchema2LayerGzip: + if c.pulledManifest == nil { + return nil, errors.New("manifest required for schema 1 blob pull") + } + return nil, c.fetchBlob(ctx, desc) + default: + return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType) + } +} + +// ConvertOptions provides options on converting a docker schema1 manifest. +type ConvertOptions struct { + // ManifestMediaType specifies the media type of the manifest OCI descriptor. + ManifestMediaType string + + // ConfigMediaType specifies the media type of the manifest config OCI + // descriptor. + ConfigMediaType string +} + +// ConvertOpt allows configuring a convert operation. +type ConvertOpt func(context.Context, *ConvertOptions) error + +// UseDockerSchema2 is used to indicate that a schema1 manifest should be +// converted into the media types for a docker schema2 manifest. 
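+//
+// A hedged usage sketch (c is a *Converter built with NewConverter over an
+// existing content store and fetcher; error handling elided):
+//
+//	desc, _ := c.Convert(ctx, UseDockerSchema2())
+//
+// Without the option, Convert defaults to the OCI image media types.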
+func UseDockerSchema2() ConvertOpt { + return func(ctx context.Context, o *ConvertOptions) error { + o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest + o.ConfigMediaType = images.MediaTypeDockerSchema2Config + return nil + } +} + +// Convert a docker manifest to an OCI descriptor +func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) { + co := ConvertOptions{ + ManifestMediaType: ocispec.MediaTypeImageManifest, + ConfigMediaType: ocispec.MediaTypeImageConfig, + } + for _, opt := range opts { + if err := opt(ctx, &co); err != nil { + return ocispec.Descriptor{}, err + } + } + + history, diffIDs, err := c.schema1ManifestHistory() + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err) + } + + var img ocispec.Image + if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err) + } + + img.History = history + img.RootFS = ocispec.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + + b, err := json.MarshalIndent(img, "", " ") + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) + } + + config := ocispec.Descriptor{ + MediaType: co.ConfigMediaType, + Digest: digest.Canonical.FromBytes(b), + Size: int64(len(b)), + } + + layers := make([]ocispec.Descriptor, len(diffIDs)) + for i, diffID := range diffIDs { + layers[i] = c.layerBlobs[diffID] + } + + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: config, + Layers: layers, + } + + mb, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) + } + + desc := ocispec.Descriptor{ + MediaType: co.ManifestMediaType, + Digest: digest.Canonical.FromBytes(mb), + Size: int64(len(mb)), + } + + labels := map[string]string{} + labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String() + for i, ch := range manifest.Layers { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err) + } + + ref = remotes.MakeRefKey(ctx, config) + if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err) + } + + return desc, nil +} + +// ReadStripSignature reads in a schema1 manifest and returns a byte array +// with the "signatures" field stripped +func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) { + b, err := io.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB + if err != nil { + return nil, err + } + + return stripSignature(b) +} + +func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch schema 1") + + rc, err := c.fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + + b, err := ReadStripSignature(rc) + rc.Close() + if err != nil { + return err + } + + var m manifest + if err := json.Unmarshal(b, &m); err != nil { + return err + } + if len(m.Manifests) != 0 || len(m.Layers) != 0 { + return errors.New("converter: expected schema1 document but found extra keys") + } + 
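+ // The check above guards against a registry answering the schema 1
+ // request with a schema 2 manifest or an OCI index: those documents
+ // carry "layers" or "manifests" keys that a genuine schema 1 manifest
+ // never has.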
c.pulledManifest = &m + + return nil +} + +func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch blob") + + var ( + ref = remotes.MakeRefKey(ctx, desc) + calc = newBlobStateCalculator() + compressMethod = compression.Gzip + ) + + // size may be unknown, set to zero for content ingest + ingestDesc := desc + if ingestDesc.Size == -1 { + ingestDesc.Size = 0 + } + + cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc)) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + reuse, err := c.reuseLabelBlobState(ctx, desc) + if err != nil { + return err + } + + if reuse { + return nil + } + + ra, err := c.contentStore.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + r, err := compression.DecompressStream(content.NewReader(ra)) + if err != nil { + return err + } + + compressMethod = r.GetCompression() + _, err = io.Copy(calc, r) + r.Close() + if err != nil { + return err + } + } else { + defer cw.Close() + + rc, err := c.fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + eg, _ := errgroup.WithContext(ctx) + pr, pw := io.Pipe() + + eg.Go(func() error { + r, err := compression.DecompressStream(pr) + if err != nil { + return err + } + + compressMethod = r.GetCompression() + _, err = io.Copy(calc, r) + r.Close() + pr.CloseWithError(err) + return err + }) + + eg.Go(func() error { + defer pw.Close() + + return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest) + }) + + if err := eg.Wait(); err != nil { + return err + } + } + + if desc.Size == -1 { + info, err := c.contentStore.Info(ctx, desc.Digest) + if err != nil { + return fmt.Errorf("failed to get blob info: %w", err) + } + desc.Size = info.Size + } + + if compressMethod == compression.Uncompressed { + log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob") + desc.MediaType = images.MediaTypeDockerSchema2Layer + } + + state := calc.State() + + cinfo := content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + labels.LabelUncompressed: state.diffID.String(), + labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty), + }, + } + + if _, err := c.contentStore.Update(ctx, cinfo, "labels."+labels.LabelUncompressed, fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { + return fmt.Errorf("failed to update uncompressed label: %w", err) + } + + c.mu.Lock() + c.blobMap[desc.Digest] = state + c.layerBlobs[state.diffID] = desc + c.mu.Unlock() + + return nil +} + +func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) { + cinfo, err := c.contentStore.Info(ctx, desc.Digest) + if err != nil { + return false, fmt.Errorf("failed to get blob info: %w", err) + } + desc.Size = cinfo.Size + + diffID, ok := cinfo.Labels[labels.LabelUncompressed] + if !ok { + return false, nil + } + + emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer] + if !ok { + return false, nil + } + + isEmpty, err := strconv.ParseBool(emptyVal) + if err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty) + return false, nil + } + + bState := blobState{empty: isEmpty} + + if bState.diffID, err = digest.Parse(diffID); err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label %s: %v", labels.LabelUncompressed, diffID) + return 
false, nil + } + + // NOTE: there is no need to read header to get compression method + // because there are only two kinds of methods. + if bState.diffID == desc.Digest { + desc.MediaType = images.MediaTypeDockerSchema2Layer + } else { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + } + + c.mu.Lock() + c.blobMap[desc.Digest] = bState + c.layerBlobs[bState.diffID] = desc + c.mu.Unlock() + return true, nil +} + +func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) { + if c.pulledManifest == nil { + return nil, nil, errors.New("missing schema 1 manifest for conversion") + } + m := *c.pulledManifest + + if len(m.History) == 0 { + return nil, nil, errors.New("no history") + } + + history := make([]ocispec.History, len(m.History)) + diffIDs := []digest.Digest{} + for i := range m.History { + var h v1History + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err) + } + + blobSum := m.FSLayers[i].BlobSum + + state := c.blobMap[blobSum] + + history[len(history)-i-1] = ocispec.History{ + Author: h.Author, + Comment: h.Comment, + Created: &h.Created, + CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), + EmptyLayer: state.empty, + } + + if !state.empty { + diffIDs = append([]digest.Digest{state.diffID}, diffIDs...) + + } + } + + return history, diffIDs, nil +} + +type fsLayer struct { + BlobSum digest.Digest `json:"blobSum"` +} + +type history struct { + V1Compatibility string `json:"v1Compatibility"` +} + +type manifest struct { + FSLayers []fsLayer `json:"fsLayers"` + History []history `json:"history"` + Layers json.RawMessage `json:"layers,omitempty"` // OCI manifest + Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index +} + +type v1History struct { + Author string `json:"author,omitempty"` + Created time.Time `json:"created"` + Comment string `json:"comment,omitempty"` + ThrowAway *bool `json:"throwaway,omitempty"` + Size *int `json:"Size,omitempty"` // used before ThrowAway field + ContainerConfig struct { + Cmd []string `json:"Cmd,omitempty"` + } `json:"container_config,omitempty"` +} + +// isEmptyLayer returns whether the v1 compatibility history describes an +// empty layer. A return value of true indicates the layer is empty, +// however false does not indicate non-empty. +func isEmptyLayer(compatHistory []byte) (bool, error) { + var h v1History + if err := json.Unmarshal(compatHistory, &h); err != nil { + return false, err + } + + if h.ThrowAway != nil { + return *h.ThrowAway, nil + } + if h.Size != nil { + return *h.Size == 0, nil + } + + // If no `Size` or `throwaway` field is given, then + // it cannot be determined whether the layer is empty + // from the history, return false + return false, nil +} + +type signature struct { + Signatures []jsParsedSignature `json:"signatures"` +} + +type jsParsedSignature struct { + Protected string `json:"protected"` +} + +type protectedBlock struct { + Length int `json:"formatLength"` + Tail string `json:"formatTail"` +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func stripSignature(b []byte) ([]byte, error) { + var sig signature + if err := json.Unmarshal(b, &sig); err != nil { + return nil, err + } + if len(sig.Signatures) == 0 { + return nil, errors.New("no signatures") + } + pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected) + if err != nil { + return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err) + } + + var protected protectedBlock + if err := json.Unmarshal(pb, &protected); err != nil { + return nil, err + } + + if protected.Length > len(b) { + return nil, errors.New("invalid protected length block") + } + + tail, err := joseBase64UrlDecode(protected.Tail) + if err != nil { + return nil, fmt.Errorf("invalid tail base 64 value: %w", err) + } + + return append(b[:protected.Length], tail...), nil +} + +type blobStateCalculator struct { + empty bool + digester digest.Digester +} + +func newBlobStateCalculator() *blobStateCalculator { + return &blobStateCalculator{ + empty: true, + digester: digest.Canonical.Digester(), + } +} + +func (c *blobStateCalculator) Write(p []byte) (int, error) { + if c.empty { + for _, b := range p { + if b != 0x00 { + c.empty = false + break + } + } + } + return c.digester.Hash().Write(p) +} + +func (c *blobStateCalculator) State() blobState { + return blobState{ + empty: c.empty, + diffID: c.digester.Digest(), + } +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go new file mode 100644 index 000000000..95b4810ab --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -0,0 +1,101 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/containerd/containerd/reference" +) + +// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull" +// for "host/foo/bar:baz". +// When push is true, both pull and push are added to the scope. +func RepositoryScope(refspec reference.Spec, push bool) (string, error) { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return "", err + } + s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull" + if push { + s += ",push" + } + return s, nil +} + +// tokenScopesKey is used for the key for context.WithValue(). +// value: []string (e.g. {"registry:foo/bar:pull"}) +type tokenScopesKey struct{} + +// ContextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value. 
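+//
+// For example, for "docker.io/library/busybox:latest" with push set to false
+// the scope added to the context is "repository:library/busybox:pull"; with
+// push set to true it becomes "repository:library/busybox:pull,push".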
+func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) { + s, err := RepositoryScope(refspec, push) + if err != nil { + return nil, err + } + return WithScope(ctx, s), nil +} + +// WithScope appends a custom registry auth scope to the context. +func WithScope(ctx context.Context, scope string) context.Context { + var scopes []string + if v := ctx.Value(tokenScopesKey{}); v != nil { + scopes = v.([]string) + scopes = append(scopes, scope) + } else { + scopes = []string{scope} + } + return context.WithValue(ctx, tokenScopesKey{}, scopes) +} + +// ContextWithAppendPullRepositoryScope is used to append repository pull +// scope into existing scopes indexed by the tokenScopesKey{}. +func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { + return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) +} + +// GetTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. +func GetTokenScopes(ctx context.Context, common []string) []string { + scopes := []string{} + if x := ctx.Value(tokenScopesKey{}); x != nil { + scopes = append(scopes, x.([]string)...) + } + + scopes = append(scopes, common...) + sort.Strings(scopes) + + if len(scopes) == 0 { + return scopes + } + + l := 0 + for idx := 1; idx < len(scopes); idx++ { + // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) + // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. + if scopes[l] == scopes[idx] { + continue + } + + l++ + scopes[l] = scopes[idx] + } + return scopes[:l+1] +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/status.go b/vendor/github.com/containerd/containerd/remotes/docker/status.go new file mode 100644 index 000000000..1a9227725 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/status.go @@ -0,0 +1,101 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "fmt" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/moby/locker" +) + +// Status of a content operation +type Status struct { + content.Status + + Committed bool + + // ErrClosed contains error encountered on close. + ErrClosed error + + // UploadUUID is used by the Docker registry to reference blob uploads + UploadUUID string + + // PushStatus contains status related to push. + PushStatus +} + +type PushStatus struct { + // MountedFrom is the source content was cross-repo mounted from (empty if no cross-repo mount was performed). + MountedFrom string + + // Exists indicates whether content already exists in the repository and wasn't uploaded. 
+ Exists bool +} + +// StatusTracker to track status of operations +type StatusTracker interface { + GetStatus(string) (Status, error) + SetStatus(string, Status) +} + +// StatusTrackLocker to track status of operations with lock +type StatusTrackLocker interface { + StatusTracker + Lock(string) + Unlock(string) +} + +type memoryStatusTracker struct { + statuses map[string]Status + m sync.Mutex + locker *locker.Locker +} + +// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory +func NewInMemoryTracker() StatusTrackLocker { + return &memoryStatusTracker{ + statuses: map[string]Status{}, + locker: locker.New(), + } +} + +func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) { + t.m.Lock() + defer t.m.Unlock() + status, ok := t.statuses[ref] + if !ok { + return Status{}, fmt.Errorf("status for ref %v: %w", ref, errdefs.ErrNotFound) + } + return status, nil +} + +func (t *memoryStatusTracker) SetStatus(ref string, status Status) { + t.m.Lock() + t.statuses[ref] = status + t.m.Unlock() +} + +func (t *memoryStatusTracker) Lock(ref string) { + t.locker.Lock(ref) +} + +func (t *memoryStatusTracker) Unlock(ref string) { + t.locker.Unlock(ref) +} diff --git a/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/vendor/github.com/containerd/containerd/remotes/errors/errors.go new file mode 100644 index 000000000..f60ff0fc2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/errors/errors.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errors + +import ( + "fmt" + "io" + "net/http" +) + +var _ error = ErrUnexpectedStatus{} + +// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status +type ErrUnexpectedStatus struct { + Status string + StatusCode int + Body []byte + RequestURL, RequestMethod string +} + +func (e ErrUnexpectedStatus) Error() string { + return fmt.Sprintf("unexpected status from %s request to %s: %s", e.RequestMethod, e.RequestURL, e.Status) +} + +// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response +func NewUnexpectedStatusErr(resp *http.Response) error { + var b []byte + if resp.Body != nil { + b, _ = io.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB + } + err := ErrUnexpectedStatus{ + Body: b, + Status: resp.Status, + StatusCode: resp.StatusCode, + RequestMethod: resp.Request.Method, + } + if resp.Request.URL != nil { + err.RequestURL = resp.Request.URL.String() + } + return err +} diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go new file mode 100644 index 000000000..f24669dc4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -0,0 +1,413 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package remotes + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "strings" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/semaphore" +) + +type refKeyPrefix struct{} + +// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing +// data in the content store from the FetchHandler. +// +// Used in `MakeRefKey` to determine what the key prefix should be. +func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { + var values map[string]string + if v := ctx.Value(refKeyPrefix{}); v != nil { + values = v.(map[string]string) + } else { + values = make(map[string]string) + } + + values[mediaType] = prefix + return context.WithValue(ctx, refKeyPrefix{}, values) +} + +// MakeRefKey returns a unique reference for the descriptor. This reference can be +// used to lookup ongoing processes related to the descriptor. This function +// may look to the context to namespace the reference appropriately. +func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { + key := desc.Digest.String() + if desc.Annotations != nil { + if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok { + key = fmt.Sprintf("%s@%s", name, desc.Digest.String()) + } + } + + if v := ctx.Value(refKeyPrefix{}); v != nil { + values := v.(map[string]string) + if prefix := values[desc.MediaType]; prefix != "" { + return prefix + "-" + key + } + } + + switch mt := desc.MediaType; { + case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: + return "manifest-" + key + case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: + return "index-" + key + case images.IsLayerType(mt): + return "layer-" + key + case images.IsKnownConfig(mt): + return "config-" + key + default: + log.G(ctx).Warnf("reference for unknown type: %s", mt) + return "unknown-" + key + } +} + +// FetchHandler returns a handler that will fetch all content into the ingester +// discovered in a call to Dispatch. Use with ChildrenHandler to do a full +// recursive fetch. 
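+//
+// A minimal usage sketch, not part of this file (ctx, cs, fetcher and desc
+// are assumed: a context, a content.Store, a Fetcher obtained from a
+// Resolver, and the root descriptor to fetch):
+//
+//	handler := images.Handlers(
+//		FetchHandler(cs, fetcher),  // ingest each descriptor's content
+//		images.ChildrenHandler(cs), // then queue its children
+//	)
+//	if err := images.Dispatch(ctx, handler, nil, desc); err != nil {
+//		// handle the failed fetch
+//	}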
+func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + switch desc.MediaType { + case images.MediaTypeDockerSchema1Manifest: + return nil, fmt.Errorf("%v not supported", desc.MediaType) + default: + err := Fetch(ctx, ingester, fetcher, desc) + if errdefs.IsAlreadyExists(err) { + return nil, nil + } + return nil, err + } + } +} + +// Fetch fetches the given digest into the provided ingester +func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch") + + cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + if err != nil { + return err + } + defer cw.Close() + + ws, err := cw.Status() + if err != nil { + return err + } + + if desc.Size == 0 { + // most likely a poorly configured registry/web front end which responded with no + // Content-Length header; unable (not to mention useless) to commit a 0-length entry + // into the content store. Error out here otherwise the error sent back is confusing + return fmt.Errorf("unable to fetch descriptor (%s) which reports content size of zero: %w", desc.Digest, errdefs.ErrInvalidArgument) + } + if ws.Offset == desc.Size { + // If writer is already complete, commit and return + err := cw.Commit(ctx, desc.Size, desc.Digest) + if err != nil && !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + } + return err + } + + if desc.Size == int64(len(desc.Data)) { + return content.Copy(ctx, cw, bytes.NewReader(desc.Data), desc.Size, desc.Digest) + } + + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + return content.Copy(ctx, cw, rc, desc.Size, desc.Digest) +} + +// PushHandler returns a handler that will push all content from the provider +// using a writer from the pusher. +func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + err := push(ctx, provider, pusher, desc) + return nil, err + } +} + +func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("push") + + var ( + cw content.Writer + err error + ) + if cs, ok := pusher.(content.Ingester); ok { + cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + } else { + cw, err = pusher.Push(ctx, desc) + } + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + return nil + } + defer cw.Close() + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + rd := io.NewSectionReader(ra, 0, desc.Size) + return content.Copy(ctx, cw, rd, desc.Size, desc.Digest) +} + +// PushContent pushes content specified by the descriptor from the provider. +// +// Base handlers can be provided which will be called before any push specific +// handlers. 
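+//
+// A minimal usage sketch, not part of this file (resolver, ref, cs and desc
+// are assumed: a remotes.Resolver, a reference string, a content.Store that
+// holds the image, and its resolved root descriptor):
+//
+//	pusher, err := resolver.Pusher(ctx, ref)
+//	if err != nil {
+//		return err
+//	}
+//	err = PushContent(ctx, pusher, desc, cs, nil, platforms.All, nil)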
+//
+// If the passed in content.Provider is also a content.InfoProvider (such as
+// content.Manager) then this will also annotate the distribution sources using
+// labels prefixed with "containerd.io/distribution.source".
+func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Provider, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error {
+
+	var m sync.Mutex
+	manifests := []ocispec.Descriptor{}
+	indexStack := []ocispec.Descriptor{}
+
+	filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+			m.Lock()
+			manifests = append(manifests, desc)
+			m.Unlock()
+			return nil, images.ErrStopHandler
+		case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+			m.Lock()
+			indexStack = append(indexStack, desc)
+			m.Unlock()
+			return nil, images.ErrStopHandler
+		default:
+			return nil, nil
+		}
+	})
+
+	pushHandler := PushHandler(pusher, store)
+
+	platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform)
+
+	var handler images.Handler
+	if m, ok := store.(content.InfoProvider); ok {
+		annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, m)
+		handler = images.Handlers(annotateHandler, filterHandler, pushHandler)
+	} else {
+		handler = images.Handlers(platformFilterhandler, filterHandler, pushHandler)
+	}
+
+	if wrapper != nil {
+		handler = wrapper(handler)
+	}
+
+	if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
+		return err
+	}
+
+	if err := images.Dispatch(ctx, pushHandler, limiter, manifests...); err != nil {
+		return err
+	}
+
+	// Iterate in reverse order as seen, parent always uploaded after child
+	for i := len(indexStack) - 1; i >= 0; i-- {
+		err := images.Dispatch(ctx, pushHandler, limiter, indexStack[i])
+		if err != nil {
+			// TODO(estesp): until we have a more complete method for index push, we need to report
+			// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
+			// as a marker for this problem
+			if errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") {
+				return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err)
+			}
+			return err
+		}
+	}
+
+	return nil
+}
+
+// SkipNonDistributableBlobs returns a handler that skips blobs that have a media type that is "non-distributable".
+// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed.
+// +// This is based on the media type of the content: +// - application/vnd.oci.image.layer.nondistributable +// - application/vnd.docker.image.rootfs.foreign +func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if images.IsNonDistributable(desc.MediaType) { + log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob") + return nil, images.ErrSkipDesc + } + + if images.IsLayerType(desc.MediaType) { + return nil, nil + } + + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + if len(children) == 0 { + return nil, nil + } + + out := make([]ocispec.Descriptor, 0, len(children)) + for _, child := range children { + if !images.IsNonDistributable(child.MediaType) { + out = append(out, child) + } else { + log.G(ctx).WithField("digest", child.Digest).WithField("mediatype", child.MediaType).Debug("Skipping non-distributable blob") + } + } + return out, nil + } +} + +// FilterManifestByPlatformHandler allows Handler to handle non-target +// platform's manifest and configuration data. +func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // no platform information + if desc.Platform == nil || m == nil { + return children, nil + } + + var descs []ocispec.Descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + if m.Match(*desc.Platform) { + descs = children + } else { + for _, child := range children { + if child.MediaType == images.MediaTypeDockerSchema2Config || + child.MediaType == ocispec.MediaTypeImageConfig { + + descs = append(descs, child) + } + } + } + default: + descs = children + } + return descs, nil + } +} + +// annotateDistributionSourceHandler add distribution source label into +// annotation of config or blob descriptor. +func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.InfoProvider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // Distribution source is only used for config or blob but may be inherited from + // a manifest or manifest list + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + default: + return children, nil + } + + parentSourceAnnotations := desc.Annotations + var parentLabels map[string]string + if pi, err := provider.Info(ctx, desc.Digest); err != nil { + if !errdefs.IsNotFound(err) { + return nil, err + } + } else { + parentLabels = pi.Labels + } + + for i := range children { + child := children[i] + + info, err := provider.Info(ctx, child.Digest) + if err != nil { + if !errdefs.IsNotFound(err) { + return nil, err + } + } + copyDistributionSourceLabels(info.Labels, &child) + + // Annotate with parent labels for cross repo mount or fetch. + // Parent sources may apply to all children since most registries + // enforce that children exist before the manifests. 
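+			// copyDistributionSourceLabels never overwrites a key the child
+			// already carries, so the labels copied from the child's own info
+			// above take precedence over these parent-derived values.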
+ copyDistributionSourceLabels(parentSourceAnnotations, &child) + copyDistributionSourceLabels(parentLabels, &child) + + children[i] = child + } + return children, nil + } +} + +func copyDistributionSourceLabels(from map[string]string, to *ocispec.Descriptor) { + for k, v := range from { + if !strings.HasPrefix(k, labels.LabelDistributionSource+".") { + continue + } + + if to.Annotations == nil { + to.Annotations = make(map[string]string) + } else { + // Only propagate the parent label if the child doesn't already have it. + if _, has := to.Annotations[k]; has { + continue + } + } + to.Annotations[k] = v + } +} diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go new file mode 100644 index 000000000..f200c84bc --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/resolver.go @@ -0,0 +1,94 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package remotes + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Resolver provides remotes based on a locator. +type Resolver interface { + // Resolve attempts to resolve the reference into a name and descriptor. + // + // The argument `ref` should be a scheme-less URI representing the remote. + // Structurally, it has a host and path. The "host" can be used to directly + // reference a specific host or be matched against a specific handler. + // + // The returned name should be used to identify the referenced entity. + // Depending on the remote namespace, this may be immutable or mutable. + // While the name may differ from ref, it should itself be a valid ref. + // + // If the resolution fails, an error will be returned. + Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) + + // Fetcher returns a new fetcher for the provided reference. + // All content fetched from the returned fetcher will be + // from the namespace referred to by ref. + Fetcher(ctx context.Context, ref string) (Fetcher, error) + + // Pusher returns a new pusher for the provided reference + // The returned Pusher should satisfy content.Ingester and concurrent attempts + // to push the same blob using the Ingester API should result in ErrUnavailable. + Pusher(ctx context.Context, ref string) (Pusher, error) +} + +// Fetcher fetches content. +// A fetcher implementation may implement the FetcherByDigest interface too. +type Fetcher interface { + // Fetch the resource identified by the descriptor. + Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) +} + +// FetcherByDigest fetches content by the digest. +type FetcherByDigest interface { + // FetchByDigest fetches the resource identified by the digest. + // + // FetcherByDigest usually returns an incomplete descriptor. 
+ // Typically, the media type is always set to "application/octet-stream", + // and the annotations are unset. + FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) +} + +// Pusher pushes content +type Pusher interface { + // Push returns a content writer for the given resource identified + // by the descriptor. + Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error) +} + +// FetcherFunc allows package users to implement a Fetcher with just a +// function. +type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) + +// Fetch content +func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + return fn(ctx, desc) +} + +// PusherFunc allows package users to implement a Pusher with just a +// function. +type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) + +// Push content +func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return fn(ctx, desc) +} diff --git a/vendor/github.com/containerd/containerd/tracing/helpers.go b/vendor/github.com/containerd/containerd/tracing/helpers.go new file mode 100644 index 000000000..981da6c79 --- /dev/null +++ b/vendor/github.com/containerd/containerd/tracing/helpers.go @@ -0,0 +1,94 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "encoding/json" + "fmt" + "strings" + + "go.opentelemetry.io/otel/attribute" +) + +const ( + spanDelimiter = "." 
+) + +func makeSpanName(names ...string) string { + return strings.Join(names, spanDelimiter) +} + +func any(k string, v interface{}) attribute.KeyValue { + if v == nil { + return attribute.String(k, "") + } + + switch typed := v.(type) { + case bool: + return attribute.Bool(k, typed) + case []bool: + return attribute.BoolSlice(k, typed) + case int: + return attribute.Int(k, typed) + case []int: + return attribute.IntSlice(k, typed) + case int8: + return attribute.Int(k, int(typed)) + case []int8: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int16: + return attribute.Int(k, int(typed)) + case []int16: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int32: + return attribute.Int64(k, int64(typed)) + case []int32: + ls := make([]int64, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int64(i)) + } + return attribute.Int64Slice(k, ls) + case int64: + return attribute.Int64(k, typed) + case []int64: + return attribute.Int64Slice(k, typed) + case float64: + return attribute.Float64(k, typed) + case []float64: + return attribute.Float64Slice(k, typed) + case string: + return attribute.String(k, typed) + case []string: + return attribute.StringSlice(k, typed) + } + + if stringer, ok := v.(fmt.Stringer); ok { + return attribute.String(k, stringer.String()) + } + if b, err := json.Marshal(v); b != nil && err == nil { + return attribute.String(k, string(b)) + } + return attribute.String(k, fmt.Sprintf("%v", v)) +} diff --git a/vendor/github.com/containerd/containerd/tracing/log.go b/vendor/github.com/containerd/containerd/tracing/log.go new file mode 100644 index 000000000..98fa16f93 --- /dev/null +++ b/vendor/github.com/containerd/containerd/tracing/log.go @@ -0,0 +1,66 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// NewLogrusHook creates a new logrus hook +func NewLogrusHook() *LogrusHook { + return &LogrusHook{} +} + +// LogrusHook is a logrus hook which adds logrus events to active spans. +// If the span is not recording or the span context is invalid, the hook is a no-op. +type LogrusHook struct{} + +// Levels returns the logrus levels that this hook is interested in. +func (h *LogrusHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// Fire is called when a log event occurs. 
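+//
+// A registration sketch, not part of this file (the logger and message are
+// illustrative):
+//
+//	logger := logrus.StandardLogger()
+//	logger.AddHook(NewLogrusHook())
+//	logger.WithContext(ctx).Info("also recorded on the active span, if any")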
+func (h *LogrusHook) Fire(entry *logrus.Entry) error { + span := trace.SpanFromContext(entry.Context) + if span == nil { + return nil + } + + if !span.SpanContext().IsValid() || !span.IsRecording() { + return nil + } + + span.AddEvent( + entry.Message, + trace.WithAttributes(logrusDataToAttrs(entry.Data)...), + trace.WithAttributes(attribute.String("level", entry.Level.String())), + trace.WithTimestamp(entry.Time), + ) + + return nil +} + +func logrusDataToAttrs(data logrus.Fields) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0, len(data)) + for k, v := range data { + attrs = append(attrs, any(k, v)) + } + return attrs +} diff --git a/vendor/github.com/containerd/containerd/tracing/tracing.go b/vendor/github.com/containerd/containerd/tracing/tracing.go new file mode 100644 index 000000000..80d2b95c0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/tracing/tracing.go @@ -0,0 +1,129 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "context" + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + "go.opentelemetry.io/otel/trace" +) + +// StartConfig defines configuration for a new span object. +type StartConfig struct { + spanOpts []trace.SpanStartOption +} + +type SpanOpt func(config *StartConfig) + +// WithHTTPRequest marks span as a HTTP request operation from client to server. +// It'll append attributes from the HTTP request object and mark it with `SpanKindClient` type. +// +// Deprecated: use upstream functionality from otelhttp directly instead. This function is kept for API compatibility +// but no longer works as expected due to required functionality no longer exported in OpenTelemetry libraries. +func WithHTTPRequest(_ *http.Request) SpanOpt { + return func(config *StartConfig) { + config.spanOpts = append(config.spanOpts, + trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server + ) + } +} + +// UpdateHTTPClient updates the http client with the necessary otel transport +func UpdateHTTPClient(client *http.Client, name string) { + client.Transport = otelhttp.NewTransport( + client.Transport, + otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string { + return name + }), + ) +} + +// StartSpan starts child span in a context. +func StartSpan(ctx context.Context, opName string, opts ...SpanOpt) (context.Context, *Span) { + config := StartConfig{} + for _, fn := range opts { + fn(&config) + } + tracer := otel.Tracer("") + if parent := trace.SpanFromContext(ctx); parent != nil && parent.SpanContext().IsValid() { + tracer = parent.TracerProvider().Tracer("") + } + ctx, span := tracer.Start(ctx, opName, config.spanOpts...) + return ctx, &Span{otelSpan: span} +} + +// SpanFromContext returns the current Span from the context. 
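+//
+// A usage sketch, not part of this file (span and event names are
+// illustrative):
+//
+//	ctx, span := StartSpan(ctx, Name("backup", "Upload"))
+//	defer span.End()
+//	// ...later, deeper in the call stack:
+//	SpanFromContext(ctx).AddEvent("upload finished")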
+func SpanFromContext(ctx context.Context) *Span { + return &Span{ + otelSpan: trace.SpanFromContext(ctx), + } +} + +// Span is wrapper around otel trace.Span. +// Span is the individual component of a trace. It represents a +// single named and timed operation of a workflow that is traced. +type Span struct { + otelSpan trace.Span +} + +// End completes the span. +func (s *Span) End() { + s.otelSpan.End() +} + +// AddEvent adds an event with provided name and options. +func (s *Span) AddEvent(name string, options ...trace.EventOption) { + s.otelSpan.AddEvent(name, options...) +} + +// SetStatus sets the status of the current span. +// If an error is encountered, it records the error and sets span status to Error. +func (s *Span) SetStatus(err error) { + if err != nil { + s.otelSpan.RecordError(err) + s.otelSpan.SetStatus(codes.Error, err.Error()) + } else { + s.otelSpan.SetStatus(codes.Ok, "") + } +} + +// SetAttributes sets kv as attributes of the span. +func (s *Span) SetAttributes(kv ...attribute.KeyValue) { + s.otelSpan.SetAttributes(kv...) +} + +// Name sets the span name by joining a list of strings in dot separated format. +func Name(names ...string) string { + return makeSpanName(names...) +} + +// Attribute takes a key value pair and returns attribute.KeyValue type. +func Attribute(k string, v interface{}) attribute.KeyValue { + return any(k, v) +} + +// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry +// specification for a span. +func HTTPStatusCodeAttributes(code int) []attribute.KeyValue { + return []attribute.KeyValue{semconv.HTTPStatusCodeKey.Int(code)} +} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go new file mode 100644 index 000000000..45767163c --- /dev/null +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -0,0 +1,34 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package version + +import "runtime" + +var ( + // Package is filled at linking time + Package = "github.com/containerd/containerd" + + // Version holds the complete version number. Filled in at linking time. + Version = "1.7.11+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" + + // GoVersion is Go tree's version. 
+ GoVersion = runtime.Version() +) diff --git a/vendor/github.com/containerd/log/.golangci.yml b/vendor/github.com/containerd/log/.golangci.yml new file mode 100644 index 000000000..a695775df --- /dev/null +++ b/vendor/github.com/containerd/log/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - exportloopref # Checks for pointers to enclosing loop variables + - gofmt + - goimports + - gosec + - ineffassign + - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code + disable: + - errcheck + +run: + timeout: 5m + skip-dirs: + - api + - cluster + - design + - docs + - docs/man + - releases + - reports + - test # e2e scripts diff --git a/vendor/github.com/containerd/log/LICENSE b/vendor/github.com/containerd/log/LICENSE new file mode 100644 index 000000000..584149b6e --- /dev/null +++ b/vendor/github.com/containerd/log/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/log/README.md b/vendor/github.com/containerd/log/README.md new file mode 100644 index 000000000..00e084988 --- /dev/null +++ b/vendor/github.com/containerd/log/README.md @@ -0,0 +1,17 @@ +# log + +A Go package providing a common logging interface across containerd repositories and a way for clients to use and configure logging in containerd packages. + +This package is not intended to be used as a standalone logging package outside of the containerd ecosystem and is intended as an interface wrapper around a logging implementation. +In the future this package may be replaced with a common go logging interface. + +## Project details + +**log** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. + diff --git a/vendor/github.com/containerd/log/context.go b/vendor/github.com/containerd/log/context.go new file mode 100644 index 000000000..20153066f --- /dev/null +++ b/vendor/github.com/containerd/log/context.go @@ -0,0 +1,182 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package log provides types and functions related to logging, passing +// loggers through a context, and attaching context to the logger. +// +// # Transitional types +// +// This package contains various types that are aliases for types in [logrus]. +// These aliases are intended for transitioning away from hard-coding logrus +// as logging implementation. Consumers of this package are encouraged to use +// the type-aliases from this package instead of directly using their logrus +// equivalent. +// +// The intent is to replace these aliases with locally defined types and +// interfaces once all consumers are no longer directly importing logrus +// types. +// +// IMPORTANT: due to the transitional purpose of this package, it is not +// guaranteed for the full logrus API to be provided in the future. 
As +// outlined, these aliases are provided as a step to transition away from +// a specific implementation which, as a result, exposes the full logrus API. +// While no decisions have been made on the ultimate design and interface +// provided by this package, we do not expect carrying "less common" features. +package log + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" +) + +// G is a shorthand for [GetLogger]. +// +// We may want to define this locally to a package to get package tagged log +// messages. +var G = GetLogger + +// L is an alias for the standard logger. +var L = &Entry{ + Logger: logrus.StandardLogger(), + // Default is three fields plus a little extra room. + Data: make(Fields, 6), +} + +type loggerKey struct{} + +// Fields type to pass to "WithFields". +type Fields = map[string]any + +// Entry is a logging entry. It contains all the fields passed with +// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn, +// Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +// +// Entry is a transitional type, and currently an alias for [logrus.Entry]. +type Entry = logrus.Entry + +// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using +// zeros to ensure the formatted time is always the same number of +// characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// Level is a logging level. +type Level = logrus.Level + +// Supported log levels. +const ( + // TraceLevel level. Designates finer-grained informational events + // than [DebugLevel]. + TraceLevel Level = logrus.TraceLevel + + // DebugLevel level. Usually only enabled when debugging. Very verbose + // logging. + DebugLevel Level = logrus.DebugLevel + + // InfoLevel level. General operational entries about what's going on + // inside the application. + InfoLevel Level = logrus.InfoLevel + + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel Level = logrus.WarnLevel + + // ErrorLevel level. Logs errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel Level = logrus.ErrorLevel + + // FatalLevel level. Logs and then calls "logger.Exit(1)". It exits + // even if the logging level is set to Panic. + FatalLevel Level = logrus.FatalLevel + + // PanicLevel level. This is the highest level of severity. Logs and + // then calls panic with the message passed to Debug, Info, ... + PanicLevel Level = logrus.PanicLevel +) + +// SetLevel sets log level globally. It returns an error if the given +// level is not supported. +// +// level can be one of: +// +// - "trace" ([TraceLevel]) +// - "debug" ([DebugLevel]) +// - "info" ([InfoLevel]) +// - "warn" ([WarnLevel]) +// - "error" ([ErrorLevel]) +// - "fatal" ([FatalLevel]) +// - "panic" ([PanicLevel]) +func SetLevel(level string) error { + lvl, err := logrus.ParseLevel(level) + if err != nil { + return err + } + + L.Logger.SetLevel(lvl) + return nil +} + +// GetLevel returns the current log level. +func GetLevel() Level { + return L.Logger.GetLevel() +} + +// OutputFormat specifies a log output format. +type OutputFormat string + +// Supported log output formats. +const ( + // TextFormat represents the text logging format. + TextFormat OutputFormat = "text" + + // JSONFormat represents the JSON logging format. + JSONFormat OutputFormat = "json" +) + +// SetFormat sets the log output format ([TextFormat] or [JSONFormat]). 
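+//
+// For example (the fallback handling is illustrative):
+//
+//	if err := SetFormat(JSONFormat); err != nil {
+//		L.WithError(err).Warn("keeping the default log format")
+//	}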
+func SetFormat(format OutputFormat) error { + switch format { + case TextFormat: + L.Logger.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: RFC3339NanoFixed, + FullTimestamp: true, + }) + return nil + case JSONFormat: + L.Logger.SetFormatter(&logrus.JSONFormatter{ + TimestampFormat: RFC3339NanoFixed, + }) + return nil + default: + return fmt.Errorf("unknown log format: %s", format) + } +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx)) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *Entry { + if logger := ctx.Value(loggerKey{}); logger != nil { + return logger.(*Entry) + } + return L.WithContext(ctx) +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS new file mode 100644 index 000000000..483743c99 --- /dev/null +++ b/vendor/github.com/docker/cli/AUTHORS @@ -0,0 +1,852 @@ +# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See scripts/docs/generate-authors.sh to make modifications. + +Aanand Prasad +Aaron L. Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Abreto FU +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrian Plata +Adrien Duermael +Adrien Folie +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akhil Mohan +Akihiro Suda +Akim Demaille +Alan Thompson +Albert Callarisa +Alberto Roura +Albin Kerouanton +Aleksa Sarai +Aleksander Piotrowski +Alessandro Boch +Alex Couture-Beil +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alexey Igrychev +Alexis Couvreur +Alfred Landrum +Alicia Lauerman +Allen Sun +Alvin Deng +Amen Belayneh +Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anca Iordache +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andres G. 
Aragoneses +Andres Leon Rangel +Andrew France +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrey Petrov +Andrii Berehuliak +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Arko Dasgupta +Arnaud Porterie +Arnaud Rebillout +Arthur Peka +Ashwini Oruganti +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bodenmiller +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benjamin Böhmke +Benjamin Nater +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Bishal Das +Boaz Shuster +Bogdan Anton +Boris Pruessmann +Brad Baker +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Brian Wieder +Bruno Sousa +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +Charlotte Mach +ChaYoung You +Chee Hau Lim +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Couzens +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Vermilion +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christopher Svensson +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Conner Crosby +Corey Farrell +Corey Quon +Cory Bennet +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +Daisuke Ito +dalanlan +Damien Nadé +Dan Cotora +Daniel Artine +Daniel Cassidy +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Daniil Nikolenko +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Alvarez +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Karlsson +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek McGowan +Des Preston +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Djordje Lukic +Dmitriy Fishman +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dominik Braun +Don Kjer +Dong Chen +DongGeon Lee +Doug Davis +Drew Erny +Ed Costello +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eric Curtin +Eric Engestrom +Eric G. Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik Humphrey +Erik St. Martin +Essam A. Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Geyer +Felix Hupfeld +Felix Rabe +fezzik1620 +Filip JareÅ¡ +Flavio Crisciani +Florian Klein +Forest Johnson +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. 
Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Gore +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Xie +Gianluca Borello +Gildas Cuisinier +Gio d'Amelio +Gleb Stsenov +Goksu Toprak +Gou Rao +Govind Rai +Grant Reaber +Greg Pflaum +Gsealy +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +Guillaume Tardif +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Hector S +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hu Keping +Huayi Zhang +Hugo Gabriel Eyherabide +huqun +Huu Nguyen +Hyzhou Zhy +Iain Samuel McLean Elder +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Grund +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Lambert +Jake Sanders +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Hall +Jason Heiss +Jason Plum +Jay Kamat +Jean Lecordier +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jennings Zhang +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jie Luo +Jilles Oldenbeuving +Jim Galasyn +Jim Lin +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Abbey +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard +John Howard +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. Martinez +John Willis +Jon Johnson +Jon Zeolla +Jonatas Baldin +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonathan Warriss-Simmons +Jonh Wendell +Jordan Jennings +Jorge Vallecillo +Jose J. 
Escobar <53836904+jescobar-docker@users.noreply.github.com> +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julien Barbier +Julien Kassar +Julien Maitrehenry +Justas Brazauskas +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Kathryn Spiers +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Kelton Bassingthwaite +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Alvarez +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +Kevin Woblick +khaled souf +Kim Eik +Kir Kolyshkin +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Kyle Mitofsky +Lachlan Cooper +Lai Jiangshan +Lars Kellogg-Stedman +Laura Frank +Laurent Erignoux +Lee Gaines +Lei Jitang +Lennie +Leo Gallucci +Leonid Skorospelov +Lewis Daly +Li Yi +Li Yi +Liang-Chi Hsieh +Lifubang +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Lucas Chan +Luka Hartwig +Lukas Heeren +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Maciej Kalisz +Madhav Puri +Madhu Venugopal +Madhur Batra +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marc Cornellà +Marco Mariani +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Ileana +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathieu Champlon +Mathieu Rollet +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Mauro Porras P +Max Shytikov +Maxime Petazzoni +Maximillian Fan Xavier +Mei ChunTao +Metal <2466052+tedhexaflow@users.noreply.github.com> +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael West +Michal Minář +MichaÅ‚ Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Dalton +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Miroslav Gula +Misty Stanley-Jones +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Morten Hekkvang +Morten Linderud +Moysés Borges +Mozi <29089388+pzhlkj6612@users.noreply.github.com> +Mrunal Patel +muicoder +Murukesh Mohanan +Muthukumar R +Máximo Cuadros +MÃ¥rten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nick Santos +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +O.S. 
Tezer +Odin Ugedal +ohmystack +OKA Naoya +Oliver Pomeroy +Olle Jonsson +Olli Janatuinen +Oscar Wieman +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Hemmer +Patrick Lang +Paul +Paul Kehrer +Paul Lietar +Paul Mulders +Paul Weaver +Pavel Pospisil +PaweÅ‚ Gronowski +PaweÅ‚ Pokrywka +PaweÅ‚ Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Dave Hello +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +Phong Tran +pidster +Pieter E Smit +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +qudongfang +Raghavendra K T +Rahul Kadyan +Rahul Zoldyck +Ravi Shekhar Jethani +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com> +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Roch Feuillade +Rodolfo Ortiz +Rogelio Canedo +Rohan Verma +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Sam Thibault +Samarth Shah +Sambuddha Basu +Sami Tabet +Samuel Cochran +Samuel Karp +Sandro Jäckel +Santhosh Manohar +Sargun Dhillon +Saswat Bhattacharya +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Simon Heimberg +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +Spring Lee +squeegels +Srini Brahmaroutu +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Stoica-Marcu Floris-Andrei +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Takeshi Koenuma +Takuya Noguchi +Taylor Jones +Teiva Harsanyi +Tejaswini Duggaraju +Tengfei Wang +Teppei Fukuda +Thatcher Peskens +Thibault Coupin +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Sampson +Tim Smith +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Bäckman +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulrich Bareth +Ulysses Souza +Umesh Yadav +Valentin Lorentz +Vardan Pogosian +Venkateswara Reddy Bukkasamudram +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +William Henry +Xianglin Gao +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yue Zhang +Yunxiang Huang +Zachary Romero +Zander Mackie +zebrilee +Zeel B Patel +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Álex González +Álvaro Lázaro +Átila Camurça Alves +Александр Менщиков <__Singleton__@hackerdom.ru> +徐俊杰 diff --git a/vendor/github.com/docker/cli/LICENSE b/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 000000000..9c8e20ab8 --- /dev/null +++ b/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 000000000..58b19b6d1 --- /dev/null +++ b/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
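The vendored `docker/cli/cli/config` package that follows is the upstream entry point for reading `~/.docker/config.json` (or `$DOCKER_CONFIG`) and resolving registry credentials. For orientation only, here is a minimal, hypothetical usage sketch of the API added below (`LoadDefaultConfigFile`, `GetAuthConfig`, `Dir`); the registry hostname is a placeholder and this snippet is not part of the patch:

```go
// Illustrative only — not part of the vendored sources. A minimal sketch of
// how a caller (e.g. test tooling that needs registry credentials) might
// consume the vendored docker/cli config package shown below.
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/config"
)

func main() {
	// LoadDefaultConfigFile resolves the default config file and wires up a
	// platform credentials store when the file contains no inline auth.
	cfg := config.LoadDefaultConfigFile(os.Stderr)

	// GetAuthConfig consults the configured credential helper (or the plain
	// file store) for the named registry. "registry.example.com" is a
	// hypothetical host used purely for illustration.
	auth, err := cfg.GetAuthConfig("registry.example.com")
	if err != nil {
		fmt.Fprintf(os.Stderr, "reading credentials: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("config dir: %s, user: %s\n", config.Dir(), auth.Username)
}
```

As the vendored sources below show, `LoadDefaultConfigFile` falls back to `credentials.DetectDefaultStore` when the loaded file contains no auth, so the sketch above transparently picks up `pass`/`osxkeychain`/`wincred` helpers where available.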
diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go new file mode 100644 index 000000000..b7c05c3f8 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -0,0 +1,153 @@ +package config + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/pkg/homedir" + "github.com/pkg/errors" +) + +const ( + // ConfigFileName is the name of config file + ConfigFileName = "config.json" + configFileDir = ".docker" + oldConfigfile = ".dockercfg" // Deprecated: remove once we stop printing deprecation warning + contextsDir = "contexts" +) + +var ( + initConfigDir = new(sync.Once) + configDir string + homeDir string +) + +// resetHomeDir is used in testing to reset the "homeDir" package variable to +// force re-lookup of the home directory between tests. +func resetHomeDir() { + homeDir = "" +} + +func getHomeDir() string { + if homeDir == "" { + homeDir = homedir.Get() + } + return homeDir +} + +// resetConfigDir is used in testing to reset the "configDir" package variable +// and its sync.Once to force re-lookup between tests. +func resetConfigDir() { + configDir = "" + initConfigDir = new(sync.Once) +} + +func setConfigDir() { + if configDir != "" { + return + } + configDir = os.Getenv("DOCKER_CONFIG") + if configDir == "" { + configDir = filepath.Join(getHomeDir(), configFileDir) + } +} + +// Dir returns the directory the configuration file is stored in +func Dir() string { + initConfigDir.Do(setConfigDir) + return configDir +} + +// ContextStoreDir returns the directory the docker contexts are stored in +func ContextStoreDir() string { + return filepath.Join(Dir(), contextsDir) +} + +// SetDir sets the directory the configuration file is stored in +func SetDir(dir string) { + configDir = filepath.Clean(dir) +} + +// Path returns the path to a file relative to the config dir +func Path(p ...string) (string, error) { + path := filepath.Join(append([]string{Dir()}, p...)...) + if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { + return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) + } + return path, nil +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// Load reads the configuration files in the given directory, and sets up +// the auth config information and returns values. 
+// FIXME: use the internal golang config parser +func Load(configDir string) (*configfile.ConfigFile, error) { + cfg, _, err := load(configDir) + return cfg, err +} + +// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file +// so we can remove the bool return value and collapse this back into `Load` +func load(configDir string) (*configfile.ConfigFile, bool, error) { + printLegacyFileWarning := false + + if configDir == "" { + configDir = Dir() + } + + filename := filepath.Join(configDir, ConfigFileName) + configFile := configfile.New(filename) + + // Try happy path first - latest config file + if file, err := os.Open(filename); err == nil { + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = errors.Wrap(err, filename) + } + return configFile, printLegacyFileWarning, err + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return configFile, printLegacyFileWarning, errors.Wrap(err, filename) + } + + // Can't find latest config file so check for the old one + filename = filepath.Join(getHomeDir(), oldConfigfile) + if _, err := os.Stat(filename); err == nil { + printLegacyFileWarning = true + } + return configFile, printLegacyFileWarning, nil +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { + configFile, printLegacyFileWarning, err := load(Dir()) + if err != nil { + fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) + } + if printLegacyFileWarning { + _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format has been removed and the configuration file will be ignored") + } + if !configFile.ContainsAuth() { + configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) + } + return configFile +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go new file mode 100644 index 000000000..442c31110 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -0,0 +1,352 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + PluginsFormat string `json:"pluginsFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` + ServicesFormat string `json:"servicesFormat,omitempty"` + TasksFormat string `json:"tasksFormat,omitempty"` + SecretFormat string 
`json:"secretFormat,omitempty"` + ConfigFormat string `json:"configFormat,omitempty"` + NodesFormat string `json:"nodesFormat,omitempty"` + PruneFilters []string `json:"pruneFilters,omitempty"` + Proxies map[string]ProxyConfig `json:"proxies,omitempty"` + Experimental string `json:"experimental,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` + Plugins map[string]map[string]string `json:"plugins,omitempty"` + Aliases map[string]string `json:"aliases,omitempty"` +} + +// ProxyConfig contains proxy configuration settings +type ProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` + AllProxy string `json:"allProxy,omitempty"` +} + +// New initializes an empty configuration file for the given filename 'fn' +func New(fn string) *ConfigFile { + return &ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + Plugins: make(map[string]map[string]string), + Aliases: make(map[string]string), + } +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + if ac.Auth != "" { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return nil +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. 
+func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// GetAuthConfigs returns the mapping of repo to auth configuration +func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { + if configFile.AuthConfigs == nil { + configFile.AuthConfigs = make(map[string]types.AuthConfig) + } + return configFile.AuthConfigs +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + // User-Agent header is automatically set, and should not be stored in the configuration + for v := range configFile.HTTPHeaders { + if strings.EqualFold(v, "User-Agent") { + delete(configFile.HTTPHeaders, v) + } + } + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() (retErr error) { + if configFile.Filename == "" { + return errors.Errorf("Can't save config with empty filename") + } + + dir := filepath.Dir(configFile.Filename) + if err := os.MkdirAll(dir, 0o700); err != nil { + return err + } + temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename)) + if err != nil { + return err + } + defer func() { + temp.Close() + if retErr != nil { + if err := os.Remove(temp.Name()); err != nil { + logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") + } + } + }() + + err = configFile.SaveToWriter(temp) + if err != nil { + return err + } + + if err := temp.Close(); err != nil { + return errors.Wrap(err, "error closing temp file") + } + + // Handle situation where the configfile is a symlink + cfgFile := configFile.Filename + if f, err := os.Readlink(cfgFile); err == nil { + cfgFile = f + } + + // Try copying the current config file (if any) ownership and permissions + copyFilePermissions(cfgFile, temp.Name()) + return os.Rename(temp.Name(), cfgFile) +} + +// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and +// then checking this against any environment variables provided to the container +func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { + var cfgKey string + + if _, ok := configFile.Proxies[host]; !ok { + cfgKey = "default" + } else { + cfgKey = host + } + + config := configFile.Proxies[cfgKey] + permitted := map[string]*string{ + "HTTP_PROXY": &config.HTTPProxy, + "HTTPS_PROXY": &config.HTTPSProxy, + "NO_PROXY": &config.NoProxy, + "FTP_PROXY": &config.FTPProxy, + "ALL_PROXY": &config.AllProxy, + } + m := runOpts + if m == nil { + m = make(map[string]*string) + } + for k := range permitted { + if *permitted[k] == "" { + continue + } + if _, ok := 
m[k]; !ok { + m[k] = permitted[k] + } + if _, ok := m[strings.ToLower(k)]; !ok { + m[strings.ToLower(k)] = permitted[k] + } + } + return m +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", errors.Errorf("Something went wrong decoding auth config") + } + userName, password, ok := strings.Cut(string(decoded), ":") + if !ok || userName == "" { + return "", "", errors.Errorf("Invalid auth configuration file") + } + return userName, strings.Trim(password, "\x00"), nil +} + +// GetCredentialsStore returns a new credentials store from the settings in the +// configuration file +func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { + if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { + return newNativeStore(configFile, helper) + } + return credentials.NewFileStore(configFile) +} + +// var for unit testing. +var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { + return credentials.NewNativeStore(configFile, helperSuffix) +} + +// GetAuthConfig for a repository from the credential store +func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { + return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { + if c.CredentialHelpers != nil && registryHostname != "" { + if helper, exists := c.CredentialHelpers[registryHostname]; exists { + return helper + } + } + return c.CredentialsStore +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. +func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + addAll := func(from map[string]types.AuthConfig) { + for reg, ac := range from { + auths[reg] = ac + } + } + + defaultStore := configFile.GetCredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(newAuths) + + // Auth configs from a registry-specific helper should override those from the default store. 
+ for registryHostname := range configFile.CredentialHelpers { + newAuth, err := configFile.GetAuthConfig(registryHostname) + if err != nil { + logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname) + continue + } + auths[registryHostname] = newAuth + } + return auths, nil +} + +// GetFilename returns the file name that this config file is based on. +func (configFile *ConfigFile) GetFilename() string { + return configFile.Filename +} + +// PluginConfig retrieves the requested option for the given plugin. +func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { + if configFile.Plugins == nil { + return "", false + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + return "", false + } + value, ok := pluginConfig[option] + return value, ok +} + +// SetPluginConfig sets the option to the given value for the given +// plugin. Passing a value of "" will remove the option. If removing +// the final config item for a given plugin then also cleans up the +// overall plugin entry. +func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { + if configFile.Plugins == nil { + configFile.Plugins = make(map[string]map[string]string) + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + pluginConfig = make(map[string]string) + configFile.Plugins[pluginname] = pluginConfig + } + if value != "" { + pluginConfig[option] = value + } else { + delete(pluginConfig, option) + } + if len(pluginConfig) == 0 { + delete(configFile.Plugins, pluginname) + } +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go new file mode 100644 index 000000000..353887547 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -0,0 +1,36 @@ +//go:build !windows +// +build !windows + +package configfile + +import ( + "os" + "syscall" +) + +// copyFilePermissions copies file ownership and permissions from "src" to "dst", +// ignoring any error during the process. +func copyFilePermissions(src, dst string) { + var ( + mode os.FileMode = 0o600 + uid, gid int + ) + + fi, err := os.Stat(src) + if err != nil { + return + } + if fi.Mode().IsRegular() { + mode = fi.Mode() + } + if err := os.Chmod(dst, mode); err != nil { + return + } + + uid = int(fi.Sys().(*syscall.Stat_t).Uid) + gid = int(fi.Sys().(*syscall.Stat_t).Gid) + + if uid > 0 && gid > 0 { + _ = os.Chown(dst, uid, gid) + } +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go new file mode 100644 index 000000000..42fffc39a --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go @@ -0,0 +1,5 @@ +package configfile + +func copyFilePermissions(src, dst string) { + // TODO implement for Windows +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go new file mode 100644 index 000000000..28d58ec48 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. 
+ Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go new file mode 100644 index 000000000..402235bff --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go @@ -0,0 +1,21 @@ +package credentials + +import ( + exec "golang.org/x/sys/execabs" +) + +// DetectDefaultStore return the default credentials store for the platform if +// the store executable is available. +func DetectDefaultStore(store string) string { + platformDefault := defaultCredentialsStore() + + // user defined or no default for platform + if store != "" || platformDefault == "" { + return store + } + + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { + return platformDefault + } + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go new file mode 100644 index 000000000..5d42dec62 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "osxkeychain" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go new file mode 100644 index 000000000..a9012c6d4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go @@ -0,0 +1,13 @@ +package credentials + +import ( + "os/exec" +) + +func defaultCredentialsStore() string { + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + + return "secretservice" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go new file mode 100644 index 000000000..c9630ea51 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -0,0 +1,8 @@ +//go:build !windows && !darwin && !linux +// +build !windows,!darwin,!linux + +package credentials + +func defaultCredentialsStore() string { + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go new file mode 100644 index 000000000..bb799ca61 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "wincred" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go new file mode 100644 index 000000000..ea30fc300 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -0,0 +1,81 @@ +package credentials + +import ( + "strings" + + "github.com/docker/cli/cli/config/types" +) + +type store interface { + Save() error + GetAuthConfigs() map[string]types.AuthConfig + GetFilename() string +} + +// 
fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file store +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file store) Store { + return &fileStore{file: file} +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.GetAuthConfigs(), serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. +func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.GetAuthConfigs()[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for r, ac := range c.file.GetAuthConfigs() { + if serverAddress == ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.GetAuthConfigs(), nil +} + +// Store saves the given credentials in the file store. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + authConfigs := c.file.GetAuthConfigs() + authConfigs[authConfig.ServerAddress] = authConfig + return c.file.Save() +} + +func (c *fileStore) GetFilename() string { + return c.file.GetFilename() +} + +func (c *fileStore) IsFileStore() bool { + return true +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + hostName, _, _ := strings.Cut(stripped, "/") + return hostName +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go new file mode 100644 index 000000000..f9619b038 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -0,0 +1,143 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" //nolint:gosec // ignore G101: Potential hardcoded credentials + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file store, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. 
+func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. +func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fallback to old credential in plain text to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keychain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. 
+func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 000000000..056af6b84 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/.dockerignore b/vendor/github.com/docker/distribution/.dockerignore new file mode 100644 index 000000000..e660fd93d --- /dev/null +++ b/vendor/github.com/docker/distribution/.dockerignore @@ -0,0 +1 @@ +bin/ diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore new file mode 100644 index 000000000..4cf7888e9 --- /dev/null +++ b/vendor/github.com/docker/distribution/.gitignore @@ -0,0 +1,38 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# never checkin from the bin file (for now) +bin/* + +# Test key files +*.pem + +# Cover profiles +*.out + +# Editor/IDE specific files. +*.sublime-project +*.sublime-workspace +.idea/* diff --git a/vendor/github.com/docker/distribution/.golangci.yml b/vendor/github.com/docker/distribution/.golangci.yml new file mode 100644 index 000000000..36c083b0f --- /dev/null +++ b/vendor/github.com/docker/distribution/.golangci.yml @@ -0,0 +1,27 @@ +linters: + enable: + - structcheck + - varcheck + - staticcheck + - unconvert + - gofmt + - goimports + - golint + - ineffassign + - vet + - unused + - misspell + disable: + - errcheck + +run: + deadline: 2m + skip-dirs: + - vendor + +issues: + exclude-rules: + # io/ioutil is deprecated, but won't be removed until Go v2. It's safe to ignore for the release/2.8 branch. 
+ - text: "SA1019: \"io/ioutil\" has been deprecated since Go 1.16" + linters: + - staticcheck diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap new file mode 100644 index 000000000..d94c3936e --- /dev/null +++ b/vendor/github.com/docker/distribution/.mailmap @@ -0,0 +1,51 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard +Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita +Yu Wang yuwaMSFT2 +Yu Wang Yu Wang (UC) +Olivier Gambier dmp +Olivier Gambier Olivier +Olivier Gambier Olivier +Elsan Li æŽæ¥  elsanli(æŽæ¥ ) +Rui Cao ruicao +Gwendolynne Barr gbarr01 +Haibing Zhou 周海兵 zhouhaibing089 +Feng Honglin tifayuki +Helen Xie Helen-xie +Mike Brown Mike Brown +Manish Tomar Manish Tomar +Sakeven Jiang sakeven +Milos Gajdos Milos Gajdos +Derek McGowan Derek McGowa +Adrian Plata Adrian Plata <@users.noreply.github.com> +Sebastiaan van Stijn Sebastiaan van Stijn +Vishesh Jindal Vishesh Jindal +Wang Yan Wang Yan +Chris Patterson Chris Patterson +Eohyung Lee Eohyung Lee +João Pereira <484633+joaodrp@users.noreply.github.com> +Smasherr Smasherr +Thomas Berger Thomas Berger +Samuel Karp Samuel Karp +Justin Cormack +sayboras +CrazyMax <1951866+crazy-max@users.noreply.github.com> +Hayley Swimelar +Jose D. Gomez R +Shengjing Zhu +Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 000000000..2981d016b --- /dev/null +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,117 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... 
or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/golang/lint/golint + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendor +directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry --version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. 
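The version string printed by `registry --version` above, and the `version` stage of the vendored Dockerfile further down, both rely on Go's link-time variable injection (`-ldflags "-X ..."`). A minimal, hypothetical illustration of that mechanism follows; the `main.version` variable and build command are illustrative stand-ins, not the actual `github.com/docker/distribution/version` package:

```go
// Illustrative only: how a version string like the one printed by
// `registry --version` is injected at link time with -ldflags -X.
package main

import "fmt"

// version is overridden at build time, e.g.:
//   go build -ldflags "-X main.version=v2.0.0-alpha.1" .
var version = "unknown"

func main() {
	fmt.Println("registry", version) // prints the injected version
}
```

Built without flags this prints `registry unknown`; built with the `-ldflags` shown in the comment it prints the injected tag, which is exactly how the tagged binaries described above carry their version without any generated source files.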
diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 000000000..4c067d9e7 --- /dev/null +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +### Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry --version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). 
It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. + +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. 
+ In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](http://golang.org/doc/effective_go.html). The +[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile new file mode 100644 index 000000000..fb54b6813 --- /dev/null +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -0,0 +1,60 @@ +# syntax=docker/dockerfile:1 + +ARG GO_VERSION=1.19.9 +ARG ALPINE_VERSION=3.16 +ARG XX_VERSION=1.2.1 + +FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx +FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base +COPY --from=xx / / +RUN apk add --no-cache bash coreutils file git +ENV GO111MODULE=auto +ENV CGO_ENABLED=0 +WORKDIR /go/src/github.com/docker/distribution + +FROM base AS version +ARG PKG="github.com/docker/distribution" +RUN --mount=target=. \ + VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \ + echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \ + echo -n "${VERSION}" | tee /tmp/.version; + +FROM base AS build +ARG TARGETPLATFORM +ARG LDFLAGS="-s -w" +ARG BUILDTAGS="include_oss include_gcs" +RUN --mount=type=bind,target=/go/src/github.com/docker/distribution,rw \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=target=/go/pkg/mod,type=cache \ + --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \ + set -x ; xx-go build -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \ + && xx-verify --static /usr/bin/registry + +FROM scratch AS binary +COPY --from=build /usr/bin/registry / + +FROM base AS releaser +ARG TARGETOS +ARG TARGETARCH +ARG TARGETVARIANT +WORKDIR /work +RUN --mount=from=binary,target=/build \ + --mount=type=bind,target=/src \ + --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \ + VERSION=$(cat /tmp/.version) \ + && mkdir -p /out \ + && cp /build/registry /src/README.md /src/LICENSE . 
\ + && tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \ + && sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256" + +FROM scratch AS artifact +COPY --from=releaser /out / + +FROM alpine:${ALPINE_VERSION} +RUN apk add --no-cache ca-certificates +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml +COPY --from=binary /registry /bin/registry +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS new file mode 100644 index 000000000..3183620c5 --- /dev/null +++ b/vendor/github.com/docker/distribution/MAINTAINERS @@ -0,0 +1,243 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# + +[Rules] + + [Rules.maintainers] + + title = "What is a maintainer?" + + text = """ +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. +3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. +""" + + [Rules.reviewer] + + title = "What is a reviewer?" + + text = """ +A reviewer is a core role within the project. +They share in reviewing issues and pull requests and their LGTM count towards the +required LGTM count to merge a code change into the project. + +Reviewers are part of the organization but do not have write access. +Becoming a reviewer is a core aspect in the journey to becoming a maintainer. +""" + + [Rules.adding-maintainers] + + title = "How are maintainers added?" + + text = """ +Maintainers are first and foremost contributors that have shown they are +committed to the long term success of a project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, pull +request review, and triage of issues in the project for more than three months. 
+ +Just contributing does not make you a maintainer, it is about building trust +with the current maintainers of the project and being a person that they can +depend on and trust to make decisions in the best interest of the project. + +Periodically, the existing maintainers curate a list of contributors that have +shown regular activity on the project over the prior months. From this list, +maintainer candidates are selected and proposed on the maintainers mailing list. + +After a candidate has been announced on the maintainers mailing list, the +existing maintainers are given five business days to discuss the candidate, +raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing +list. Only maintainers of the repository that the candidate is proposed for are +allowed to vote. + +If a candidate is approved, a maintainer will contact the candidate to invite +the candidate to open a pull request that adds the contributor to the +MAINTAINERS file. The candidate becomes a maintainer once the pull request is +merged. +""" + + [Rules.stepping-down-policy] + + title = "Stepping down policy" + + text = """ +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. +""" + + [Rules.inactive-maintainers] + + title = "Removal of inactive maintainers" + + text = """ +Similar to the procedure for adding new maintainers, existing maintainers can +be removed from the list if they do not show significant activity on the +project. Periodically, the maintainers review the list of maintainers and their +activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a neutral +person will contact the maintainer to ask if they want to continue being +a maintainer. If the maintainer decides to step down as a maintainer, they +open a pull request to be removed from the MAINTAINERS file. + +If the maintainer wants to remain a maintainer, but is unable to perform the +required duties they can be removed with a vote of at least 66% of +the current maintainers. An e-mail is sent to the +mailing list, inviting maintainers of the project to vote. The voting period is +five business days. Issues related to a maintainer's performance should be +discussed with them among the other maintainers so that they are not surprised +by a pull request removing them. +""" + + [Rules.decisions] + + title = "How are decisions made?" + + text = """ +Short answer: EVERYTHING IS A PULL REQUEST. + +distribution is an open-source project with an open design philosophy. This means +that the repository is the source of truth for EVERY aspect of the project, +including its philosophy, design, road map, and APIs. *If it's part of the +project, it's in the repo. If it's in the repo, it's part of the project.* + +As a result, all decisions can be expressed as changes to the repository. An +implementation change is a change to the source code. An API change is a change +to the API specification. A philosophy change is a change to the philosophy +manifesto, and so on. 
+ +All decisions affecting distribution, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. Who does this depends on the nature +of the pull request and which areas of the project it affects. +""" + + [Rules.DCO] + + title = "Helping contributors with the DCO" + + text = """ +The [DCO or `Sign your work`]( +https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) +requirement is not intended as a roadblock or speed bump. + +Some distribution contributors are not as familiar with `git`, or have used a web +based editor, and thus asking them to `git commit --amend -s` is not the best +way forward. + +In this case, maintainers can update the commits based on clause (c) of the DCO. +The most trivial way for a contributor to allow the maintainer to do this, is to +add a DCO signature in a pull requests's comment, or a maintainer can simply +note that the change is sufficiently trivial that it does not substantially +change the existing contribution - i.e., a spelling change. + +When you add someone's DCO, please also add your own to keep a log. +""" + + [Rules."no direct push"] + + title = "I'm a maintainer. Should I make pull requests too?" + + text = """ +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. +""" + + [Rules.tsc] + + title = "Conflict Resolution and technical disputes" + + text = """ +distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." + """ + + [Rules.meta] + + title = "How is this process changed?" + + text = "Just like everything else: by making a pull request :)" + +# Current project organization +[Org] + + [Org.Maintainers] + people = [ + "dmcgowan", + "dmp42", + "stevvooe", + ] + [Org.Reviewers] + people = [ + "manishtomar", + "caervs", + "davidswu", + "RobbKistler" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.caervs] + Name = "Ryan Abrams" + Email = "rdabrams@gmail.com" + GitHub = "caervs" + + [people.davidswu] + Name = "David Wu" + Email = "dwu7401@gmail.com" + GitHub = "davidswu" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.manishtomar] + Name = "Manish Tomar" + Email = "manish.tomar@docker.com" + GitHub = "manishtomar" + + [people.RobbKistler] + Name = "Robb Kistler" + Email = "robb.kistler@docker.com" + GitHub = "RobbKistler" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile new file mode 100644 index 000000000..75e118201 --- /dev/null +++ b/vendor/github.com/docker/distribution/Makefile @@ -0,0 +1,102 @@ +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) +REVISION=$(shell git rev-parse HEAD)$(shell if ! 
git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) + + +PKG=github.com/docker/distribution + +# Project packages. +PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) +INTEGRATION_PACKAGE=${PKG} +COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) + + +# Project binaries. +COMMANDS=registry digest registry-api-descriptor-template + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +WHALE = "+" + +# Go files +# +TESTFLAGS_RACE= +GOFILES=$(shell find . -type f -name '*.go') +GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) +GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' + +BINARIES=$(addprefix bin/,$(COMMANDS)) + +# Flags passed to `go test` +TESTFLAGS ?= -v $(TESTFLAGS_RACE) +TESTFLAGS_PARALLEL ?= 8 + +.PHONY: all build binaries check clean test test-race test-full integration coverage +.DEFAULT: all + +all: binaries + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + @echo "$(WHALE) $@" + ./version/version.sh > $@ + +check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") + @echo "$(WHALE) $@" + @GO111MODULE=off golangci-lint run + +test: ## run tests, except integration test with test.short + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +test-race: ## run tests, except integration test with test.short and race + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +test-full: ## run tests, except integration tests + @echo "$(WHALE) $@" + @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +integration: ## run integration tests + @echo "$(WHALE) $@" + @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} + +coverage: ## generate coverprofiles from the unit tests + @echo "$(WHALE) $@" + @rm -f coverage.txt + @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null + @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ + go test ${GO_TAGS} ${TESTFLAGS} \ + -cover \ + -coverprofile=profile.out \ + -covermode=atomic $$pkg || exit; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done ) + +FORCE: + +# Build a binary from a cmd. +bin/%: cmd/% FORCE + @echo "$(WHALE) $@${BINARY_SUFFIX}" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< + +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" + +build: + @echo "$(WHALE) $@" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) + +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md new file mode 100644 index 000000000..e513c18e9 --- /dev/null +++ b/vendor/github.com/docker/distribution/README.md @@ -0,0 +1,130 @@ +# Distribution + +The Docker toolset to pack, ship, store, and deliver content. + +This repository provides the Docker Registry 2.0 implementation +for storing and distributing Docker images. 
It supersedes the +[docker/docker-registry](https://github.com/docker/docker-registry) +project with a new API design, focused around security and performance. + + + +[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) +[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) + +This repository contains the following components: + +|**Component** |Description | +|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | + +### How does this integrate with Docker engine? + +This project should provide an implementation to a V2 API for use in the [Docker +core project](https://github.com/docker/docker). The API should be embeddable +and simplify the process of securely pulling and pushing content from `docker` +daemons. + +### What are the long term goals of the Distribution project? + +The _Distribution_ project has the further long term goal of providing a +secure tool chain for distributing content. The specifications, APIs and tools +should be as useful with Docker as they are without. + +Our goal is to design a professional grade and extensible content distribution +system that allow users to: + +* Enjoy an efficient, secured and reliable way to store, manage, package and + exchange content +* Hack/roll their own on top of healthy open-source components +* Implement their own home made solution through good specs, and solid + extensions mechanism. + +## More about Registry 2.0 + +The new registry implementation provides the following benefits: + +- faster push and pull +- new, more efficient implementation +- simplified deployment +- pluggable storage backend +- webhook notifications + +For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). + +### Who needs to deploy a registry? + +By default, Docker users pull images from Docker's public registry instance. +[Installing Docker](https://docs.docker.com/engine/installation/) gives users this +ability. Users can also push images to a repository on Docker's public registry, +if they have a [Docker Hub](https://hub.docker.com/) account. + +For some users and even companies, this default behavior is sufficient. For +others, it is not. + +For example, users with their own software products may want to maintain a +registry for private, company images. Also, you may wish to deploy your own +image repository for images used to test or in continuous integration. For these +use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) +may be the better choice. 
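+
+As a small editorial illustration of the library surface mentioned in the
+components table above (a sketch, not part of the upstream README), the
+`reference` package from this repository can be used to parse and normalize
+image names:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/docker/distribution/reference"
+)
+
+func main() {
+	// ParseNormalizedNamed applies Docker Hub defaulting, so a bare name
+	// like "ubuntu:latest" normalizes to "docker.io/library/ubuntu".
+	named, err := reference.ParseNormalizedNamed("ubuntu:latest")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(named.Name()) // docker.io/library/ubuntu
+	if tagged, ok := named.(reference.NamedTagged); ok {
+		fmt.Println(tagged.Tag()) // latest
+	}
+}
+```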
+ +### Migration to Registry 2.0 + +For those who have previously deployed their own registry based on the Registry +1.0 implementation and wish to deploy a Registry 2.0 while retaining images, +data migration is required. A tool to assist with migration efforts has been +created. For more information see [docker/migrator](https://github.com/docker/migrator). + +## Contribute + +Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute +issues, fixes, and patches to this project. If you are contributing code, see +the instructions for [building a development environment](BUILDING.md). + +## Support + +If any issues are encountered while using the _Distribution_ project, several +avenues are available for support: + + + + + + + + + + + + + + + + + + +
+- IRC: #docker-distribution on FreeNode
+- Issue Tracker: github.com/docker/distribution/issues
+- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+- Mailing List: docker@dockerproject.org
+ + +## License + +This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md new file mode 100644 index 000000000..701127afe --- /dev/null +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -0,0 +1,267 @@ +# Roadmap + +The Distribution Project consists of several components, some of which are +still being defined. This document defines the high-level goals of the +project, identifies the current components, and defines the release- +relationship to the Docker Platform. + +* [Distribution Goals](#distribution-goals) +* [Distribution Components](#distribution-components) +* [Project Planning](#project-planning): release-relationship to the Docker Platform. + +This road map is a living document, providing an overview of the goals and +considerations made in respect of the future of the project. + +## Distribution Goals + +- Replace the existing [docker registry](github.com/docker/docker-registry) + implementation as the primary implementation. +- Replace the existing push and pull code in the docker engine with the + distribution package. +- Define a strong data model for distributing docker images +- Provide a flexible distribution tool kit for use in the docker platform +- Unlock new distribution models + +## Distribution Components + +Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming +features and bugfixes for a component will be added to the relevant milestone. If a feature or +bugfix is not part of a milestone, it is currently unscheduled for +implementation. + +* [Registry](#registry) +* [Distribution Package](#distribution-package) + +*** + +### Registry + +The new Docker registry is the main portion of the distribution repository. +Registry 2.0 is the first release of the next-generation registry. This was +primarily focused on implementing the [new registry +API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), +with a focus on security and performance. + +Following from the Distribution project goals above, we have a set of goals +for registry v2 that we would like to follow in the design. New features +should be compared against these goals. + +#### Data Storage and Distribution First + +The registry's first goal is to provide a reliable, consistent storage +location for Docker images. The registry should only provide the minimal +amount of indexing required to fetch image data and no more. + +This means we should be selective in new features and API additions, including +those that may require expensive, ever growing indexes. Requests should be +servable in "constant time". + +#### Content Addressability + +All data objects used in the registry API should be content addressable. +Content identifiers should be secure and verifiable. This provides a secure, +reliable base from which to build more advanced content distribution systems. + +#### Content Agnostic + +In the past, changes to the image format would require large changes in Docker +and the Registry. By decoupling the distribution and image format, we can +allow the formats to progress without having to coordinate between the two. +This means that we should be focused on decoupling Docker from the registry +just as much as decoupling the registry from Docker. Such an approach will +allow us to unlock new distribution models that haven't been possible before. 
+ +We can take this further by saying that the new registry should be content +agnostic. The registry provides a model of names, tags, manifests and content +addresses and that model can be used to work with content. + +#### Simplicity + +The new registry should be closer to a microservice component than its +predecessor. This means it should have a narrower API and a low number of +service dependencies. It should be easy to deploy. + +This means that other solutions should be explored before changing the API or +adding extra dependencies. If functionality is required, can it be added as an +extension or companion service. + +#### Extensibility + +The registry should provide extension points to add functionality. By keeping +the scope narrow, but providing the ability to add functionality. + +Features like search, indexing, synchronization and registry explorers fall +into this category. No such feature should be added unless we've found it +impossible to do through an extension. + +#### Active Feature Discussions + +The following are feature discussions that are currently active. + +If you don't see your favorite, unimplemented feature, feel free to contact us +via IRC or the mailing list and we can talk about adding it. The goal here is +to make sure that new features go through a rigid design process before +landing in the registry. + +##### Proxying to other Registries + +A _pull-through caching_ mode exists for the registry, but is restricted from +within the docker client to only mirror the official Docker Hub. This functionality +can be expanded when image provenance has been specified and implemented in the +distribution project. + +##### Metadata storage + +Metadata for the registry is currently stored with the manifest and layer data on +the storage backend. While this is a big win for simplicity and reliably maintaining +state, it comes with the cost of consistency and high latency. The mutable registry +metadata operations should be abstracted behind an API which will allow ACID compliant +storage systems to handle metadata. + +##### Peer to Peer transfer + +Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit + +##### Indexing, Search and Discovery + +The original registry provided some implementation of search for use with +private registries. Support has been elided from V2 since we'd like to both +decouple search functionality from the registry. The makes the registry +simpler to deploy, especially in use cases where search is not needed, and +let's us decouple the image format from the registry. + +There are explorations into using the catalog API and notification system to +build external indexes. The current line of thought is that we will define a +common search API to index and query docker images. Such a system could be run +as a companion to a registry or set of registries to power discovery. + +The main issue with search and discovery is that there are so many ways to +accomplish it. There are two aspects to this project. The first is deciding on +how it will be done, including an API definition that can work with changing +data formats. The second is the process of integrating with `docker search`. +We expect that someone attempts to address the problem with the existing tools +and propose it as a standard search API or uses it to inform a standardization +process. Once this has been explored, we integrate with the docker client. 
+ +Please see the following for more detail: + +- https://github.com/docker/distribution/issues/206 + +##### Deletes + +> __NOTE:__ Deletes are a much asked for feature. Before requesting this +feature or participating in discussion, we ask that you read this section in +full and understand the problems behind deletes. + +While, at first glance, implementing deleting seems simple, there are a number +mitigating factors that make many solutions not ideal or even pathological in +the context of a registry. The following paragraph discuss the background and +approaches that could be applied to arrive at a solution. + +The goal of deletes in any system is to remove unused or unneeded data. Only +data requested for deletion should be removed and no other data. Removing +unintended data is worse than _not_ removing data that was requested for +removal but ideally, both are supported. Generally, according to this rule, we +err on holding data longer than needed, ensuring that it is only removed when +we can be certain that it can be removed. With the current behavior, we opt to +hold onto the data forever, ensuring that data cannot be incorrectly removed. + +To understand the problems with implementing deletes, one must understand the +data model. All registry data is stored in a filesystem layout, implemented on +a "storage driver", effectively a _virtual file system_ (VFS). The storage +system must assume that this VFS layer will be eventually consistent and has +poor read- after-write consistency, since this is the lower common denominator +among the storage drivers. This is mitigated by writing values in reverse- +dependent order, but makes wider transactional operations unsafe. + +Layered on the VFS model is a content-addressable _directed, acyclic graph_ +(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. +Since the same data can be referenced by multiple manifests, we only store +data once, even if it is in different repositories. Thus, we have a set of +blobs, referenced by tags and manifests. If we want to delete a blob we need +to be certain that it is no longer referenced by another manifest or tag. When +we delete a manifest, we also can try to delete the referenced blobs. Deciding +whether or not a blob has an active reference is the crux of the problem. + +Conceptually, deleting a manifest and its resources is quite simple. Just find +all the manifests, enumerate the referenced blobs and delete the blobs not in +that set. An astute observer will recognize this as a garbage collection +problem. As with garbage collection in programming languages, this is very +simple when one always has a consistent view. When one adds parallelism and an +inconsistent view of data, it becomes very challenging. + +A simple example can demonstrate this. Let's say we are deleting a manifest +_A_ in one process. We scan the manifest and decide that all the blobs are +ready for deletion. Concurrently, we have another process accepting a new +manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ +is accepted and all the blobs are considered present, so the operation +proceeds. The original process then deletes the referenced blobs, assuming +they were unreferenced. The manifest _B_, which we thought had all of its data +present, can no longer be served by the registry, since the dependent data has +been deleted. + +Deleting data from the registry safely requires some way to coordinate this +operation. 
The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: 1. maintaining a consistent consensus
+  of reference counts across a set of Registries and 2. Building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to above but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments, though it would create a bottleneck
+  for registry metadata. However, metadata is generally not the main
+  bottleneck when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple, but the amount of work
+in coordination may offset the extra work it might take to build a
+_Centralized Oracle_. We'll accept proposals for any solution but please
+coordinate with us before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
+
+### Distribution Package
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation.
+
+The package itself is considered unstable. If you're using it, please take
+care to vendor the dependent version.
+
+For feature additions, please see the Registry section. In the future, we may
+break out a separate Roadmap for distribution-specific features that apply to
+more than just the registry.
+
+***
+
+### Project Planning
+
+An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
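+
+As an editorial illustration of the mark-and-sweep idea described under
+"Deletes" above (a minimal sketch with hypothetical types, not the project's
+implementation), the core of the "lock the world" approach reduces to marking
+every blob reachable from some manifest and sweeping the rest:
+
+```go
+package main
+
+import "fmt"
+
+// manifest is a hypothetical, simplified stand-in for a registry manifest:
+// it only records the digests of the blobs it references.
+type manifest struct {
+	references []string
+}
+
+// sweep assumes a frozen, consistent view of the store (writes halted) and
+// returns the blobs that no manifest references.
+func sweep(manifests []manifest, allBlobs []string) []string {
+	marked := make(map[string]bool)
+	for _, m := range manifests {
+		for _, d := range m.references {
+			marked[d] = true // mark phase: blob is reachable
+		}
+	}
+	var garbage []string
+	for _, d := range allBlobs {
+		if !marked[d] {
+			garbage = append(garbage, d) // sweep phase: unreachable blob
+		}
+	}
+	return garbage
+}
+
+func main() {
+	ms := []manifest{{references: []string{"sha256:aaa"}}}
+	fmt.Println(sweep(ms, []string{"sha256:aaa", "sha256:bbb"})) // [sha256:bbb]
+}
+```
+
+As the discussion above stresses, the hard part is not this computation but
+obtaining a consistent view while concurrent writes may add new references.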
+ diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go new file mode 100644 index 000000000..2a659eaa3 --- /dev/null +++ b/vendor/github.com/docker/distribution/blobs.go @@ -0,0 +1,265 @@ +package distribution + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // ErrBlobExists returned when blob already exists + ErrBlobExists = errors.New("blob exists") + + // ErrBlobDigestUnsupported when blob digest is an unsupported version. + ErrBlobDigestUnsupported = errors.New("unsupported blob digest") + + // ErrBlobUnknown when blob is not found. + ErrBlobUnknown = errors.New("unknown blob") + + // ErrBlobUploadUnknown returned when upload is not found. + ErrBlobUploadUnknown = errors.New("blob upload unknown") + + // ErrBlobInvalidLength returned when the blob has an expected length on + // commit, meaning mismatched with the descriptor or an invalid value. + ErrBlobInvalidLength = errors.New("blob invalid length") +) + +// ErrBlobInvalidDigest returned when digest check fails. +type ErrBlobInvalidDigest struct { + Digest digest.Digest + Reason error +} + +func (err ErrBlobInvalidDigest) Error() string { + return fmt.Sprintf("invalid digest for referenced layer: %v, %v", + err.Digest, err.Reason) +} + +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. +type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + +// Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against this digest. + Digest digest.Digest `json:"digest,omitempty"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *v1.Platform `json:"platform,omitempty"` + + // NOTE: Before adding a field here, please ensure that all + // other options have been exhausted. Much of the type relationships + // depend on the simplicity of this type. +} + +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + +// BlobStatter makes blob descriptors available by digest. 
The service may +// provide a descriptor of a different digest if the provided digest is not +// canonical. +type BlobStatter interface { + // Stat provides metadata about a blob identified by the digest. If the + // blob is unknown to the describer, ErrBlobUnknown will be returned. + Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) +} + +// BlobDeleter enables deleting blobs from storage. +type BlobDeleter interface { + Delete(ctx context.Context, dgst digest.Digest) error +} + +// BlobEnumerator enables iterating over blobs from storage +type BlobEnumerator interface { + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error +} + +// BlobDescriptorService manages metadata about a blob by digest. Most +// implementations will not expose such an interface explicitly. Such mappings +// should be maintained by interacting with the BlobIngester. Hence, this is +// left off of BlobService and BlobStore. +type BlobDescriptorService interface { + BlobStatter + + // SetDescriptor assigns the descriptor to the digest. The provided digest and + // the digest in the descriptor must map to identical content but they may + // differ on their algorithm. The descriptor must have the canonical + // digest of the content and the digest algorithm must match the + // annotators canonical algorithm. + // + // Such a facility can be used to map blobs between digest domains, with + // the restriction that the algorithm of the descriptor must match the + // canonical algorithm (ie sha256) of the annotator. + SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error + + // Clear enables descriptors to be unlinked + Clear(ctx context.Context, dgst digest.Digest) error +} + +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + +// ReadSeekCloser is the primary reader type for blob data, combining +// io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// BlobProvider describes operations for getting blob data. +type BlobProvider interface { + // Get returns the entire blob identified by digest along with the descriptor. + Get(ctx context.Context, dgst digest.Digest) ([]byte, error) + + // Open provides a ReadSeekCloser to the blob identified by the provided + // descriptor. If the blob is not known to the service, an error will be + // returned. + Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) +} + +// BlobServer can serve blobs via http. +type BlobServer interface { + // ServeBlob attempts to serve the blob, identified by dgst, via http. The + // service may decide to redirect the client elsewhere or serve the data + // directly. + // + // This handler only issues successful responses, such as 2xx or 3xx, + // meaning it serves data or issues a redirect. If the blob is not + // available, an error will be returned and the caller may still issue a + // response. + // + // The implementation may serve the same blob from a different digest + // domain. The appropriate headers will be set for the blob, unless they + // have already been set by the caller. + ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error +} + +// BlobIngester ingests blob data. +type BlobIngester interface { + // Put inserts the content p into the blob service, returning a descriptor + // or an error. 
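+	//
+	// Illustrative call (editorial note, not part of the upstream comment),
+	// for some BlobIngester implementation `blobs`:
+	//
+	//	desc, err := blobs.Put(ctx, "application/octet-stream", p)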
+ Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) + + // Create allocates a new blob writer to add a blob to this service. The + // returned handle can be written to and later resumed using an opaque + // identifier. With this approach, one can Close and Resume a BlobWriter + // multiple times until the BlobWriter is committed or cancelled. + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) + + // Resume attempts to resume a write to a blob, identified by an id. + Resume(ctx context.Context, id string) (BlobWriter, error) +} + +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + +// CreateOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type CreateOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + // Stat allows to pass precalculated descriptor to link and return. + // Blob access check will be skipped if set. + Stat *Descriptor + } +} + +// BlobWriter provides a handle for inserting data into a blob store. +// Instances should be obtained from BlobWriteService.Writer and +// BlobWriteService.Resume. If supported by the store, a writer can be +// recovered with the id. +type BlobWriter interface { + io.WriteCloser + io.ReaderFrom + + // Size returns the number of bytes written to this blob. + Size() int64 + + // ID returns the identifier for this writer. The ID can be used with the + // Blob service to later resume the write. + ID() string + + // StartedAt returns the time this blob write was started. + StartedAt() time.Time + + // Commit completes the blob writer process. The content is verified + // against the provided provisional descriptor, which may result in an + // error. Depending on the implementation, written data may be validated + // against the provisional descriptor fields. If MediaType is not present, + // the implementation may reject the commit or assign "application/octet- + // stream" to the blob. The returned descriptor may have a different + // digest depending on the blob store, referred to as the canonical + // descriptor. + Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) + + // Cancel ends the blob write without storing any data and frees any + // associated resources. Any data written thus far will be lost. Cancel + // implementations should allow multiple calls even after a commit that + // result in a no-op. This allows use of Cancel in a defer statement, + // increasing the assurance that it is correctly called. + Cancel(ctx context.Context) error +} + +// BlobService combines the operations to access, read and write blobs. This +// can be used to describe remote blob services. +type BlobService interface { + BlobStatter + BlobProvider + BlobIngester +} + +// BlobStore represent the entire suite of blob related operations. Such an +// implementation can access, read, write, delete and serve blobs. 
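+//
+// Illustrative flow (editorial note, not part of the upstream comment), for
+// some BlobStore implementation `store`:
+//
+//	w, _ := store.Create(ctx)
+//	_, _ = w.Write(p)
+//	desc, _ := w.Commit(ctx, Descriptor{MediaType: "application/octet-stream"})
+//	rc, _ := store.Open(ctx, desc.Digest)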
+type BlobStore interface { + BlobService + BlobServer + BlobDeleter +} diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go new file mode 100644 index 000000000..71327dca7 --- /dev/null +++ b/vendor/github.com/docker/distribution/digestset/set.go @@ -0,0 +1,247 @@ +package digestset + +import ( + "errors" + "sort" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by easily referenced by a string +// representation of the digest as well as short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. 
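+//
+// Illustrative usage (editorial note, not part of the upstream comment):
+// given a populated *Set s, a short hex prefix is matched against the set,
+// e.g.
+//
+//	d, err := s.Lookup("b6f1")
+//
+// which may fail with ErrDigestNotFound or ErrDigestAmbiguous.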
+func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. 
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go new file mode 100644 index 000000000..bdd8cb708 --- /dev/null +++ b/vendor/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. +package distribution diff --git a/vendor/github.com/docker/distribution/docker-bake.hcl b/vendor/github.com/docker/distribution/docker-bake.hcl new file mode 100644 index 000000000..91686e608 --- /dev/null +++ b/vendor/github.com/docker/distribution/docker-bake.hcl @@ -0,0 +1,56 @@ +group "default" { + targets = ["image-local"] +} + +// Special target: https://github.com/docker/metadata-action#bake-definition +target "docker-metadata-action" { + tags = ["registry:local"] +} + +target "binary" { + target = "binary" + output = ["./bin"] +} + +target "artifact" { + target = "artifact" + output = ["./bin"] +} + +target "artifact-all" { + inherits = ["artifact"] + platforms = [ + "linux/amd64", + "linux/arm/v6", + "linux/arm/v7", + "linux/arm64", + "linux/ppc64le", + "linux/s390x" + ] +} + +// Special target: https://github.com/docker/metadata-action#bake-definition +target "docker-metadata-action" { + tags = ["registry:local"] +} + +target "image" { + inherits = ["docker-metadata-action"] +} + +target "image-local" { + inherits = ["image"] + output = ["type=docker"] +} + +target "image-all" { + inherits = ["image"] + platforms = [ + "linux/amd64", + "linux/arm/v6", + "linux/arm/v7", + "linux/arm64", + "linux/ppc64le", + "linux/s390x" + ] +} diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go new file mode 100644 index 000000000..8e0b788d6 --- /dev/null +++ b/vendor/github.com/docker/distribution/errors.go @@ -0,0 +1,119 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +// ErrAccessDenied is returned when an access to a requested resource is +// denied. 
+var ErrAccessDenied = errors.New("access denied")
+
+// ErrManifestNotModified is returned when a conditional manifest GetByTag
+// returns nil due to the client indicating it has the latest version
+var ErrManifestNotModified = errors.New("manifest not modified")
+
+// ErrUnsupported is returned when an unimplemented or unsupported action is
+// performed
+var ErrUnsupported = errors.New("operation unsupported")
+
+// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
+// manifest but the registry is configured to reject it
+var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
+
+// ErrTagUnknown is returned if the given tag is not known by the tag service
+type ErrTagUnknown struct {
+ Tag string
+}
+
+func (err ErrTagUnknown) Error() string {
+ return fmt.Sprintf("unknown tag=%s", err.Tag)
+}
+
+// ErrRepositoryUnknown is returned if the named repository is not known by
+// the registry.
+type ErrRepositoryUnknown struct {
+ Name string
+}
+
+func (err ErrRepositoryUnknown) Error() string {
+ return fmt.Sprintf("unknown repository name=%s", err.Name)
+}
+
+// ErrRepositoryNameInvalid should be used to denote an invalid repository
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrRepositoryNameInvalid struct {
+ Name string
+ Reason error
+}
+
+func (err ErrRepositoryNameInvalid) Error() string {
+ return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
+}
+
+// ErrManifestUnknown is returned if the manifest is not known by the
+// registry.
+type ErrManifestUnknown struct {
+ Name string
+ Tag string
+}
+
+func (err ErrManifestUnknown) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// ErrManifestUnknownRevision is returned when a manifest cannot be found by
+// revision within a repository.
+type ErrManifestUnknownRevision struct {
+ Name string
+ Revision digest.Digest
+}
+
+func (err ErrManifestUnknownRevision) Error() string {
+ return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
+}
+
+// ErrManifestUnverified is returned when the registry is unable to verify
+// the manifest.
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+ return "unverified manifest"
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error
+
+func (errs ErrManifestVerification) Error() string {
+ var parts []string
+ for _, err := range errs {
+ parts = append(parts, err.Error())
+ }
+
+ return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct {
+ Digest digest.Digest
+}
+
+func (err ErrManifestBlobUnknown) Error() string {
+ return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
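These typed errors are designed for type switching rather than string matching. A brief sketch (the `describe` helper is illustrative):

```go
package errsketch

import (
	"fmt"

	"github.com/docker/distribution"
)

// describe maps known registry error types to friendlier strings.
func describe(err error) string {
	switch e := err.(type) {
	case distribution.ErrRepositoryUnknown:
		return "no such repository: " + e.Name
	case distribution.ErrManifestUnknownRevision:
		return fmt.Sprintf("manifest %s has no revision %s", e.Name, e.Revision)
	case distribution.ErrManifestVerification:
		return fmt.Sprintf("manifest failed verification with %d error(s)", len(e))
	default:
		return err.Error()
	}
}
```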
+// ErrManifestNameInvalid should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrManifestNameInvalid struct {
+ Name string
+ Reason error
+}
+
+func (err ErrManifestNameInvalid) Error() string {
+ return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
new file mode 100644
index 000000000..8f84a220a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+ "context"
+ "fmt"
+ "mime"
+
+ "github.com/opencontainers/go-digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target
+type Manifest interface {
+ // References returns a list of objects which make up this manifest.
+ // A reference is anything which can be represented by a
+ // distribution.Descriptor. These can consist of layers, resources or other
+ // manifests.
+ //
+ // While no particular order is required, implementations should return
+ // them from highest to lowest priority. For example, one might want to
+ // return the base layer before the top layer.
+ References() []Descriptor
+
+ // Payload provides the serialized format of the manifest, in addition to
+ // the media type.
+ Payload() (mediaType string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package. Manifest
+// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+ // Build creates the manifest from this builder.
+ Build(ctx context.Context) (Manifest, error)
+
+ // References returns a list of objects which have been added to this
+ // builder. The dependencies are returned in the order they were added,
+ // which should be from base to head.
+ References() []Descriptor
+
+ // AppendReference includes the given object in the manifest after any
+ // existing dependencies. If the add fails, such as when adding an
+ // unsupported dependency, an error may be returned.
+ //
+ // The destination of the reference is dependent on the manifest type and
+ // the dependency type.
+ AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+ // Exists returns true if the manifest exists.
+ Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+ // Get retrieves the manifest specified by the given digest
+ Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+ // Put creates or updates the given manifest returning the manifest digest
+ Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+ // Delete removes the manifest specified by the given digest. Deleting
+ // a manifest that doesn't exist will return ErrManifestNotFound
+ Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+ // Enumerate calls ingester for each manifest.
+ Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+ Descriptor() Descriptor
+}
+
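A hedged sketch of the ManifestService read path just defined; `ms` would come from a Repository (shown later in this file set), and the helper name is illustrative:

```go
package manifestsketch

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	digest "github.com/opencontainers/go-digest"
)

// fetchPayload checks for a manifest by digest and returns its raw bytes.
func fetchPayload(ctx context.Context, ms distribution.ManifestService, dgst digest.Digest) ([]byte, error) {
	ok, err := ms.Exists(ctx, dgst)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, fmt.Errorf("manifest %s not found", dgst)
	}
	m, err := ms.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}
	_, payload, err := m.Payload()
	return payload, err
}
```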
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+ for t := range mappings {
+ if t != "" {
+ mediaTypes = append(mediaTypes, t)
+ }
+ }
+ return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+ // Need to look up by the actual media type, not the raw contents of
+ // the header. Strip semicolons and anything following them.
+ var mediaType string
+ if ctHeader != "" {
+ var err error
+ mediaType, _, err = mime.ParseMediaType(ctHeader)
+ if err != nil {
+ return nil, Descriptor{}, err
+ }
+ }
+
+ unmarshalFunc, ok := mappings[mediaType]
+ if !ok {
+ unmarshalFunc, ok = mappings[""]
+ if !ok {
+ return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
+ }
+ }
+
+ return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
+// should be called from specific manifest packages.
+func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
+ if _, ok := mappings[mediaType]; ok {
+ return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
+ }
+ mappings[mediaType] = u
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go
new file mode 100644
index 000000000..b5a532144
--- /dev/null
+++ b/vendor/github.com/docker/distribution/metrics/prometheus.go
@@ -0,0 +1,13 @@
+package metrics
+
+import "github.com/docker/go-metrics"
+
+const (
+ // NamespacePrefix is the namespace of prometheus metrics
+ NamespacePrefix = "registry"
+)
+
+var (
+ // StorageNamespace is the prometheus namespace of blob/cache related operations
+ StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
+)
diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go
new file mode 100644
index 000000000..978df7eab
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
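Stepping back to UnmarshalManifest and RegisterManifestSchema above: schema packages populate the mappings table at init time so that UnmarshalManifest can dispatch on the Content-Type header. A sketch of such a registration; the media type and manifest type here are purely illustrative:

```go
package demoschema

import (
	"github.com/docker/distribution"
	digest "github.com/opencontainers/go-digest"
)

// mediaType is purely illustrative; real values live in the schema packages.
const mediaType = "application/vnd.example.manifest.v1+json"

type demoManifest struct{ payload []byte }

func (m *demoManifest) References() []distribution.Descriptor { return nil }

func (m *demoManifest) Payload() (string, []byte, error) {
	return mediaType, m.payload, nil
}

func init() {
	// Registration makes UnmarshalManifest dispatch to this function when a
	// payload carries our media type in its Content-Type header.
	err := distribution.RegisterManifestSchema(mediaType, func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
		m := &demoManifest{payload: append([]byte(nil), b...)}
		return m, distribution.Descriptor{
			MediaType: mediaType,
			Size:      int64(len(b)),
			Digest:    digest.FromBytes(b),
		}, nil
	})
	if err != nil {
		panic(err)
	}
}
```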
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
new file mode 100644
index 000000000..b3dfb7a6d
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/normalize.go
@@ -0,0 +1,199 @@
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/docker/distribution/digestset"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ legacyDefaultDomain = "index.docker.io"
+ defaultDomain = "docker.io"
+ officialRepoName = "library"
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier,
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remoteName string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remoteName = remainder[:tagSep]
+ } else {
+ remoteName = remainder
+ }
+ if strings.ToLower(remoteName) != remoteName {
+ return nil, errors.New("invalid reference format: repository name must be lowercase")
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The returned reference can only be either tagged or digested. For a reference that contains
+// both a tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+ named, err := ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := named.(NamedTagged); ok {
+ if canonical, ok := named.(Canonical); ok {
+ // The reference is both tagged and digested, only
+ // return digested.
+ newNamed, err := WithName(canonical.Name())
+ if err != nil {
+ return nil, err
+ }
+ newCanonical, err := WithDigest(newNamed, canonical.Digest())
+ if err != nil {
+ return nil, err
+ }
+ return newCanonical, nil
+ }
+ }
+ return TagNameOnly(named), nil
+}
+
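The normalization behavior above, exercised directly; the digest is the example value used in the ParseDockerRef doc comment:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Familiar names are expanded to fully qualified references.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/ubuntu

	// When both a tag and a digest are present, ParseDockerRef keeps the digest.
	ref, err := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // docker.io/library/busybox@sha256:7cc4b5ae...
}
```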
+// splitDockerDomain splits a repository name into domain and remote-name strings.
+// If no valid domain is found, the default domain is used. The repository name
+// must already be validated before calling this function.
+func splitDockerDomain(name string) (domain, remainder string) {
+ i := strings.IndexRune(name, '/')
+ if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+ domain, remainder = defaultDomain, name
+ } else {
+ domain, remainder = name[:i], name[i+1:]
+ }
+ if domain == legacyDefaultDomain {
+ domain = defaultDomain
+ }
+ if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+ remainder = officialRepoName + "/" + remainder
+ }
+ return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named-only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/<official repo name>"
+ if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+ repo.path = split[1]
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // Default tag must be valid, to create a NamedTagged
+ // type with non-validated input the WithTag function
+ // should be used instead
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
+
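TagNameOnly and the familiarization helpers round-trip as follows; a small sketch using the package's public API, with the same illustrative 64-hex identifier as above:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("redis")
	if err != nil {
		panic(err)
	}
	tagged := reference.TagNameOnly(named)        // appends the default ":latest"
	fmt.Println(tagged.String())                  // docker.io/library/redis:latest
	fmt.Println(reference.FamiliarString(tagged)) // redis:latest

	// ParseAnyReference treats a full 64-hex identifier as a sha256 digest.
	ref, _ := reference.ParseAnyReference("7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	fmt.Println(ref.String()) // sha256:7cc4b5ae...
}
```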
+// ParseAnyReferenceWithSet parses a reference string as a possible short
+// identifier to be matched in a digest set, a full digest, or familiar name.
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
+ if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
+ dgst, err := ds.Lookup(ref)
+ if err == nil {
+ return digestReference(dgst), nil
+ }
+ } else {
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+ }
+
+ return ParseNormalizedNamed(ref)
}
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 000000000..b7cd00b0d
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] path-component ['/' path-component]*
+// domain := domain-component ['.' domain-component]* [':' port-number]
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+// short-identifier := /[a-f0-9]{6,64}/
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. 
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 000000000..786034932 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. 
+ DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. 
+func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go new file mode 100644 index 000000000..6c3210989 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry.go @@ -0,0 +1,118 @@ +package distribution + +import ( + "context" + + "github.com/docker/distribution/reference" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name reference.Named) (Repository, error) + + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. 
+ Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+
+ // Blobs returns a blob enumerator to access all blobs
+ Blobs() BlobEnumerator
+
+ // BlobStatter returns a BlobStatter to control
+ BlobStatter() BlobStatter
+}
+
+// RepositoryEnumerator describes an operation to enumerate repositories
+type RepositoryEnumerator interface {
+ Enumerate(ctx context.Context, ingester func(string) error) error
+}
+
+// RepositoryRemover removes the given repository
+type RepositoryRemover interface {
+ Remove(ctx context.Context, name reference.Named) error
+}
+
+// ManifestServiceOption is a function argument for Manifest Service methods
+type ManifestServiceOption interface {
+ Apply(ManifestService) error
+}
+
+// WithTag allows a tag to be passed into Put
+func WithTag(tag string) ManifestServiceOption {
+ return WithTagOption{tag}
+}
+
+// WithTagOption holds a tag
+type WithTagOption struct{ Tag string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithTagOption) Apply(m ManifestService) error {
+ // no implementation
+ return nil
+}
+
+// WithManifestMediaTypes lists the media types the client wishes
+// the server to provide.
+func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
+ return WithManifestMediaTypesOption{mediaTypes}
+}
+
+// WithManifestMediaTypesOption holds a list of accepted media types
+type WithManifestMediaTypesOption struct{ MediaTypes []string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
+ // no implementation
+ return nil
+}
+
+// Repository is a named collection of manifests and layers.
+type Repository interface {
+ // Named returns the name of the repository.
+ Named() reference.Named
+
+ // Manifests returns a reference to this repository's manifest service,
+ // with the supplied options applied.
+ Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
+
+ // Blobs returns a reference to this repository's blob service.
+ Blobs(ctx context.Context) BlobStore
+
+ // TODO(stevvooe): The above BlobStore return can probably be relaxed to
+ // be a BlobService for use with clients. This will allow such
+ // implementations to avoid implementing ServeBlob.
+
+ // Tags returns a reference to this repository's tag service
+ Tags(ctx context.Context) TagService
+}
+
+// TODO(stevvooe): Must add close methods to all these. May want to change the
+// way instances are created to better reflect internal dependency
+// relationships.
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
new file mode 100644
index 000000000..4c35b879a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -0,0 +1,267 @@
+package errcode
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error allowing
+// users of each to just call ErrorCode to get the real ID of each
+type ErrorCoder interface {
+ ErrorCode() ErrorCode
+}
+
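Traversal of these interfaces is mechanical: resolve a Repository from a Namespace, fetch its ManifestService, then apply options such as WithTag. A hedged sketch (the `putTagged` helper is illustrative):

```go
package registrysketch

import (
	"context"

	"github.com/docker/distribution"
	"github.com/docker/distribution/reference"
)

// putTagged resolves a repository from a namespace and pushes a manifest
// under the given tag via the WithTag option.
func putTagged(ctx context.Context, ns distribution.Namespace, name reference.Named, m distribution.Manifest, tag string) error {
	repo, err := ns.Repository(ctx, name)
	if err != nil {
		return err
	}
	ms, err := repo.Manifests(ctx)
	if err != nil {
		return err
	}
	_, err = ms.Put(ctx, m, distribution.WithTag(tag))
	return err
}
```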
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+ return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+ // NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+ return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+ d, ok := errorCodeToDescriptors[ec]
+
+ if !ok {
+ return ErrorCodeUnknown.Descriptor()
+ }
+
+ return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+ return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+ return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+ return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+ desc, ok := idToDescriptors[string(text)]
+
+ if !ok {
+ desc = ErrorCodeUnknown.Descriptor()
+ }
+
+ *ec = desc.Code
+
+ return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+ return Error{
+ Code: ec,
+ Message: message,
+ }
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// sets the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: ec,
+ Message: ec.Message(),
+ }.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+ Code ErrorCode `json:"code"`
+ Message string `json:"message"`
+ Detail interface{} `json:"detail,omitempty"`
+
+ // TODO(duglin): See if we need an "args" property so we can do the
+ // variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+ return e.Code
+}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: e.Message,
+ Detail: detail,
+ }
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+ return Error{
+ Code: e.Code,
+ Message: fmt.Sprintf(e.Code.Message(), args...),
+ Detail: e.Detail,
+ }
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+ // Code is the error code that this descriptor describes.
+ Code ErrorCode
+
+ // Value provides a unique, string key, often capitalized with
+ // underscores, to identify the error code. This value is used as the
+ // keyed value when serializing api errors.
+ Value string
+
+ // Message is a short, human readable description of the error condition
+ // included in API responses.
+ Message string
+
+ // Description provides a complete account of the error's purpose, suitable
+ // for use in documentation.
+ Description string
+
+ // HTTPStatusCode provides the http status code that is associated with
+ // this error condition.
+ HTTPStatusCode int
+}
+
+// ParseErrorCode returns the error code corresponding to the given string value.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+ ed, ok := idToDescriptors[value]
+ if ok {
+ return ed.Code
+ }
+
+ return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+ switch len(errs) {
+ case 0:
+ return ""
+ case 1:
+ return errs[0].Error()
+ default:
+ msg := "errors:\n"
+ for _, err := range errs {
+ msg += err.Error() + "\n"
+ }
+ return msg
+ }
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+ return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode or Error into a
+// slice of Error, then serializes it
+func (errs Errors) MarshalJSON() ([]byte, error) {
+ var tmpErrs struct {
+ Errors []Error `json:"errors,omitempty"`
+ }
+
+ for _, daErr := range errs {
+ var err Error
+
+ switch daErr := daErr.(type) {
+ case ErrorCode:
+ err = daErr.WithDetail(nil)
+ case Error:
+ err = daErr
+ default:
+ err = ErrorCodeUnknown.WithDetail(daErr)
+
+ }
+
+ // If the Error struct was set up and they forgot to set the
+ // Message field (meaning it's "") then grab it from the ErrCode
+ msg := err.Message
+ if msg == "" {
+ msg = err.Code.Message()
+ }
+
+ tmpErrs.Errors = append(tmpErrs.Errors, Error{
+ Code: err.Code,
+ Message: msg,
+ Detail: err.Detail,
+ })
+ }
+
+ return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into a slice of
+// Error or ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+ var tmpErrs struct {
+ Errors []Error
+ }
+
+ if err := json.Unmarshal(data, &tmpErrs); err != nil {
+ return err
+ }
+
+ var newErrs Errors
+ for _, daErr := range tmpErrs.Errors {
+ // If Message is empty or exactly matches the Code's message string
+ // then just use the Code, no need for a full Error struct
+ if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+ // Errors w/o details get converted to ErrorCode
+ newErrs = append(newErrs, daErr.Code)
+ } else {
+ // Errors w/ details are untouched
+ newErrs = append(newErrs, Error{
+ Code: daErr.Code,
+ Message: daErr.Message,
+ Detail: daErr.Detail,
+ })
+ }
+ }
+
+ *errs = newErrs
+ return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
new file mode 100644
index 000000000..d77e70473
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -0,0 +1,40 @@
+package errcode
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
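Because Errors may hold bare ErrorCodes as well as full Error values, MarshalJSON normalizes them into the envelope that ServeJSON (below) writes. A sketch, assuming the codes registered in register.go later in this patch:

```go
package errcodesketch

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func demo() {
	errs := errcode.Errors{
		errcode.ErrorCodeUnsupported,                // bare code: message comes from its descriptor
		errcode.ErrorCodeUnknown.WithDetail("boom"), // full Error carrying a detail payload
	}
	b, err := json.Marshal(errs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"errors":[{"code":"UNSUPPORTED","message":"The operation is unsupported."},
	//            {"code":"UNKNOWN","message":"unknown error","detail":"boom"}]}
}
```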
+func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. + err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + return json.NewEncoder(w).Encode(err) +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 000000000..d1e8826c6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 000000000..c3bf90f71 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1613 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/opencontainers/go-digest" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. 
Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. It not present, all entries will be returned.", + Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + + unauthorizedResponseDescriptor = ResponseDescriptor{ + Name: "Authentication Required", + StatusCode: http.StatusUnauthorized, + Description: "The client is not authenticated.", + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, + }, + } + + invalidPaginationResponseDescriptor = ResponseDescriptor{ + Name: "Invalid pagination number", + Description: "The received parameter n was invalid in some way, as described by the error code. 
The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodePaginationNumberInvalid, + }, + } + + repositoryNotFoundResponseDescriptor = ResponseDescriptor{ + Name: "No Such Repository Error", + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + } + + deniedResponseDescriptor = ResponseDescriptor{ + Name: "Access Denied", + StatusCode: http.StatusForbidden, + Description: "The client does not have required access to the repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeDenied, + }, + } + + tooManyRequestsDescriptor = ResponseDescriptor{ + Name: "Too Many Requests", + StatusCode: http.StatusTooManyRequests, + Description: "The client made too many requests within a time interval.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeTooManyRequests, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor +}{ + RouteDescriptors: routeDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names a should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readalbe description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. 
+ Method string
+
+ // Description should provide an overview of the functionality provided by
+ // the covered method, suitable for use in documentation. Use of markdown
+ // here is encouraged.
+ Description string
+
+ // Requests is a slice of request descriptors enumerating how this
+ // endpoint may be used.
+ Requests []RequestDescriptor
+}
+
+// RequestDescriptor covers a particular set of headers and parameters that
+// can be carried out with the parent method. It's most helpful to have one
+// RequestDescriptor per API use case.
+type RequestDescriptor struct {
+ // Name provides a short identifier for the request, usable as a title or
+ // to provide quick context for the particular request.
+ Name string
+
+ // Description should cover the request's purpose, covering any details for
+ // this particular use case.
+ Description string
+
+ // Headers describes headers that must be used with the HTTP request.
+ Headers []ParameterDescriptor
+
+ // PathParameters enumerate the parameterized path components for the
+ // given request, as defined in the route's regular expression.
+ PathParameters []ParameterDescriptor
+
+ // QueryParameters provides a list of query parameters for the given
+ // request.
+ QueryParameters []ParameterDescriptor
+
+ // Body describes the format of the request body.
+ Body BodyDescriptor
+
+ // Successes enumerates the possible responses that are considered to be
+ // the result of a successful request.
+ Successes []ResponseDescriptor
+
+ // Failures covers the possible failures from this particular request.
+ Failures []ResponseDescriptor
+}
+
+// ResponseDescriptor describes the components of an API response.
+type ResponseDescriptor struct {
+ // Name provides a short identifier for the response, usable as a title or
+ // to provide quick context for the particular response.
+ Name string
+
+ // Description should provide a brief overview of the role of the
+ // response.
+ Description string
+
+ // StatusCode specifies the status received by this particular response.
+ StatusCode int
+
+ // Headers covers any headers that may be returned from the response.
+ Headers []ParameterDescriptor
+
+ // Fields describes any fields that may be present in the response.
+ Fields []ParameterDescriptor
+
+ // ErrorCodes enumerates the error codes that may be returned along with
+ // the response.
+ ErrorCodes []errcode.ErrorCode
+
+ // Body describes the body of the response, if any.
+ Body BodyDescriptor
+}
+
+// BodyDescriptor describes a request body and its expected content type. For
+// the most part, it should be example json or some placeholder for body
+// data in documentation.
+type BodyDescriptor struct {
+ ContentType string
+ Format string
+}
+
+// ParameterDescriptor describes the format of a request parameter, which may
+// be a header, path parameter or query parameter.
+type ParameterDescriptor struct {
+ // Name is the name of the parameter, either of the path component or
+ // query parameter.
+ Name string
+
+ // Type specifies the type of the parameter, such as string, integer, etc.
+ Type string
+
+ // Description provides a human-readable description of the parameter.
+ Description string
+
+ // Required means the field is required when set.
+ Required bool
+
+ // Format specifies the string format accepted by this parameter.
+ Format string
+
+ // Regexp is a compiled regular expression that can be used to validate
+ // the contents of the parameter.
+ Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + unauthorizedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + invalidPaginationResponseDescriptor, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": "" + } + }, + ... 
+ ] +}`, + }, + }, + { + Name: "Not allowed", + Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Not allowed", + Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octect-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Required: true, + Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding the length of the request body.", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Chunk Accepted", + Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", + Requests: []RequestDescriptor{ + { + Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeUnsupported, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, json list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch", + Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a json response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + invalidPaginationResponseDescriptor, + }, + }, + }, + }, + }, + }, +} + +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go new file mode 100644 index 000000000..cde011959 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core. 
+package v2
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 000000000..87e9f3c14
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,145 @@
+package v2
+
+import (
+ "net/http"
+
+ "github.com/docker/distribution/registry/api/errcode"
+)
+
+const errGroup = "registry.api.v2"
+
+var (
+ // ErrorCodeDigestInvalid is returned when uploading a blob if the
+ // provided digest does not match the blob contents.
+ ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "DIGEST_INVALID",
+ Message: "provided digest did not match uploaded content",
+ Description: `When a blob is uploaded, the registry will check that
+ the content matches the digest provided by the client. The error may
+ include a detail structure with the key "digest", including the
+ invalid digest string. This error may also be returned when a manifest
+ includes an invalid layer digest.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+ // size does not match the uploaded content length.
+ ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "SIZE_INVALID",
+ Message: "provided length did not match content length",
+ Description: `When a layer is uploaded, the provided size will be
+ checked against the uploaded content. If they do not match, this error
+ will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeNameInvalid is returned when the name in the manifest does not
+ // match the provided name.
+ ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "NAME_INVALID",
+ Message: "invalid repository name",
+ Description: `Invalid repository name encountered either during
+ manifest validation or any API operation.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeTagInvalid is returned when the tag in the manifest does not
+ // match the provided tag.
+ ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "TAG_INVALID",
+ Message: "manifest tag did not match URI",
+ Description: `During a manifest upload, if the tag in the manifest
+ does not match the uri tag, this error will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeNameUnknown is returned when the repository name is not known.
+ ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "NAME_UNKNOWN",
+ Message: "repository name not known to registry",
+ Description: `This is returned if the name used during an operation is
+ unknown to the registry.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeManifestUnknown is returned when the image manifest is unknown.
+ ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_UNKNOWN",
+ Message: "manifest unknown",
+ Description: `This error is returned when the manifest, identified by
+ name and tag, is unknown to the repository.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeManifestInvalid is returned when an image manifest is invalid,
+ // typically during a PUT operation. This error encompasses all errors
+ // encountered during manifest validation that aren't signature errors.
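+ //
+ // Illustrative usage (a sketch, not part of the vendored file): an
+ // implementation can attach the failing check to the error detail, e.g.
+ //
+ //	return ErrorCodeManifestInvalid.WithDetail("unexpected media type")
+ //
+ // WithDetail comes from the errcode package and preserves the
+ // descriptor's HTTP status code when served.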
+ ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_INVALID",
+ Message: "manifest invalid",
+ Description: `During upload, manifests undergo several checks ensuring
+ validity. If those checks fail, this error may be returned, unless a
+ more specific error is included. The detail will contain information
+ about the failed validation.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeManifestUnverified is returned when the manifest fails
+ // signature verification.
+ ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_UNVERIFIED",
+ Message: "manifest failed signature verification",
+ Description: `During manifest upload, if the manifest fails signature
+ verification, this error will be returned.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeManifestBlobUnknown is returned when a manifest blob is
+ // unknown to the registry.
+ ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "MANIFEST_BLOB_UNKNOWN",
+ Message: "blob unknown to registry",
+ Description: `This error may be returned when a manifest blob is
+ unknown to the registry.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+
+ // ErrorCodeBlobUnknown is returned when a blob is unknown to the
+ // registry. This can happen when the manifest references a nonexistent
+ // layer or the result is not found by a blob fetch.
+ ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UNKNOWN",
+ Message: "blob unknown to registry",
+ Description: `This error may be returned when a blob is unknown to the
+ registry in a specified repository. This can be returned with a
+ standard get or if a manifest references an unknown layer during
+ upload.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+ ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UPLOAD_UNKNOWN",
+ Message: "blob upload unknown to registry",
+ Description: `If a blob upload has been cancelled or was never
+ started, this error code may be returned.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
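+ //
+ // For illustration only (a sketch, not from the upstream source), a
+ // server might reject a stale upload session with this code:
+ //
+ //	_ = errcode.ServeJSON(w, ErrorCodeBlobUploadInvalid.WithDetail("upload session expired"))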
+ ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "BLOB_UPLOAD_INVALID",
+ Message: "blob upload invalid",
+ Description: `The blob upload encountered an error and can no
+ longer proceed.`,
+ HTTPStatusCode: http.StatusNotFound,
+ })
+
+ // ErrorCodePaginationNumberInvalid is returned when the "n" query
+ // parameter of a paginated request is not valid.
+ ErrorCodePaginationNumberInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+ Value: "PAGINATION_NUMBER_INVALID",
+ Message: "invalid number of results requested",
+ Description: `Returned when the "n" parameter (number of results
+ to return) is not an integer, "n" is negative or "n" is bigger than
+ the maximum allowed.`,
+ HTTPStatusCode: http.StatusBadRequest,
+ })
+)
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
new file mode 100644
index 000000000..9bc41a3a6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
@@ -0,0 +1,161 @@
+package v2
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+var (
+ // according to rfc7230
+ reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
+ reQuotedValue = regexp.MustCompile(`^[^\\"]+`)
+ reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
+)
+
+// parseForwardedHeader is a benevolent parser of the Forwarded header defined in rfc7239. The header contains
+// a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The
+// function parses only the first element of the list, which is set by the very first proxy. It returns a map
+// of corresponding key-value pairs and an unparsed slice of the input string.
+//
+// Examples of Forwarded header values:
+//
+// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
+// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
+//
+// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
+// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
+func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
+ // Following are states of forwarded header parser. Any state could transition to a failure.
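+ //
+ // Worked example (illustrative): parsing `for=192.0.2.43;proto=https`
+ // moves Parameter("for") -> KeyValueDelimiter -> Value("192.0.2.43") ->
+ // PairEnd(';') -> Parameter("proto") -> KeyValueDelimiter ->
+ // Value("https") -> PairEnd, and terminates at end of input with the map
+ // {"for": "192.0.2.43", "proto": "https"} and an empty remainder.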
+ const ( + // terminating state; can transition to Parameter + stateElement = iota + // terminating state; can transition to KeyValueDelimiter + stateParameter + // can transition to Value + stateKeyValueDelimiter + // can transition to one of { QuotedValue, PairEnd } + stateValue + // can transition to one of { EscapedCharacter, PairEnd } + stateQuotedValue + // can transition to one of { QuotedValue } + stateEscapedCharacter + // terminating state; can transition to one of { Parameter, Element } + statePairEnd + ) + + var ( + parameter string + value string + parse = forwarded[:] + res = map[string]string{} + state = stateElement + ) + +Loop: + for { + // skip spaces unless in quoted value + if state != stateQuotedValue && state != stateEscapedCharacter { + parse = strings.TrimLeftFunc(parse, unicode.IsSpace) + } + + if len(parse) == 0 { + if state != stateElement && state != statePairEnd && state != stateParameter { + return nil, parse, fmt.Errorf("unexpected end of input") + } + // terminating + break + } + + switch state { + // terminate at list element delimiter + case stateElement: + if parse[0] == ',' { + parse = parse[1:] + break Loop + } + state = stateParameter + + // parse parameter (the key of key-value pair) + case stateParameter: + match := reToken.FindString(parse) + if len(match) == 0 { + return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) + } + parameter = strings.ToLower(match) + parse = parse[len(match):] + state = stateKeyValueDelimiter + + // parse '=' + case stateKeyValueDelimiter: + if parse[0] != '=' { + return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) + } + parse = parse[1:] + state = stateValue + + // parse value or quoted value + case stateValue: + if parse[0] == '"' { + parse = parse[1:] + state = stateQuotedValue + } else { + value = reToken.FindString(parse) + if len(value) == 0 { + return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) + } + if _, exists := res[parameter]; exists { + return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) + } + res[parameter] = value + parse = parse[len(value):] + value = "" + state = statePairEnd + } + + // parse a part of quoted value until the first backslash + case stateQuotedValue: + match := reQuotedValue.FindString(parse) + value += match + parse = parse[len(match):] + switch { + case len(parse) == 0: + return nil, parse, fmt.Errorf("unterminated quoted string") + case parse[0] == '"': + res[parameter] = value + value = "" + parse = parse[1:] + state = statePairEnd + case parse[0] == '\\': + parse = parse[1:] + state = stateEscapedCharacter + } + + // parse escaped character in a quoted string, ignore the backslash + // transition back to QuotedValue state + case stateEscapedCharacter: + c := reEscapedCharacter.FindString(parse) + if len(c) == 0 { + return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) + } + value += c + parse = parse[1:] + state = stateQuotedValue + + // expect either a new key-value pair, new list or end of input + case statePairEnd: + switch parse[0] { + case ';': + parse = parse[1:] + state = stateParameter + case ',': + state = stateElement + default: + return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) + } + } + } + + return res, parse, nil +} diff --git 
a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 000000000..9612ac2e5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,40 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the name under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+ RouteNameBase = "base"
+ RouteNameManifest = "manifest"
+ RouteNameTags = "tags"
+ RouteNameBlob = "blob"
+ RouteNameBlobUpload = "blob-upload"
+ RouteNameBlobUploadChunk = "blob-upload-chunk"
+ RouteNameCatalog = "catalog"
+)
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+ return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+ rootRouter := mux.NewRouter()
+ router := rootRouter
+ if prefix != "" {
+ router = router.PathPrefix(prefix).Subrouter()
+ }
+
+ router.StrictSlash(true)
+
+ for _, descriptor := range routeDescriptors {
+ router.Path(descriptor.Path).Name(descriptor.Name)
+ }
+
+ return rootRouter
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 000000000..3c3ec9893
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,254 @@
+package v2
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/docker/distribution/reference"
+ "github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a scheme, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+ root *url.URL // url root (ie http://localhost/)
+ router *mux.Router
+ relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+ return &URLBuilder{
+ root: root,
+ router: Router(),
+ relative: relative,
+ }
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+ u, err := url.Parse(root)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewURLBuilder(u, relative), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
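+//
+// For example (illustrative): a request carrying "X-Forwarded-Proto: https"
+// and "X-Forwarded-Host: registry.example.org" yields a builder rooted at
+// "https://registry.example.org".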
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+	var (
+		scheme = "http"
+		host   = r.Host
+	)
+
+	if r.TLS != nil {
+		scheme = "https"
+	} else if len(r.URL.Scheme) > 0 {
+		scheme = r.URL.Scheme
+	}
+
+	// Handle forwarded headers
+	// Prefer "Forwarded" header as defined by rfc7239 if given
+	// see https://tools.ietf.org/html/rfc7239
+	if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
+		forwardedHeader, _, err := parseForwardedHeader(forwarded)
+		if err == nil {
+			if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
+				scheme = fproto
+			}
+			if fhost := forwardedHeader["host"]; len(fhost) > 0 {
+				host = fhost
+			}
+		}
+	} else {
+		if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
+			scheme = forwardedProto
+		}
+		if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+			// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+			// comma-separated list of hosts, to which each proxy appends the
+			// requested host. We want to grab the first from this comma-separated
+			// list.
+			hosts := strings.SplitN(forwardedHost, ",", 2)
+			host = strings.TrimSpace(hosts[0])
+		}
+	}
+
+	basePath := routeDescriptorsMap[RouteNameBase].Path
+
+	requestPath := r.URL.Path
+	index := strings.Index(requestPath, basePath)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+
+	if index > 0 {
+		// N.B. index+1 is important because we want to include the trailing /
+		u.Path = requestPath[0 : index+1]
+	}
+
+	return NewURLBuilder(u, relative)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+	route := ub.cloneRoute(RouteNameBase)
+
+	baseURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameCatalog)
+
+	catalogURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameTags)
+
+	tagsURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameManifest)
+
+	tagOrDigest := ""
+	switch v := ref.(type) {
+	case reference.Tagged:
+		tagOrDigest = v.Tag()
+	case reference.Digested:
+		tagOrDigest = v.Digest().String()
+	default:
+		return "", fmt.Errorf("reference must have a tag or digest")
+	}
+
+	manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
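+//
+// Illustrative sketch (ub and dgst are assumed to exist): given a canonical
+// reference, the resulting url addresses the blob by digest:
+//
+//	named, _ := reference.WithName("library/ubuntu")
+//	canonical, _ := reference.WithDigest(named, dgst)
+//	u, _ := ub.BuildBlobURL(canonical)
+//	// e.g. ".../v2/library/ubuntu/blobs/sha256:<hex>"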
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+	route := ub.cloneRoute(RouteNameBlob)
+
+	layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUpload)
+
+	uploadURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+	uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+	route := new(mux.Route)
+	root := new(url.URL)
+
+	*route = *ub.router.GetRoute(name) // clone the route
+	*root = *ub.root
+
+	return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+	*mux.Route
+	root     *url.URL
+	relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+	routeURL, err := cr.Route.URL(pairs...)
+	if err != nil {
+		return nil, err
+	}
+
+	if cr.relative {
+		return routeURL, nil
+	}
+
+	if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+		routeURL.Path = routeURL.Path[1:]
+	}
+
+	url := cr.root.ResolveReference(routeURL)
+	url.Scheme = cr.root.Scheme
+	return url, nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+	merged := u.Query()
+
+	for _, v := range values {
+		for k, vv := range v {
+			merged[k] = append(merged[k], vv...)
+		}
+	}
+
+	u.RawQuery = merged.Encode()
+	return u
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
new file mode 100644
index 000000000..7d8f1d957
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
@@ -0,0 +1,58 @@
+package auth
+
+import (
+	"net/http"
+	"strings"
+)
+
+// APIVersion represents a version of an API including its
+// type and version number.
+type APIVersion struct {
+	// Type refers to the name of a specific API specification
+	// such as "registry"
+	Type string
+
+	// Version is the version of the API specification implemented.
+	// This may omit the revision number and only include
+	// the major and minor version, such as "2.0"
+	Version string
+}
+
+// String returns the string formatted API Version
+func (v APIVersion) String() string {
+	return v.Type + "/" + v.Version
+}
+
+// APIVersions gets the API versions out of an HTTP response using the provided
+// version header as the key for the HTTP header.
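+//
+// Illustrative sketch (resp is an assumed *http.Response; registries
+// conventionally advertise the Docker-Distribution-Api-Version header):
+//
+//	for _, v := range APIVersions(resp, "Docker-Distribution-Api-Version") {
+//		fmt.Println(v.Type, v.Version) // e.g. "registry 2.0"
+//	}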
+func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
+	versions := []APIVersion{}
+	if versionHeader != "" {
+		for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
+			for _, version := range strings.Fields(supportedVersions) {
+				versions = append(versions, ParseAPIVersion(version))
+			}
+		}
+	}
+	return versions
+}
+
+// ParseAPIVersion parses an API version string into an APIVersion
+// Format (Expected, not enforced):
+// API version string = <API type> '/' <API version>
+// API type = [a-z][a-z0-9]*
+// API version = [0-9]+(\.[0-9]+)?
+// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
+func ParseAPIVersion(versionStr string) APIVersion {
+	idx := strings.IndexRune(versionStr, '/')
+	if idx == -1 {
+		return APIVersion{
+			Type:    "unknown",
+			Version: versionStr,
+		}
+	}
+	return APIVersion{
+		Type:    strings.ToLower(versionStr[:idx]),
+		Version: versionStr[idx+1:],
+	}
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
new file mode 100644
index 000000000..2c3ebe165
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
@@ -0,0 +1,27 @@
+package challenge
+
+import (
+	"net/url"
+	"strings"
+)
+
+// FROM: https://golang.org/src/net/http/http.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
new file mode 100644
index 000000000..fe238210c
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -0,0 +1,237 @@
+package challenge
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challenges: make(map[string][]Challenge),
+	}
+}
+
+type simpleManager struct {
+	sync.RWMutex
+	Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challenges[endpoint.String()]
+	return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challenges[urlCopy.String()] = challenges
+	return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
+		return parseAuthHeader(resp.Header)
+	}
+
+	return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+	challenges := []Challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go
new file mode 100644
index 000000000..aad8a0e6f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go
@@ -0,0 +1,530 @@
+package auth
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/docker/distribution/registry/client/transport"
+)
+
+var (
+	// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
+	// basic auth due to lack of credentials.
+	ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
+
+	// ErrNoToken is returned if a request is successful but the body does not
+	// contain an authorization token.
+	ErrNoToken = errors.New("authorization server did not include a token in the response")
+)
+
+const defaultClientID = "registry-client"
+
+// AuthenticationHandler is an interface for authorizing a request using
+// params from a "WWW-Authenticate" header for a single scheme.
+type AuthenticationHandler interface {
+	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
+	Scheme() string
+
+	// AuthorizeRequest adds the authorization header to a request (if needed)
+	// using the parameters from the "WWW-Authenticate" method. The parameter
+	// values depend on the scheme.
+ AuthorizeRequest(req *http.Request, params map[string]string) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) +} + +// NewAuthorizer creates an authorizer which can handle multiple authentication +// schemes. The handlers are tried in order, the higher priority authentication +// methods should be first. The challengeMap holds a list of challenges for +// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). +func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { + return &endpointAuthorizer{ + challenges: manager, + handlers: handlers, + } +} + +type endpointAuthorizer struct { + challenges challenge.Manager + handlers []AuthenticationHandler +} + +func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { + pingPath := req.URL.Path + if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { + pingPath = pingPath[:v2Root+4] + } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { + pingPath = pingPath[:v1Root] + "/v2/" + } else { + return nil + } + + ping := url.URL{ + Host: req.URL.Host, + Scheme: req.URL.Scheme, + Path: pingPath, + } + + challenges, err := ea.challenges.GetChallenges(ping) + if err != nil { + return err + } + + if len(challenges) > 0 { + for _, handler := range ea.handlers { + for _, c := range challenges { + if c.Scheme != handler.Scheme() { + continue + } + if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { + return err + } + } + } + } + + return nil +} + +// This is the minimum duration a token can last (in seconds). +// A token must not live less than 60 seconds because older versions +// of the Docker client didn't read their expiration from the token +// response and assumed 60 seconds. So to remain compatible with +// those implementations, a token must live at least this long. +const minimumTokenLifetimeSeconds = 60 + +// Private interface for time used by this package to enable tests to provide their own implementation. +type clock interface { + Now() time.Time +} + +type tokenHandler struct { + creds CredentialStore + transport http.RoundTripper + clock clock + + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time + + logger Logger +} + +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string +} + +// RepositoryScope represents a token scope for access +// to a repository. +type RepositoryScope struct { + Repository string + Class string + Actions []string +} + +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + repoType := "repository" + // Keep existing format for image class to maintain backwards compatibility + // with authorization servers which do not support the expanded grammar. 
+	if rs.Class != "" && rs.Class != "image" {
+		repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
+	}
+	return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
+}
+
+// RegistryScope represents a token scope for access
+// to resources in the registry.
+type RegistryScope struct {
+	Name    string
+	Actions []string
+}
+
+// String returns the string representation of the registry scope
+// using the scope grammar
+func (rs RegistryScope) String() string {
+	return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
+}
+
+// Logger defines the injectable logging interface, used on TokenHandlers.
+type Logger interface {
+	Debugf(format string, args ...interface{})
+}
+
+func logDebugf(logger Logger, format string, args ...interface{}) {
+	if logger == nil {
+		return
+	}
+	logger.Debugf(format, args...)
+}
+
+// TokenHandlerOptions is used to configure a new token handler
+type TokenHandlerOptions struct {
+	Transport   http.RoundTripper
+	Credentials CredentialStore
+
+	OfflineAccess bool
+	ForceOAuth    bool
+	ClientID      string
+	Scopes        []Scope
+	Logger        Logger
+}
+
+// An implementation of clock for providing real time data.
+type realClock struct{}
+
+// Now implements clock
+func (realClock) Now() time.Time { return time.Now() }
+
+// NewTokenHandler creates a new AuthenticationHandler which supports
+// fetching tokens from a remote token server.
+func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
+	// Create options...
+	return NewTokenHandlerWithOptions(TokenHandlerOptions{
+		Transport:   transport,
+		Credentials: creds,
+		Scopes: []Scope{
+			RepositoryScope{
+				Repository: scope,
+				Actions:    actions,
+			},
+		},
+	})
+}
+
+// NewTokenHandlerWithOptions creates a new token handler using the provided
+// options structure.
+func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
+	handler := &tokenHandler{
+		transport:     options.Transport,
+		creds:         options.Credentials,
+		offlineAccess: options.OfflineAccess,
+		forceOAuth:    options.ForceOAuth,
+		clientID:      options.ClientID,
+		scopes:        options.Scopes,
+		clock:         realClock{},
+		logger:        options.Logger,
+	}
+
+	return handler
+}
+
+func (th *tokenHandler) client() *http.Client {
+	return &http.Client{
+		Transport: th.transport,
+		Timeout:   15 * time.Second,
+	}
+}
+
+func (th *tokenHandler) Scheme() string {
+	return "bearer"
+}
+
+func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
+	var additionalScopes []string
+	if fromParam := req.URL.Query().Get("from"); fromParam != "" {
+		additionalScopes = append(additionalScopes, RepositoryScope{
+			Repository: fromParam,
+			Actions:    []string{"pull"},
+		}.String())
+	}
+
+	token, err := th.getToken(params, additionalScopes...)
+ if err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + return nil +} + +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { + th.tokenLock.Lock() + defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } + var addedScopes bool + for _, scope := range additionalScopes { + if hasScope(scopes, scope) { + continue + } + scopes = append(scopes, scope) + addedScopes = true + } + + now := th.clock.Now() + if now.After(th.tokenExpiration) || addedScopes { + token, expiration, err := th.fetchToken(params, scopes) + if err != nil { + return "", err + } + + // do not update cache for added scope tokens + if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } + + return token, nil + } + + return th.tokenCache, nil +} + +func hasScope(scopes []string, scope string) bool { + for _, s := range scopes { + if s == scope { + return true + } + } + return false +} + +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) + + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = defaultClientID + } + form.Set("client_id", clientID) + + if refreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", time.Time{}, fmt.Errorf("no supported grant type") + } + + resp, err := th.client().PostForm(realm.String(), form) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. 
+		tr.IssuedAt = th.clock.Now().UTC()
+	}
+
+	return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
+}
+
+type getTokenResponse struct {
+	Token        string    `json:"token"`
+	AccessToken  string    `json:"access_token"`
+	ExpiresIn    int       `json:"expires_in"`
+	IssuedAt     time.Time `json:"issued_at"`
+	RefreshToken string    `json:"refresh_token"`
+}
+
+func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
+
+	req, err := http.NewRequest("GET", realm.String(), nil)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+
+	reqParams := req.URL.Query()
+
+	if service != "" {
+		reqParams.Add("service", service)
+	}
+
+	for _, scope := range scopes {
+		reqParams.Add("scope", scope)
+	}
+
+	if th.offlineAccess {
+		reqParams.Add("offline_token", "true")
+		clientID := th.clientID
+		if clientID == "" {
+			clientID = defaultClientID
+		}
+		reqParams.Add("client_id", clientID)
+	}
+
+	if th.creds != nil {
+		username, password := th.creds.Basic(realm)
+		if username != "" && password != "" {
+			reqParams.Add("account", username)
+			req.SetBasicAuth(username, password)
+		}
+	}
+
+	req.URL.RawQuery = reqParams.Encode()
+
+	resp, err := th.client().Do(req)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+	defer resp.Body.Close()
+
+	if !client.SuccessStatus(resp.StatusCode) {
+		err := client.HandleErrorResponse(resp)
+		return "", time.Time{}, err
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	var tr getTokenResponse
+	if err = decoder.Decode(&tr); err != nil {
+		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	if tr.RefreshToken != "" && th.creds != nil {
+		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
+	}
+
+	// `access_token` is equivalent to `token` and if both are specified
+	// the choice is undefined. Canonicalize `access_token` by sticking
+	// things in `token`.
+	if tr.AccessToken != "" {
+		tr.Token = tr.AccessToken
+	}
+
+	if tr.Token == "" {
+		return "", time.Time{}, ErrNoToken
+	}
+
+	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
+		// The default/minimum lifetime.
+		tr.ExpiresIn = minimumTokenLifetimeSeconds
+		logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
+	}
+
+	if tr.IssuedAt.IsZero() {
+		// issued_at is optional in the token response.
+		tr.IssuedAt = th.clock.Now().UTC()
+	}
+
+	return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
+}
+
+func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
+	realm, ok := params["realm"]
+	if !ok {
+		return "", time.Time{}, errors.New("no realm specified for token auth challenge")
+	}
+
+	// TODO(dmcgowan): Handle empty scheme and relative realm
+	realmURL, err := url.Parse(realm)
+	if err != nil {
+		return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
+	}
+
+	service := params["service"]
+
+	var refreshToken string
+
+	if th.creds != nil {
+		refreshToken = th.creds.RefreshToken(realmURL, service)
+	}
+
+	if refreshToken != "" || th.forceOAuth {
+		return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
+	}
+
+	return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
+}
+
+type basicHandler struct {
+	creds CredentialStore
+}
+
+// NewBasicHandler creates a new authentication handler which adds
+// basic authentication credentials to a request.
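+//
+// Illustrative usage sketch (cs is an assumed CredentialStore implementation;
+// names are hypothetical): basic and bearer handlers are typically combined
+// with a challenge manager into a request-modifying transport:
+//
+//	cm := challenge.NewSimpleManager()
+//	th := NewTokenHandler(http.DefaultTransport, cs, "library/ubuntu", "pull")
+//	bh := NewBasicHandler(cs)
+//	rt := transport.NewTransport(http.DefaultTransport, NewAuthorizer(cm, th, bh))
+//	httpClient := &http.Client{Transport: rt}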
+func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return ErrNoBasicAuthCredentials +} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go new file mode 100644 index 000000000..695bf852f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -0,0 +1,162 @@ +package client + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/docker/distribution" +) + +type httpBlobUpload struct { + statter distribution.BlobStatter + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header. + offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return HandleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu *httpBlobUpload) Commit(ctx context.Context, 
desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go new file mode 100644 index 000000000..024df43dd --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -0,0 +1,141 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth/challenge" +) + +// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty +// errcode.Errors slice. +var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type UnexpectedHTTPStatusError struct { + Status string +} + +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +} + +// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. +type UnexpectedHTTPResponseError struct { + ParseErr error + StatusCode int + Response []byte +} + +func (e *UnexpectedHTTPResponseError) Error() string { + return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) +} + +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { + var errors errcode.Errors + body, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + // For backward compatibility, handle irregularly formatted + // messages that contain a "details" field. 
+ var detailsErr struct { + Details string `json:"details"` + } + err = json.Unmarshal(body, &detailsErr) + if err == nil && detailsErr.Details != "" { + switch statusCode { + case http.StatusUnauthorized: + return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + case http.StatusForbidden: + return errcode.ErrorCodeDenied.WithMessage(detailsErr.Details) + case http.StatusTooManyRequests: + return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) + default: + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) + } + } + + if err := json.Unmarshal(body, &errors); err != nil { + return &UnexpectedHTTPResponseError{ + ParseErr: err, + StatusCode: statusCode, + Response: body, + } + } + + if len(errors) == 0 { + // If there was no error specified in the body, return + // UnexpectedHTTPResponseError. + return &UnexpectedHTTPResponseError{ + ParseErr: ErrNoErrorsInBody, + StatusCode: statusCode, + Response: body, + } + } + + return errors +} + +func makeErrorList(err error) []error { + if errL, ok := err.(errcode.Errors); ok { + return []error(errL) + } + return []error{err} +} + +func mergeErrors(err1, err2 error) error { + return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) +} + +// HandleErrorResponse returns error parsed from HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError returned for response code outside of expected +// range. +func HandleErrorResponse(resp *http.Response) error { + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + // Check for OAuth errors within the `WWW-Authenticate` header first + // See https://tools.ietf.org/html/rfc6750#section-3 + for _, c := range challenge.ResponseChallenges(resp) { + if c.Scheme == "bearer" { + var err errcode.Error + // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 + switch c.Parameters["error"] { + case "invalid_token": + err.Code = errcode.ErrorCodeUnauthorized + case "insufficient_scope": + err.Code = errcode.ErrorCodeDenied + default: + continue + } + if description := c.Parameters["error_description"]; description != "" { + err.Message = description + } else { + err.Message = err.Code.Message() + } + + return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) + } + } + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + return &UnexpectedHTTPStatusError{Status: resp.Status} +} + +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). 
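+//
+// Illustrative usage sketch (httpClient and req are assumed): callers
+// typically pair SuccessStatus with HandleErrorResponse:
+//
+//	resp, err := httpClient.Do(req)
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()
+//	if !SuccessStatus(resp.StatusCode) {
+//		return HandleErrorResponse(resp)
+//	}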
+func SuccessStatus(status int) bool {
+	return status >= 200 && status <= 399
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
new file mode 100644
index 000000000..04e5a3ba0
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/repository.go
@@ -0,0 +1,870 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	v2 "github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/distribution/registry/storage/cache"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/opencontainers/go-digest"
+)
+
+// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
+type Registry interface {
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// checkHTTPRedirect is a callback that can manipulate redirected HTTP
+// requests. It is used to preserve Accept and Range headers.
+func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
+	if len(via) >= 10 {
+		return errors.New("stopped after 10 redirects")
+	}
+
+	if len(via) > 0 {
+		for headerName, headerVals := range via[0].Header {
+			if headerName != "Accept" && headerName != "Range" {
+				continue
+			}
+			for _, val := range headerVals {
+				// Don't add to redirected request if redirected
+				// request already has a header with the same
+				// name and value.
+				hasValue := false
+				for _, existingVal := range req.Header[headerName] {
+					if existingVal == val {
+						hasValue = true
+						break
+					}
+				}
+				if !hasValue {
+					req.Header.Add(headerName, val)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL, false)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport:     transport,
+		Timeout:       1 * time.Minute,
+		CheckRedirect: checkHTTPRedirect,
+	}
+
+	return &registry{
+		client: client,
+		ub:     ub,
+	}, nil
+}
+
+type registry struct {
+	client *http.Client
+	ub     *v2.URLBuilder
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'.
The number of entries will be returned along with io.EOF if there +// are no more entries +func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { + var numFilled int + var returnErr error + + values := buildCatalogValues(len(entries), last) + u, err := r.ub.BuildCatalogURL(values) + if err != nil { + return 0, err + } + + resp, err := r.client.Get(u) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } + + copy(entries, ctlg.Repositories) + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF + } + } else { + return 0, HandleErrorResponse(resp) + } + + return numFilled, returnErr +} + +// NewRepository creates a new Repository for the given repository name and base URL. +func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + CheckRedirect: checkHTTPRedirect, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + name reference.Named +} + +func (r *repository) Named() reference.Named { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.name, + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.name, + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.name, + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + name: r.Named(), + } +} + +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + name reference.Named +} + +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + listURLStr, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + listURL, err := url.Parse(listURLStr) + if err != nil { + return tags, err + } + + for { + resp, err := t.client.Get(listURL.String()) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = append(tags, tagsResponse.Tags...) 
+ if link := resp.Header.Get("Link"); link != "" { + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + listURL = listURL.ResolveReference(linkURL) + } else { + return tags, nil + } + } else { + return tags, HandleErrorResponse(resp) + } + } +} + +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.Parse(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + +} + +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. +func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + ref, err := reference.WithTag(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + newRequest := func(method string) (*http.Response, error) { + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + resp, err := t.client.Do(req) + return resp, err + } + + resp, err := newRequest("HEAD") + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: + // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers + return descriptorFromResponse(resp) + default: + // if the response is an error - there will be no body to decode. 
+		// Issue a GET request:
+		//   - for data from a server that does not handle HEAD
+		//   - to get error details in case of a failure
+		resp, err = newRequest("GET")
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+			return descriptorFromResponse(resp)
+		}
+		return distribution.Descriptor{}, HandleErrorResponse(resp)
+	}
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+	panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+	panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
+	panic("not implemented")
+}
+
+type manifests struct {
+	name   reference.Named
+	ub     *v2.URLBuilder
+	client *http.Client
+	etags  map[string]string
+}
+
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	ref, err := reference.WithDigest(ms.name, dgst)
+	if err != nil {
+		return false, err
+	}
+	u, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return false, err
+	}
+
+	resp, err := ms.client.Head(u)
+	if err != nil {
+		return false, err
+	}
+
+	if SuccessStatus(resp.StatusCode) {
+		return true, nil
+	} else if resp.StatusCode == http.StatusNotFound {
+		return false, nil
+	}
+	return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request. If the eTag matches, a nil manifest
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+	return etagOption{tag, etag}
+}
+
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+	if ms, ok := ms.(*manifests); ok {
+		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+		return nil
+	}
+	return fmt.Errorf("etag option is a client-only option")
+}
+
+// ReturnContentDigest allows a client to set the content digest on
+// a successful request from the 'Docker-Content-Digest' header. This
+// returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
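+//
+// Illustrative usage sketch (ms and ctx are assumed; the tag is hypothetical):
+//
+//	var dgst digest.Digest
+//	m, err := ms.Get(ctx, "", distribution.WithTag("latest"), ReturnContentDigest(&dgst))
+//	// on success, m is the manifest and dgst holds the registry's digest for it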
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + contentDgst *digest.Digest + mediaTypes []string + ) + + for _, option := range options { + switch opt := option.(type) { + case distribution.WithTagOption: + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + case contentDigestOption: + contentDgst = opt.digest + case distribution.WithManifestMediaTypesOption: + mediaTypes = opt.MediaTypes + default: + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } + } + + if len(mediaTypes) == 0 { + mediaTypes = distribution.ManifestMediaTypes() + } + + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + for _, t := range mediaTypes { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) + } + + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, distribution.ErrManifestNotModified + } else if SuccessStatus(resp.StatusCode) { + if contentDgst != nil { + dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil + } + return nil, HandleErrorResponse(resp) +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. 
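+//
+// Illustrative usage sketch (ms, ctx, and manifest m are assumed):
+//
+//	dgst, err := ms.Put(ctx, m, distribution.WithTag("latest"))
+//
+// Without a tag option, the manifest is put by its canonical digest.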
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + var tagged bool + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + + resp, err := ms.client.Do(putRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.Parse(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil + } + + return "", HandleErrorResponse(resp) +} + +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + +type blobs struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + return baseURL.ResolveReference(locationURL).String(), nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + reader, err := bs.Open(ctx, dgst) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil +} + +func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.Digester() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
+ if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", u, nil) + if err != nil { + return nil, err + } + + resp, err := bs.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + default: + return nil, HandleErrorResponse(resp) + } +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +type blobStatter struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + } else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, HandleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go new file mode 100644 index 000000000..9120dbed6 --- /dev/null +++ 
b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -0,0 +1,249 @@
+package transport
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "regexp"
+ "strconv"
+)
+
+var (
+ contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+ // ErrWrongCodeForByteRange is returned if the client sends a request
+ // with a Range header but the server returns a 2xx or 3xx code other
+ // than 206 Partial Content.
+ ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+ io.ReadSeeker
+ io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset,
+// a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
+ return &httpReadSeeker{
+ client: client,
+ url: url,
+ errorHandler: errorHandler,
+ }
+}
+
+type httpReadSeeker struct {
+ client *http.Client
+ url string
+
+ // errorHandler creates an error from an unsuccessful HTTP response.
+ // This allows the error to be created with the HTTP response body
+ // without leaking the body through a returned error.
+ errorHandler func(*http.Response) error
+
+ size int64
+
+ // rc is the remote read closer.
+ rc io.ReadCloser
+ // readerOffset tracks the offset as of the last read.
+ readerOffset int64
+ // seekOffset allows Seek to override the offset. Seek changes
+ // seekOffset instead of changing readerOffset directly so that
+ // connection resets can be delayed and possibly avoided if the
+ // seek is undone (i.e. seeking to the end and then back to the
+ // beginning).
+ seekOffset int64
+ err error
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+ if hrs.err != nil {
+ return 0, hrs.err
+ }
+
+ // If we sought to a different position, we need to reset the
+ // connection. This logic is here instead of Seek so that if
+ // a seek is undone before the next read, the connection doesn't
+ // need to be closed and reopened. A common example of this is
+ // seeking to the end to determine the length, and then seeking
+ // back to the original position.
+ if hrs.readerOffset != hrs.seekOffset {
+ hrs.reset()
+ }
+
+ hrs.readerOffset = hrs.seekOffset
+
+ rd, err := hrs.reader()
+ if err != nil {
+ return 0, err
+ }
+
+ n, err = rd.Read(p)
+ hrs.seekOffset += int64(n)
+ hrs.readerOffset += int64(n)
+
+ return n, err
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ if hrs.err != nil {
+ return 0, hrs.err
+ }
+
+ lastReaderOffset := hrs.readerOffset
+
+ if whence == io.SeekStart && hrs.rc == nil {
+ // If no request has been made yet, and we are seeking to an
+ // absolute position, set the read offset as well to avoid an
+ // unnecessary request.
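+ // (The reader() call just below will then open the connection at
+ // this offset.)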
+ hrs.readerOffset = offset + } + + _, err := hrs.reader() + if err != nil { + hrs.readerOffset = lastReaderOffset + return 0, err + } + + newOffset := hrs.seekOffset + + switch whence { + case io.SeekCurrent: + newOffset += offset + case io.SeekEnd: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } + newOffset = hrs.size + offset + case io.SeekStart: + newOffset = offset + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + hrs.seekOffset = newOffset + } + + return hrs.seekOffset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.rc, nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.readerOffset > 0 { + // If we are at different offset, issue a range request from there. + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } + hrs.rc = resp.Body + } else { + defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + return hrs.rc, nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go 
b/vendor/github.com/docker/distribution/registry/client/transport/transport.go new file mode 100644 index 000000000..30e45fab0 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go @@ -0,0 +1,147 @@
+package transport
+
+import (
+ "io"
+ "net/http"
+ "sync"
+)
+
+// RequestModifier represents an object which will do an in-place
+// modification of an HTTP request.
+type RequestModifier interface {
+ ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a new RequestModifier which will
+// add the given headers to a request.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+ return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+ for k, s := range http.Header(h) {
+ req.Header[k] = append(req.Header[k], s...)
+ }
+
+ return nil
+}
+
+// NewTransport creates a new transport which will apply modifiers to
+// the request on a RoundTrip call.
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+ return &transport{
+ Modifiers: modifiers,
+ Base: base,
+ }
+}
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request.
+type transport struct {
+ Modifiers []RequestModifier
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip applies the registered request modifiers to a copy of the
+// incoming request and delegates to the base RoundTripper; the original
+// request is never mutated.
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req2 := cloneRequest(req)
+ for _, modifier := range t.Modifiers {
+ if err := modifier.ModifyRequest(req2); err != nil {
+ return nil, err
+ }
+ }
+
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ // Drop the original->modified mapping once the response body has been
+ // fully consumed or closed.
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header, so modifiers cannot mutate the caller's map
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go new file mode 100644 index 000000000..10a390919 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go @@ -0,0 +1,35 @@ +// Package cache provides facilities to speed up access to the storage +// backend. +package cache + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService + + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) +} + +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. +func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) + } + + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go new file mode 100644 index 000000000..ac4c45211 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -0,0 +1,129 @@ +package cache + +import ( + "context" + + "github.com/docker/distribution" + prometheus "github.com/docker/distribution/metrics" + "github.com/opencontainers/go-digest" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// Logger can be provided on the MetricsTracker to log errors. +// +// Usually, this is just a proxy to dcontext.GetLogger. +type Logger interface { + Errorf(format string, args ...interface{}) +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. +type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics + Logger(context.Context) Logger +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobDescriptorService + tracker MetricsTracker +} + +var ( + // cacheCount is the number of total cache request received/hits/misses + cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") +) + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. 
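+// Stat consults the cache first and only falls through to the backend on a
+// miss; the descriptor fetched from the backend is then written back to the
+// cache, so repeated stats of the same digest stay cheap (a cache-aside
+// pattern).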
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
+ return &cachedBlobStatter{
+ cache: cache,
+ backend: backend,
+ }
+}
+
+// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
+// falls back to a backend. Hits and misses will be sent to the tracker.
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
+ return &cachedBlobStatter{
+ cache: cache,
+ backend: backend,
+ tracker: tracker,
+ }
+}
+
+func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+ cacheCount.WithValues("Request").Inc(1)
+ desc, err := cbds.cache.Stat(ctx, dgst)
+ if err != nil {
+ if err != distribution.ErrBlobUnknown {
+ logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err)
+ }
+
+ // cache miss (or cache failure): consult the backend below
+ goto fallback
+ }
+ cacheCount.WithValues("Hit").Inc(1)
+ if cbds.tracker != nil {
+ cbds.tracker.Hit()
+ }
+ return desc, nil
+fallback:
+ cacheCount.WithValues("Miss").Inc(1)
+ if cbds.tracker != nil {
+ cbds.tracker.Miss()
+ }
+ desc, err = cbds.backend.Stat(ctx, dgst)
+ if err != nil {
+ return desc, err
+ }
+
+ if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+ logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
+ }
+
+ return desc, err
+}
+
+func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+ err := cbds.cache.Clear(ctx, dgst)
+ if err != nil {
+ return err
+ }
+
+ err = cbds.backend.Clear(ctx, dgst)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+ if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+ logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
+ }
+ return nil
+}
+
+func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
+ if tracker == nil {
+ return
+ }
+
+ logger := tracker.Logger(ctx)
+ if logger == nil {
+ return
+ }
+ logger.Errorf(format, args...)
+} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go new file mode 100644 index 000000000..42d94d9bd --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -0,0 +1,179 @@
+package memory
+
+import (
+ "context"
+ "sync"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/storage/cache"
+ "github.com/opencontainers/go-digest"
+)
+
+type inMemoryBlobDescriptorCacheProvider struct {
+ global *mapBlobDescriptorCache
+ repositories map[string]*mapBlobDescriptorCache
+ mu sync.RWMutex
+}
+
+// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
+// storing blob descriptor data.
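+// It keeps one global digest-to-descriptor map plus a lazily allocated map
+// per repository. An illustrative use (the repository name is only an
+// example):
+//
+//	provider := memory.NewInMemoryBlobDescriptorCacheProvider()
+//	scoped, err := provider.RepositoryScoped("library/ubuntu")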
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNormalizedNamed(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return repo.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.ErrBlobUnknown + } + + return repo.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + if repo == nil { + // allocate map since we are setting it now. + var ok bool + // have to read back value since we may have allocated elsewhere. + repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + repo = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = repo + } + rsimbdcp.repository = repo + } + rsimbdcp.parent.mu.Unlock() + + if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. 
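+// A sync.RWMutex allows concurrent Stat lookups while serializing writes, and
+// descriptors are checked with cache.ValidateDescriptor before being admitted.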
+type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go new file mode 100644 index 000000000..f22df2b85 --- /dev/null +++ b/vendor/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. 
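+ // It is the inverse of Get: given a descriptor, it reports the tags
+ // that point at it.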
+ Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf new file mode 100644 index 000000000..bd1b4bff6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -0,0 +1,51 @@ +github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b +github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 +github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go 4bbdd8ac624fc7a9ef7aec841c43d99b5fe65a29 https://github.com/golang-jwt/jwt.git # v3.2.2 +github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f +github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 +github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 +github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd +github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 
64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 v2.2.1 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2 diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE new file mode 100644 index 000000000..1ea555e2a --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 David Calavera + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go new file mode 100644 index 000000000..d1d0434cb --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// isValidCredsMessage checks if 'msg' contains invalid credentials error message. +// It returns whether the logs are free of invalid credentials errors and the error if it isn't. +// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. +func isValidCredsMessage(msg string) error { + if credentials.IsCredentialsMissingServerURLMessage(msg) { + return credentials.NewErrCredentialsMissingServerURL() + } + + if credentials.IsCredentialsMissingUsernameMessage(msg) { + return credentials.NewErrCredentialsMissingUsername() + } + + return nil +} + +// Store uses an external program to save credentials. +func Store(program ProgramFunc, creds *credentials.Credentials) error { + cmd := program("store") + + buffer := new(bytes.Buffer) + if err := json.NewEncoder(buffer).Encode(creds); err != nil { + return err + } + cmd.Input(buffer) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// Get executes an external program to get the credentials from a native store. 
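+// The helper is invoked as "<program> get" with the server URL written to its
+// stdin, and it is expected to print the matching credentials as JSON on
+// stdout. An illustrative call (the helper name is only an example):
+//
+//	p := client.NewShellProgramFunc("docker-credential-pass")
+//	creds, err := client.Get(p, "https://index.docker.io/v1/")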
+func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { + cmd := program("get") + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if credentials.IsErrCredentialsNotFoundMessage(t) { + return nil, credentials.NewErrCredentialsNotFound() + } + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + } + + resp := &credentials.Credentials{ + ServerURL: serverURL, + } + + if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { + return nil, err + } + + return resp, nil +} + +// Erase executes a program to remove the server credentials from the native store. +func Erase(program ProgramFunc, serverURL string) error { + cmd := program("erase") + cmd.Input(strings.NewReader(serverURL)) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// List executes a program to list server credentials in the native store. +func List(program ProgramFunc) (map[string]string, error) { + cmd := program("list") + cmd.Input(strings.NewReader("unused")) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) + } + + var resp map[string]string + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return nil, err + } + + return resp, nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go new file mode 100644 index 000000000..0183c0639 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -0,0 +1,57 @@ +package client + +import ( + "fmt" + "io" + "os" + + exec "golang.org/x/sys/execabs" +) + +// Program is an interface to execute external programs. +type Program interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// ProgramFunc is a type of function that initializes programs based on arguments. +type ProgramFunc func(args ...string) Program + +// NewShellProgramFunc creates programs that are executed in a Shell. +func NewShellProgramFunc(name string) ProgramFunc { + return NewShellProgramFuncWithEnv(name, nil) +} + +// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables +func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { + return func(args ...string) Program { + return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + } +} + +func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { + programCmd := exec.Command(commandName, args...) + programCmd.Env = os.Environ() + if env != nil { + for k, v := range *env { + programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + } + programCmd.Stderr = os.Stderr + return programCmd +} + +// Shell invokes shell commands to talk with a remote credentials helper. +type Shell struct { + cmd *exec.Cmd +} + +// Output returns responses from the remote credentials helper. 
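+// It runs the wrapped command and captures its standard output.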
+func (s *Shell) Output() ([]byte, error) {
+ return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *Shell) Input(in io.Reader) {
+ s.cmd.Stdin = in
+} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go new file mode 100644 index 000000000..91d9d4bba --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go @@ -0,0 +1,186 @@
+package credentials
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
+type Credentials struct {
+ ServerURL string
+ Username string
+ Secret string
+}
+
+// isValid checks the integrity of a Credentials object such that no credentials
+// lack a server URL or a username.
+// It returns whether the credentials are valid and, if they are not, the
+// corresponding error: either errCredentialsMissingServerURL or
+// errCredentialsMissingUsername.
+func (c *Credentials) isValid() (bool, error) {
+ if len(c.ServerURL) == 0 {
+ return false, NewErrCredentialsMissingServerURL()
+ }
+
+ if len(c.Username) == 0 {
+ return false, NewErrCredentialsMissingUsername()
+ }
+
+ return true, nil
+}
+
+// CredsLabel holds the way Docker credentials should be labeled in credentials stores that allow labelling.
+// That label makes it possible to filter out non-Docker credentials at lookup/search time in the macOS
+// keychain, the Windows credential manager, and Linux libsecret. The default value is "Docker Credentials".
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel.
+func SetCredsLabel(label string) {
+ CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+ var err error
+ if len(os.Args) != 2 {
+ err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+ }
+
+ if err == nil {
+ err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stdout, "%v\n", err)
+ os.Exit(1)
+ }
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
+ switch key {
+ case "store":
+ return Store(helper, in)
+ case "get":
+ return Get(helper, in, out)
+ case "erase":
+ return Erase(helper, in)
+ case "list":
+ return List(helper, out)
+ case "version":
+ return PrintVersion(out)
+ }
+ return fmt.Errorf("Unknown credential action `%s`", key)
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+ scanner := bufio.NewScanner(reader)
+
+ buffer := new(bytes.Buffer)
+ for scanner.Scan() {
+ buffer.Write(scanner.Bytes())
+ }
+
+ if err := scanner.Err(); err != nil && err != io.EOF {
+ return err
+ }
+
+ var creds Credentials
+ if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+ return err
+ }
+
+ if ok, err := creds.isValid(); !ok {
+ return err
+ }
+
+ return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server URL.
+// The reader must contain the server URL to search. +// The writer is used to write the JSON serialization of the credentials. +func Get(helper Helper, reader io.Reader, writer io.Writer) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + username, secret, err := helper.Get(serverURL) + if err != nil { + return err + } + + resp := Credentials{ + ServerURL: serverURL, + Username: username, + Secret: secret, + } + + buffer.Reset() + if err := json.NewEncoder(buffer).Encode(resp); err != nil { + return err + } + + fmt.Fprint(writer, buffer.String()) + return nil +} + +// Erase removes credentials from the store. +// The reader must contain the server URL to remove. +func Erase(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + return helper.Delete(serverURL) +} + +// List returns all the serverURLs of keys in +// the OS store as a list of strings +func List(helper Helper, writer io.Writer) error { + accts, err := helper.List() + if err != nil { + return err + } + return json.NewEncoder(writer).Encode(accts) +} + +// PrintVersion outputs the current version. +func PrintVersion(writer io.Writer) error { + fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) + return nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go new file mode 100644 index 000000000..fe6a5aef4 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -0,0 +1,102 @@ +package credentials + +const ( + // ErrCredentialsNotFound standardizes the not found error, so every helper returns + // the same message and docker can handle it properly. + errCredentialsNotFoundMessage = "credentials not found in native keychain" + + // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize + // invalid credentials or credentials management operations + errCredentialsMissingServerURLMessage = "no credentials server URL" + errCredentialsMissingUsernameMessage = "no credentials username" +) + +// errCredentialsNotFound represents an error +// raised when credentials are not in the store. +type errCredentialsNotFound struct{} + +// Error returns the standard error message +// for when the credentials are not in the store. +func (errCredentialsNotFound) Error() string { + return errCredentialsNotFoundMessage +} + +// NewErrCredentialsNotFound creates a new error +// for when the credentials are not in the store. +func NewErrCredentialsNotFound() error { + return errCredentialsNotFound{} +} + +// IsErrCredentialsNotFound returns true if the error +// was caused by not having a set of credentials in a store. +func IsErrCredentialsNotFound(err error) bool { + _, ok := err.(errCredentialsNotFound) + return ok +} + +// IsErrCredentialsNotFoundMessage returns true if the error +// was caused by not having a set of credentials in a store. 
+// +// This function helps to check messages returned by an +// external program via its standard output. +func IsErrCredentialsNotFoundMessage(err string) bool { + return err == errCredentialsNotFoundMessage +} + +// errCredentialsMissingServerURL represents an error raised +// when the credentials object has no server URL or when no +// server URL is provided to a credentials operation requiring +// one. +type errCredentialsMissingServerURL struct{} + +func (errCredentialsMissingServerURL) Error() string { + return errCredentialsMissingServerURLMessage +} + +// errCredentialsMissingUsername represents an error raised +// when the credentials object has no username or when no +// username is provided to a credentials operation requiring +// one. +type errCredentialsMissingUsername struct{} + +func (errCredentialsMissingUsername) Error() string { + return errCredentialsMissingUsernameMessage +} + +// NewErrCredentialsMissingServerURL creates a new error for +// errCredentialsMissingServerURL. +func NewErrCredentialsMissingServerURL() error { + return errCredentialsMissingServerURL{} +} + +// NewErrCredentialsMissingUsername creates a new error for +// errCredentialsMissingUsername. +func NewErrCredentialsMissingUsername() error { + return errCredentialsMissingUsername{} +} + +// IsCredentialsMissingServerURL returns true if the error +// was an errCredentialsMissingServerURL. +func IsCredentialsMissingServerURL(err error) bool { + _, ok := err.(errCredentialsMissingServerURL) + return ok +} + +// IsCredentialsMissingServerURLMessage checks for an +// errCredentialsMissingServerURL in the error message. +func IsCredentialsMissingServerURLMessage(err string) bool { + return err == errCredentialsMissingServerURLMessage +} + +// IsCredentialsMissingUsername returns true if the error +// was an errCredentialsMissingUsername. +func IsCredentialsMissingUsername(err error) bool { + _, ok := err.(errCredentialsMissingUsername) + return ok +} + +// IsCredentialsMissingUsernameMessage checks for an +// errCredentialsMissingUsername in the error message. +func IsCredentialsMissingUsernameMessage(err string) bool { + return err == errCredentialsMissingUsernameMessage +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go new file mode 100644 index 000000000..135acd254 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go @@ -0,0 +1,14 @@ +package credentials + +// Helper is the interface a credentials store helper must implement. +type Helper interface { + // Add appends credentials to the store. + Add(*Credentials) error + // Delete removes credentials from the store. + Delete(serverURL string) error + // Get retrieves credentials from the store. + // It returns username and secret as strings. + Get(serverURL string) (string, string, error) + // List returns the stored serverURLs and their associated usernames. 
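+ // The map is keyed by server URL, with the stored username as the value.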
+ List() (map[string]string, error) +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go new file mode 100644 index 000000000..84377c263 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -0,0 +1,16 @@ +package credentials + +var ( + // Name is filled at linking time + Name = "" + + // Package is filled at linking time + Package = "github.com/docker/docker-credential-helpers" + + // Version holds the complete version number. Filled in at linking time. + Version = "v0.0.0+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" +) diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 000000000..b31418192 --- /dev/null +++ b/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,2390 @@ +# File @generated by hack/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See hack/generate-authors.sh to make modifications. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Hnatiw +Aaron Huslage +Aaron L. Xu +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinandan Prativadi +Abhinav Ajgaonkar +Abhishek Chanda +Abhishek Sharma +Abin Shahab +Abirdcfly +Ada Mancini +Adam Avilla +Adam Dobrawy +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Pointer +Adam Singer +Adam Walz +Adam Williams +AdamKorcz +Addam Hardy +Aditi Rajagopal +Aditya +Adnan Khan +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akash Gupta +Akhil Mohan +Akihiro Matsushima +Akihiro Suda +Akim Demaille +Akira Koyasu +Akshay Karle +Akshay Moghe +Al Tobey +alambike +Alan Hoyle +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Albin Kerouanton +Alec Benson +Alejandro González Hevia +Aleksa Sarai +Aleksandr Chebotov +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Goodman +Alex Nordlund +Alex Olshansky +Alex Samorukov +Alex Stockinger +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Midlash +Alexander Morozov +Alexander Polakov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandre Jomin +Alexandru Sfirlogea +Alexei Margasov +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis Ries +Alexis Thomas +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Ameya Gawde +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amr Gawish +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anca Iordache +Anchal Agrawal +Anda Xu +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Denisse Gómez +Andrea Luzzardi +Andrea Turli +Andreas Elvers +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrei Ushakov +Andrei Vagin +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew He +Andrew Hsu +Andrew Kim +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Pennebaker +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Kolomentsev +Andrey Petrov +Andrey Stolbovsky +André Martins +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Lindeman +Andy Rothfusz +Andy Smith +Andy Wilson +Andy Zhang +Anes Hasicic +Angel Velazquez +Anil Belur +Anil Madhavapeddy +Ankit Jain +Ankush Agarwal +Anonmily +Anran Qiao +Anshul Pundir +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anuj Varma +Anusha Ragunathan +Anyu Wang +apocas +Arash Deshmeh +arcosx +ArikaChen +Arko Dasgupta +Arnaud Lefebvre +Arnaud Porterie +Arnaud Rebillout +Artem Khramov +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asad Saeeduddin +Asbjørn Enge +Austin Vazquez +averagehuman +Avi Das +Avi Kivity +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bao Yonglei +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +BartÅ‚omiej Piotrowski +Bastiaan Bakker +Bastien Pascard +bdevloed +Bearice Ren +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Gould +Ben Hall +Ben Langfeld +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Baker +Benjamin Boudreau +Benjamin Böhmke +Benjamin Wang +Benjamin Yolken +Benny Ng +Benoit Chesneau +Bernerd Schaefer +Bernhard M. Wiedemann +Bert Goethals +Bertrand Roussel +Bevisy Zhang +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Billy Ridgway +Bily Zhang +Bin Liu +Bingshen Wang +Bjorn Neergaard +Blake Geno +Boaz Shuster +bobby abbott +Bojun Zhu +Boqin Qin +Boris Pruessmann +Boshi Lian +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brett Milford +Brett Randall +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Schwind +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Brielle Broder +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Sparr +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Catalin Pirvu +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander Govindarajan +Chanhun Jeong +Chao Wang +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chee Hau Lim +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Min +Chen Mingjie +Chen Qiu +Cheng-mean Liu +Chengfei Shang +Chengguang Xu +Chenyang Yan +chenyuzhu +Chetan Birajdar +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dias +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris Kreussling (Flatbush Gardener) +Chris McKinnel +Chris McKinnel +Chris Price +Chris Seto +Chris Snow +Chris St. 
Pierre +Chris Stivers +Chris Swan +Chris Telfer +Chris Wahl +Chris Weyl +Chris White +Christian Becker +Christian Berendt +Christian Brauner +Christian Böhme +Christian Muehlhaeuser +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +Christoph Ziebuhr +Christophe Mehay +Christophe Troestler +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Norman +Chun Chen +Ciro S. Costa +Clayton Coleman +Clint Armstrong +Clinton Kitson +clubby789 +Cody Roseborough +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Panisset +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Conor Evans +Corbin Coleman +Corey Farrell +Cory Forsyth +Cory Snider +cressie176 +Cristian Ariza +Cristian Staretu +cristiano balducci +Cristina Yenyxe Gonzalez Garcia +Cruceru Calin-Cristian +CUI Wei +cuishuang +Cuong Manh Le +Cyprian Gracz +Cyril F +Da McGrady +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Plamadeala +Dan Stine +Dan Williams +Dani Hodovic +Dani Louca +Daniel Antlinger +Daniel Black +Daniel Dao +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel P. Berrangé +Daniel Robinson +Daniel S +Daniel Sweet +Daniel Von Fange +Daniel Watkins +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniele Rondina +Danny Berger +Danny Milosavljevic +Danny Yates +Danyal Khaliq +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Goodchild +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Bellotti +David Calavera +David Chung +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Glasser +David Lawrence +David Lechner +David M. Karr +David Mackey +David Manouchehri +David Mat +David Mcanulty +David McKay +David O'Rourke +David P Hilton +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Wang <00107082@163.com> +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +Debayan De +Deborah Gertrude Digges +deed02392 +Deep Debroy +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +Devon Estes +Devvyn Murphy +Dharmit Shah +Dhawal Yogesh Bhanushali +Dhilip Kumars +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Mandalidis +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +dingwei +Diogo Monica +DiuDiugirl +Djibril Koné +Djordje Lukic +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Sharshakov +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. 
Krivenok +Dmitry Vorobev +Dmytro Iakovliev +docker-unir[bot] +Dolph Mathews +Dominic Tubach +Dominic Yin +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donghwa Kim +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Douglas Curtis +Dr Nic Williams +dragon788 +Dražen LuÄanin +Drew Erny +Drew Hubl +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elango Sivanandam +Elena Morozova +Eli Uriegas +Elias Faxö +Elias Koromilas +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Davtyan +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Eng Zer Jun +Enguerran +Eohyung Lee +epeterso +er0k +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Mountain +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Soderstrom +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erich Cordoba +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Sipsma +Erik St. Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Espen Suenson +Ethan Bell +Ethan Mosbaugh +Euan Harris +Euan Kemp +Eugen Krizo +Eugene Yakubovich +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeniy Makhrov +Evgeny Shmarnev +Evgeny Vereshchagin +Ewa Czechowska +Eystein MÃ¥løy Stenberg +ezbercih +Ezra Silvera +Fabian Kramm +Fabian Lauer +Fabian Raetz +Fabiano Rosas +Fabio Falci +Fabio Kung +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangming Fang +Fangyuan Gao <21551127@zju.edu.cn> +fanjiyun +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felipe Ruhland +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Feng Yan +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Feroz Salam +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +Flavio Castelli +Flavio Crisciani +Florian +Florian Klein +Florian Maier +Florian Noeding +Florian Schmaus +Florian Weingarten +Florin Asavoaie +Florin Patan +fonglh +Foysal Iqbal +Francesc Campoy +Francesco Degrassi +Francesco Mari +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Frank Yang +Fred Lifton +Frederick F. Kautz IV +Frederico F. de Oliveira +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +Frieder Bluemle +frobnicaty <92033765+frobnicaty@users.noreply.github.com> +Frédéric Dalleau +Fu JinLin +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Adrian Samfira +Gabriel Goller +Gabriel L. 
Somlo +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +Gaurav Singh +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoff Levand +Geoffrey Bachelet +Geon Kim +George Kontridze +George MacRorie +George Xie +Georgi Hristozov +Georgy Yakovlev +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim Feiken +Ghislain Bourgeois +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +Giovan Isa Musthofa +gissehel +Giuseppe Mazzotta +Giuseppe Scrivano +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Goldwyn Rodrigues +Gopikannan Venugopalsamy +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Millar +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Stephens +Greg Thornton +Grzegorz JaÅ›kiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +Gunadhya S. <6939749+gunadhya@users.noreply.github.com> +Guoqiang QI +guoxiuyan +Guri +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +Haichao Yang +haikuoliu +haining.cao +Hakan Özler +Hamish Hutchings +Hannes Ljungberg +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harald Niesche +Harley Laue +Harold Cooper +Harrison Turton +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xiaoxi +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hiroshi Hatake +Hiroyuki Sasagawa +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +Hongxu Jia +Honza Pokorny +Hsing-Hui Hsu +Hsing-Yu (David) Chen +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +HuanHuan Ye +Huanzhong Zhang +Huayi Zhang +Hugo Barrera +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hui Kang +Hunter Blanks +huqun +Huu Nguyen +Hyeongkyu Lee +Hyzhou Zhy +Iago López Galeiras +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Chen +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Igor Karpovich +Iliana Weller +Ilkka Laukkanen +Illia Antypenko +Illo Abdulrahim +Ilya Dmitrichenko +Ilya Gusev +Ilya Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Innovimax +Isaac Dupree +Isabel Jimenez +Isaiah Grace +Isao Jonas +Iskander Sharipov +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. Nunn +Jack Danger Canty +Jack Laxson +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jacob Vallejo +Jacob Wen +Jaime Cepeda +Jaivish Kothari +Jake Champlin +Jake Moshenko +Jake Sanders +Jakub Drahos +Jakub Guzik +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Sanders +James Turnbull +James Watkins-Harvey +Jamie Hannaford +Jamshid Afshar +Jan Breig +Jan Chren +Jan Garcia +Jan Götte +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +Jasmine Hegman +Jason A. Donenfeld +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +Javier Bassi +jaxgeller +Jay +Jay Kamat +Jay Lim +Jean Rouge +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeeva S. 
Chelladhurai +Jeff Anderson +Jeff Hajewski +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeff Zvier +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Huntwork +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +Jhon Honce +Ji.Zhilong +Jian Liao +Jian Zhang +Jiang Jinyang +Jianyong Wu +Jie Luo +Jie Ma +Jihyun Hwang +Jilles Oldenbeuving +Jim Alateras +Jim Carroll +Jim Ehrismann +Jim Galasyn +Jim Lin +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +Jinsoo Park +Jintao Zhang +Jiri Appl +Jiri Popelka +Jiuyue Ma +Jiří Župka +Joakim Roubert +Joao Fernandes +Joao Trindade +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Surrell +Jon Wedaman +Jonas Dohse +Jonas Heinrich +Jonas Pfenniger +Jonathan A. Schweder +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Choy +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan Arentsen +Jordan Jennings +Jordan Sissel +Jordi Massaguer Pla +Jorge Marin +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Bonczkowski +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Soref +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +Joyce Jang +JP +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julien Pivotto +Julio Guerra +Julio Montes +Jun Du +Jun-Ru Chang +junxu +Jussi Nummelin +Justas Brazauskas +Justen Martin +Justin Cormack +Justin Force +Justin Keller <85903732+jk-vb@users.noreply.github.com> +Justin Menga +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérémy Leherpeur +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu (Kennan) +Kaijie Chen +Kamil DomaÅ„ski +Kamjar Gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Karanth +Karthik Nayak +Kasper Fabæch Brandt +Kate Heddleston +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Kazuhiro Sera +Kazuyoshi Kato +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Ken Reese +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kenta Tada +Kevin "qwazerty" Houdebert +Kevin Alvarez +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin Meredith +Kevin P. 
Kucharczyk +Kevin Parsons +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +Kirk Easterson +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konrad Ponichtera +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Kostadin Plachkov +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +Krystian Wojcicki +Kunal Kushwaha +Kunal Tyagi +Kyle Conroy +Kyle Linden +Kyle Squizzato +Kyle Wuolle +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Brehm +Laura Frank +Laurent Bernaille +Laurent Erignoux +Laurie Voss +Leandro Motta Barros +Leandro Siqueira +Lee Calcote +Lee Chao <932819864@qq.com> +Lee, Meng-Han +Lei Gong +Lei Jitang +Leiiwang +Len Weincier +Lennie +Leo Gallucci +Leonardo Nodari +Leonardo Taccari +Leszek Kowalski +Levi Blackstone +Levi Gross +Levi Harrison +Lewis Daly +Lewis Marshall +Lewis Peckover +Li Yi +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liangwei +Liao Qingwei +Lifubang +Lihua Tang +Lily Guo +limeidan +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Lotus Fenn +Louis Delossantos +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Lucas Molas +Lucas Silvestre +Luciano Mores +Luis Henrique Mulinari +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Heeren +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Rüger +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcel Edmund Franke +Marcelo Horacio Fortino +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Martins +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark Feit +Mark Jeromin +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Oates +Mark Parker +Mark Vainomaa +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Markus Kortlang +Martijn Dwars +Martijn van Oosterhout +Martin Braun +Martin Dojcak +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Muzatko +Martin Redmond +Maru Newby +Mary Anthony +Masahito Zembutsu +Masato Ohba +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Champlon +Mathieu Le Marec - Pasquet +Mathieu Parent +Mathieu Paturel +Matt Apperson +Matt Bachmann +Matt Bajor +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Morrison <3maven@gmail.com> +Matt Richardson +Matt Rickard +Matt Robenolt +Matt Schurenko +Matt Williams +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mosesohn +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Fronton +Matthieu Hauglustaine +Mattias Jernberg +Mauricio Garavaglia +mauriyouth +Max Harmathy +Max Shytikov +Max Timchenko +Maxim Fedchyshyn 
+Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Maximiliano Maccanti +Maxwell +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Menghui Chen +Mert YazıcıoÄŸlu +mgniu +Micah Zoltu +Michael A. Smith +Michael Beskin +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Kuehn +Michael Käufl +Michael Neale +Michael Nussbaum +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael Weidmann +Michael West +Michael Zhao +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Kostrzewa +Michal Minář +Michal Rostecki +Michal Wieczorek +Michaël Pailloncy +MichaÅ‚ Czeraszkiewicz +MichaÅ‚ Gryko +MichaÅ‚ Kosek +Michiel de Jong +Mickaël Fortunato +Mickaël Remars +Miguel Angel Fernández +Miguel Morales +Miguel Perez +Mihai Borobocea +Mihuleacc Sergiu +Mikael Davranche +Mike Brown +Mike Bush +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Estes +Mike Gaffney +Mike Goelzer +Mike Leone +Mike Lundy +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miklos Szegedi +Milas Bowman +Milind Chawre +Miloslav TrmaÄ +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +Mizuki Urushida +mlarcher +Mohammad Banikazemi +Mohammad Nasirifar +Mohammed Aaqib Ansari +Mohit Soni +Moorthy RS +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mrfly +Mrunal Patel +Muayyad Alsadi +Muhammad Zohaib Aslam +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nace Oroz +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Natasha Jarus +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Carlson +Nathan Herald +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Naveed Jamil +Neal McBurnett +Neil Horman +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Adcock +Nick DeCoursin +Nick Irvine +Nick Neisen +Nick Parker +Nick Payne +Nick Russo +Nick Santos +Nick Stenning +Nick Stinemates +Nick Wood +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolas Sterchele +Nicolas V Castet +Nicolás Hock Isaza +Niel Drummond +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Edigaryev +Nikolay Milovanov +ningmingxiao +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Meyerhans +Noah Treuhaft +NobodyOnSE +noducks +Nolan Darilek +Noriki Nakamura +nponeccop +Nurahmadie +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +Odin Ugedal +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Oliver Reason +Olivier Gambier +Olle Jonsson +Olli Janatuinen +Olly Pomeroy +Omri Shiv +Onur Filiz +Oriol Francès +Oscar Bonilla <6f6231@gmail.com> +oscar.chen <2972789494@qq.com> +Oskar Niburski +Otto Kekäläinen +Ouyang Liduo +Ovidio Mallo +Panagiotis Moustafellos +Paolo G. 
Giarrusso +Pascal +Pascal Bach +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Haas +Patrick Hemmer +Patrick Stapleton +Patrik Cyvoct +pattichen +Paul "TBBle" Hampson +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Gomes +Paulo Ribeiro +Pavel Lobashov +Pavel MatÄ›ja +Pavel Pletenev +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Pawel Konczalski +PaweÅ‚ Gronowski +Peeyush Gupta +Peggy Li +Pei Su +Peng Tao +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Pete Woods +Peter Bourgon +Peter Braden +Peter Bücker +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Kang +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Å vihlík +Petros Angelatos +Phil +Phil Estes +Phil Sphicas +Phil Spitler +Philip Alexander Etling +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +Piotr Karbowski +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Pradip Dhara +Pradipta Kr. Banerjee +Prasanna Gautam +Pratik Karki +Prayag Verma +Priya Wadhwa +Projjol Banerji +Przemek Hejman +Puneet Pruthi +Pure White +pysqz +Qiang Huang +Qin TianHuan +Qinglan Peng +Quan Tian +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Radostin Stoyanov +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Raja Sami +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +RaviTeja Pothana +Ray Tsang +ReadmeCritic +realityone +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +Renaud Gaubert +Rhys Hiltner +Ri Xu +Ricardo N Feliciano +Rich Horwood +Rich Moyse +Rich Seymour +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Cowsill <42620235+rcowsill@users.noreply.github.com> +Rob Gulewich +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Schneider +Robert Shade +Robert Stern +Robert Terhaar +Robert Wallis +Robert Wang +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +Robin Thoni +robpc +Rodolfo Carvalho +Rodrigo Campos +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rohit Kapur +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Dudin +Roman Mazur +Roman Strashkin +Roman Volosatovs +Roman Zabaluev +Ron Smits +Ron Williams +Rong Gao +Rong Zhang +Rongxiang Song +Rony Weng +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Roy Reznik +Royce Remer +Rozhnov Alexandr +Rudolph Gottesheim +Rui Cao +Rui Lopes +Ruilin Li +Runshen Zhu +Russ Magee +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Barry +Ryan Belgrave +Ryan Campbell +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Shea +Ryan Simmen +Ryan Stelly +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Ryo Nakao +Ryoga Saito +Rémy Greinhofer +s. 
rannou +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sakeven Jiang +Salahuddin Khan +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sam Whited +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +sanchayanghosh +Sandeep Bansal +Sankar சஙà¯à®•à®°à¯ +Sanket Saurav +Santhosh Manohar +sapphiredev +Sargun Dhillon +Sascha Andres +Sascha Grunert +SataQiu +Satnam Singh +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Percival +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Sebastian Höffner +Sebastian Radloff +Sebastien Goasguen +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Sergio Lopez +Serhat Gülçiçek +SeungUkLee +Sevki Hasirci +Shane Canon +Shane da Silva +Shaun Kaasten +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayan Pooya +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shengjing Zhu +Shev Yan +Shih-Yuan Lee +Shihao Xia +Shijiang Wei +Shijun Qin +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +Shu-Wai Chow +shuai-z +Shukui Yang +Sian Lerk Lau +Siarhei Rasiukevich +Sidhartha Mani +sidharthamani +Silas Sewell +Silvan Jegen +Simão Reis +Simon Barendse +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Menke +Simon Taranto +Simon Vikstrom +Sindhu S +Sjoerd Langkemper +skanehira +Smark Meng +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Sotiris Salloumis +Soulou +Spencer Brown +Spencer Smith +Spike Curtis +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Staf Wagemakers +Stanislav Bondarenko +Stanislav Levin +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Steffen Butzer +Stephan Spindler +Stephen Benjamin +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +SteÌphane Este-Gracias +Stig Larsson +Su Wang +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sune Keller +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien HOUZÉ +Sébastien Luttringer +Sébastien Stormacq +Sören Tempel +Tabakhase +Tadej Janež +Takuto Sato +tang0th +Tangi Colin +Tatsuki Sugiura +Tatsushi Inagaki +Taylan Isikdemir +Taylor Jones +Ted M. 
Young +Tehmasp Chaudhri +Tejaswini Duggaraju +Tejesh Mehta +Terry Chu +terryding77 <550147740@qq.com> +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thiago Alves Silva +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Graf +Thomas Grainger +Thomas Hansen +Thomas Ledos +Thomas Leonard +Thomas Léveil +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Ti Zhou +Tiago Seabra +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Till Claassen +Till Wegmüller +Tim +Tim Bart +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Potter +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wagner +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timchenxiaoyu <837829664@qq.com> +timfeirg +Timo Rothenpieler +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Pfandzelter +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Parker +Tom Sweeney +Tom Wilkie +Tom X. Tobin +Tom Zhao +Tomas Janousek +Tomas Kral +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tomek MaÅ„ko +Tommaso Visconti +Tomoya Tabuchi +Tomáš HrÄka +tonic +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Toshiaki Makita +Tõnis Tiigi +Trace Andreason +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +Trishna Guha +Tristan Carel +Troy Denton +Tudor Brindus +Ty Alexander +Tycho Andersen +Tyler Brock +Tyler Brown +Tzu-Jung Lee +uhayate +Ulysse Carion +Umesh Yadav +Utz Bacher +vagrant +Vaidas Jablonskis +Valentin Kulesh +vanderliang +Velko Ivanov +Veres Lajos +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Vikas Choudhary +Vikram bir Singh +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Boulineau +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vladislav Kolesnikov +Vlastimil Zeman +Vojtech Vitek (V-Teq) +Walter Leibbrandt +Walter Stanish +Wang Chao +Wang Guoliang +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +wanghuaiqing +Ward Vandewege +WarheadsSE +Wassim Dhif +Wataru Ishida +Wayne Chang +Wayne Song +Weerasak Chongnguluam +Wei Fu +Wei Wu +Wei-Ting Kuo +weipeng +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenjun Tang +Wenkai Yin +wenlxie +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Wiktor Kwapisiewicz +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +Wilson Júnior +Wing-Kam Wong +WiseTrem +Wolfgang Nagele +Wolfgang Powisch +Wonjun Kim +WuLonghui +xamyzhao +Xia Wu +Xian Chaobo +Xianglin Gao +Xianjie +Xianlu Bird +Xiao YongBiao +Xiao Zhang +XiaoBing Jiang +Xiaodong Liu +Xiaodong Zhang +Xiaohua Ding +Xiaoxi He +Xiaoxu Chen +Xiaoyu Zhang +xichengliudui <1693291525@qq.com> +xiekeyang +Ximo Guanter Gonzálbez +xin.li +Xinbo Weng +Xinfeng Liu +Xinzi Zhou +Xiuming Chen +Xuecong Liao +xuzhaokui +Yadnyawalkya Tale +Yahya +yalpul +YAMADA Tsuyoshi +Yamasaki Masahide +Yamazaki Masashi +Yan Feng +Yan Zhu +Yang Bai +Yang Li +Yang Pengfei +yangchenliang +Yann Autissier +Yanqiang Miao +Yao Zaiyong +Yash Murty +Yassine Tijani +Yasunori Mahata +Yazhong Liu +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongxin Li +Yongzhi Pan +Yosef Fertel +You-Sheng Yang (楊有å‹) +youcai +Youcef YEKHLEF +Youfu Zhang +Yu Changchun +Yu Chengxia +Yu Peng +Yu-Ju Hong +Yuan Sun +Yuanhong Peng +Yue Zhang +Yufei Xiong +Yuhao Fang +Yuichiro Kaneko +YujiOshima +Yunxiang Huang +Yurii Rashkovskii +Yusuf Tarık Günaydın +Yves Blusseau <90z7oey02@sneakemail.com> +Yves Junqueira +Zac Dover +Zach Borboa +Zach Gershman +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenhai Gao +Zhenkun Bi +ZhiPeng Lu +zhipengzuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Ziheng Liu +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +Zou Yu +zqh +Zuhayr Elahi +Zunayed Ali +Ãlvaro Lázaro +Ãtila Camurça Alves +å°¹å‰å³° +å±ˆéª +å¾ä¿Šæ° +慕陶 +æ通 +黄艳红00139573 +ì •ìž¬ì˜ diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 000000000..6d8d58fb6 --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 000000000..58b19b6d1 --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. 
+It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 000000000..9ee329a2f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,7 @@ +package types // import "github.com/docker/docker/api/types" +import "github.com/docker/docker/api/types/registry" + +// AuthConfig contains authorization information for connecting to a Registry. +// +// Deprecated: use github.com/docker/docker/api/types/registry.AuthConfig +type AuthConfig = registry.AuthConfig diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 000000000..bf3463b90 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 000000000..d8cd30613 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,444 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string `json:"ID"` + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. 
+type ContainerListOptions struct { + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// NewHijackedResponse intializes a HijackedResponse type +func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { + return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + mediaType string + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. +// returns false if HTTP Content-Type is not relevant, and container must be inspected +func (h *HijackedResponse) MediaType() (string, bool) { + if h.mediaType == "" { + return "", false + } + return h.mediaType, true +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]registry.AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. 
+ CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string + // Version specifies the version of the unerlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// ImageListOptions holds parameters to list images with. +type ImageListOptions struct { + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. + Filters filters.Args + + // SharedSize indicates whether the shared size of images should be computed. + SharedSize bool + + // ContainerCount indicates whether container count should be computed. + ContainerCount bool +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + Platform string +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. 
+type RequestPrivilegeFunc func() (string, error) + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args + + // Status indicates whether the server should include the service task + // count of running and desired tasks. 
+ Status bool +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 000000000..7d5930bbe --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,67 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *ocispec.Platform + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. 
+type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go b/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go new file mode 100644 index 000000000..6b4b47390 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go @@ -0,0 +1,6 @@ +package container + +// ContainerChangeResponseItem change item in response to ContainerChanges operation +// +// Deprecated: use [FilesystemChange]. +type ContainerChangeResponseItem = FilesystemChange diff --git a/vendor/github.com/docker/docker/api/types/container/change_type.go b/vendor/github.com/docker/docker/api/types/container/change_type.go new file mode 100644 index 000000000..fe8d6d369 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/change_type.go @@ -0,0 +1,15 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ChangeType Kind of change +// +// Can be one of: +// +// - `0`: Modified ("C") +// - `1`: Added ("A") +// - `2`: Deleted ("D") +// +// swagger:model ChangeType +type ChangeType uint8 diff --git a/vendor/github.com/docker/docker/api/types/container/change_types.go b/vendor/github.com/docker/docker/api/types/container/change_types.go new file mode 100644 index 000000000..3a3a83866 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/change_types.go @@ -0,0 +1,23 @@ +package container + +const ( + // ChangeModify represents the modify operation. + ChangeModify ChangeType = 0 + // ChangeAdd represents the add operation. + ChangeAdd ChangeType = 1 + // ChangeDelete represents the delete operation. + ChangeDelete ChangeType = 2 +) + +func (ct ChangeType) String() string { + switch ct { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + default: + return "" + } +} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 000000000..077583e66 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,96 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "io" + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// StopOptions holds the options to stop or restart a container. +type StopOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. 
If not value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// ExecStartOptions holds the options to start container's exec. +type ExecStartOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + ConsoleSize *[2]uint `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). + Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic)
+	Volumes         map[string]struct{} // List of volumes (mounts) used for the container
+	WorkingDir      string              // Current directory (PWD) in which the command will be launched
+	Entrypoint      strslice.StrSlice   // Entrypoint to run when starting the container
+	NetworkDisabled bool                `json:",omitempty"` // Is network disabled
+	MacAddress      string              `json:",omitempty"` // MAC address of the container
+	OnBuild         []string            // ONBUILD metadata that was defined in the image's Dockerfile
+	Labels          map[string]string   // List of labels set on this container
+	StopSignal      string              `json:",omitempty"` // Signal to stop a container
+	StopTimeout     *int                `json:",omitempty"` // Timeout (in seconds) to stop a container
+	Shell           strslice.StrSlice   `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
new file mode 100644
index 000000000..63381da36
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_top.go
@@ -0,0 +1,22 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerTopOKBody OK response to ContainerTop operation
+// swagger:model ContainerTopOKBody
+type ContainerTopOKBody struct {
+
+	// Each process running in the container, where each process
+	// is an array of values corresponding to the titles.
+	//
+	// Required: true
+	Processes [][]string `json:"Processes"`
+
+	// The ps column titles
+	// Required: true
+	Titles []string `json:"Titles"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
new file mode 100644
index 000000000..c10f175ea
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_update.go
@@ -0,0 +1,16 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerUpdateOKBody OK response to ContainerUpdate operation
+// swagger:model ContainerUpdateOKBody
+type ContainerUpdateOKBody struct {
+
+	// warnings
+	// Required: true
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go
new file mode 100644
index 000000000..aa0e7f7d0
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/create_response.go
@@ -0,0 +1,19 @@
+package container
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// OK response to ContainerCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go new file mode 100644 index 000000000..9e9c2ad1d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go @@ -0,0 +1,19 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// FilesystemChange Change in the container's filesystem. +// +// swagger:model FilesystemChange +type FilesystemChange struct { + + // kind + // Required: true + Kind ChangeType `json:"Kind"` + + // Path to file or directory that has changed. + // + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go new file mode 100644 index 000000000..d4e6f5537 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -0,0 +1,456 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" +) + +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// cgroup namespace modes for containers +const ( + CgroupnsModeEmpty CgroupnsMode = "" + CgroupnsModePrivate CgroupnsMode = "private" + CgroupnsModeHost CgroupnsMode = "host" +) + +// IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == CgroupnsModePrivate +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == CgroupnsModeHost +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == CgroupnsModeEmpty +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// Isolation modes for containers +const ( + IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) + IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon + IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode + IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode +) + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. 
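
Taken together, the ChangeType constants and the FilesystemChange model above describe the `docker diff` wire format, where each changed path carries a one-letter kind code. A minimal, illustrative consumer sketch (the paths are made up; only the vendored types above are assumed):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Each filesystem change pairs a path with a kind; ChangeType's
	// String method renders the one-letter code ("C", "A" or "D").
	changes := []container.FilesystemChange{
		{Kind: container.ChangeAdd, Path: "/etc/app.conf"},       // hypothetical path
		{Kind: container.ChangeModify, Path: "/var/log/app.log"}, // hypothetical path
		{Kind: container.ChangeDelete, Path: "/tmp/scratch"},     // hypothetical path
	}
	for _, c := range changes {
		fmt.Printf("%s %s\n", c.Kind, c.Path) // e.g. "A /etc/app.conf"
	}
}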
+func (i Isolation) IsDefault() bool { + // TODO consider making isolation-mode strict (case-sensitive) + v := Isolation(strings.ToLower(string(i))) + return v == IsolationDefault || v == IsolationEmpty +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationHyperV +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationProcess +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IpcMode constants +const ( + IPCModeNone IpcMode = "none" + IPCModeHost IpcMode = "host" + IPCModeContainer IpcMode = "container" + IPCModePrivate IpcMode = "private" + IPCModeShareable IpcMode = "shareable" +) + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +func (n IpcMode) IsPrivate() bool { + return n == IPCModePrivate +} + +// IsHost indicates whether the container shares the host's ipc namespace. +func (n IpcMode) IsHost() bool { + return n == IPCModeHost +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == IPCModeShareable +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == IPCModeNone +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. 
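
The IpcMode and NetworkMode helpers above share the "container:<name-or-id>" convention, parsed by the unexported containerID helper later in this file. An illustrative sketch (the container name "db" is made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Joining another container's IPC namespace.
	ipc := container.IpcMode("container:db")
	fmt.Println(ipc.Valid(), ipc.IsContainer(), ipc.Container()) // true true db

	// Joining another container's network stack.
	net := container.NetworkMode("container:db")
	fmt.Println(net.IsContainer(), net.ConnectedContainer()) // true db
}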
+type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses the a private userns. +func (n UsernsMode) IsPrivate() bool { + return !n.IsHost() +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + return n == "" || n.IsHost() +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + _, ok := containerID(string(c)) + return ok +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. + return c == "" || c.IsContainer() +} + +// Container returns the ID or name of the container whose cgroup will be used. +func (c CgroupSpec) Container() (idOrName string) { + idOrName, _ = containerID(string(c)) + return idOrName +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !n.IsHost() +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + return n == "" || n.IsHost() +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + return n == "" || n.IsHost() || validContainer(string(n)) +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. 
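
RestartPolicy, defined above, pairs a policy name with a retry budget that is only meaningful for "on-failure"; the predicate methods that follow make those checks explicit. An illustrative sketch (in this vendored version, Name is a plain string):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Retry at most five times, and only after non-zero exits.
	rp := container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 5}
	fmt.Println(rp.IsOnFailure()) // true
	fmt.Println(rp.IsNone())      // false
	fmt.Println(rp.IsAlways())    // false
}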
+func (rp *RestartPolicy) IsNone() bool {
+	return rp.Name == "no" || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+	return rp.Name == "always"
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart when exiting with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+	return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has put it into a stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+	return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same.
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+	return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// LogMode is a type to define the available modes for logging.
+// These modes affect how logs are handled when log messages start piling up.
+type LogMode string
+
+// Available logging modes
+const (
+	LogModeUnset    LogMode = ""
+	LogModeBlocking LogMode = "blocking"
+	LogModeNonBlock LogMode = "non-blocking"
+)
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+	Type   string
+	Config map[string]string
+}
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+	// Applicable to all platforms
+	CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+	Memory    int64 // Memory limit (in bytes)
+	NanoCPUs  int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
+
+	// Applicable to UNIX platforms
+	CgroupParent         string // Parent cgroup.
+	BlkioWeight          uint16 // Block IO weight (relative weight vs. other containers)
+	BlkioWeightDevice    []*blkiodev.WeightDevice
+	BlkioDeviceReadBps   []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteBps  []*blkiodev.ThrottleDevice
+	BlkioDeviceReadIOps  []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+	CPUPeriod            int64           `json:"CpuPeriod"`          // CPU CFS (Completely Fair Scheduler) period
+	CPUQuota             int64           `json:"CpuQuota"`           // CPU CFS (Completely Fair Scheduler) quota
+	CPURealtimePeriod    int64           `json:"CpuRealtimePeriod"`  // CPU real-time period
+	CPURealtimeRuntime   int64           `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+	CpusetCpus           string          // CpusetCpus 0-2, 0,1
+	CpusetMems           string          // CpusetMems 0-2, 0,1
+	Devices              []DeviceMapping // List of devices to map inside the container
+	DeviceCgroupRules    []string        // List of rules to be added to the device cgroup
+	DeviceRequests       []DeviceRequest // List of device requests for device drivers
+
+	// KernelMemory specifies the kernel memory limit (in bytes) for the container.
+	// Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes.
+ KernelMemory int64 `json:",omitempty"` + KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". +// Portable information *should* appear in Config. +type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + ConsoleSize [2]uint // Initial console size (height,width) + Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. 
+ StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} + +// containerID splits "container:" values. It returns the container +// ID or name, and whether an ID/name was found. It returns an empty string and +// a "false" if the value does not have a "container:" prefix. Further validation +// of the returned, including checking if the value is empty, should be handled +// by the caller. +func containerID(val string) (idOrName string, ok bool) { + k, v, hasSep := strings.Cut(val, ":") + if !hasSep || k != "container" { + return "", false + } + return v, true +} + +// validContainer checks if the given value is a "container:" mode with +// a non-empty name/ID. +func validContainer(val string) bool { + id, ok := containerID(val) + return ok && id != "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go new file mode 100644 index 000000000..24c4fa8d9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -0,0 +1,42 @@ +//go:build !windows +// +build !windows + +package container // import "github.com/docker/docker/api/types/container" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. 
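
On non-Windows builds, the NetworkName method above resolves a NetworkMode to one of the built-in stacks, or falls back to the user-defined network name. An illustrative sketch (the names "db" and "my-net" are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	modes := []container.NetworkMode{"bridge", "host", "none", "container:db", "my-net"}
	for _, m := range modes {
		// NetworkName checks the built-in modes first, and only then
		// treats the value as a user-defined network name.
		fmt.Printf("%-12s -> %s\n", m, m.NetworkName())
	}
}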
+func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go new file mode 100644 index 000000000..99f803a5b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -0,0 +1,40 @@ +package container // import "github.com/docker/docker/api/types/container" + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. +// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go new file mode 100644 index 000000000..ab56d4eed --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go @@ -0,0 +1,12 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitExitError container waiting error, if any +// swagger:model WaitExitError +type WaitExitError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go new file mode 100644 index 000000000..84fc6afdd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_response.go @@ -0,0 +1,18 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// OK response to ContainerWait operation +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 000000000..cd8311f99 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 000000000..dc942d9d9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go new file mode 100644 index 000000000..f84f034cd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go @@ -0,0 +1,6 @@ +package types + +// Error returns the error message +func (e ErrorResponse) Error() string { + return e.Message +} diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go new file mode 100644 index 000000000..f52f69440 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/errors.go @@ -0,0 +1,37 @@ +package filters + +import "fmt" + +// invalidFilter indicates that the provided filter or its value is invalid +type invalidFilter struct { + Filter string + Value []string +} + +func (e invalidFilter) Error() string { + msg := "invalid filter" + if e.Filter != "" { + msg += " '" + e.Filter + if e.Value != nil { + msg = fmt.Sprintf("%s=%s", msg, e.Value) + } + msg += "'" + } + return msg +} + +// InvalidParameter marks this error as ErrInvalidParameter +func (e invalidFilter) InvalidParameter() {} + +// unreachableCode is an error indicating that the code path was not expected to be reached. 
+type unreachableCode struct { + Filter string + Value []string +} + +// System marks this error as ErrSystem +func (e unreachableCode) System() {} + +func (e unreachableCode) Error() string { + return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value) +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go new file mode 100644 index 000000000..0c39ab5f1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -0,0 +1,346 @@ +/* +Package filters provides tools for encoding a mapping of keys to a set of +multiple values. +*/ +package filters // import "github.com/docker/docker/api/types/filters" + +import ( + "encoding/json" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores a mapping of keys to a set of multiple values. +type Args struct { + fields map[string]map[string]bool +} + +// KeyValuePair are used to initialize a new Args +type KeyValuePair struct { + Key string + Value string +} + +// Arg creates a new KeyValuePair for initializing Args +func Arg(key, value string) KeyValuePair { + return KeyValuePair{Key: key, Value: value} +} + +// NewArgs returns a new Args populated with the initial args +func NewArgs(initialArgs ...KeyValuePair) Args { + args := Args{fields: map[string]map[string]bool{}} + for _, arg := range initialArgs { + args.Add(arg.Key, arg.Value) + } + return args +} + +// Keys returns all the keys in list of Args +func (args Args) Keys() []string { + keys := make([]string, 0, len(args.fields)) + for k := range args.fields { + keys = append(keys, k) + } + return keys +} + +// MarshalJSON returns a JSON byte representation of the Args +func (args Args) MarshalJSON() ([]byte, error) { + if len(args.fields) == 0 { + return []byte("{}"), nil + } + return json.Marshal(args.fields) +} + +// ToJSON returns the Args as a JSON encoded string +func ToJSON(a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + buf, err := json.Marshal(a) + return string(buf), err +} + +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. 
+// +// Deprecated: do not use in any new code; use ToJSON instead +func ToParamWithVersion(version string, a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + + if version != "" && versions.LessThan(version, "1.22") { + buf, err := json.Marshal(convertArgsToSlice(a.fields)) + return string(buf), err + } + + return ToJSON(a) +} + +// FromJSON decodes a JSON encoded string into Args +func FromJSON(p string) (Args, error) { + args := NewArgs() + + if p == "" { + return args, nil + } + + raw := []byte(p) + err := json.Unmarshal(raw, &args) + if err == nil { + return args, nil + } + + // Fallback to parsing arguments in the legacy slice format + deprecated := map[string][]string{} + if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { + return args, &invalidFilter{} + } + + args.fields = deprecatedArgs(deprecated) + return args, nil +} + +// UnmarshalJSON populates the Args from JSON encode bytes +func (args Args) UnmarshalJSON(raw []byte) error { + return json.Unmarshal(raw, &args.fields) +} + +// Get returns the list of values associated with the key +func (args Args) Get(key string) []string { + values := args.fields[key] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add a new value to the set of values +func (args Args) Add(key, value string) { + if _, ok := args.fields[key]; ok { + args.fields[key][value] = true + } else { + args.fields[key] = map[string]bool{value: true} + } +} + +// Del removes a value from the set +func (args Args) Del(key, value string) { + if _, ok := args.fields[key]; ok { + delete(args.fields[key], value) + if len(args.fields[key]) == 0 { + delete(args.fields, key) + } + } +} + +// Len returns the number of keys in the mapping +func (args Args) Len() int { + return len(args.fields) +} + +// MatchKVList returns true if all the pairs in sources exist as key=value +// pairs in the mapping at key, or if there are no values at key. +func (args Args) MatchKVList(key string, sources map[string]string) bool { + fieldValues := args.fields[key] + + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for value := range fieldValues { + testK, testV, hasValue := strings.Cut(value, "=") + + v, ok := sources[testK] + if !ok { + return false + } + if hasValue && testV != v { + return false + } + } + + return true +} + +// Match returns true if any of the values at key match the source string +func (args Args) Match(field, source string) bool { + if args.ExactMatch(field, source) { + return true + } + + fieldValues := args.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// GetBoolOrDefault returns a boolean value of the key if the key is present +// and is intepretable as a boolean value. Otherwise the default value is returned. +// Error is not nil only if the filter values are not valid boolean or are conflicting. 
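
The Args helpers above are how Docker clients assemble `--filter` arguments before serializing them onto API query strings. An illustrative round-trip sketch (the filter values are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Equivalent of `docker ps --filter label=env=prod --filter status=running`.
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("status", "running"),
	)

	// The wire format maps each key to a set of values.
	js, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(js) // {"label":{"env=prod":true},"status":{"running":true}}

	// MatchKVList checks the key=value pairs stored under a key
	// against a source map, here a container's labels.
	labels := map[string]string{"env": "prod", "team": "core"}
	fmt.Println(args.MatchKVList("label", labels)) // true
}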
+func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {
+	fieldValues, ok := args.fields[key]
+
+	if !ok {
+		return defaultValue, nil
+	}
+
+	if len(fieldValues) == 0 {
+		return defaultValue, &invalidFilter{key, nil}
+	}
+
+	isFalse := fieldValues["0"] || fieldValues["false"]
+	isTrue := fieldValues["1"] || fieldValues["true"]
+
+	conflicting := isFalse && isTrue
+	invalid := !isFalse && !isTrue
+
+	if conflicting || invalid {
+		return defaultValue, &invalidFilter{key, args.Get(key)}
+	} else if isFalse {
+		return false, nil
+	} else if isTrue {
+		return true, nil
+	}
+
+	// This code shouldn't be reached.
+	return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)}
+}
+
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+	fieldValues, ok := args.fields[key]
+	// do not filter if there is no filter set or cannot determine filter
+	if !ok || len(fieldValues) == 0 {
+		return true
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+	fieldValues := args.fields[key]
+	// do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+	if len(args.fields[key]) != 1 {
+		return false
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
+func (args Args) FuzzyMatch(key, source string) bool {
+	if args.ExactMatch(key, source) {
+		return true
+	}
+
+	fieldValues := args.fields[key]
+	for prefix := range fieldValues {
+		if strings.HasPrefix(source, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+	_, ok := args.fields[field]
+	return ok
+}
+
+// Validate compares the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+	for name := range args.fields {
+		if !accepted[name] {
+			return &invalidFilter{name, nil}
+		}
+	}
+	return nil
+}
+
+// WalkValues iterates over the list of values for a key in the mapping and calls
+// op() for each value. If op returns an error the iteration stops and the
+// error is returned.
+func (args Args) WalkValues(field string, op func(value string) error) error {
+	if _, ok := args.fields[field]; !ok {
+		return nil
+	}
+	for v := range args.fields[field] {
+		if err := op(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
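
GetBoolOrDefault, defined above, distinguishes a missing key (default value, no error) from empty, non-boolean, or conflicting values (default value plus an error). An illustrative sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("dangling", "true"))

	// Present, unambiguous value: returned as a bool with no error.
	v, err := args.GetBoolOrDefault("dangling", false)
	fmt.Println(v, err) // true <nil>

	// Missing key: the default is returned, still with no error.
	v, err = args.GetBoolOrDefault("readonly", true)
	fmt.Println(v, err) // true <nil>

	// Conflicting values for the same key: the default plus an error.
	args.Add("dangling", "false")
	_, err = args.GetBoolOrDefault("dangling", false)
	fmt.Println(err != nil) // true
}

+// Clone returns a copy of args.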
+func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go new file mode 100644 index 000000000..ce3deb331 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about the storage driver used to store the container's and +// image's filesystem. +// +// swagger:model GraphDriverData +type GraphDriverData struct { + + // Low-level storage metadata, provided as key/value pairs. + // + // This information is driver-specific, and depends on the storage-driver + // in use, and should be used for informational purposes only. + // + // Required: true + Data map[string]string `json:"Data"` + + // Name of the storage driver. + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 000000000..7592d2f8b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 000000000..b9a65a0d8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ImageDeleteResponseItem image delete response item
+// swagger:model ImageDeleteResponseItem
+type ImageDeleteResponseItem struct {

+	// The image ID of an image that was deleted
+	Deleted string `json:"Deleted,omitempty"`
+
+	// The image ID of an image that was untagged
+	Untagged string `json:"Untagged,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go
new file mode 100644
index 000000000..0f6f14484
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/image_summary.go
@@ -0,0 +1,94 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ImageSummary image summary
+// swagger:model ImageSummary
+type ImageSummary struct {
+
+	// Number of containers using this image. Includes both stopped and running
+	// containers.
+	//
+	// This value is not calculated by default, and depends on which API endpoint
+	// is used. `-1` indicates that the value has not been set / calculated.
+	//
+	// Required: true
+	Containers int64 `json:"Containers"`
+
+	// Date and time at which the image was created as a Unix timestamp
+	// (number of seconds since EPOCH).
+	//
+	// Required: true
+	Created int64 `json:"Created"`
+
+	// ID is the content-addressable ID of an image.
+	//
+	// This identifier is a content-addressable digest calculated from the
+	// image's configuration (which includes the digests of layers used by
+	// the image).
+	//
+	// Note that this digest differs from the `RepoDigests` below, which
+	// holds digests of image manifests that reference the image.
+	//
+	// Required: true
+	ID string `json:"Id"`
+
+	// User-defined key/value metadata.
+	// Required: true
+	Labels map[string]string `json:"Labels"`
+
+	// ID of the parent image.
+	//
+	// Depending on how the image was created, this field may be empty and
+	// is only set for images that were built/created locally. This field
+	// is empty if the image was pulled from an image registry.
+	//
+	// Required: true
+	ParentID string `json:"ParentId"`
+
+	// List of content-addressable digests of locally available image manifests
+	// that the image is referenced from. Multiple manifests can refer to the
+	// same image.
+	//
+	// These digests are usually only available if the image was either pulled
+	// from a registry, or if the image was pushed to a registry, which is when
+	// the manifest is generated and its digest calculated.
+	//
+	// Required: true
+	RepoDigests []string `json:"RepoDigests"`
+
+	// List of image names/tags in the local image cache that reference this
+	// image.
+	//
+	// Multiple image tags can refer to the same image, and this list may be
+	// empty if no tags reference the image, in which case the image is
+	// "untagged"; an untagged image can still be referenced by its ID.
+	//
+	// Required: true
+	RepoTags []string `json:"RepoTags"`
+
+	// Total size of image layers that are shared between this image and other
+	// images.
+	//
+	// This size is not calculated by default. `-1` indicates that the value
+	// has not been set / calculated.
+	//
+	// Required: true
+	SharedSize int64 `json:"SharedSize"`
+
+	// Total size of the image including all layers it is composed of.
+	//
+	// Required: true
+	Size int64 `json:"Size"`
+
+	// Total size of the image including all layers it is composed of.
+ // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Images are now stored + // self-contained, and no longer use a parent-chain, making this field + // an equivalent of the Size field. + // + // Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + VirtualSize int64 `json:"VirtualSize,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 000000000..ac4ce6223 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,140 @@ +package mount // import "github.com/docker/docker/api/types/mount" + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" + // TypeCluster is the type for Swarm Cluster Volumes. + TypeCluster Type = "cluster" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` + ClusterOptions *ClusterOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". 
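
The Mount type above, together with the per-type option structs that follow, is what callers put in HostConfig.Mounts when creating a container. An illustrative sketch of a bind mount and a tmpfs mount (paths and sizes are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	mounts := []mount.Mount{
		{
			Type:   mount.TypeBind,
			Source: "/srv/data", // hypothetical host path
			Target: "/data",
			BindOptions: &mount.BindOptions{
				Propagation: mount.PropagationRPrivate,
			},
		},
		{
			Type:   mount.TypeTmpfs,
			Target: "/scratch",
			// Source must stay empty for tmpfs mounts.
			TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 64 << 20}, // 64 MiB
		},
	}
	for _, m := range mounts {
		fmt.Printf("%q -> %q (%s)\n", m.Source, m.Target, m.Type)
	}
}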
+type BindOptions struct { + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` + CreateMountpoint bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} + +// ClusterOptions specifies options for a Cluster volume. +type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 000000000..437b184c6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,126 @@ +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" +) + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) 
+ return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) + } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} + +var acceptedFilters = map[string]bool{ + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return filter.Validate(acceptedFilters) +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 000000000..abae48b9a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,203 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
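
ValidateFilters above restricts network listing to the accepted filter keys, surfacing the filters package's invalidFilter error for anything else. An illustrative sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
)

func main() {
	ok := filters.NewArgs(filters.Arg("driver", "bridge"))
	fmt.Println(network.ValidateFilters(ok)) // <nil>

	// "status" is not an accepted key for network listing.
	bad := filters.NewArgs(filters.Arg("status", "running"))
	fmt.Println(network.ValidateFilters(bad)) // invalid filter 'status'
}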
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // Protocol to use for clients connecting to the plugin. + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. 
+// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 000000000..569901067 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 000000000..32962dc2e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 000000000..c82f204e8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 000000000..5c031cf8b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 000000000..60d1fb5ad --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,71 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 000000000..d91234744 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
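The UnmarshalJSON/String pair above round-trips the wire format prefix.capability/version for PluginInterfaceType. A small sketch of that round trip, using only the vendored types package:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	var t types.PluginInterfaceType
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0
	fmt.Println(t.String())                        // docker.volumedriver/1.0
}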
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // Host IP address that the container's port is mapped to + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go new file mode 100644 index 000000000..97a924e37 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go @@ -0,0 +1,99 @@ +package registry // import "github.com/docker/docker/api/types/registry" +import ( + "encoding/base64" + "encoding/json" + "io" + "strings" + + "github.com/pkg/errors" +) + +// AuthHeader is the name of the header used to send encoded registry +// authorization credentials for registry operations (push/pull). +const AuthHeader = "X-Registry-Auth" + +// AuthConfig contains authorization information for connecting to a Registry. +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} + +// EncodeAuthConfig serializes the auth configuration as a base64url encoded +// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header. +// +// For details on base64url encoding, see: +// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +func EncodeAuthConfig(authConfig AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", errInvalidParameter{err} + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON +// authentication information as sent through the X-Registry-Auth header. +// +// This function always returns an AuthConfig, even if an error occurs. It is up +// to the caller to decide if authentication is required, and if the error can +// be ignored. +// +// For details on base64url encoding, see: +// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { + if authEncoded == "" { + return &AuthConfig{}, nil + } + + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + return decodeAuthConfigFromReader(authJSON) +} + +// DecodeAuthConfigBody decodes authentication information as sent as JSON in the +// body of a request. This function is to provide backward compatibility with old +// clients and API versions. Current clients and API versions expect authentication +// to be provided through the X-Registry-Auth header. +// +// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an +// error occurs. 
It is up to the caller to decide if authentication is required, +// and if the error can be ignored. +func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { + return decodeAuthConfigFromReader(rdr) +} + +func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { + authConfig := &AuthConfig{} + if err := json.NewDecoder(rdr).Decode(authConfig); err != nil { + // always return an (empty) AuthConfig to increase compatibility with + // the existing API. + return &AuthConfig{}, invalid(err) + } + return authConfig, nil +} + +func invalid(err error) error { + return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { return e.error } + +func (e errInvalidParameter) Unwrap() error { return e.error } diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 000000000..f0a2113e4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 000000000..b83f5d7b2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,120 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +import ( + "encoding/json" + "net" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
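EncodeAuthConfig and DecodeAuthConfig above are inverses over the base64url-encoded JSON form carried in the X-Registry-Auth header. A hedged round-trip sketch (credential values are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	in := registry.AuthConfig{Username: "user", Password: "secret"}
	encoded, err := registry.EncodeAuthConfig(in)
	if err != nil {
		panic(err)
	}
	// The encoded string is what a client sends in the
	// registry.AuthHeader ("X-Registry-Auth") request header.
	out, err := registry.DecodeAuthConfig(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Username == in.Username) // true
}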
+type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
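NetIPNet above exists solely to give net.IPNet a JSON string form in CIDR notation. A quick illustrative sketch of the round trip:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	var n registry.NetIPNet
	if err := json.Unmarshal([]byte(`"10.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	raw, _ := json.Marshal(&n) // MarshalJSON has a pointer receiver
	fmt.Println(n.String(), string(raw)) // 10.0.0.0/8 "10.0.0.0/8"
}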
+ IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor ocispec.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []ocispec.Platform +} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go new file mode 100644 index 000000000..74ea64b1b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go new file mode 100644 index 000000000..20daebed1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -0,0 +1,181 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types // import "github.com/docker/docker/api/types" + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). 
+ // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. 
Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. + TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// Stats is Ultimate struct aggregating all types of stats of one container +type Stats struct { + // Common stats + Read time.Time `json:"read"` + PreRead time.Time `json:"preread"` + + // Linux specific stats, not populated on Windows. + PidsStats PidsStats `json:"pids_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + + // Windows specific stats, not populated on Linux. + NumProcs uint32 `json:"num_procs"` + StorageStats StorageStats `json:"storage_stats,omitempty"` + + // Shared stats + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` +} + +// StatsJSON is newly used Networks +type StatsJSON struct { + Stats + + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go new file mode 100644 index 000000000..82921cebc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice // import "github.com/docker/docker/api/types/strslice" + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. 
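The CPUStats/PreCPUStats pairing above exists so a caller can derive a usage percentage from two consecutive samples. A sketch of the calculation conventionally used by docker-stats-style clients on Linux (the sample numbers are fabricated; treat the whole block as illustrative, not part of this API):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// cpuPercent returns container CPU usage between the previous and current
// sample: delta of container CPU time over delta of system CPU time,
// scaled by the number of online CPUs.
func cpuPercent(s *types.StatsJSON) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)
	online := float64(s.CPUStats.OnlineCPUs)
	if online == 0 { // older daemons may omit OnlineCPUs
		online = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	if sysDelta > 0 && cpuDelta > 0 {
		return cpuDelta / sysDelta * online * 100.0
	}
	return 0.0
}

func main() {
	s := &types.StatsJSON{}
	s.PreCPUStats.CPUUsage.TotalUsage = 1_000_000
	s.CPUStats.CPUUsage.TotalUsage = 3_000_000
	s.PreCPUStats.SystemUsage = 10_000_000
	s.CPUStats.SystemUsage = 20_000_000
	s.CPUStats.OnlineCPUs = 4
	fmt.Println(cpuPercent(s)) // 80
}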
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go new file mode 100644 index 000000000..5ded7dba8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -0,0 +1,48 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "strconv" + "time" +) + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// String implements fmt.Stringer interface. +func (v Version) String() string { + return strconv.FormatUint(v.Index, 10) +} + +// Meta is a base object inherited by most of the other once. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:"Labels"` +} + +// Driver represents a driver (network, logging, secrets backend). +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TLSInfo represents the TLS information about what CA certificate is trusted, +// and who the issuer for a TLS certificate is +type TLSInfo struct { + // TrustRoot is the trusted CA root certificate in PEM format + TrustRoot string `json:",omitempty"` + + // CertIssuer is the raw subject bytes of the issuer + CertIssuerSubject []byte `json:",omitempty"` + + // CertIssuerPublicKey is the raw public key bytes of the issuer + CertIssuerPublicKey []byte `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go new file mode 100644 index 000000000..16202ccce --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -0,0 +1,40 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Config represents a config. +type Config struct { + ID string + Meta + Spec ConfigSpec +} + +// ConfigSpec represents a config specification from a config in swarm +type ConfigSpec struct { + Annotations + Data []byte `json:",omitempty"` + + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// ConfigReferenceFileTarget is a file target in a config reference +type ConfigReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// ConfigReferenceRuntimeTarget is a target for a config specifying that it +// isn't mounted into the container but instead has some other purpose. 
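StrSlice's decoder above accepts either JSON form, a bare string or an array of strings. A quick sketch of both paths:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	var a, b strslice.StrSlice
	_ = json.Unmarshal([]byte(`"echo hi"`), &a)            // plain string
	_ = json.Unmarshal([]byte(`["/bin/sh","-c","echo hi"]`), &b) // array of strings
	fmt.Println(len(a), len(b)) // 1 3
}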
+type ConfigReferenceRuntimeTarget struct{} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` + ConfigID string + ConfigName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 000000000..af5e1c0bc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,80 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// SELinuxContext contains the SELinux labels of the container. +type SELinuxContext struct { + Disable bool + + User string + Role string + Type string + Level string +} + +// CredentialSpec for managed service account (Windows only) +type CredentialSpec struct { + Config string + File string + Registry string +} + +// Privileges defines the security options for the container. +type Privileges struct { + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` + StopSignal string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] 
+ Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` + CapabilityAdd []string `json:",omitempty"` + CapabilityDrop []string `json:",omitempty"` + Ulimits []*units.Ulimit `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 000000000..98ef3284d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,121 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "github.com/docker/docker/api/types/network" +) + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. 
+type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 000000000..bb98d5eed --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,139 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` + CSIInfo []NodeCSIInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. 
+type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// NodeCSIInfo represents information about a CSI plugin available on the node +type NodeCSIInfo struct { + // PluginName is the name of the CSI plugin. + PluginName string `json:",omitempty"` + // NodeID is the ID of the node as reported by the CSI plugin. This is + // different from the swarm node ID. + NodeID string `json:",omitempty"` + // MaxVolumesPerNode is the maximum number of volumes that may be published + // to this node + MaxVolumesPerNode int64 `json:",omitempty"` + // AccessibleTopology indicates the location of this node in the CSI + // plugin's topology + AccessibleTopology *Topology `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// github.com/docker/docker/api/types.Topology. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. 
See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 000000000..0c77403cc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,27 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 000000000..98c2806c3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 000000000..e45045866 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,754 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: plugin.proto + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. 
+type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *PluginSpec) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) 
+ } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, + 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, + 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, + 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, + 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, + 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, + 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, + 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, + 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, + 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, + 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, + 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, + 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, + 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, + 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, + 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 000000000..9ef169046 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; + repeated string env = 5; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 000000000..d5213ec98 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,36 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Secret represents a secret. 
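The generated Marshal/Unmarshal pair above gives PluginSpec a protobuf wire form. A hedged round-trip sketch (the plugin reference is a made-up placeholder):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	in := &runtime.PluginSpec{
		Name:   "example/sample-plugin", // hypothetical plugin name
		Remote: "example/sample-plugin:latest",
		Env:    []string{"DEBUG=1"},
	}
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	out := &runtime.PluginSpec{}
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Env) // example/sample-plugin [DEBUG=1]
}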
+type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 000000000..6eb452d24 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,202 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` + + // ServiceStatus is an optional, extra field indicating the number of + // desired and running tasks. It is provided primarily as a shortcut to + // calculating these values client-side, which otherwise would require + // listing all tasks for a service, an operation that could be + // computation and network expensive. + ServiceStatus *ServiceStatus `json:",omitempty"` + + // JobStatus is the status of a Service which is in one of ReplicatedJob or + // GlobalJob modes. It is absent on Replicated and Global services. + JobStatus *JobStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` + ReplicatedJob *ReplicatedJob `json:",omitempty"` + GlobalJob *GlobalJob `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" + // UpdateStateRollbackStarted is the state with a rollback in progress. + UpdateStateRollbackStarted UpdateState = "rollback_started" + // UpdateStateRollbackPaused is the state with a rollback in progress. 
+ UpdateStateRollbackPaused UpdateState = "rollback_paused" + // UpdateStateRollbackCompleted is the state with a rollback completed. + UpdateStateRollbackCompleted UpdateState = "rollback_completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt *time.Time `json:",omitempty"` + CompletedAt *time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +// ReplicatedJob is a type of Service that executes a defined number of Tasks +// in parallel until the specified number of Tasks have succeeded. +type ReplicatedJob struct { + // MaxConcurrent indicates the maximum number of Tasks that should be + // executing simultaneously for this job at any given time. There may be + // fewer Tasks than MaxConcurrent executing simultaneously; for example, if + // there are fewer than MaxConcurrent tasks needed to reach + // TotalCompletions. + // + // If this field is empty, it will default to a max concurrency of 1. + MaxConcurrent *uint64 `json:",omitempty"` + + // TotalCompletions is the total number of Tasks desired to run to + // completion. + // + // If this field is empty, the value of MaxConcurrent will be used. + TotalCompletions *uint64 `json:",omitempty"` +} + +// GlobalJob is the type of a Service which executes a Task on every Node +// matching the Service's placement constraints. These tasks run to completion +// and then exit. +// +// This type is deliberately empty. +type GlobalJob struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" + // UpdateFailureActionRollback ROLLBACK + UpdateFailureActionRollback = "rollback" + + // UpdateOrderStopFirst STOP_FIRST + UpdateOrderStopFirst = "stop-first" + // UpdateOrderStartFirst START_FIRST + UpdateOrderStartFirst = "start-first" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update fails. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down. + Order string +} + +// ServiceStatus represents the number of running tasks in a service and the +// number of tasks desired to be running. +type ServiceStatus struct { + // RunningTasks is the number of tasks for the service actually in the + // Running state + RunningTasks uint64 + + // DesiredTasks is the number of tasks desired to be running by the + // service. For replicated services, this is the replica count. For global + // services, this is computed by taking the number of tasks with desired + // state of not-Shutdown. + DesiredTasks uint64 + + // CompletedTasks is the number of tasks in the state Completed, if this + // service is in ReplicatedJob or GlobalJob mode. This field must be + // cross-referenced with the service type, because the default value of 0 + // may mean that a service is not in a job mode, or it may mean that the + // job has yet to complete any tasks. + CompletedTasks uint64 +} + +// JobStatus is the status of a job-type service. +type JobStatus struct { + // JobIteration is a value increased each time a Job is executed, + // successfully or otherwise. "Executed", in this case, means the job as a + // whole has been started, not that an individual Task has been launched. A + // job is "Executed" when its ServiceSpec is updated. JobIteration can be + // used to disambiguate Tasks belonging to different executions of a job. + // + // Though JobIteration will increase with each subsequent execution, it may + // not necessarily increase by 1, and so JobIteration should not be used to + // keep track of the number of times a job has been executed. + JobIteration Version + + // LastExecution is the time that the job was last executed, as observed by + // Swarm manager. + LastExecution time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 000000000..3eae4b9b2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,237 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. 
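// NOTE (editor's illustrative sketch; not part of the vendored upstream file):
// the UpdateConfig semantics documented above (Parallelism, Delay, Monitor,
// MaxFailureRatio, FailureAction, Order) combine as in this minimal sketch;
// the concrete numbers are invented for the example.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// Update two tasks per iteration, wait 10s between iterations, watch
	// each new task for 30s, and pause the rollout once more than 20% of
	// the updated tasks have failed within their Monitor window.
	uc := swarm.UpdateConfig{
		Parallelism:     2,
		Delay:           10 * time.Second,
		FailureAction:   swarm.UpdateFailureActionPause,
		Monitor:         30 * time.Second,
		MaxFailureRatio: 0.2,
		Order:           swarm.UpdateOrderStopFirst,
	}
	fmt.Printf("%+v\n", uc)
}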
+type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an effect on new tasks. Old tasks + // will continue to use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers' TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted.
+ SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate and key. + ForceRotate uint64 `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec Spec + AutoLockManagers bool + Availability NodeAvailability + DefaultAddrPool []string + SubnetSize uint32 +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accepted by secret + Availability NodeAvailability +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` +} + +// Status provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type Status struct { + // NodeState represents the state of the node. + NodeState LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate.
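// NOTE (editor's illustrative sketch; not part of the vendored upstream file):
// a minimal InitRequest as a swarm "init" client might build it from the types
// above. The addresses and pool sizes are invented; NodeAvailabilityActive is
// defined in this package's node.go, which is not shown here.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	req := swarm.InitRequest{
		ListenAddr:       "0.0.0.0:2377",
		AdvertiseAddr:    "192.168.1.10:2377",
		ForceNewCluster:  false,
		AutoLockManagers: true,
		Availability:     swarm.NodeAvailabilityActive,
		DefaultAddrPool:  []string{"10.0.0.0/8"},
		SubnetSize:       24,
	}
	fmt.Printf("%+v\n", req)
}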
+type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 000000000..ad3eeca0b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,225 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` + + // JobIteration is the JobIteration of the Service that this Task was + // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is + // used to determine which Tasks belong to which run of the job. This field + // is absent if the Service mode is Replicated or Global. + JobIteration *Version `json:",omitempty"` + + // Volumes is the list of VolumeAttachments for this task. It specifies + // which particular volumes are to be used by this particular task, and + // fulfilling what mounts in the spec. + Volumes []VolumeAttachment +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. 
If not present, the cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory) which can be advertised by a +// node and requested to be reserved for a task. +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// Limit describes limits on resources which can be requested by a task. +type Limit struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + Pids int64 `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resource requirements. +type ResourceRequirements struct { + Limits *Limit `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart.
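// NOTE (editor's illustrative sketch; not part of the vendored upstream file):
// how the TaskSpec pieces above (ResourceRequirements, GenericResource,
// RestartPolicy, Placement) fit together; the resource kinds, labels, and
// limits are invented for the example.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	delay := 5 * time.Second
	attempts := uint64(3)

	spec := swarm.TaskSpec{
		Resources: &swarm.ResourceRequirements{
			Reservations: &swarm.Resources{
				NanoCPUs:    500000000,         // 0.5 CPU
				MemoryBytes: 256 * 1024 * 1024, // 256 MiB
				GenericResources: []swarm.GenericResource{
					{DiscreteResourceSpec: &swarm.DiscreteGenericResource{Kind: "SSD", Value: 1}},
				},
			},
		},
		RestartPolicy: &swarm.RestartPolicy{
			Condition:   swarm.RestartPolicyConditionOnFailure,
			Delay:       &delay,
			MaxAttempts: &attempts,
		},
		Placement: &swarm.Placement{
			Constraints: []string{"node.role==worker"},
			Preferences: []swarm.PlacementPreference{
				{Spread: &swarm.SpreadOver{SpreadDescriptor: "engine.labels.az"}},
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}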
+type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus *ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + ContainerID string + PID int + ExitCode int +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} + +// VolumeAttachment contains the details of associating a Volume with a Task. +type VolumeAttachment struct { + // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. + ID string `json:",omitempty"` + + // Source, together with Target, indicates the Mount, as specified in the + // ContainerSpec, that this volume fulfills. + Source string `json:",omitempty"` + + // Target, together with Source, indicates the Mount, as specified + // in the ContainerSpec, that this volume fulfills. + Target string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 000000000..b413e0200 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,811 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" + "github.com/docker/go-connections/nat" +) + +const ( + // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams + MediaTypeRawStream = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams + MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string `json:",omitempty"` + Layers []string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + ID string `json:"Id"` + + // RepoTags is a list of image names/tags in the local image cache that + // reference this image.
+ // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged"; it can still be referenced by its ID. + RepoTags []string + + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + RepoDigests []string + + // Parent is the ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + Parent string + + // Comment is an optional message that can be set when committing or + // importing the image. + Comment string + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + Created string + + // Container is the ID of the container that was used to create the image. + // + // Depending on how the image was created, this field may be empty. + Container string + + // ContainerConfig is an optional field containing the configuration of the + // container that was last committed when creating the image. + // + // Previous versions of Docker builder used this field to store build cache, + // and it is not in active use anymore. + ContainerConfig *container.Config + + // DockerVersion is the version of Docker that was used to build the image. + // + // Depending on how the image was created, this field may be empty. + DockerVersion string + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + Author string + Config *container.Config + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on. + Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). + OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. + Size int64 + + // VirtualSize is the total size of the image including all layers it is + // composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // Deprecated: Unused in API 1.43 and up, but kept for backward compatibility with older API versions. + VirtualSize int64 `json:"VirtualSize,omitempty"` + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver GraphDriverData + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself.
+ Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + // LastTagTime is the date and time at which the image was last tagged. + LastTagTime time.Time `json:",omitempty"` +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *swarm.Status +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
+} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// NetworkAddressPool is a temp struct used by Info struct +type NetworkAddressPool struct { + Base string + Size int +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + for _, s := range strings.Split(opt, ",") { + k, v, ok := strings.Cut(s, "=") + if !ok { + return nil, fmt.Errorf("invalid security option %q", s) + } + if k == "" || v == "" { + return nil, errors.New("invalid empty security option") + } + if k == "name" { + secopt.Name = v + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. 
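// NOTE (editor's illustrative sketch; not part of the vendored upstream file):
// DecodeSecurityOptions above parses the daemon's SecurityOptions strings into
// typed values, including the bare-name form emitted by pre-1.13 daemons. The
// option strings below are invented but follow the documented shape.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	opts := []string{
		"name=seccomp,profile=default",
		"name=apparmor",
		"userns", // pre-1.13 daemons emit bare names without "="
	}

	decoded, err := types.DecodeSecurityOptions(opts)
	if err != nil {
		panic(err)
	}
	for _, so := range decoded {
		fmt.Println(so.Name, so.Options)
	}
	// Prints: seccomp [{profile default}], apparmor [], userns []
}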
It is used by the Info struct. +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields are part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will be returned by the "inspect" command +type ContainerState struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on.
It's only used by the Docker Swarm standalone API +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is a newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + // Type is the type of mount, see `Type` definitions in + // github.com/docker/docker/api/types/mount.Type + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name.
+ Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. + Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. + Destination string + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). + Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network was created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is for internal use only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name.
+ // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. + // Option CheckDuplicate is there to provide best-effort checking of any networks + // which have the same name, but it is not guaranteed to catch all name collisions. + CheckDuplicate bool + Driver string + Scope string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. +type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + // "Legacy" runtime configuration for runc-compatible runtimes. + + Path string `json:"path,omitempty"` + Args []string `json:"runtimeArgs,omitempty"` + + // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. + + Type string `json:"runtimeType,omitempty"` + Options map[string]interface{} `json:"options,omitempty"` + + // This is exposed here only for internal use + ShimConfig *ShimConfig `json:"-"` +} + +// ShimConfig is used by runtime to configure containerd shims +type ShimConfig struct { + Binary string + Opts interface{} +} + +// DiskUsageObject represents an object type used for disk usage query filtering. +type DiskUsageObject string + +const ( + // ContainerObject represents a container DiskUsageObject. + ContainerObject DiskUsageObject = "container" + // ImageObject represents an image DiskUsageObject. + ImageObject DiskUsageObject = "image" + // VolumeObject represents a volume DiskUsageObject. + VolumeObject DiskUsageObject = "volume" + // BuildCacheObject represents a build-cache DiskUsageObject. + BuildCacheObject DiskUsageObject = "build-cache" +) + +// DiskUsageOptions holds parameters for system disk usage query. +type DiskUsageOptions struct { + // Types specifies what object types to include in the response. If empty, + // all object types are returned. + Types []DiskUsageObject +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*volume.Volume + BuildCache []*BuildCache + BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
+} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} + +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest string + Size int +} + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} + +// BuildCache contains information about a build cache record. +type BuildCache struct { + // ID is the unique ID of the build cache record. + ID string + // Parent is the ID of the parent build cache record. + // + // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. + Parent string `json:"Parent,omitempty"` + // Parents is the list of parent build cache record IDs. + Parents []string `json:"Parents,omitempty"` + // Type is the cache record type. + Type string + // Description is a description of the build-step that produced the build cache. + Description string + // InUse indicates if the build cache is in use. + InUse bool + // Shared indicates if the build cache is shared. + Shared bool + // Size is the amount of disk space used by the build cache (in bytes). + Size int64 + // CreatedAt is the date and time at which the build cache was created. + CreatedAt time.Time + // LastUsedAt is the date and time at which the build cache was last used. + LastUsedAt *time.Time + UsageCount int +} + +// BuildCachePruneOptions holds parameters to prune the build cache +type BuildCachePruneOptions struct { + All bool + KeepStorage int64 + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md new file mode 100644 index 000000000..1ef911edb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/README.md @@ -0,0 +1,14 @@ +# Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`.
+ +Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. + +## Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go new file mode 100644 index 000000000..621725a36 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -0,0 +1,65 @@ +package versions // import "github.com/docker/docker/api/types/versions" + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. +func compare(v1, v2 string) int { + if v1 == v2 { + return 0 + } + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + maxVer := len(currTab) + if len(otherTab) > maxVer { + maxVer = len(otherTab) + } + for i := 0; i < maxVer; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go new file mode 100644 index 000000000..55fc5d389 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go @@ -0,0 +1,420 @@ +package volume + +import ( + "github.com/docker/docker/api/types/swarm" +) + +// ClusterVolume contains options and information specific to, and only present +// on, Swarm CSI cluster volumes. +type ClusterVolume struct { + // ID is the Swarm ID of the volume. Because cluster volumes are Swarm + // objects, they have an ID, unlike non-cluster volumes, which only have a + // Name.
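// NOTE (editor's illustrative sketch; not part of the vendored upstream file):
// the compare helper above walks dot-separated components numerically, so
// "1.40" sorts after "1.9" even though it is lexically smaller, and missing
// components count as zero.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("1.9", "1.40"))              // true: 9 < 40 numerically
	fmt.Println(versions.GreaterThanOrEqualTo("1.41", "1.41")) // true: equal versions compare as 0
	fmt.Println(versions.Equal("1.25.0", "1.25"))              // true: the missing component counts as 0
}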
This ID can be used to refer to the cluster volume. + ID string + + // Meta is the swarm metadata about this volume. + swarm.Meta + + // Spec is the cluster-specific options from which this volume is derived. + Spec ClusterVolumeSpec + + // PublishStatus contains the status of the volume as it pertains to its + // publishing on Nodes. + PublishStatus []*PublishStatus `json:",omitempty"` + + // Info is information about the global status of the volume. + Info *Info `json:",omitempty"` +} + +// ClusterVolumeSpec contains the spec used to create this volume. +type ClusterVolumeSpec struct { + // Group defines the volume group of this volume. Volumes belonging to the + // same group can be referred to by group name when creating Services. + // Referring to a volume by group instructs swarm to treat volumes in that + // group interchangeably for the purpose of scheduling. Volumes with an + // empty string for a group technically all belong to the same, emptystring + // group. + Group string `json:",omitempty"` + + // AccessMode defines how the volume is used by tasks. + AccessMode *AccessMode `json:",omitempty"` + + // AccessibilityRequirements specifies where in the cluster a volume must + // be accessible from. + // + // This field must be empty if the plugin does not support + // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the + // plugin does not support it, volume will not be created. + // + // If AccessibilityRequirements is empty, but the plugin does support + // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire + // cluster is a valid target for the volume. + AccessibilityRequirements *TopologyRequirement `json:",omitempty"` + + // CapacityRange defines the desired capacity that the volume should be + // created with. If nil, the plugin will decide the capacity. + CapacityRange *CapacityRange `json:",omitempty"` + + // Secrets defines Swarm Secrets that are passed to the CSI storage plugin + // when operating on this volume. + Secrets []Secret `json:",omitempty"` + + // Availability is the Volume's desired availability. Analogous to Node + // Availability, this allows the user to take volumes offline in order to + // update or delete them. + Availability Availability `json:",omitempty"` +} + +// Availability specifies the availability of the volume. +type Availability string + +const ( + // AvailabilityActive indicates that the volume is active and fully + // schedulable on the cluster. + AvailabilityActive Availability = "active" + + // AvailabilityPause indicates that no new workloads should use the + // volume, but existing workloads can continue to use it. + AvailabilityPause Availability = "pause" + + // AvailabilityDrain indicates that all workloads using this volume + // should be rescheduled, and the volume unpublished from all nodes. + AvailabilityDrain Availability = "drain" +) + +// AccessMode defines the access mode of a volume. +type AccessMode struct { + // Scope defines the set of nodes this volume can be used on at one time. + Scope Scope `json:",omitempty"` + + // Sharing defines the number and way that different tasks can use this + // volume at one time. + Sharing SharingMode `json:",omitempty"` + + // MountVolume defines options for using this volume as a Mount-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + MountVolume *TypeMount `json:",omitempty"` + + // BlockVolume defines options for using this volume as a Block-type + // volume. 
+	//
+	// Either BlockVolume or MountVolume, but not both, must be present.
+	BlockVolume *TypeBlock `json:",omitempty"`
+}
+
+// Scope defines the Scope of a Cluster Volume. This determines how many nodes
+// a Volume can be accessed on simultaneously.
+type Scope string
+
+const (
+	// ScopeSingleNode indicates the volume can be used on one node at a
+	// time.
+	ScopeSingleNode Scope = "single"
+
+	// ScopeMultiNode indicates the volume can be used on many nodes at
+	// the same time.
+	ScopeMultiNode Scope = "multi"
+)
+
+// SharingMode defines the Sharing of a Cluster Volume. This determines how
+// multiple Tasks may use the Volume at the same time.
+type SharingMode string
+
+const (
+	// SharingNone indicates that only one Task may use the Volume at a
+	// time.
+	SharingNone SharingMode = "none"
+
+	// SharingReadOnly indicates that the Volume may be shared by any
+	// number of Tasks, but they must be read-only.
+	SharingReadOnly SharingMode = "readonly"
+
+	// SharingOneWriter indicates that the Volume may be shared by any
+	// number of Tasks, but all after the first must be read-only.
+	SharingOneWriter SharingMode = "onewriter"
+
+	// SharingAll means that the Volume may be shared by any number of
+	// Tasks, as readers or writers.
+	SharingAll SharingMode = "all"
+)
+
+// TypeBlock defines options for using a volume as a block-type volume.
+//
+// Intentionally empty.
+type TypeBlock struct{}
+
+// TypeMount contains options for using a volume as a Mount-type
+// volume.
+type TypeMount struct {
+	// FsType specifies the filesystem type for the mount volume. Optional.
+	FsType string `json:",omitempty"`
+
+	// MountFlags defines flags to pass when mounting the volume. Optional.
+	MountFlags []string `json:",omitempty"`
+}
+
+// TopologyRequirement expresses the user's requirements for a volume's
+// accessible topology.
+type TopologyRequirement struct {
+	// Requisite specifies a list of Topologies, at least one of which the
+	// volume must be accessible from.
+	//
+	// Taken verbatim from the CSI Spec:
+	//
+	// Specifies the list of topologies the provisioned volume MUST be
+	// accessible from.
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// If requisite is specified, the provisioned volume MUST be
+	// accessible from at least one of the requisite topologies.
+	//
+	// Given
+	//   x = number of topologies provisioned volume is accessible from
+	//   n = number of requisite topologies
+	// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1.
+	// If x==n, then the SP MUST make the provisioned volume available to
+	// all topologies from the list of requisite topologies. If it is
+	// unable to do so, the SP MUST fail the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and both "zone" "Z2" and "zone" "Z3".
+	//
+	// If x>n, then the SP MUST make the provisioned volume available from
+	// all topologies from the list of requisite topologies and MAY choose
+	// the remaining x-n unique topologies from the list of all possible
+	// topologies.
If it is unable to do so, the SP MUST fail the
+	// CreateVolume call.
+	// For example, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2" and the SP may select the second zone
+	// independently, e.g. "R1/Z4".
+	Requisite []Topology `json:",omitempty"`
+
+	// Preferred is a list of Topologies that the volume should attempt to be
+	// provisioned in.
+	//
+	// Taken from the CSI spec:
+	//
+	// Specifies the list of topologies the CO would prefer the volume to
+	// be provisioned in.
+	//
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// An SP MUST attempt to make the provisioned volume available using
+	// the preferred topologies in order from first to last.
+	//
+	// If requisite is specified, all topologies in the preferred list MUST
+	// also be present in the list of requisite topologies.
+	//
+	// If the SP is unable to make the provisioned volume available
+	// from any of the preferred topologies, the SP MAY choose a topology
+	// from the list of requisite topologies.
+	// If the list of requisite topologies is not specified, then the SP
+	// MAY choose from the list of all possible topologies.
+	// If the list of requisite topologies is specified and the SP is
+	// unable to make the provisioned volume available from any of the
+	// requisite topologies, it MUST fail the CreateVolume call.
+	//
+	// Example 1:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// available from "zone" "Z3" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible.
+	//
+	// Example 2:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z2"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// accessible from "zone" "Z4" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible. If that
+	// is not possible, the SP may choose between either the "zone"
+	// "Z3" or "Z5" in the "region" "R1".
+	//
+	// Example 3:
+	// Given a volume should be accessible from TWO zones (because an
+	// opaque parameter in CreateVolumeRequest, for example, specifies
+	// the volume is accessible from two zones, aka synchronously
+	// replicated), and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z5"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// accessible from the combination of the two "zones" "Z5" and "Z3" in
+	// the "region" "R1". If that's not possible, it should fall back to
+	// a combination of "Z5" and other possibilities from the list of
+	// requisite.
If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []Topology `json:",omitempty"` +} + +// Topology is a map of topological domains to topological segments. +// +// This description is taken verbatim from the CSI Spec: +// +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string `json:",omitempty"` +} + +// CapacityRange describes the minimum and maximum capacity a volume should be +// created with +type CapacityRange struct { + // RequiredBytes specifies that a volume must be at least this big. The + // value of 0 indicates an unspecified minimum. + RequiredBytes int64 + + // LimitBytes specifies that a volume must not be bigger than this. The + // value of 0 indicates an unspecified maximum + LimitBytes int64 +} + +// Secret represents a Swarm Secret value that must be passed to the CSI +// storage plugin when operating on this Volume. It represents one key-value +// pair of possibly many. +type Secret struct { + // Key is the name of the key of the key-value pair passed to the plugin. + Key string + + // Secret is the swarm Secret object from which to read data. This can be a + // Secret name or ID. The Secret data is retrieved by Swarm and used as the + // value of the key-value pair passed to the plugin. + Secret string +} + +// PublishState represents the state of a Volume as it pertains to its +// use on a particular Node. +type PublishState string + +const ( + // StatePending indicates that the volume should be published on + // this node, but the call to ControllerPublishVolume has not been + // successfully completed yet and the result recorded by swarmkit. + StatePending PublishState = "pending-publish" + + // StatePublished means the volume is published successfully to the node. 
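+	// (For orientation: a published volume moves through this lifecycle as
+	// pending-publish -> published -> pending-node-unpublish ->
+	// pending-controller-unpublish.)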
+	StatePublished PublishState = "published"
+
+	// StatePendingNodeUnpublish indicates that the Volume should be
+	// unpublished on the Node, and we're waiting for confirmation that it has
+	// done so. After the Node has confirmed that the Volume has been
+	// unpublished, the state will move to StatePendingUnpublish.
+	StatePendingNodeUnpublish PublishState = "pending-node-unpublish"
+
+	// StatePendingUnpublish means the volume is still published to the node
+	// by the controller, awaiting the operation to unpublish it.
+	StatePendingUnpublish PublishState = "pending-controller-unpublish"
+)
+
+// PublishStatus represents the status of the volume as published to an
+// individual node.
+type PublishStatus struct {
+	// NodeID is the ID of the swarm node this Volume is published to.
+	NodeID string `json:",omitempty"`
+
+	// State is the publish state of the volume.
+	State PublishState `json:",omitempty"`
+
+	// PublishContext is the PublishContext returned by the CSI plugin when
+	// a volume is published.
+	PublishContext map[string]string `json:",omitempty"`
+}
+
+// Info contains information about the Volume as a whole as provided by
+// the CSI storage plugin.
+type Info struct {
+	// CapacityBytes is the capacity of the volume in bytes. A value of 0
+	// indicates that the capacity is unknown.
+	CapacityBytes int64 `json:",omitempty"`
+
+	// VolumeContext is the context originating from the CSI storage plugin
+	// when the Volume is created.
+	VolumeContext map[string]string `json:",omitempty"`
+
+	// VolumeID is the ID of the Volume as seen by the CSI storage plugin. This
+	// is distinct from the Volume's Swarm ID, which is the ID the Docker
+	// Engine uses to refer to the Volume. If this field is blank, then the
+	// Volume has not been successfully created yet.
+	VolumeID string `json:",omitempty"`
+
+	// AccessibleTopology is the topology this volume is actually accessible
+	// from.
+	AccessibleTopology []Topology `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go
new file mode 100644
index 000000000..37c41a609
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/create_options.go
@@ -0,0 +1,29 @@
+package volume
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// CreateOptions VolumeConfig
+//
+// Volume configuration
+// swagger:model CreateOptions
+type CreateOptions struct {
+
+	// cluster volume spec
+	ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"`
+
+	// Name of the volume driver to use.
+	Driver string `json:"Driver,omitempty"`
+
+	// A mapping of driver options and values. These options are
+	// passed directly to the driver and are driver specific.
+	//
+	DriverOpts map[string]string `json:"DriverOpts,omitempty"`
+
+	// User-defined key/value metadata.
+	Labels map[string]string `json:"Labels,omitempty"`
+
+	// The new volume's name. If not specified, Docker generates a name.
+	//
+	Name string `json:"Name,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go
new file mode 100644
index 000000000..ca5192a2a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/list_response.go
@@ -0,0 +1,18 @@
+package volume
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +// ListResponse VolumeListResponse +// +// Volume list response +// swagger:model ListResponse +type ListResponse struct { + + // List of volumes + Volumes []*Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. + // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go new file mode 100644 index 000000000..8b0dd1389 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -0,0 +1,8 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +import "github.com/docker/docker/api/types/filters" + +// ListOptions holds parameters to list volumes. +type ListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go new file mode 100644 index 000000000..ea7d555e5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume.go @@ -0,0 +1,75 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // cluster volume + ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. + // + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *UsageData `json:"UsageData,omitempty"` +} + +// UsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model UsageData +type UsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go new file mode 100644 index 000000000..f958f80a6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_update.go @@ -0,0 +1,7 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// UpdateOptions is configuration to update a Volume with. +type UpdateOptions struct { + // Spec is the ClusterVolumeSpec to update the volume to. + Spec *ClusterVolumeSpec `json:"Spec,omitempty"` +} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 000000000..61e7456b4 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,69 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. +type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When a ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. +// An example of this would be a failed mount request. +type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 000000000..c211f174f --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. 
+// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). +package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 000000000..fe06fb6f7 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,279 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +func (e errNotFound) Unwrap() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil || IsNotFound(err) { + return err + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +func (e errInvalidParameter) Unwrap() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil || IsInvalidParameter(err) { + return err + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +func (e errConflict) Unwrap() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil || IsConflict(err) { + return err + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +func (e errUnauthorized) Unwrap() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil || IsUnauthorized(err) { + return err + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +func (e errUnavailable) Unwrap() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +func (e errForbidden) Unwrap() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil || IsForbidden(err) { + return err + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +func (e errSystem) Unwrap() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type 
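+//
+// A minimal usage sketch (illustrative; the wrapped message is arbitrary):
+//
+//	err := System(errors.New("mount failed"))
+//	IsSystem(err)      // true
+//	errors.Unwrap(err) // the original error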
+func System(err error) error { + if err == nil || IsSystem(err) { + return err + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +func (e errNotModified) Unwrap() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil || IsNotModified(err) { + return err + } + return errNotModified{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +func (e errNotImplemented) Unwrap() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil || IsNotImplemented(err) { + return err + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +func (e errUnknown) Unwrap() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil || IsUnknown(err) { + return err + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +func (e errCancelled) Unwrap() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil || IsCancelled(err) { + return err + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +func (e errDeadline) Unwrap() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil || IsDeadline(err) { + return err + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +func (e errDataLoss) Unwrap() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil || IsDataLoss(err) { + return err + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 000000000..77bda389d --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,46 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "net/http" +) + +// FromStatusCode creates an errdef error, based on the provided HTTP status-code +func FromStatusCode(err error, statusCode int) error { + if err == nil { + return nil + } + switch statusCode { + case http.StatusNotFound: + err = 
NotFound(err) + case http.StatusBadRequest: + err = InvalidParameter(err) + case http.StatusConflict: + err = Conflict(err) + case http.StatusUnauthorized: + err = Unauthorized(err) + case http.StatusServiceUnavailable: + err = Unavailable(err) + case http.StatusForbidden: + err = Forbidden(err) + case http.StatusNotModified: + err = NotModified(err) + case http.StatusNotImplemented: + err = NotImplemented(err) + case http.StatusInternalServerError: + if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { + err = System(err) + } + default: + switch { + case statusCode >= 200 && statusCode < 400: + // it's a client error + case statusCode >= 400 && statusCode < 500: + err = InvalidParameter(err) + case statusCode >= 500 && statusCode < 600: + err = System(err) + default: + err = Unknown(err) + } + } + return err +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 000000000..3abf07d0c --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,107 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func 
IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 000000000..ded1c7c8c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,105 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" + "os" + "path/filepath" + "strings" +) + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return xdgRuntimeDir, nil + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +} + +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickyRuntimeDir returns slice of sticked files. +// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore error if runtimeDir is empty + return nil, nil + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// If HOME and XDG_DATA_HOME are not set, getpwent(3) is consulted to determine the users home directory. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// If HOME and XDG_CONFIG_HOME are not set, getpwent(3) is consulted to determine the users home directory. 
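+// For example, with XDG_CONFIG_HOME unset and HOME=/home/user (an
+// illustrative value), GetConfigHome returns "/home/user/.config".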
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetLibHome returns $HOME/.local/lib +// If HOME is not set, getpwent(3) is consulted to determine the users home directory. +func GetLibHome() (string, error) { + home := Get() + if home == "" { + return "", errors.New("could not get HOME") + } + return filepath.Join(home, ".local/lib"), nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go new file mode 100644 index 000000000..11f1bec98 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,33 @@ +//go:build !linux +// +build !linux + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" +) + +// GetRuntimeDir is unsupported on non-linux system. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-linux system. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. +func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} + +// GetLibHome is unsupported on non-linux system. +func GetLibHome() (string, error) { + return "", errors.New("homedir.GetLibHome() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 000000000..d1732dee5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,39 @@ +//go:build !windows +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + "os/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
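+//
+// For example, on Unix-like systems this is "~", so a path such as
+// "~/.docker" abbreviates filepath.Join(Get(), ".docker").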
+func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 000000000..2f81813b2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 000000000..466f79294 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 000000000..c1cfa62fd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,187 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. 
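+//
+// A minimal usage sketch (illustrative only):
+//
+//	bp := NewBytesPipe()
+//	go func() {
+//		bp.Write([]byte("hello"))
+//		bp.Close()
+//	}()
+//	data, _ := io.ReadAll(bp) // data == []byte("hello")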
+type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. + readBlock bool // check read BytesPipe is Wait() or not +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + defer bp.mu.Unlock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + if bp.readBlock { + bp.wait.Broadcast() + } + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. 
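+// Once the internal buffers are drained and the pipe has been closed, Read
+// returns the error passed to CloseWithError (io.EOF after a plain Close).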
+func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + return 0, bp.closeErr + } + bp.readBlock = true + bp.wait.Wait() + bp.readBlock = false + if bp.bufLen == 0 && bp.closeErr != nil { + return 0, bp.closeErr + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 000000000..82671d8cd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,161 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. 
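+//
+// A minimal usage sketch (the target path is illustrative):
+//
+//	ws, _ := NewAtomicWriteSet("")
+//	ws.WriteFile("config.json", []byte("{}"), 0o644)
+//	if err := ws.Commit("/etc/app"); err != nil {
+//		ws.Cancel()
+//	}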
+type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := os.MkdirTemp(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 000000000..de00b95e3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,151 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "context" + "io" + + // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered + // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. + _ "crypto/sha256" + _ "crypto/sha512" +) + +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { + io.Reader + closer func() error +} + +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &ReadCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. 
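+// The closer callback runs each time a Read on the wrapped reader returns an
+// error, including io.EOF. A minimal usage sketch (body is an illustrative
+// io.ReadCloser):
+//
+//	r := NewReaderErrWrapper(body, func() { body.Close() })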
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go b/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go new file mode 100644 index 000000000..b3321602c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go @@ -0,0 +1,10 @@ +package ioutils + +import "github.com/docker/docker/pkg/longpath" + +// TempDir is the equivalent of [os.MkdirTemp], except that on Windows +// the result is in Windows longpath format. On Unix systems it is +// equivalent to [os.MkdirTemp]. +// +// Deprecated: use [longpath.MkdirTemp]. +var TempDir = longpath.MkdirTemp diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 000000000..91b8d1826 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. 
In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 000000000..61c679497 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. 
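+// A minimal usage sketch, counting the bytes produced by json.Encoder, whose
+// Encode result masks the byte count (v is any illustrative value):
+//
+//	wc := NewWriteCounter(io.Discard)
+//	json.NewEncoder(wc).Encode(v)
+//	fmt.Println(wc.Count) // number of bytes written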
+func NewWriteCounter(w io.Writer) *WriteCounter {
+	return &WriteCounter{
+		Writer: w,
+	}
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+	count, err = wc.Writer.Write(p)
+	wc.Count += int64(count)
+	return
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 000000000..035160c83
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,307 @@
+package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage"
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	units "github.com/docker/go-units"
+	"github.com/moby/term"
+	"github.com/morikuni/aec"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message. Code is
+// an integer error code; Message is the error message.
+type JSONError struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+	return e.Message
+}
+
+// JSONProgress describes a progress message in a JSON stream.
+type JSONProgress struct {
+	// Current is the current status and value of the progress made towards Total.
+	Current int64 `json:"current,omitempty"`
+	// Total is the end value describing when we made 100% progress for an operation.
+	Total int64 `json:"total,omitempty"`
+	// Start is the initial value for the operation.
+	Start int64 `json:"start,omitempty"`
+	// HideCounts, if true, hides the progress count indicator (xB/yB).
+	HideCounts bool `json:"hidecounts,omitempty"`
+	// Units is the unit to print for progress. It defaults to "bytes" if empty.
+	Units string `json:"units,omitempty"`
+
+	// terminalFd is the fd of the current terminal, if any. It is used
+	// to get the terminal width.
+	terminalFd uintptr
+
+	// nowFunc is used to override the current time in tests.
+	nowFunc func() time.Time
+
+	// winSize is used to override the terminal width in tests.
+	winSize int
+}
+
+func (p *JSONProgress) String() string {
+	var (
+		width       = p.width()
+		pbBox       string
+		numbersBox  string
+		timeLeftBox string
+	)
+	if p.Current <= 0 && p.Total <= 0 {
+		return ""
+	}
+	if p.Total <= 0 {
+		switch p.Units {
+		case "":
+			return fmt.Sprintf("%8v", units.HumanSize(float64(p.Current)))
+		default:
+			return fmt.Sprintf("%d %s", p.Current, p.Units)
+		}
+	}
+
+	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+	if percentage > 50 {
+		percentage = 50
+	}
+	if width > 110 {
+		// this number can't be negative (gh#7136)
+		numSpaces := 0
+		if 50-percentage > 0 {
+			numSpaces = 50 - percentage
+		}
+		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+	}
+
+	switch {
+	case p.HideCounts:
+	case p.Units == "": // no units, use bytes
+		current := units.HumanSize(float64(p.Current))
+		total := units.HumanSize(float64(p.Total))
+
+		numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+		if p.Current > p.Total {
+			// remove total display if the reported current is wonky.
+			numbersBox = fmt.Sprintf("%8v", current)
+		}
+	default:
+		numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
+
+		if p.Current > p.Total {
+			// remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := p.now().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// now returns the current time in UTC, but can be overridden in tests +// by setting JSONProgress.nowFunc to a custom function. +func (p *JSONProgress) now() time.Time { + if p.nowFunc != nil { + return p.nowFunc() + } + return time.Now().UTC() +} + +// width returns the current terminal's width, but can be overridden +// in tests by setting JSONProgress.winSize to a non-zero value. +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` // deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` // deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) +} + +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) +} + +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) +} + +// Display prints the JSONMessage to out. If isTerminal is true, it erases +// the entire current line when displaying the progressbar. It returns an +// error if the [JSONMessage.Error] field is non-nil. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) + endl = "\r" + fmt.Fprint(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { // deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream reads a JSON message stream from in, and writes +// each [JSONMessage] to out. 
It returns an error if an invalid JSONMessage
+// is received, or if a JSONMessage contains a non-zero [JSONMessage.Error].
+//
+// Presentation of the JSONMessage depends on whether a terminal is attached,
+// and on the terminal width. Progress bars ([JSONProgress]) are suppressed
+// on narrower terminals (< 110 characters).
+//
+//   - isTerminal describes if out is a terminal, in which case it prints
+//     a newline ("\n") at the end of each line and moves the cursor while
+//     displaying.
+//   - terminalFd is the fd of the current terminal (if any), and is used
+//     to get the terminal width.
+//   - auxCallback allows handling the [JSONMessage.Aux] field. It is
+//     called if a JSONMessage contains an Aux field, in which case
+//     DisplayJSONMessagesStream does not present the JSONMessage.
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
+	var (
+		dec = json.NewDecoder(in)
+		ids = make(map[string]uint)
+	)
+
+	for {
+		var diff uint
+		var jm JSONMessage
+		if err := dec.Decode(&jm); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		if jm.Aux != nil {
+			if auxCallback != nil {
+				auxCallback(jm)
+			}
+			continue
+		}
+
+		if jm.Progress != nil {
+			jm.Progress.terminalFd = terminalFd
+		}
+		if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+			line, ok := ids[jm.ID]
+			if !ok {
+				// NOTE: This approach of using len(id) to
+				// figure out the number of lines of history
+				// only works as long as we clear the history
+				// when we output something that's not
+				// accounted for in the map, such as a line
+				// with no ID.
+				line = uint(len(ids))
+				ids[jm.ID] = line
+				if isTerminal {
+					fmt.Fprintf(out, "\n")
+				}
+			}
+			diff = uint(len(ids)) - line
+			if isTerminal {
+				cursorUp(out, diff)
+			}
+		} else {
+			// When outputting something that isn't progress
+			// output, clear the history of previous lines. We
+			// don't want progress entries from some previous
+			// operation to be updated (for example, pull -a
+			// with multiple tags).
+			ids = make(map[string]uint)
+		}
+		err := jm.Display(out, isTerminal)
+		if jm.ID != "" && isTerminal {
+			cursorDown(out, diff)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Stream is an io.Writer for output with utilities to get the output's file
+// descriptor and to detect whether it's a terminal.
+//
+// It is a subset of the streams.Out type in
+// https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out
+type Stream interface {
+	io.Writer
+	FD() uintptr
+	IsTerminal() bool
+}
+
+// DisplayJSONMessagesToStream prints JSON messages to the output Stream. It is
+// used by the Docker CLI to print JSONMessage streams.
+func DisplayJSONMessagesToStream(in io.Reader, stream Stream, auxCallback func(JSONMessage)) error {
+	return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
+}
diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 000000000..1c5dde521
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,43 @@
+// Package longpath introduces some constants and helper functions for handling
+// long paths in Windows.
+//
+// Long paths are expected to be prepended with "\\?\" and followed by either a
+// drive letter, a UNC server\share, or a volume identifier.
+package longpath // import "github.com/docker/docker/pkg/longpath"
+
+import (
+	"os"
+	"runtime"
+	"strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix adds the Windows long path prefix to the path provided if
+// it does not already have it.
+func AddPrefix(path string) string {
+	if !strings.HasPrefix(path, Prefix) {
+		if strings.HasPrefix(path, `\\`) {
+			// This is a UNC path, so we need to add 'UNC' to the path as well.
+			path = Prefix + `UNC` + path[1:]
+		} else {
+			path = Prefix + path
+		}
+	}
+	return path
+}
+
+// MkdirTemp is the equivalent of [os.MkdirTemp], except that on Windows
+// the result is in Windows longpath format. On Unix systems it is
+// equivalent to [os.MkdirTemp].
+func MkdirTemp(dir, prefix string) (string, error) {
+	tempDir, err := os.MkdirTemp(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	if runtime.GOOS != "windows" {
+		return tempDir, nil
+	}
+	return AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go
new file mode 100644
index 000000000..dd75a49f3
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/auth.go
@@ -0,0 +1,196 @@
+package registry // import "github.com/docker/docker/registry"
+
+import (
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/api/types/registry"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// AuthClientID is the ClientID used for the token server.
+const AuthClientID = "docker"
+
+type loginCredentialStore struct {
+	authConfig *registry.AuthConfig
+}
+
+func (lcs loginCredentialStore) Basic(*url.URL) (string, string) {
+	return lcs.authConfig.Username, lcs.authConfig.Password
+}
+
+func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string {
+	return lcs.authConfig.IdentityToken
+}
+
+func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
+	lcs.authConfig.IdentityToken = token
+}
+
+type staticCredentialStore struct {
+	auth *registry.AuthConfig
+}
+
+// NewStaticCredentialStore returns a credential store
+// which always returns the same credential values.
+func NewStaticCredentialStore(auth *registry.AuthConfig) auth.CredentialStore {
+	return staticCredentialStore{
+		auth: auth,
+	}
+}
+
+func (scs staticCredentialStore) Basic(*url.URL) (string, string) {
+	if scs.auth == nil {
+		return "", ""
+	}
+	return scs.auth.Username, scs.auth.Password
+}
+
+func (scs staticCredentialStore) RefreshToken(*url.URL, string) string {
+	if scs.auth == nil {
+		return ""
+	}
+	return scs.auth.IdentityToken
+}
+
+func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) {
+}
+
+// loginV2 tries to log in to the v2 registry server. The given registry
+// endpoint will be pinged to get authorization challenges. These challenges
+// will be used to authenticate against the registry to validate credentials.
+func loginV2(authConfig *registry.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
+	var (
+		endpointStr   = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
+		modifiers     = Headers(userAgent, nil)
+		authTransport = transport.NewTransport(newTransport(endpoint.TLSConfig), modifiers...)
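+		// The AuthConfig is copied on the next line so that a token refresh
+		// performed during login does not mutate the caller's value.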
+ credentialAuthConfig = *authConfig + creds = loginCredentialStore{authConfig: &credentialAuthConfig} + ) + + logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr) + + loginClient, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) + if err != nil { + return "", "", err + } + + req, err := http.NewRequest(http.MethodGet, endpointStr, nil) + if err != nil { + return "", "", err + } + + resp, err := loginClient.Do(req) + if err != nil { + err = translateV2AuthError(err) + return "", "", err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + return "Login Succeeded", credentialAuthConfig.IdentityToken, nil + } + + // TODO(dmcgowan): Attempt to further interpret result, status code and error code string + return "", "", errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) +} + +func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) { + challengeManager, err := PingV2Registry(endpoint, authTransport) + if err != nil { + return nil, err + } + + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + OfflineAccess: true, + ClientID: AuthClientID, + Scopes: scopes, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + + return &http.Client{ + Transport: transport.NewTransport(authTransport, modifiers...), + Timeout: 15 * time.Second, + }, nil +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + return strings.SplitN(stripped, "/", 2)[0] +} + +// ResolveAuthConfig matches an auth configuration to a server address or a URL +func ResolveAuthConfig(authConfigs map[string]registry.AuthConfig, index *registry.IndexInfo) registry.AuthConfig { + configKey := GetAuthConfigKey(index) + // First try the happy case + if c, found := authConfigs[configKey]; found || index.Official { + return c + } + + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for registry, ac := range authConfigs { + if configKey == ConvertToHostname(registry) { + return ac + } + } + + // When all else fails, return an empty auth config + return registry.AuthConfig{} +} + +// PingResponseError is used when the response from a ping +// was received but invalid. +type PingResponseError struct { + Err error +} + +func (err PingResponseError) Error() string { + return err.Err.Error() +} + +// PingV2Registry attempts to ping a v2 registry and on success return a +// challenge manager for the supported authentication types. +// If a response is received but cannot be interpreted, a PingResponseError will be returned. 
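+//
+// Illustrative call (the registry URL is an example value):
+//
+//	u, _ := url.Parse("https://registry.example.com")
+//	challengeManager, err := PingV2Registry(u, http.DefaultTransport)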
+func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, error) { + pingClient := &http.Client{ + Transport: transport, + Timeout: 15 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" + req, err := http.NewRequest(http.MethodGet, endpointStr, nil) + if err != nil { + return nil, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + challengeManager := challenge.NewSimpleManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, PingResponseError{ + Err: err, + } + } + + return challengeManager, nil +} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go new file mode 100644 index 000000000..2766306ac --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config.go @@ -0,0 +1,448 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "net" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/registry" + "github.com/sirupsen/logrus" +) + +// ServiceOptions holds command line options. +type ServiceOptions struct { + AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` +} + +// serviceConfig holds daemon configuration for the registry service. +type serviceConfig registry.ServiceConfig + +// TODO(thaJeztah) both the "index.docker.io" and "registry-1.docker.io" domains +// are here for historic reasons and backward-compatibility. These domains +// are still supported by Docker Hub (and will continue to be supported), but +// there are new domains already in use, and plans to consolidate all legacy +// domains to new "canonical" domains. Once those domains are decided on, we +// should update these consts (but making sure to preserve compatibility with +// existing installs, clients, and user configuration). +const ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultRegistryHost is the hostname for the default (Docker Hub) registry + // used for pushing and pulling images. This hostname is hard-coded to handle + // the conversion from image references without registry name (e.g. "ubuntu", + // or "ubuntu:latest"), as well as references using the "docker.io" domain + // name, which is used as canonical reference for images on Docker Hub, but + // does not match the domain-name of Docker Hub's registry. + DefaultRegistryHost = "registry-1.docker.io" + // IndexHostname is the index hostname, used for authentication and image search. + IndexHostname = "index.docker.io" + // IndexServer is used for user auth and image search + IndexServer = "https://" + IndexHostname + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" +) + +var ( + // DefaultV2Registry is the URI of the default (Docker Hub) registry. + DefaultV2Registry = &url.URL{ + Scheme: "https", + Host: DefaultRegistryHost, + } + + emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) + validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) + + // for mocking in unit tests + lookupIP = net.LookupIP + + // certsDir is used to override defaultCertsDir. + certsDir string +) + +// SetCertsDir allows the default certs directory to be changed. 
This function +// is used at daemon startup to set the correct location when running in +// rootless mode. +func SetCertsDir(path string) { + certsDir = path +} + +// CertsDir is the directory where certificates are stored. +func CertsDir() string { + if certsDir != "" { + return certsDir + } + return defaultCertsDir +} + +// newServiceConfig returns a new instance of ServiceConfig +func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { + config := &serviceConfig{} + if err := config.loadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { + return nil, err + } + if err := config.loadMirrors(options.Mirrors); err != nil { + return nil, err + } + if err := config.loadInsecureRegistries(options.InsecureRegistries); err != nil { + return nil, err + } + + return config, nil +} + +// copy constructs a new ServiceConfig with a copy of the configuration in config. +func (config *serviceConfig) copy() *registry.ServiceConfig { + ic := make(map[string]*registry.IndexInfo) + for key, value := range config.IndexConfigs { + ic[key] = value + } + return ®istry.ServiceConfig{ + AllowNondistributableArtifactsCIDRs: append([]*registry.NetIPNet(nil), config.AllowNondistributableArtifactsCIDRs...), + AllowNondistributableArtifactsHostnames: append([]string(nil), config.AllowNondistributableArtifactsHostnames...), + InsecureRegistryCIDRs: append([]*registry.NetIPNet(nil), config.InsecureRegistryCIDRs...), + IndexConfigs: ic, + Mirrors: append([]string(nil), config.Mirrors...), + } +} + +// loadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. +func (config *serviceConfig) loadAllowNondistributableArtifacts(registries []string) error { + cidrs := map[string]*registry.NetIPNet{} + hostnames := map[string]bool{} + + for _, r := range registries { + if _, err := ValidateIndexName(r); err != nil { + return err + } + if hasScheme(r) { + return invalidParamf("allow-nondistributable-artifacts registry %s should not contain '://'", r) + } + + if _, ipnet, err := net.ParseCIDR(r); err == nil { + // Valid CIDR. + cidrs[ipnet.String()] = (*registry.NetIPNet)(ipnet) + } else if err = validateHostPort(r); err == nil { + // Must be `host:port` if not CIDR. + hostnames[r] = true + } else { + return invalidParamWrapf(err, "allow-nondistributable-artifacts registry %s is not valid", r) + } + } + + config.AllowNondistributableArtifactsCIDRs = make([]*registry.NetIPNet, 0, len(cidrs)) + for _, c := range cidrs { + config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) + } + + config.AllowNondistributableArtifactsHostnames = make([]string, 0, len(hostnames)) + for h := range hostnames { + config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) + } + + return nil +} + +// loadMirrors loads mirrors to config, after removing duplicates. +// Returns an error if mirrors contains an invalid mirror. +func (config *serviceConfig) loadMirrors(mirrors []string) error { + mMap := map[string]struct{}{} + unique := []string{} + + for _, mirror := range mirrors { + m, err := ValidateMirror(mirror) + if err != nil { + return err + } + if _, exist := mMap[m]; !exist { + mMap[m] = struct{}{} + unique = append(unique, m) + } + } + + config.Mirrors = unique + + // Configure public registry since mirrors may have changed. 
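+	// The IndexConfigs map is replaced wholesale on the next line, so only
+	// the docker.io entry survives a call to loadMirrors.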
+ config.IndexConfigs = map[string]*registry.IndexInfo{ + IndexName: { + Name: IndexName, + Mirrors: unique, + Secure: true, + Official: true, + }, + } + + return nil +} + +// loadInsecureRegistries loads insecure registries to config +func (config *serviceConfig) loadInsecureRegistries(registries []string) error { + // Localhost is by default considered as an insecure registry. This is a + // stop-gap for people who are running a private registry on localhost. + registries = append(registries, "127.0.0.0/8") + + var ( + insecureRegistryCIDRs = make([]*registry.NetIPNet, 0) + indexConfigs = make(map[string]*registry.IndexInfo) + ) + +skip: + for _, r := range registries { + // validate insecure registry + if _, err := ValidateIndexName(r); err != nil { + return err + } + if strings.HasPrefix(strings.ToLower(r), "http://") { + logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) + r = r[7:] + } else if strings.HasPrefix(strings.ToLower(r), "https://") { + logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) + r = r[8:] + } else if hasScheme(r) { + return invalidParamf("insecure registry %s should not contain '://'", r) + } + // Check if CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err == nil { + // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. + data := (*registry.NetIPNet)(ipnet) + for _, value := range insecureRegistryCIDRs { + if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { + continue skip + } + } + // ipnet is not found, add it in config.InsecureRegistryCIDRs + insecureRegistryCIDRs = append(insecureRegistryCIDRs, data) + } else { + if err := validateHostPort(r); err != nil { + return invalidParamWrapf(err, "insecure registry %s is not valid", r) + } + // Assume `host:port` if not CIDR. + indexConfigs[r] = ®istry.IndexInfo{ + Name: r, + Mirrors: make([]string, 0), + Secure: false, + Official: false, + } + } + } + + // Configure public registry. + indexConfigs[IndexName] = ®istry.IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } + config.InsecureRegistryCIDRs = insecureRegistryCIDRs + config.IndexConfigs = indexConfigs + + return nil +} + +// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries +// that allow push of nondistributable artifacts. +// +// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP +// of the registry specified by hostname, true is returned. +// +// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If +// resolution fails, CIDR matching is not performed. +func (config *serviceConfig) allowNondistributableArtifacts(hostname string) bool { + for _, h := range config.AllowNondistributableArtifactsHostnames { + if h == hostname { + return true + } + } + + return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) +} + +// isSecureIndex returns false if the provided indexName is part of the list of insecure registries +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. 
+// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered +// insecure. +// +// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained +// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element +// of insecureRegistries. +func (config *serviceConfig) isSecureIndex(indexName string) bool { + // Check for configured index, first. This is needed in case isSecureIndex + // is called from anything besides newIndexInfo, in order to honor per-index configurations. + if index, ok := config.IndexConfigs[indexName]; ok { + return index.Secure + } + + return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) +} + +// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) +// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be +// resolved to IP addresses for matching. If resolution fails, false is returned. +func isCIDRMatch(cidrs []*registry.NetIPNet, URLHost string) bool { + host, _, err := net.SplitHostPort(URLHost) + if err != nil { + // Assume URLHost is of the form `host` without the port and go on. + host = URLHost + } + + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip != nil { + addrs = []net.IP{ip} + } + + // if ip == nil, then `host` is neither an IP nor it could be looked up, + // either because the index is unreachable, or because the index is behind an HTTP proxy. + // So, len(addrs) == 0 and we're not aborting. + } + + // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. + for _, addr := range addrs { + for _, ipnet := range cidrs { + // check if the addr falls in the subnet + if (*net.IPNet)(ipnet).Contains(addr) { + return true + } + } + } + + return false +} + +// ValidateMirror validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", invalidParamWrapf(err, "invalid mirror: %q is not a valid URI", val) + } + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", invalidParamf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) + } + if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { + return "", invalidParamf("invalid mirror: path, query, or fragment at end of the URI %q", uri) + } + if uri.User != nil { + // strip password from output + uri.User = url.UserPassword(uri.User.Username(), "xxxxx") + return "", invalidParamf("invalid mirror: username/password not allowed in URI %q", uri) + } + return strings.TrimSuffix(val, "/") + "/", nil +} + +// ValidateIndexName validates an index name. +func ValidateIndexName(val string) (string, error) { + // TODO: upstream this to check to reference package + if val == "index.docker.io" { + val = "docker.io" + } + if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { + return "", invalidParamf("invalid index name (%s). 
Cannot begin or end with a hyphen", val) + } + return val, nil +} + +func hasScheme(reposName string) bool { + return strings.Contains(reposName, "://") +} + +func validateHostPort(s string) error { + // Split host and port, and in case s can not be splitted, assume host only + host, port, err := net.SplitHostPort(s) + if err != nil { + host = s + port = "" + } + // If match against the `host:port` pattern fails, + // it might be `IPv6:port`, which will be captured by net.ParseIP(host) + if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { + return invalidParamf("invalid host %q", host) + } + if port != "" { + v, err := strconv.Atoi(port) + if err != nil { + return err + } + if v < 0 || v > 65535 { + return invalidParamf("invalid port %q", port) + } + } + return nil +} + +// newIndexInfo returns IndexInfo configuration from indexName +func newIndexInfo(config *serviceConfig, indexName string) (*registry.IndexInfo, error) { + var err error + indexName, err = ValidateIndexName(indexName) + if err != nil { + return nil, err + } + + // Return any configured index info, first. + if index, ok := config.IndexConfigs[indexName]; ok { + return index, nil + } + + // Construct a non-configured index info. + return ®istry.IndexInfo{ + Name: indexName, + Mirrors: make([]string, 0), + Secure: config.isSecureIndex(indexName), + Official: false, + }, nil +} + +// GetAuthConfigKey special-cases using the full index address of the official +// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. +func GetAuthConfigKey(index *registry.IndexInfo) string { + if index.Official { + return IndexServer + } + return index.Name +} + +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, reference.Domain(name)) + if err != nil { + return nil, err + } + official := !strings.ContainsRune(reference.FamiliarName(name), '/') + + return &RepositoryInfo{ + Name: reference.TrimNamed(name), + Index: index, + Official: official, + }, nil +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +// +// TODO(thaJeztah) this function is only used by the CLI, and used to get +// information of the registry (to provide credentials if needed). We should +// move this function (or equivalent) to the CLI, as it's doing too much just +// for that. +func ParseSearchIndexInfo(reposName string) (*registry.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go new file mode 100644 index 000000000..898c6b8a5 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_unix.go @@ -0,0 +1,17 @@ +//go:build !windows +// +build !windows + +package registry // import "github.com/docker/docker/registry" + +// defaultCertsDir is the platform-specific default directory where certificates +// are stored. 
On Linux, it may be overridden through certsDir, for example, when +// running in rootless mode. +const defaultCertsDir = "/etc/docker/certs.d" + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go new file mode 100644 index 000000000..2674f2818 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_windows.go @@ -0,0 +1,20 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "os" + "path/filepath" + "strings" +) + +// defaultCertsDir is the platform-specific default directory where certificates +// are stored. On Linux, it may be overridden through certsDir, for example, when +// running in rootless mode. +var defaultCertsDir = os.Getenv("programdata") + `\docker\certs.d` + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.ReplaceAll(s, ":", "")) +} diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go new file mode 100644 index 000000000..56257dc79 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -0,0 +1,185 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "crypto/tls" + "encoding/json" + "io" + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types/registry" + "github.com/sirupsen/logrus" +) + +// v1PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. +type v1PingResult struct { + // Version is the registry version supplied by the registry in an HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// v1Endpoint stores basic information about a V1 registry endpoint. +type v1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// newV1Endpoint parses the given address to return a registry endpoint. +// TODO: remove. This is only used by search. 
+func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, headers) + if err != nil { + return nil, err + } + + err = validateEndpoint(endpoint) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *v1Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return invalidParamf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.ping(); err2 == nil { + return nil + } + + return invalidParamf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +// trimV1Address trims the version off the address and returns the +// trimmed address or an error if there is a non-V1 version. +func trimV1Address(address string) (string, error) { + address = strings.TrimSuffix(address, "/") + chunks := strings.Split(address, "/") + apiVersionStr := chunks[len(chunks)-1] + if apiVersionStr == "v1" { + return strings.Join(chunks[:len(chunks)-1], "/"), nil + } + + for k, v := range apiVersions { + if k != APIVersion1 && apiVersionStr == v { + return "", invalidParamf("unsupported V1 version path %s", apiVersionStr) + } + } + + return address, nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, headers http.Header) (*v1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, invalidParam(err) + } + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := newTransport(tlsConfig) + + return &v1Endpoint{ + IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, + URL: uri, + client: httpClient(transport.NewTransport(tr, Headers("", headers)...)), + }, nil +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *v1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// ping returns a v1PingResult which indicates whether the registry is standalone or not. 
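+// The probe issues a GET against <endpoint>/v1/_ping and derives the result
+// from the X-Docker-Registry-Version and X-Docker-Registry-Standalone
+// response headers; an absent standalone header defaults to true.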
+func (e *v1Endpoint) ping() (v1PingResult, error) { + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return v1PingResult{}, nil + } + + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + pingURL := e.String() + "_ping" + req, err := http.NewRequest(http.MethodGet, pingURL, nil) + if err != nil { + return v1PingResult{}, invalidParam(err) + } + + resp, err := e.client.Do(req) + if err != nil { + return v1PingResult{}, invalidParam(err) + } + + defer resp.Body.Close() + + jsonString, err := io.ReadAll(resp.Body) + if err != nil { + return v1PingResult{}, invalidParamWrapf(err, "error while reading response from %s", pingURL) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := v1PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.WithError(err).Debug("error unmarshaling _ping response") + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + info.Version = hdr + } + logrus.Debugf("v1PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + + // Accepted values are "true" (case-insensitive) and "1". + if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + logrus.Debugf("v1PingResult.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/vendor/github.com/docker/docker/registry/errors.go b/vendor/github.com/docker/docker/registry/errors.go new file mode 100644 index 000000000..7dc20ad8f --- /dev/null +++ b/vendor/github.com/docker/docker/registry/errors.go @@ -0,0 +1,36 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "net/url" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +func translateV2AuthError(err error) error { + switch e := err.(type) { + case *url.Error: + switch e2 := e.Err.(type) { + case errcode.Error: + switch e2.Code { + case errcode.ErrorCodeUnauthorized: + return errdefs.Unauthorized(err) + } + } + } + + return err +} + +func invalidParam(err error) error { + return errdefs.InvalidParameter(err) +} + +func invalidParamf(format string, args ...interface{}) error { + return errdefs.InvalidParameter(errors.Errorf(format, args...)) +} + +func invalidParamWrapf(err error, format string, args ...interface{}) error { + return errdefs.InvalidParameter(errors.Wrapf(err, format, args...)) +} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go new file mode 100644 index 000000000..5ff39ce5e --- /dev/null +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -0,0 +1,180 @@ +// Package registry contains client primitives to interact with a remote Docker registry. +package registry // import "github.com/docker/docker/registry" + +import ( + "crypto/tls" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +// HostCertsDir returns the config directory for a specific host. 
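+// For example, with the Linux default certs directory:
+//
+//	HostCertsDir("myregistry.example.com:5000")
+//	// -> /etc/docker/certs.d/myregistry.example.com:5000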
+func HostCertsDir(hostname string) string { + return filepath.Join(CertsDir(), cleanPath(hostname)) +} + +// newTLSConfig constructs a client TLS configuration based on server defaults +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault() + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure && CertsDir() != "" { + hostDir := HostCertsDir(hostname) + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return tlsConfig, nil +} + +func hasFile(files []os.DirEntry, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. +func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := os.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return invalidParam(err) + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return invalidParamWrapf(err, "unable to get system cert pool") + } + tlsConfig.RootCAs = systemPool + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := os.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return invalidParamf("missing key %s for client certificate %s. 
CA certificates must use the extension .crt", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return invalidParamf("missing client certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// Headers returns request modifiers with a User-Agent and metaHeaders +func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{} + if userAgent != "" { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ + "User-Agent": []string{userAgent}, + })) + } + if metaHeaders != nil { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// httpClient returns an HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func httpClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if len(via) != 0 && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +// newTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. 
+func newTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + tlsConfig = tlsconfig.ServerDefault() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + } + + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: direct.DialContext, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } +} diff --git a/vendor/github.com/docker/docker/registry/search.go b/vendor/github.com/docker/docker/registry/search.go new file mode 100644 index 000000000..60b86ea22 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/search.go @@ -0,0 +1,139 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "context" + "net/http" + "strconv" + "strings" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + + "github.com/docker/distribution/registry/client/auth" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var acceptedSearchFilterTags = map[string]bool{ + "is-automated": true, + "is-official": true, + "stars": true, +} + +// Search queries the public registry for repositories matching the specified +// search term and filters. +func (s *Service) Search(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) ([]registry.SearchResult, error) { + if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { + return nil, err + } + + isAutomated, err := searchFilters.GetBoolOrDefault("is-automated", false) + if err != nil { + return nil, err + } + isOfficial, err := searchFilters.GetBoolOrDefault("is-official", false) + if err != nil { + return nil, err + } + + hasStarFilter := 0 + if searchFilters.Contains("stars") { + hasStars := searchFilters.Get("stars") + for _, hasStar := range hasStars { + iHasStar, err := strconv.Atoi(hasStar) + if err != nil { + return nil, errdefs.InvalidParameter(errors.Wrapf(err, "invalid filter 'stars=%s'", hasStar)) + } + if iHasStar > hasStarFilter { + hasStarFilter = iHasStar + } + } + } + + unfilteredResult, err := s.searchUnfiltered(ctx, term, limit, authConfig, headers) + if err != nil { + return nil, err + } + + filteredResults := []registry.SearchResult{} + for _, result := range unfilteredResult.Results { + if searchFilters.Contains("is-automated") { + if isAutomated != result.IsAutomated { + continue + } + } + if searchFilters.Contains("is-official") { + if isOfficial != result.IsOfficial { + continue + } + } + if searchFilters.Contains("stars") { + if result.StarCount < hasStarFilter { + continue + } + } + filteredResults = append(filteredResults, result) + } + + return filteredResults, nil +} + +func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int, authConfig *registry.AuthConfig, headers http.Header) (*registry.SearchResults, error) { + // TODO Use ctx when searching for repositories + if hasScheme(term) { + return nil, invalidParamf("invalid repository name: repository name (%s) should not have a scheme", term) + } + + indexName, remoteName := splitReposSearchTerm(term) + + // Search is a long-running operation, just lock s.config to avoid block others. 
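+	// (The read lock is held only for the index lookup; the network request
+	// made further below runs without it.)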
+ s.mu.RLock() + index, err := newIndexInfo(s.config, indexName) + s.mu.RUnlock() + + if err != nil { + return nil, err + } + if index.Official { + // If pull "library/foo", it's stored locally under "foo" + remoteName = strings.TrimPrefix(remoteName, "library/") + } + + endpoint, err := newV1Endpoint(index, headers) + if err != nil { + return nil, err + } + + var client *http.Client + if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { + creds := NewStaticCredentialStore(authConfig) + scopes := []auth.Scope{ + auth.RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + } + + // TODO(thaJeztah); is there a reason not to include other headers here? (originally added in 19d48f0b8ba59eea9f2cac4ad1c7977712a6b7ac) + modifiers := Headers(headers.Get("User-Agent"), nil) + v2Client, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) + if err != nil { + return nil, err + } + // Copy non transport http client features + v2Client.Timeout = endpoint.client.Timeout + v2Client.CheckRedirect = endpoint.client.CheckRedirect + v2Client.Jar = endpoint.client.Jar + + logrus.Debugf("using v2 client for search to %s", endpoint.URL) + client = v2Client + } else { + client = endpoint.client + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + } + + return newSession(client, endpoint).searchRepositories(remoteName, limit) +} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go new file mode 100644 index 000000000..b848065b3 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service.go @@ -0,0 +1,167 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "context" + "crypto/tls" + "net/url" + "strings" + "sync" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + "github.com/sirupsen/logrus" +) + +// Service is a registry service. It tracks configuration data such as a list +// of mirrors. +type Service struct { + config *serviceConfig + mu sync.RWMutex +} + +// NewService returns a new instance of defaultService ready to be +// installed into an engine. +func NewService(options ServiceOptions) (*Service, error) { + config, err := newServiceConfig(options) + + return &Service{config: config}, err +} + +// ServiceConfig returns a copy of the public registry service's configuration. +func (s *Service) ServiceConfig() *registry.ServiceConfig { + s.mu.RLock() + defer s.mu.RUnlock() + return s.config.copy() +} + +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. +func (s *Service) LoadAllowNondistributableArtifacts(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.loadAllowNondistributableArtifacts(registries) +} + +// LoadMirrors loads registry mirrors for Service +func (s *Service) LoadMirrors(mirrors []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.loadMirrors(mirrors) +} + +// LoadInsecureRegistries loads insecure registries for Service +func (s *Service) LoadInsecureRegistries(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.loadInsecureRegistries(registries) +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials. 
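+//
+// Illustrative call (svc and the credential values are example data):
+//
+//	status, token, err := svc.Auth(ctx, &registry.AuthConfig{
+//		Username:      "user",
+//		Password:      "secret",
+//		ServerAddress: "registry.example.com",
+//	}, "example-client/1.0")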
+func (s *Service) Auth(ctx context.Context, authConfig *registry.AuthConfig, userAgent string) (status, token string, err error) { + // TODO Use ctx when searching for repositories + var registryHostName = IndexHostname + + if authConfig.ServerAddress != "" { + serverAddress := authConfig.ServerAddress + if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { + serverAddress = "https://" + serverAddress + } + u, err := url.Parse(serverAddress) + if err != nil { + return "", "", invalidParamWrapf(err, "unable to parse server address") + } + registryHostName = u.Host + } + + // Lookup endpoints for authentication using "LookupPushEndpoints", which + // excludes mirrors to prevent sending credentials of the upstream registry + // to a mirror. + endpoints, err := s.LookupPushEndpoints(registryHostName) + if err != nil { + return "", "", invalidParam(err) + } + + for _, endpoint := range endpoints { + status, token, err = loginV2(authConfig, endpoint, userAgent) + if err == nil { + return + } + if errdefs.IsUnauthorized(err) { + // Failed to authenticate; don't continue with (non-TLS) endpoints. + return status, token, err + } + logrus.WithError(err).Infof("Error logging in to endpoint, trying next endpoint") + } + + return "", "", err +} + +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), + // use the default Docker Hub registry (docker.io) + return IndexName, reposName + } + return nameParts[0], nameParts[1] +} + +// ResolveRepository splits a repository name into its components +// and configuration of the associated registry. +func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + s.mu.RLock() + defer s.mu.RUnlock() + return newRepositoryInfo(s.config, name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL *url.URL + Version APIVersion + AllowNondistributableArtifacts bool + Official bool + TrimHostname bool + TLSConfig *tls.Config +} + +// LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference. +// It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP. +func (s *Service) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.lookupV2Endpoints(hostname) +} + +// LookupPushEndpoints creates a list of v2 endpoints to try to push to, in order of preference. +// It gives preference to HTTPS over plain HTTP. Mirrors are not included. +func (s *Service) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.RLock() + defer s.mu.RUnlock() + + allEndpoints, err := s.lookupV2Endpoints(hostname) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) + } + } + } + return endpoints, err +} + +// IsInsecureRegistry returns true if the registry at given host is configured as +// insecure registry. 
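+//
+// For example, with "10.0.0.0/8" configured as an insecure registry,
+// IsInsecureRegistry("10.1.2.3:5000") reports true.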
+func (s *Service) IsInsecureRegistry(host string) bool { + s.mu.RLock() + defer s.mu.RUnlock() + return !s.config.isSecureIndex(host) +} diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go new file mode 100644 index 000000000..c8c545d21 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -0,0 +1,80 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "net/url" + "strings" + + "github.com/docker/go-connections/tlsconfig" +) + +func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + ana := s.config.allowNondistributableArtifacts(hostname) + + if hostname == DefaultNamespace || hostname == IndexHostname { + for _, mirror := range s.config.Mirrors { + if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { + mirror = "https://" + mirror + } + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, invalidParam(err) + } + mirrorTLSConfig, err := newTLSConfig(mirrorURL.Host, s.config.isSecureIndex(mirrorURL.Host)) + if err != nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirrorURL, + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsconfig.ServerDefault(), + + AllowNondistributableArtifacts: ana, + }) + + return endpoints, nil + } + + tlsConfig, err := newTLSConfig(hostname, s.config.isSecureIndex(hostname)) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go new file mode 100644 index 000000000..86a5cd9ed --- /dev/null +++ b/vendor/github.com/docker/docker/registry/session.go @@ -0,0 +1,216 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + // this is required for some certificates + _ "crypto/sha512" + "encoding/json" + "fmt" + "net/http" + "net/http/cookiejar" + "net/url" + "strings" + "sync" + + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// A session is used to communicate with a V1 registry +type session struct { + indexEndpoint *v1Endpoint + client *http.Client +} + +type authTransport struct { + http.RoundTripper + *registry.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// newAuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. 
+// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. +func newAuthTransport(base http.RoundTripper, authConfig *registry.AuthConfig, alwaysSetBasicAuth bool) *authTransport { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes an HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. + if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. 
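+// It forwards the cancellation to the underlying transport, using the
+// modified request that RoundTrip registered for the original one.
+//
+// Illustrative sketch (not part of the vendored file), with a hypothetical
+// URL; note the *original* request is passed, not a clone:
+//
+//	tr := newAuthTransport(nil, &registry.AuthConfig{}, false)
+//	req, _ := http.NewRequest(http.MethodGet, "https://registry.example.com/v1/_ping", nil)
+//	go func() {
+//		time.Sleep(5 * time.Second)
+//		tr.CancelRequest(req)
+//	}()
+//	resp, err := (&http.Client{Transport: tr}).Do(req)
+//	_, _ = resp, err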
+func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func authorizeClient(client *http.Client, authConfig *registry.AuthConfig, endpoint *v1Endpoint) error { + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.ping() + if err != nil { + return err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. + client.Transport = newAuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return errdefs.System(errors.New("cookiejar.New is not supposed to return an error")) + } + client.Jar = jar + + return nil +} + +func newSession(client *http.Client, endpoint *v1Endpoint) *session { + return &session{ + client: client, + indexEndpoint: endpoint, + } +} + +// defaultSearchLimit is the default value for maximum number of returned search results. +const defaultSearchLimit = 25 + +// searchRepositories performs a search against the remote repository +func (r *session) searchRepositories(term string, limit int) (*registry.SearchResults, error) { + if limit == 0 { + limit = defaultSearchLimit + } + if limit < 1 || limit > 100 { + return nil, invalidParamf("limit %d is outside the range of [1, 100]", limit) + } + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) + + req, err := http.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, invalidParamWrapf(err, "error building request") + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, errdefs.System(err) + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, errdefs.Unknown(&jsonmessage.JSONError{ + Message: fmt.Sprintf("Unexpected status code %d", res.StatusCode), + Code: res.StatusCode, + }) + } + result := new(registry.SearchResults) + return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results") +} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go new file mode 100644 index 000000000..37094737f --- /dev/null +++ b/vendor/github.com/docker/docker/registry/types.go @@ -0,0 +1,39 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/registry" +) + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +// API Version identifiers. 
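+//
+// Illustrative note (not part of the vendored file): String() indexes the
+// apiVersions map below, so unknown values yield the empty string:
+//
+//	APIVersion2.String()    // "v2"
+//	APIVersion(99).String() // ""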
+const ( + APIVersion1 APIVersion = 1 + APIVersion2 APIVersion = 2 +) + +var apiVersions = map[APIVersion]string{ + APIVersion1: "v1", + APIVersion2: "v2", +} + +// RepositoryInfo describes a repository +type RepositoryInfo struct { + Name reference.Named + // Index points to registry information + Index *registry.IndexInfo + // Official indicates whether the repository is considered official. + // If the registry is official, and the normalized name does not + // contain a '/' (e.g. "foo"), then it is considered an official repo. + Official bool + // Class represents the class of the repository, such as "plugin" + // or "image". + Class string +} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 000000000..bb7e4e336 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. 
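+	//
+	// Illustrative note (not part of the vendored file): both single ports
+	// and port ranges are normalized here, e.g.
+	//
+	//	NewPort("tcp", "80")        // -> "80/tcp"
+	//	NewPort("udp", "8000-8080") // -> "8000-8080/udp"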
+ + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + } + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 000000000..892adf8c6 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template)
+	}
+
+	for i, t := range templateParts {
+		value := ""
+		if len(parts) > i {
+			value = parts[i]
+		}
+		out[t] = value
+	}
+	return out, nil
+}
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+	if ports == "" {
+		return 0, 0, fmt.Errorf("Empty string specified for ports.")
+	}
+	if !strings.Contains(ports, "-") {
+		start, err := strconv.ParseUint(ports, 10, 16)
+		end := start
+		return start, end, err
+	}
+
+	parts := strings.Split(ports, "-")
+	start, err := strconv.ParseUint(parts[0], 10, 16)
+	if err != nil {
+		return 0, 0, err
+	}
+	end, err := strconv.ParseUint(parts[1], 10, 16)
+	if err != nil {
+		return 0, 0, err
+	}
+	if end < start {
+		return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
+	}
+	return start, end, nil
+}
diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
new file mode 100644
index 000000000..ce950171e
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/sort.go
@@ -0,0 +1,96 @@
+package nat
+
+import (
+	"sort"
+	"strings"
+)
+
+type portSorter struct {
+	ports []Port
+	by    func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+	return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+	s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+	ip := s.ports[i]
+	jp := s.ports[j]
+
+	return s.by(ip, jp)
+}
+
+// Sort sorts a list of ports using the provided predicate.
+// This function should compare `i` and `j`, returning true if `i` is
+// considered to be less than `j`
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+	s := &portSorter{ports, predicate}
+	sort.Sort(s)
+}
+
+type portMapEntry struct {
+	port    Port
+	binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int      { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the port so that the order is:
+// 1. port with larger specified bindings
+// 2. larger port
+// 3. port with tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+	pi, pj := s[i].port, s[j].port
+	hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+	return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respective mappings. Ports
+// with an explicit HostPort are placed first.
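+//
+// Illustrative sketch (not part of the vendored file):
+//
+//	ports := []Port{"80/tcp", "443/tcp"}
+//	bindings := PortMap{"443/tcp": {{HostIP: "", HostPort: "8443"}}}
+//	SortPortMap(ports, bindings)
+//	// ports is now ["443/tcp" "80/tcp"]: the explicit HostPort sorts first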
+func SortPortMap(ports []Port, bindings PortMap) {
+	s := portMapSorter{}
+	for _, p := range ports {
+		if binding, ok := bindings[p]; ok {
+			for _, b := range binding {
+				s = append(s, portMapEntry{port: p, binding: b})
+			}
+			bindings[p] = []PortBinding{}
+		} else {
+			s = append(s, portMapEntry{port: p})
+		}
+	}
+
+	sort.Sort(s)
+	var (
+		i  int
+		pm = make(map[Port]struct{})
+	)
+	// reorder ports
+	for _, entry := range s {
+		if _, ok := pm[entry.port]; !ok {
+			ports[i] = entry.port
+			pm[entry.port] = struct{}{}
+			i++
+		}
+		// reorder bindings for this port
+		if _, ok := bindings[entry.port]; ok {
+			bindings[entry.port] = append(bindings[entry.port], entry.binding)
+		}
+	}
+}
+
+func toInt(s string) uint64 {
+	i, _, err := ParsePortRange(s)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 000000000..1ca0965e0
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,18 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+	"runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool; on Windows, where
+// loading the system pool can fail, it returns a new empty pool instead of
+// an error.
+func SystemCertPool() (*x509.CertPool, error) {
+	certpool, err := x509.SystemCertPool()
+	if err != nil && runtime.GOOS == "windows" {
+		return x509.NewCertPool(), nil
+	}
+	return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 000000000..1ff81c333
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,13 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+)
+
+// SystemCertPool returns a new empty cert pool;
+// accessing the system cert pool is only supported from go 1.7 onwards.
+func SystemCertPool() (*x509.CertPool, error) {
+	return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 000000000..0ef3fdcb4
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,254 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+	CAFile string
+
+	// If either CertFile or KeyFile is empty, Client() will not load them,
+	// preventing the client from authenticating to the server.
+	// However, Server() requires them and will error out if they are empty.
+	CertFile string
+	KeyFile  string
+
+	// client-only option
+	InsecureSkipVerify bool
+	// server-only option
+	ClientAuth tls.ClientAuthType
+	// If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+	// creds will include exclusively the roots in that CA file. If no CA file is provided,
+	// the system pool will be used.
+	ExclusiveRootPools bool
+	MinVersion         uint16
+	// If Passphrase is set, it will be used to decrypt a TLS private key
+	// if the key is encrypted.
+	Passphrase string
+}
+
+// Extra (server-side) accepted CBC cipher suites - will phase out in the future
+var acceptedCBCCiphers = []uint16{
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+	tls.VersionSSL30: {},
+	tls.VersionTLS10: {},
+	tls.VersionTLS11: {},
+	tls.VersionTLS12: {},
+}
+
+// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
+func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
+	tlsconfig := &tls.Config{
+		// Avoid fallback by default to SSL protocols < TLS1.2
+		MinVersion:               tls.VersionTLS12,
+		PreferServerCipherSuites: true,
+		CipherSuites:             DefaultServerAcceptedCiphers,
+	}
+
+	for _, op := range ops {
+		op(tlsconfig)
+	}
+
+	return tlsconfig
+}
+
+// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
+func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
+	tlsconfig := &tls.Config{
+		// Prefer TLS1.2 as the client minimum
+		MinVersion:   tls.VersionTLS12,
+		CipherSuites: clientCipherSuites,
+	}
+
+	for _, op := range ops {
+		op(tlsconfig)
+	}
+
+	return tlsconfig
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+	// If we should verify the server, we need to load a trusted ca
+	var (
+		certPool *x509.CertPool
+		err      error
+	)
+	if exclusivePool {
+		certPool = x509.NewCertPool()
+	} else {
+		certPool, err = SystemCertPool()
+		if err != nil {
+			return nil, fmt.Errorf("failed to read system certificates: %v", err)
+		}
+	}
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+	}
+	if !certPool.AppendCertsFromPEM(pem) {
+		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+	}
+	return certPool, nil
+}
+
+// isValidMinVersion checks that the input value is a valid tls minimum version
+func isValidMinVersion(version uint16) bool {
+	_, ok := allTLSVersions[version]
+	return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+	if options.MinVersion > 0 {
+		if !isValidMinVersion(options.MinVersion) {
+			return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+		}
+		if options.MinVersion < config.MinVersion {
+			return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+		}
+		config.MinVersion = options.MinVersion
+	}
+
+	return nil
+}
+
+// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
+// password when trying to decrypt a TLS private key
+func IsErrEncryptedKey(err error) bool {
+	return errors.Cause(err) == x509.IncorrectPasswordError
+}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+	// this section makes some small changes to code from notary/tuf/utils/x509.go
+	pemBlock, _ := pem.Decode(keyBytes)
+	if pemBlock == nil {
+		return nil, fmt.Errorf("no valid private key found")
+	}
+
+	var err error
+	if x509.IsEncryptedPEMBlock(pemBlock) {
+		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+		if err != nil {
+			return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+		}
+		keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+	}
+
+	return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options';
+// if the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+	if options.CertFile == "" && options.KeyFile == "" {
+		return nil, nil
+	}
+
+	errMessage := "Could not load X509 key pair"
+
+	cert, err := ioutil.ReadFile(options.CertFile)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	return []tls.Certificate{tlsCert}, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+	tlsConfig := ClientDefault()
+	tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+	if !options.InsecureSkipVerify && options.CAFile != "" {
+		CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.RootCAs = CAs
+	}
+
+	tlsCerts, err := getCert(options)
+	if err != nil {
+		return nil, err
+	}
+	tlsConfig.Certificates = tlsCerts
+
+	if err := adjustMinVersion(options, tlsConfig); err != nil {
+		return nil, err
+	}
+
+	return tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+	tlsConfig := ServerDefault()
+	tlsConfig.ClientAuth = options.ClientAuth
+	tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+		}
+		return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v.
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 000000000..6b4c6a7c0 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 000000000..ee22df47c --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md new file mode 100644 index 000000000..b8a512c36 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing + +## Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-metrics/LICENSE b/vendor/github.com/docker/go-metrics/LICENSE new file mode 100644 index 000000000..8f3fee627 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE new file mode 100644 index 000000000..8915f0277 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md new file mode 100644 index 000000000..a9e947cb5 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/README.md @@ -0,0 +1,91 @@ +# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics) + +This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. + +## Best Practices + +This package is meant to be used for collecting metrics in Docker projects. +It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected. +If you have not already read the prometheus best practices around naming and labels, you can read the page [here](https://prometheus.io/docs/practices/naming/). + +The following are a few Docker specific rules that will help you name and work with metrics in your project. + +1. Namespace and Subsystem + +This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics. + +```go +ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, +}) +``` + +In the example above we are creating metrics for the Docker engine's daemon package. +`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics. + +A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting. + +2. Declaring your Metrics + +Try to keep all your metric declarations in one file. +This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created. + +3.
Use labels instead of multiple metrics + +Labels allow you to define one metric such as the time it takes to perform a certain action on an object. +If we want to collect timings on various container actions such as create, start, and delete, then we can define one metric called `container_actions` and use labels to specify the type of action. + + +```go +containerActions := ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") +``` + +The last parameter is the label name or key. +When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for. + +```go +containerActions.WithValues("create").UpdateSince(start) +``` + +4. Always use a unit + +The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with. +For a timer, the standard unit is seconds and a counter's standard unit is a total. +For gauges you must provide the unit. +This package provides a standard set of units for use within the Docker projects. + +```go +Nanoseconds Unit = "nanoseconds" +Seconds Unit = "seconds" +Bytes Unit = "bytes" +Total Unit = "total" +``` + +If you need to use a unit but it is not defined in the package, please open a PR to add it, but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds. + +## Docs + +Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics). + +## HTTP Metrics + +To instrument an http handler, you can wrap the code like this: + +```go +namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"}) +httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name") +metrics.Register(namespace) +instrumentedHandler := metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler) +``` +Note: The `handler` label must be provided when a new namespace is created. + +## Additional Metrics + +Additional metrics are also defined here that are not available in the prometheus client. +If you need a custom metric and it is generic enough to be used by multiple projects, define it here. + + +## Copyright and license + +Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go new file mode 100644 index 000000000..fe36316a4 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/counter.go @@ -0,0 +1,52 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Counter is a metric that can only increment its current count +type Counter interface { + // Inc adds Sum(vs) to the counter. Sum(vs) must be positive. + // + // If len(vs) == 0, increments the counter by 1. + Inc(vs ...float64) +}
 + +// LabeledCounter is a counter that must have labels populated before use.
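For orientation while reading the vendored sources, here is a minimal usage sketch of the counter API above; the namespace, metric, and label names are invented for illustration, and `NewLabeledCounter`, `Register`, and `Handler` are defined later in this patch (namespace.go, register.go, handler.go):

```go
package main

import (
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	// Create a namespace and a labeled counter; NewLabeledCounter appends
	// the "total" unit, yielding example_server_requests_total.
	ns := metrics.NewNamespace("example", "server", nil)
	requests := ns.NewLabeledCounter("requests", "Number of requests handled", "code")
	metrics.Register(ns)

	requests.WithValues("200").Inc()  // no arguments: increment by 1
	requests.WithValues("500").Inc(3) // Inc(vs...) adds Sum(vs)

	// Serve the registered metrics in prometheus exposition format.
	http.Handle("/metrics", metrics.Handler())
	http.ListenAndServe(":8080", nil)
}
```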
+type LabeledCounter interface { + WithValues(vs ...string) Counter +} + +type labeledCounter struct { + pc *prometheus.CounterVec +} + +func (lc *labeledCounter) WithValues(vs ...string) Counter { + return &counter{pc: lc.pc.WithLabelValues(vs...)} +} + +func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) { + lc.pc.Describe(ch) +} + +func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) { + lc.pc.Collect(ch) +} + +type counter struct { + pc prometheus.Counter +} + +func (c *counter) Inc(vs ...float64) { + if len(vs) == 0 { + c.pc.Inc() + } + + c.pc.Add(sumFloat64(vs...)) +} + +func (c *counter) Describe(ch chan<- *prometheus.Desc) { + c.pc.Describe(ch) +} + +func (c *counter) Collect(ch chan<- prometheus.Metric) { + c.pc.Collect(ch) +} diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go new file mode 100644 index 000000000..8fbdfc697 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/docs.go @@ -0,0 +1,3 @@ +// This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects. + +package metrics diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go new file mode 100644 index 000000000..74296e877 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/gauge.go @@ -0,0 +1,72 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Gauge is a metric that allows incrementing and decrementing a value +type Gauge interface { + Inc(...float64) + Dec(...float64) + + // Add adds the provided value to the gauge's current value + Add(float64) + + // Set replaces the gauge's current value with the provided value + Set(float64) +} + +// LabeledGauge describes a gauge that must have values populated before use.
+type LabeledGauge interface { + WithValues(labels ...string) Gauge +} + +type labeledGauge struct { + pg *prometheus.GaugeVec +} + +func (lg *labeledGauge) WithValues(labels ...string) Gauge { + return &gauge{pg: lg.pg.WithLabelValues(labels...)} +} + +func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) { + lg.pg.Describe(c) +} + +func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) { + lg.pg.Collect(c) +} + +type gauge struct { + pg prometheus.Gauge +} + +func (g *gauge) Inc(vs ...float64) { + if len(vs) == 0 { + g.pg.Inc() + } + + g.Add(sumFloat64(vs...)) +} + +func (g *gauge) Dec(vs ...float64) { + if len(vs) == 0 { + g.pg.Dec() + } + + g.Add(-sumFloat64(vs...)) +} + +func (g *gauge) Add(v float64) { + g.pg.Add(v) +} + +func (g *gauge) Set(v float64) { + g.pg.Set(v) +} + +func (g *gauge) Describe(c chan<- *prometheus.Desc) { + g.pg.Describe(c) +} + +func (g *gauge) Collect(c chan<- prometheus.Metric) { + g.pg.Collect(c) +} diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go new file mode 100644 index 000000000..05601e9ec --- /dev/null +++ b/vendor/github.com/docker/go-metrics/handler.go @@ -0,0 +1,74 @@ +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// HTTPHandlerOpts describes a set of configurable options of http metrics +type HTTPHandlerOpts struct { + DurationBuckets []float64 + RequestSizeBuckets []float64 + ResponseSizeBuckets []float64 +} + +const ( + InstrumentHandlerResponseSize = iota + InstrumentHandlerRequestSize + InstrumentHandlerDuration + InstrumentHandlerCounter + InstrumentHandlerInFlight +) + +type HTTPMetric struct { + prometheus.Collector + handlerType int +} + +var ( + defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60} + defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G + defaultResponseSizeBuckets = defaultRequestSizeBuckets +) + +// Handler returns the global http.Handler that provides the prometheus +// metrics format on GET requests. This handler is no longer instrumented. 
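A short sketch of how the `Gauge` interface above is typically used; the names are invented, and `NewGauge` and the `Unit` constants come from namespace.go and unit.go later in this patch:

```go
package main

import metrics "github.com/docker/go-metrics"

func main() {
	ns := metrics.NewNamespace("example", "worker", nil)
	// Gauges require an explicit unit; Total is used here for a plain count.
	inFlight := ns.NewGauge("in_flight_jobs", "Jobs currently being processed", metrics.Total)
	metrics.Register(ns)

	inFlight.Inc()  // no arguments: +1
	inFlight.Add(4) // add an arbitrary delta
	inFlight.Dec(2) // Dec(vs...) subtracts Sum(vs)
	inFlight.Set(0) // replace the current value
}
```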
+func Handler() http.Handler { + return promhttp.Handler() +} + +func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(metrics, handler.ServeHTTP) +} + +func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc { + var handler http.Handler + handler = http.HandlerFunc(handlerFunc) + for _, metric := range metrics { + switch metric.handlerType { + case InstrumentHandlerResponseSize: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerResponseSize(collector, handler) + } + case InstrumentHandlerRequestSize: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerRequestSize(collector, handler) + } + case InstrumentHandlerDuration: + if collector, ok := metric.Collector.(prometheus.ObserverVec); ok { + handler = promhttp.InstrumentHandlerDuration(collector, handler) + } + case InstrumentHandlerCounter: + if collector, ok := metric.Collector.(*prometheus.CounterVec); ok { + handler = promhttp.InstrumentHandlerCounter(collector, handler) + } + case InstrumentHandlerInFlight: + if collector, ok := metric.Collector.(prometheus.Gauge); ok { + handler = promhttp.InstrumentHandlerInFlight(collector, handler) + } + } + } + return handler.ServeHTTP +} diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go new file mode 100644 index 000000000..68b7f51b3 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/helpers.go @@ -0,0 +1,10 @@ +package metrics + +func sumFloat64(vs ...float64) float64 { + var sum float64 + for _, v := range vs { + sum += v + } + + return sum +} diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go new file mode 100644 index 000000000..798315451 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/namespace.go @@ -0,0 +1,315 @@ +package metrics + +import ( + "fmt" + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +type Labels map[string]string + +// NewNamespace returns a namespace that is responsible for managing a collection of +// metrics for a particular namespace and subsystem +// +// labels allows const labels to be added to all metrics created in this namespace +// and are commonly used for data like application version and git commit +func NewNamespace(name, subsystem string, labels Labels) *Namespace { + if labels == nil { + labels = make(map[string]string) + } + return &Namespace{ + name: name, + subsystem: subsystem, + labels: labels, + } +} + +// Namespace describes a set of metrics that share a namespace and subsystem. +type Namespace struct { + name string + subsystem string + labels Labels + mu sync.Mutex + metrics []prometheus.Collector +} + +// WithConstLabels returns a namespace with the provided set of labels merged +// with the existing constant labels on the namespace. +// +// Only metrics created with the returned namespace will get the new constant +// labels. The returned namespace must be registered separately.
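Putting the handler plumbing above together with the HTTP metric constructors defined further down in namespace.go, a minimal end-to-end sketch (the handler and all names are invented for the example):

```go
package main

import (
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	// One namespace per handler; NewDefaultHttpMetrics attaches the
	// "handler" const label from its argument.
	ns := metrics.NewNamespace("example", "http", nil)
	httpMetrics := ns.NewDefaultHttpMetrics("hello")
	metrics.Register(ns)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Wraps hello with in-flight, counter, duration, and size collectors.
	http.Handle("/hello", metrics.InstrumentHandler(httpMetrics, hello))
	http.Handle("/metrics", metrics.Handler())
	http.ListenAndServe(":8080", nil)
}
```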
+func (n *Namespace) WithConstLabels(labels Labels) *Namespace { + n.mu.Lock() + ns := &Namespace{ + name: n.name, + subsystem: n.subsystem, + labels: mergeLabels(n.labels, labels), + } + n.mu.Unlock() + return ns +} + +func (n *Namespace) NewCounter(name, help string) Counter { + c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} + n.Add(c) + return c +} + +func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { + c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} + n.Add(c) + return c +} + +func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Total), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewTimer(name, help string) Timer { + t := &timer{ + m: prometheus.NewHistogram(n.newTimerOpts(name, help)), + } + n.Add(t) + return t +} + +func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { + t := &labeledTimer{ + m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), + } + n.Add(t) + return t +} + +func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Seconds), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { + g := &gauge{ + pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), + } + n.Add(g) + return g +} + +func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { + g := &labeledGauge{ + pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), + } + n.Add(g) + return g +} + +func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, unit), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Describe(ch) + } +} + +func (n *Namespace) Collect(ch chan<- prometheus.Metric) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Collect(ch) + } +} + +func (n *Namespace) Add(collector prometheus.Collector) { + n.mu.Lock() + n.metrics = append(n.metrics, collector) + n.mu.Unlock() +} + +func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { + name = makeName(name, unit) + namespace := n.name + if n.subsystem != "" { + namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) + } + name = fmt.Sprintf("%s_%s", namespace, name) + return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) +} + +// mergeLabels merges two or more labels objects into a single map, favoring +// the later labels. 
+func mergeLabels(lbs ...Labels) Labels { + merged := make(Labels) + + for _, target := range lbs { + for k, v := range target { + merged[k] = v + } + } + + return merged +} + +func makeName(name string, unit Unit) string { + if unit == "" { + return name + } + + return fmt.Sprintf("%s_%s", name, unit) +} + +func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric { + return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ + DurationBuckets: defaultDurationBuckets, + RequestSizeBuckets: defaultResponseSizeBuckets, + ResponseSizeBuckets: defaultResponseSizeBuckets, + }) +} + +func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric { + return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ + DurationBuckets: durationBuckets, + RequestSizeBuckets: requestSizeBuckets, + ResponseSizeBuckets: responseSizeBuckets, + }) +} + +func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric { + var httpMetrics []*HTTPMetric + inFlightMetric := n.NewInFlightGaugeMetric(handlerName) + requestTotalMetric := n.NewRequestTotalMetric(handlerName) + requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets) + requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets) + responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets) + httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric) + return httpMetrics +} + +func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric { + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + metric := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "in_flight_requests", + Help: "The in-flight HTTP requests", + ConstLabels: prometheus.Labels(labels), + }) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerInFlight, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric { + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + metric := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: prometheus.Labels(labels), + }, + []string{"code", "method"}, + ) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerCounter, + } + n.Add(httpMetric) + return httpMetric +} +func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("DurationBuckets must be provided") + } + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "request_duration_seconds", + Help: "The HTTP request latencies in seconds.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metric := prometheus.NewHistogramVec(opts, []string{"method"}) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerDuration, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("RequestSizeBuckets must be provided") + } + labels := 
prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "request_size_bytes", + Help: "The HTTP request sizes in bytes.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metric := prometheus.NewHistogramVec(opts, []string{}) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerRequestSize, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("ResponseSizeBuckets must be provided") + } + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "response_size_bytes", + Help: "The HTTP response sizes in bytes.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metrics := prometheus.NewHistogramVec(opts, []string{}) + httpMetric := &HTTPMetric{ + Collector: metrics, + handlerType: InstrumentHandlerResponseSize, + } + n.Add(httpMetric) + return httpMetric +} diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go new file mode 100644 index 000000000..708358df0 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/register.go @@ -0,0 +1,15 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Register adds all the metrics in the provided namespace to the global +// metrics registry +func Register(n *Namespace) { + prometheus.MustRegister(n) +} + +// Deregister removes all the metrics in the provided namespace from the +// global metrics registry +func Deregister(n *Namespace) { + prometheus.Unregister(n) +} diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go new file mode 100644 index 000000000..824c98739 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/timer.go @@ -0,0 +1,85 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// StartTimer begins a timer observation at the callsite. When the target +// operation is completed, the caller should call the returned done func(). +func StartTimer(timer Timer) (done func()) { + start := time.Now() + return func() { + timer.Update(time.Since(start)) + } +} + +// Timer is a metric that allows collecting the duration of an action in seconds +type Timer interface { + // Update records an observation, duration, and converts to the target + // units. + Update(duration time.Duration) + + // UpdateSince will add the duration from the provided starting time to the + // timer's summary with the precision that was used in the creation of the timer + UpdateSince(time.Time) +} + +// LabeledTimer is a timer that must have label values populated before use.
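A brief sketch of the `Timer` and `StartTimer` API above, using `NewTimer` from namespace.go earlier in this patch; the metric name and the sleeps standing in for real work are invented:

```go
package main

import (
	"time"

	metrics "github.com/docker/go-metrics"
)

func main() {
	ns := metrics.NewNamespace("example", "jobs", nil)
	// NewTimer appends the "seconds" unit to the metric name.
	runTime := ns.NewTimer("run", "Time taken to run a job")
	metrics.Register(ns)

	// Observe a duration explicitly from a start time...
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for real work
	runTime.UpdateSince(start)

	// ...or let StartTimer capture the elapsed time when done() is called.
	done := metrics.StartTimer(runTime)
	time.Sleep(10 * time.Millisecond)
	done()
}
```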
+type LabeledTimer interface { + WithValues(labels ...string) *labeledTimerObserver +} + +type labeledTimer struct { + m *prometheus.HistogramVec +} + +type labeledTimerObserver struct { + m prometheus.Observer +} + +func (lbo *labeledTimerObserver) Update(duration time.Duration) { + lbo.m.Observe(duration.Seconds()) +} + +func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { + lbo.m.Observe(time.Since(since).Seconds()) +} + +func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { + return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} +} + +func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { + lt.m.Describe(c) +} + +func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { + lt.m.Collect(c) +} + +type timer struct { + m prometheus.Observer +} + +func (t *timer) Update(duration time.Duration) { + t.m.Observe(duration.Seconds()) +} + +func (t *timer) UpdateSince(since time.Time) { + t.m.Observe(time.Since(since).Seconds()) +} + +func (t *timer) Describe(c chan<- *prometheus.Desc) { + c <- t.m.(prometheus.Metric).Desc() +} + +func (t *timer) Collect(c chan<- prometheus.Metric) { + // Are there any observers that don't implement Collector? It is really + // unclear what the point of the upstream change was, but we'll let this + // panic if we get an observer that doesn't implement collector. In this + // case, we should almost always see metricVec objects, so this should + // never panic. + t.m.(prometheus.Collector).Collect(c) +} diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go new file mode 100644 index 000000000..c96622f90 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/unit.go @@ -0,0 +1,12 @@ +package metrics + +// Unit represents the type or precision of a metric that is appended to +// the metrics fully qualified name +type Unit string + +const ( + Nanoseconds Unit = "nanoseconds" + Seconds Unit = "seconds" + Bytes Unit = "bytes" + Total Unit = "total" +) diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 000000000..9ea86d784 --- /dev/null +++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
+660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 000000000..4aac7c741 --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,46 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "akihirosuda", + "dnephin", + "thajeztah", + "vdemeester", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "akihiro.suda.cz@hco.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 000000000..4f70a4e13 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 000000000..af9d60552 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get golang.org/x/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 000000000..48dd8744d --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 000000000..c245a8951 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,154 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[byte]int64 + +var ( + decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} + binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} +) + +var ( + decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +) + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. 
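+// For example, "64m" and "64MiB" both yield 67108864.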
+func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + // TODO: rewrite to use strings.Cut if there's a space + // once Go < 1.18 is deprecated. + sep := strings.LastIndexAny(sizeStr, "01234567890. ") + if sep == -1 { + // There should be at least a digit. + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + var num, sfx string + if sizeStr[sep] != ' ' { + num = sizeStr[:sep+1] + sfx = sizeStr[sep+1:] + } else { + // Omit the space separator. + num = sizeStr[:sep] + sfx = sizeStr[sep+1:] + } + + size, err := strconv.ParseFloat(num, 64) + if err != nil { + return -1, err + } + // Backward compatibility: reject negative sizes. + if size < 0 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + if len(sfx) == 0 { + return int64(size), nil + } + + // Process the suffix. + + if len(sfx) > 3 { // Too long. + goto badSuffix + } + sfx = strings.ToLower(sfx) + // Trivial case: b suffix. + if sfx[0] == 'b' { + if len(sfx) > 1 { // no extra characters allowed after b. + goto badSuffix + } + return int64(size), nil + } + // A suffix from the map. + if mul, ok := uMap[sfx[0]]; ok { + size *= float64(mul) + } else { + goto badSuffix + } + + // The suffix may have extra "b" or "ib" (e.g. KiB or MB). + switch { + case len(sfx) == 2 && sfx[1] != 'b': + goto badSuffix + case len(sfx) == 3 && sfx[1:] != "ib": + goto badSuffix + } + + return int64(size), nil + +badSuffix: + return -1, fmt.Errorf("invalid suffix: '%s'", sfx) +} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 000000000..fca0400cc --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,123 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. 
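+// For example, "nofile=1024:2048" yields a soft limit of 1024 and a hard limit
+// of 2048; a single value, as in "nofile=1024", sets both limits to 1024.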
+func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if *hard != -1 { + if soft == -1 { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) + } + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 28e351693..97e319b21 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -4,7 +4,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). [![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Build Status](https://github.com/evanphx/json-patch/actions/workflows/go.yml/badge.svg)](https://github.com/evanphx/json-patch/actions/workflows/go.yml) [![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) # Get It! @@ -314,4 +314,4 @@ go test -cover ./... ``` Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). +using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml). 
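The go-units helpers vendored above convert in both directions between raw values and human-readable strings. A minimal, self-contained sketch of how the API fits together (illustrative only; the expected outputs in the comments follow from the vendored implementation):

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	// Durations are bucketed into coarse, human-readable strings.
	fmt.Println(units.HumanDuration(47 * time.Hour)) // "47 hours"

	// Sizes print in decimal (SI) or binary notation...
	fmt.Println(units.HumanSize(2746000))         // "2.746MB"
	fmt.Println(units.BytesSize(2 * 1024 * 1024)) // "2MiB"

	// ...and parse back; RAMInBytes uses 1024-based (binary) units.
	if n, err := units.RAMInBytes("64MiB"); err == nil {
		fmt.Println(n) // 67108864
	}

	// Ulimits use the name=soft[:hard] syntax.
	if u, err := units.ParseUlimit("nofile=1024:2048"); err == nil {
		fmt.Println(u) // nofile=1024:2048
	}
}
```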
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index dc2b7e51e..cd0274e1e 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -359,7 +359,7 @@ func findObject(pd *container, path string) (container, string) { next, ok := doc.get(decodePatchKey(part)) - if next == nil || ok != nil { + if next == nil || ok != nil || next.raw == nil { return nil, "" } @@ -688,7 +688,7 @@ func (p Patch) test(doc *container, op Operation) error { } if val == nil { - if op.value().raw == nil { + if op.value() == nil || op.value().raw == nil { return nil } return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) diff --git a/vendor/github.com/exponent-io/jsonpath/.gitignore b/vendor/github.com/exponent-io/jsonpath/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/exponent-io/jsonpath/.travis.yml b/vendor/github.com/exponent-io/jsonpath/.travis.yml new file mode 100644 index 000000000..f4f458a41 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - 1.5 + - tip diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE new file mode 100644 index 000000000..541977250 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Exponent Labs LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md new file mode 100644 index 000000000..382fb3138 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/README.md @@ -0,0 +1,66 @@ +[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath) +[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath) + +# jsonpath + +This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. 
You should be able to use this extended Decoder in places where a json.Decoder would have been used.
+
+This Decoder has the following enhancements...
+ * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
+ * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
+ * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
+ * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
+
+## Installation
+
+    go get -u github.com/exponent-io/jsonpath
+
+## Example Usage
+
+#### SeekTo
+
+```go
+import "github.com/exponent-io/jsonpath"
+
+var j = []byte(`[
+  {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
+  {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
+]`)
+
+w := jsonpath.NewDecoder(bytes.NewReader(j))
+var v interface{}
+
+w.SeekTo(1, "Point", "G")
+w.Decode(&v) // v is 218
+```
+
+#### Scan with PathActions
+
+```go
+var j = []byte(`{"colors":[
+  {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
+  {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
+]}`)
+
+var actions PathActions
+
+// Extract the value at Point.A
+actions.Add(func(d *Decoder) error {
+	var alpha int
+	err := d.Decode(&alpha)
+	fmt.Printf("Alpha: %v\n", alpha)
+	return err
+}, "Point", "A")
+
+w := NewDecoder(bytes.NewReader(j))
+w.SeekTo("colors", 0)
+
+var ok = true
+var err error
+for ok {
+	ok, err = w.Scan(&actions)
+	if err != nil && err != io.EOF {
+		panic(err)
+	}
+}
+```
diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go
new file mode 100644
index 000000000..31de46c73
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/decoder.go
@@ -0,0 +1,210 @@
+package jsonpath
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
+type KeyString string
+
+// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
+type Decoder struct {
+	json.Decoder
+
+	path    JsonPath
+	context jsonContext
+}
+
+// NewDecoder creates a new instance of the extended JSON Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{Decoder: *json.NewDecoder(r)}
+}
+
+// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
+//
+// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+//
+// Consider the JSON structure
+//
+//  { "a": [0,"s",12e4,{"b":0,"v":35} ] }
+//
+// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
+// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
+// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
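+// (When the path ends in an array index, SeekTo stops just before that
+// element, so the next Decode call consumes it.)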
+//
+// SeekTo returns a boolean value indicating whether a match was found.
+//
+// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
+func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
+
+	if len(path) == 0 {
+		return len(d.path) == 0, nil
+	}
+	last := len(path) - 1
+	if i, ok := path[last].(int); ok {
+		path[last] = i - 1
+	}
+
+	for {
+		if d.path.Equal(path) {
+			return true, nil
+		}
+		_, err := d.Token()
+		if err == io.EOF {
+			return false, nil
+		} else if err != nil {
+			return false, err
+		}
+	}
+}
+
+// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
+// equivalent to encoding/json.Decode().
+func (d *Decoder) Decode(v interface{}) error {
+	switch d.context {
+	case objValue:
+		d.context = objKey
+		break
+	case arrValue:
+		d.path.incTop()
+		break
+	}
+	return d.Decoder.Decode(v)
+}
+
+// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
+// position of the most-recently parsed token.
+func (d *Decoder) Path() JsonPath {
+	p := make(JsonPath, len(d.path))
+	copy(p, d.path)
+	return p
+}
+
+// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
+// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
+// KeyString rather than as a native string.
+func (d *Decoder) Token() (json.Token, error) {
+	t, err := d.Decoder.Token()
+	if err != nil {
+		return t, err
+	}
+
+	if t == nil {
+		switch d.context {
+		case objValue:
+			d.context = objKey
+			break
+		case arrValue:
+			d.path.incTop()
+			break
+		}
+		return t, err
+	}
+
+	switch t := t.(type) {
+	case json.Delim:
+		switch t {
+		case json.Delim('{'):
+			if d.context == arrValue {
+				d.path.incTop()
+			}
+			d.path.push("")
+			d.context = objKey
+			break
+		case json.Delim('}'):
+			d.path.pop()
+			d.context = d.path.inferContext()
+			break
+		case json.Delim('['):
+			if d.context == arrValue {
+				d.path.incTop()
+			}
+			d.path.push(-1)
+			d.context = arrValue
+			break
+		case json.Delim(']'):
+			d.path.pop()
+			d.context = d.path.inferContext()
+			break
+		}
+	case float64, json.Number, bool:
+		switch d.context {
+		case objValue:
+			d.context = objKey
+			break
+		case arrValue:
+			d.path.incTop()
+			break
+		}
+		break
+	case string:
+		switch d.context {
+		case objKey:
+			d.path.nameTop(t)
+			d.context = objValue
+			return KeyString(t), err
+		case objValue:
+			d.context = objKey
+		case arrValue:
+			d.path.incTop()
+		}
+		break
+	}
+
+	return t, err
+}
+
+// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
+// invoking each matching PathAction along the way.
+//
+// Scan returns true if there are more contiguous values to scan (for example in an array).
+func (d *Decoder) Scan(ext *PathActions) (bool, error) {
+
+	rootPath := d.Path()
+
+	// If this is an array path, increment the root path in our local copy.
+	if rootPath.inferContext() == arrValue {
+		rootPath.incTop()
+	}
+
+	for {
+		// advance the token position
+		_, err := d.Token()
+		if err != nil {
+			return false, err
+		}
+
+	match:
+		var relPath JsonPath
+
+		// capture the new JSON path
+		path := d.Path()
+
+		if len(path) > len(rootPath) {
+			// capture the path relative to where the scan started
+			relPath = path[len(rootPath):]
+		} else {
+			// if the path is not longer than the root, then we are done with this scan
+			// return boolean flag indicating if there are more items to scan at the same level
+			return d.Decoder.More(), nil
+		}
+
+		// match the relative path against the path actions
+		if node := ext.node.match(relPath); node != nil {
+			if node.action != nil {
+				// we have a match so execute the action
+				err = node.action(d)
+				if err != nil {
+					return d.Decoder.More(), err
+				}
+				// The action may have advanced the decoder. If we are in an array, advancing it further would
+				// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
+				if d.path.inferContext() == arrValue && d.Decoder.More() {
+					goto match
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go
new file mode 100644
index 000000000..d7db2ad33
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/path.go
@@ -0,0 +1,67 @@
+// Package jsonpath extends the Go runtime's json.Decoder, enabling navigation of a stream of JSON tokens.
+package jsonpath
+
+import "fmt"
+
+type jsonContext int
+
+const (
+	none jsonContext = iota
+	objKey
+	objValue
+	arrValue
+)
+
+// AnyIndex can be used in a pattern to match any array index.
+const AnyIndex = -2
+
+// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+type JsonPath []interface{}
+
+func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
+func (p *JsonPath) pop()               { *p = (*p)[:len(*p)-1] }
+
+// increment the index at the top of the stack (must be an array index)
+func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
+
+// name the key at the top of the stack (must be an object key)
+func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
+
+// infer the context from the item at the top of the stack
+func (p *JsonPath) inferContext() jsonContext {
+	if len(*p) == 0 {
+		return none
+	}
+	t := (*p)[len(*p)-1]
+	switch t.(type) {
+	case string:
+		return objKey
+	case int:
+		return arrValue
+	default:
+		panic(fmt.Sprintf("Invalid stack type %T", t))
+	}
+}
+
+// Equal tests for equality between two JsonPath types.
+func (p *JsonPath) Equal(o JsonPath) bool {
+	if len(*p) != len(o) {
+		return false
+	}
+	for i, v := range *p {
+		if v != o[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *JsonPath) HasPrefix(o JsonPath) bool {
+	for i, v := range o {
+		if v != (*p)[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go
new file mode 100644
index 000000000..497ed686c
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/pathaction.go
@@ -0,0 +1,61 @@
+package jsonpath
+
+// pathNode is used to construct a trie of paths to be matched
+type pathNode struct {
+	matchOn    interface{} // string, or integer
+	childNodes []pathNode
+	action     DecodeAction
+}
+
+// match climbs the trie to find a node that matches the given JSON path.
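+// A node whose matchOn is AnyIndex matches any integer path element; match
+// returns nil when no node matches the path.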
+func (n *pathNode) match(path JsonPath) *pathNode { + var node *pathNode = n + for _, ps := range path { + found := false + for i, n := range node.childNodes { + if n.matchOn == ps { + node = &node.childNodes[i] + found = true + break + } else if _, ok := ps.(int); ok && n.matchOn == AnyIndex { + node = &node.childNodes[i] + found = true + break + } + } + if !found { + return nil + } + } + return node +} + +// PathActions represents a collection of DecodeAction functions that should be called at certain path positions +// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams. +type PathActions struct { + node pathNode +} + +// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail. +type DecodeAction func(d *Decoder) error + +// Add specifies an action to call on the Decoder when the specified path is encountered. +func (je *PathActions) Add(action DecodeAction, path ...interface{}) { + + var node *pathNode = &je.node + for _, ps := range path { + found := false + for i, n := range node.childNodes { + if n.matchOn == ps { + node = &node.childNodes[i] + found = true + break + } + } + if !found { + node.childNodes = append(node.childNodes, pathNode{matchOn: ps}) + node = &node.childNodes[len(node.childNodes)-1] + } + } + node.action = action +} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 000000000..25fdaf639 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 000000000..5152bf59b --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,178 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. 
+
+![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg)
+
+
+## Install
+
+```bash
+go get github.com/fatih/color
+```
+
+## Examples
+
+### Standard colors
+
+```go
+// Print with default helper functions
+color.Cyan("Prints text in cyan.")
+
+// A newline will be appended automatically
+color.Blue("Prints %s in blue.", "text")
+
+// These are using the default foreground colors
+color.Red("We have red")
+color.Magenta("And many others ..")
+
+```
+
+### Mix and reuse colors
+
+```go
+// Create a new color object
+c := color.New(color.FgCyan).Add(color.Underline)
+c.Println("Prints cyan text with an underline.")
+
+// Or just add them to New()
+d := color.New(color.FgCyan, color.Bold)
+d.Printf("This prints bold cyan %s\n", "too!.")
+
+// Mix up foreground and background colors, create new mixes!
+red := color.New(color.FgRed)
+
+boldRed := red.Add(color.Bold)
+boldRed.Println("This will print text in bold red.")
+
+whiteBackground := red.Add(color.BgWhite)
+whiteBackground.Println("Red text with white background.")
+```
+
+### Use your own output (io.Writer)
+
+```go
+// Use your own io.Writer output
+color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+blue := color.New(color.FgBlue)
+blue.Fprint(writer, "This will print text in blue.")
+```
+
+### Custom print functions (PrintFunc)
+
+```go
+// Create a custom print function for convenience
+red := color.New(color.FgRed).PrintfFunc()
+red("Warning")
+red("Error: %s", err)
+
+// Mix up multiple attributes
+notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+notice("Don't forget this...")
+```
+
+### Custom fprint functions (FprintFunc)
+
+```go
+blue := color.New(color.FgBlue).FprintfFunc()
+blue(myWriter, "important notice: %s", stars)
+
+// Mix up with multiple attributes
+success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+success(myWriter, "Don't forget this...")
+```
+
+### Insert into noncolor strings (SprintFunc)
+
+```go
+// Create SprintXxx functions to mix strings with other non-colorized strings:
+yellow := color.New(color.FgYellow).SprintFunc()
+red := color.New(color.FgRed).SprintFunc()
+fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+fmt.Printf("This %s rocks!\n", info("package"))
+
+// Use helper functions
+fmt.Println("This", color.RedString("warning"), "should not be neglected.")
+fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
+
+// Windows supported too! Just don't forget to change the output to color.Output
+fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+```
+
+### Plug into existing code
+
+```go
+// Use handy standard colors
+color.Set(color.FgYellow)
+
+fmt.Println("Existing text will now be in yellow")
+fmt.Printf("This one %s\n", "too")
+
+color.Unset() // Don't forget to unset
+
+// You can mix up parameters
+color.Set(color.FgMagenta, color.Bold)
+defer color.Unset() // Use it in your function
+
+fmt.Println("All text will now be bold magenta.")
+```
+
+### Disable/Enable color
+
+There might be a case where you want to explicitly disable/enable color output. The
+`go-isatty` package will automatically disable color output for non-tty output streams
+(for example if the output were piped directly to `less`).
+
+The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
+variable is set (regardless of its value).
+
+`Color` has support to disable/enable colors programmatically both globally and
+for single color definitions. For example, suppose you have a CLI app and a
+`--no-color` bool flag. You can easily disable the color output with:
+
+```go
+var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+if *flagNoColor {
+	color.NoColor = true // disables colorized output
+}
+```
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+```go
+c := color.New(color.FgCyan)
+c.Println("Prints cyan text")
+
+c.DisableColor()
+c.Println("This is printed without any color")
+
+c.EnableColor()
+c.Println("This prints again cyan...")
+```
+
+## GitHub Actions
+
+To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.
+
+## Todo
+
+* Save/Return previous values
+* Evaluate fmt.Formatter interface
+
+
+## Credits
+
+ * [Fatih Arslan](https://github.com/fatih)
+ * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
+
+## License
+
+The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go
new file mode 100644
index 000000000..98a60f3c8
--- /dev/null
+++ b/vendor/github.com/fatih/color/color.go
@@ -0,0 +1,618 @@
+package color
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+var (
+	// NoColor defines if the output is colorized or not. It's dynamically set to
+	// false or true based on the stdout's file descriptor referring to a terminal
+	// or not. It's also set to true if the NO_COLOR environment variable is
+	// set (regardless of its value). This is a global option and affects all
+	// colors. For more control over each color block use the methods
+	// DisableColor() individually.
+	NoColor = noColorExists() || os.Getenv("TERM") == "dumb" ||
+		(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
+
+	// Output defines the standard output of the print functions. By default
+	// os.Stdout is used.
+	Output = colorable.NewColorableStdout()
+
+	// Error defines a color supporting writer for os.Stderr.
+	Error = colorable.NewColorableStderr()
+
+	// colorsCache is used to reduce the count of created Color objects and
+	// allows reusing already created objects with the required Attribute.
+	colorsCache   = make(map[Attribute]*Color)
+	colorsCacheMu sync.Mutex // protects colorsCache
+)
+
+// noColorExists returns true if the environment variable NO_COLOR exists.
+func noColorExists() bool {
+	_, exists := os.LookupEnv("NO_COLOR")
+	return exists
+}
+
+// Color defines a custom color object which is defined by SGR parameters.
+type Color struct {
+	params  []Attribute
+	noColor *bool
+}
+
+// Attribute defines a single SGR Code
+type Attribute int
+
+const escape = "\x1b"
+
+// Base attributes
+const (
+	Reset Attribute = iota
+	Bold
+	Faint
+	Italic
+	Underline
+	BlinkSlow
+	BlinkRapid
+	ReverseVideo
+	Concealed
+	CrossedOut
+)
+
+// Foreground text colors
+const (
+	FgBlack Attribute = iota + 30
+	FgRed
+	FgGreen
+	FgYellow
+	FgBlue
+	FgMagenta
+	FgCyan
+	FgWhite
+)
+
+// Foreground Hi-Intensity text colors
+const (
+	FgHiBlack Attribute = iota + 90
+	FgHiRed
+	FgHiGreen
+	FgHiYellow
+	FgHiBlue
+	FgHiMagenta
+	FgHiCyan
+	FgHiWhite
+)
+
+// Background text colors
+const (
+	BgBlack Attribute = iota + 40
+	BgRed
+	BgGreen
+	BgYellow
+	BgBlue
+	BgMagenta
+	BgCyan
+	BgWhite
+)
+
+// Background Hi-Intensity text colors
+const (
+	BgHiBlack Attribute = iota + 100
+	BgHiRed
+	BgHiGreen
+	BgHiYellow
+	BgHiBlue
+	BgHiMagenta
+	BgHiCyan
+	BgHiWhite
+)
+
+// New returns a newly created color object.
+func New(value ...Attribute) *Color {
+	c := &Color{
+		params: make([]Attribute, 0),
+	}
+
+	if noColorExists() {
+		c.noColor = boolPtr(true)
+	}
+
+	c.Add(value...)
+	return c
+}
+
+// Set sets the given parameters immediately. It will change the color of
+// output with the given SGR parameters until color.Unset() is called.
+func Set(p ...Attribute) *Color {
+	c := New(p...)
+	c.Set()
+	return c
+}
+
+// Unset resets all escape attributes and clears the output. Usually should
+// be called after Set().
+func Unset() {
+	if NoColor {
+		return
+	}
+
+	fmt.Fprintf(Output, "%s[%dm", escape, Reset)
+}
+
+// Set sets the SGR sequence.
+func (c *Color) Set() *Color {
+	if c.isNoColorSet() {
+		return c
+	}
+
+	fmt.Fprintf(Output, c.format())
+	return c
+}
+
+func (c *Color) unset() {
+	if c.isNoColorSet() {
+		return
+	}
+
+	Unset()
+}
+
+func (c *Color) setWriter(w io.Writer) *Color {
+	if c.isNoColorSet() {
+		return c
+	}
+
+	fmt.Fprintf(w, c.format())
+	return c
+}
+
+func (c *Color) unsetWriter(w io.Writer) {
+	if c.isNoColorSet() {
+		return
+	}
+
+	if NoColor {
+		return
+	}
+
+	fmt.Fprintf(w, "%s[%dm", escape, Reset)
+}
+
+// Add is used to chain SGR parameters. Use as many parameters as needed to
+// combine and create custom color objects. Example: Add(color.FgRed, color.Underline).
+func (c *Color) Add(value ...Attribute) *Color {
+	c.params = append(c.params, value...)
+	return c
+}
+
+func (c *Color) prepend(value Attribute) {
+	c.params = append(c.params, 0)
+	copy(c.params[1:], c.params[0:])
+	c.params[0] = value
+}
+
+// Fprint formats using the default formats for its operands and writes to w.
+// Spaces are added between operands when neither is a string.
+// It returns the number of bytes written and any write error encountered.
+// On Windows, users should wrap w with colorable.NewColorable() if w is of
+// type *os.File.
+func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	c.setWriter(w)
+	defer c.unsetWriter(w)
+
+	return fmt.Fprint(w, a...)
+}
+
+// Print formats using the default formats for its operands and writes to
+// standard output. Spaces are added between operands when neither is a
+// string. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Print(a ...interface{}) (n int, err error) {
+	c.Set()
+	defer c.unset()
+
+	return fmt.Fprint(Output, a...)
+}
+
+// Fprintf formats according to a format specifier and writes to w.
+// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+	return func(a ...interface{}) {
+		c.Println(a...)
+	}
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output, example:
+//
+//	put := New(FgYellow).SprintFunc()
+//	fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprint(a...))
+	}
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+	return func(format string, a ...interface{}) string {
+		return c.wrap(fmt.Sprintf(format, a...))
+	}
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprintln(a...))
+	}
+}
+
+// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
+// an example output might be: "1;36" -> bold cyan
+func (c *Color) sequence() string {
+	format := make([]string, len(c.params))
+	for i, v := range c.params {
+		format[i] = strconv.Itoa(int(v))
+	}
+
+	return strings.Join(format, ";")
+}
+
+// wrap wraps the s string with the colors attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+	if c.isNoColorSet() {
+		return s
+	}
+
+	return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+	return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+	return fmt.Sprintf("%s[%dm", escape, Reset)
+}
+
+// DisableColor disables the color output. Useful to not change any existing
+// code and still being able to output. Can be used for flags like
+// "--no-color". To enable back use EnableColor() method.
+func (c *Color) DisableColor() {
+	c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjunction with
+// DisableColor(). Otherwise this method has no side effects.
+func (c *Color) EnableColor() {
+	c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+	// check first if we have user set action
+	if c.noColor != nil {
+		return *c.noColor
+	}
+
+	// if not return the global option, which is disabled by default
+	return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
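+// Two colors are considered equal when they carry the same set of SGR
+// attributes, regardless of the order in which they were added.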
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+	return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+	return colorString(format, FgHiWhite, a...)
+}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 000000000..04541de78
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,135 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways, pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+    color.Cyan("Prints text in cyan.")
+
+    // a newline will be appended automatically
+    color.Blue("Prints %s in blue.", "text")
+
+    // More default foreground colors..
+    color.Red("We have red")
+    color.Yellow("Yellow color too!")
+    color.Magenta("And many others ..")
+
+    // Hi-intensity colors
+    color.HiGreen("Bright green color.")
+    color.HiBlack("Bright black means gray..")
+    color.HiWhite("Shiny white color!")
+
+However, there are times where custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+    // Create a new color object
+    c := color.New(color.FgCyan).Add(color.Underline)
+    c.Println("Prints cyan text with an underline.")
+
+    // Or just add them to New()
+    d := color.New(color.FgCyan, color.Bold)
+    d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+    // Mix up foreground and background colors, create new mixes!
+    red := color.New(color.FgRed)
+
+    boldRed := red.Add(color.Bold)
+    boldRed.Println("This will print text in bold red.")
+
+    whiteBackground := red.Add(color.BgWhite)
+    whiteBackground.Println("Red text with White background.")
+
+    // Use your own io.Writer output
+    color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+    blue := color.New(color.FgBlue)
+    blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+    // Create a custom print function for convenience
+    red := color.New(color.FgRed).PrintfFunc()
+    red("warning")
+    red("error: %s", err)
+
+    // Mix up multiple attributes
+    notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+    notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+    blue := color.New(FgBlue).FprintfFunc()
+    blue(myWriter, "important notice: %s", stars)
+
+    // Mix up with multiple attributes
+    success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+    success(myWriter, "don't forget this...")
+
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+    yellow := New(FgYellow).SprintFunc()
+    red := New(FgRed).SprintFunc()
+
+    fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+    info := New(FgWhite, BgGreen).SprintFunc()
+    fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, only for the color.SprintXXX functions, the user should use
+fmt.FprintXXX and set the output to color.Output:
+
+    fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+    info := New(FgWhite, BgGreen).SprintFunc()
+    fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using it with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing
+code is not required.
+
+    // Use handy standard colors.
+    color.Set(color.FgYellow)
+
+    fmt.Println("Existing text will be now in Yellow")
+    fmt.Printf("This one %s\n", "too")
+
+    color.Unset() // don't forget to unset
+
+    // You can mix up parameters
+    color.Set(color.FgMagenta, color.Bold)
+    defer color.Unset() // use it in your function
+
+    fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definitions. For example,
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+    var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+    if *flagNoColor {
+        color.NoColor = true // disables colorized output
+    }
+
+You can also disable the color by setting the NO_COLOR environment variable to any value.
+
+It also has support for single color definitions (local).
You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml new file mode 100644 index 000000000..bfc421200 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 000000000..e028b46a9 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 000000000..2d84889ae --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -v ./... + +generate: + go generate . + +clean: + rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 000000000..ddcecd13e --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. +response time, bytes written, and http status code) from your application's +http.Handlers. + +Doing this requires non-trivial wrapping of the http.ResponseWriter interface, +which is also exposed for users interested in a more low-level API. + +[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop) +[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop) + +## Usage Example + +```go +// myH is your app's http handler, perhaps a http.ServeMux or similar. +var myH http.Handler +// wrappedH wraps myH in order to log every request. 
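+// CaptureMetrics runs myH to completion and reports the response code, the
+// handler's duration, and the number of body bytes written.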
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	m := httpsnoop.CaptureMetrics(myH, w, r)
+	log.Printf(
+		"%s %s (code=%d dt=%s written=%d)",
+		r.Method,
+		r.URL,
+		m.Code,
+		m.Duration,
+		m.Written,
+	)
+})
+http.ListenAndServe(":8080", wrappedH)
+```
+
+## Why this package exists
+
+Instrumenting an application's http.Handler is surprisingly difficult.
+
+However, if you google for e.g. "capture ResponseWriter status code" you'll find
+lots of advice and code examples that suggest it is a fairly trivial
+undertaking. Unfortunately everything I've seen so far has a high chance of
+breaking your application.
+
+The main problem is that a `http.ResponseWriter` often implements additional
+interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
+`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
+in your own struct that also implements the `http.ResponseWriter` interface
+will hide the additional interfaces mentioned above. This has a high chance of
+introducing subtle bugs into any non-trivial application.
+
+Another approach I've seen people take is to return a struct that implements
+all of the interfaces above. However, that's also problematic, because it's
+difficult to fake some of these interfaces' behaviors when the underlying
+`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
+because an application may choose to operate differently, merely because it
+detects the presence of these additional interfaces.
+
+This package solves this problem by checking which additional interfaces a
+`http.ResponseWriter` implements and returning a wrapped version implementing
+the exact same set of interfaces.
+
+Additionally this package properly handles edge cases such as `WriteHeader` not
+being called, or being called more than once, as well as concurrent calls to
+`http.ResponseWriter` methods, and even calls happening after the wrapped
+`ServeHTTP` has already returned.
+
+Unfortunately this package is not perfect either. It's possible that it is
+still missing some interfaces provided by the go core (let me know if you find
+one), and it won't work for applications adding their own interfaces into the
+mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
+`http.ResponseWriter` and type-assert the result to its other interfaces.
+
+However, hopefully the explanation above has sufficiently scared you away from
+rolling your own solution to this problem. httpsnoop may still break your
+application, but at least it tries to avoid it as much as possible.
+
+Anyway, the real problem here is that smuggling additional interfaces inside
+`http.ResponseWriter` is a problematic design choice, but it probably goes as
+deep as the Go language specification itself. But that's okay, I still prefer
+Go over the alternatives ;).
+
+## Performance
+
+```
+BenchmarkBaseline-8        20000  94912 ns/op
+BenchmarkCaptureMetrics-8  20000  95461 ns/op
+```
+
+As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
+overhead of ~500 ns per http request on my machine. However, the margin of
+error appears to be larger than that, therefore it should be reasonable to
+assume that the overhead introduced by `CaptureMetrics` is absolutely
+negligible.
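+
+## Low-level API (sketch)
+
+The `Hooks`/`Wrap` pair exposed by this package is the "more low-level API"
+mentioned above. As a minimal, hypothetical sketch (imports elided;
+`countingHandler` is not part of this package), counting response bytes
+without `CaptureMetrics` could look like:
+
+```go
+// countingHandler wraps h and logs how many bytes each response wrote,
+// intercepting only the Write call via a Hooks value.
+func countingHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var written int64
+		ww := httpsnoop.Wrap(w, httpsnoop.Hooks{
+			Write: func(next httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+				return func(p []byte) (int, error) {
+					n, err := next(p)
+					written += int64(n)
+					return n, err
+				}
+			},
+		})
+		h.ServeHTTP(ww, r)
+		log.Printf("%s %s wrote %d bytes", r.Method, r.URL, written)
+	})
+}
+```
+
+Hooks targeting methods the underlying writer does not implement are simply
+ignored, so the same wrapper works for any `http.ResponseWriter`.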
+
+## License
+
+MIT
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
new file mode 100644
index 000000000..b77cc7c00
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -0,0 +1,86 @@
+package httpsnoop
+
+import (
+	"io"
+	"net/http"
+	"time"
+)
+
+// Metrics holds metrics captured from CaptureMetrics.
+type Metrics struct {
+	// Code is the first http response code passed to the WriteHeader func of
+	// the ResponseWriter. If no such call is made, a default code of 200 is
+	// assumed instead.
+	Code int
+	// Duration is the time it took to execute the handler.
+	Duration time.Duration
+	// Written is the number of bytes successfully written by the Write or
+	// ReadFrom function of the ResponseWriter. ResponseWriters may also write
+	// data to their underlying connection directly (e.g. headers), but those
+	// are not tracked. Therefore the number of Written bytes will usually match
+	// the size of the response body.
+	Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+	return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+		hnd.ServeHTTP(ww, r)
+	})
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+	m := Metrics{Code: http.StatusOK}
+	m.CaptureMetrics(w, fn)
+	return m
+}
+
+// CaptureMetrics wraps w and calls fn with the wrapped w and updates
+// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
+// but allows one to customize the starting Metrics object.
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) {
+	var (
+		start         = time.Now()
+		headerWritten bool
+		hooks         = Hooks{
+			WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
+				return func(code int) {
+					next(code)
+
+					if !headerWritten {
+						m.Code = code
+						headerWritten = true
+					}
+				}
+			},
+
+			Write: func(next WriteFunc) WriteFunc {
+				return func(p []byte) (int, error) {
+					n, err := next(p)
+
+					m.Written += int64(n)
+					headerWritten = true
+					return n, err
+				}
+			},
+
+			ReadFrom: func(next ReadFromFunc) ReadFromFunc {
+				return func(src io.Reader) (int64, error) {
+					n, err := next(src)
+
+					headerWritten = true
+					m.Written += n
+					return n, err
+				}
+			},
+		}
+	)
+
+	fn(Wrap(w, hooks))
+	m.Duration += time.Since(start)
+}
diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go
new file mode 100644
index 000000000..203c35b3c
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/docs.go
@@ -0,0 +1,10 @@
+// Package httpsnoop provides an easy way to capture http related metrics (i.e.
+// response time, bytes written, and http status code) from your application's
+// http.Handlers.
+//
+// Doing this requires non-trivial wrapping of the http.ResponseWriter
+// interface, which is also exposed for users interested in a more low-level
+// API.
+package httpsnoop + +//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 000000000..31cbdfb8e --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, 
rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts 
*http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 000000000..ab99c07c7 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31e0..000000000 --- a/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc0..000000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75b4..000000000 --- a/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. 
-``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. - var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 586007402..000000000 --- a/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. 
- if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'Å¿' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. 
-func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index 4fb4054a8..000000000 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. 
-// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. 
- s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. - jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. - jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. 
-			var s string
-			switch typedVal := typedYAMLObj.(type) {
-			case int:
-				s = strconv.FormatInt(int64(typedVal), 10)
-			case int64:
-				s = strconv.FormatInt(typedVal, 10)
-			case float64:
-				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
-			case uint64:
-				s = strconv.FormatUint(typedVal, 10)
-			case bool:
-				if typedVal {
-					s = "true"
-				} else {
-					s = "false"
-				}
-			}
-			if len(s) > 0 {
-				yamlObj = interface{}(s)
-			}
-		}
-		return yamlObj, nil
-	}
-
-	return nil, nil
-}
diff --git a/vendor/github.com/go-errors/errors/.travis.yml b/vendor/github.com/go-errors/errors/.travis.yml
new file mode 100644
index 000000000..77a6bccf7
--- /dev/null
+++ b/vendor/github.com/go-errors/errors/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+
+go:
+  - "1.8.x"
+  - "1.10.x"
+  - "1.13.x"
+  - "1.14.x"
+  - "1.16.x"
diff --git a/vendor/github.com/go-errors/errors/LICENSE.MIT b/vendor/github.com/go-errors/errors/LICENSE.MIT
new file mode 100644
index 000000000..c9a5b2eeb
--- /dev/null
+++ b/vendor/github.com/go-errors/errors/LICENSE.MIT
@@ -0,0 +1,7 @@
+Copyright (c) 2015 Conrad Irwin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/go-errors/errors/README.md b/vendor/github.com/go-errors/errors/README.md
new file mode 100644
index 000000000..3d7852594
--- /dev/null
+++ b/vendor/github.com/go-errors/errors/README.md
@@ -0,0 +1,82 @@
+go-errors/errors
+================
+
+[![Build Status](https://travis-ci.org/go-errors/errors.svg?branch=master)](https://travis-ci.org/go-errors/errors)
+
+Package errors adds stacktrace support to errors in go.
+
+This is particularly useful when you want to understand the state of execution
+when an error was returned unexpectedly.
+
+It provides the type \*Error which implements the standard golang error
+interface, so you can use this library interchangeably with code that is
+expecting a normal error return.
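+
+As a quick, hypothetical sketch of that drop-in property (imports elided;
+`loadConfig` and `/etc/app.conf` are made-up names; the Usage section below
+shows the canonical example): a function keeps returning a plain `error`,
+while callers that know about this package can still recover the stacktrace.
+
+```go
+func loadConfig(path string) error {
+	if _, err := os.Stat(path); err != nil {
+		// Wrap attaches a stacktrace; skip=0 starts it at this call site.
+		return errors.Wrap(err, 0)
+	}
+	return nil
+}
+
+func main() {
+	if err := loadConfig("/etc/app.conf"); err != nil {
+		fmt.Println(err) // plain callers just see a normal error message
+		if e, ok := err.(*errors.Error); ok {
+			fmt.Println(e.ErrorStack()) // aware callers get the stack too
+		}
+	}
+}
+```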
+
+Usage
+-----
+
+Full documentation is available on
+[godoc](https://godoc.org/github.com/go-errors/errors), but here's a simple
+example:
+
+```go
+package crashy
+
+import "github.com/go-errors/errors"
+
+var Crashed = errors.Errorf("oh dear")
+
+func Crash() error {
+	return errors.New(Crashed)
+}
+```
+
+This can be called as follows:
+
+```go
+package main
+
+import (
+	"crashy"
+	"fmt"
+	"github.com/go-errors/errors"
+)
+
+func main() {
+	err := crashy.Crash()
+	if err != nil {
+		if errors.Is(err, crashy.Crashed) {
+			fmt.Println(err.(*errors.Error).ErrorStack())
+		} else {
+			panic(err)
+		}
+	}
+}
+```
+
+Meta-fu
+-------
+
+This package was originally written to allow reporting to
+[Bugsnag](https://bugsnag.com/) from
+[bugsnag-go](https://github.com/bugsnag/bugsnag-go), but after I found similar
+packages by Facebook and Dropbox, it was moved to one canonical location so
+everyone can benefit.
+
+This package is licensed under the MIT license, see LICENSE.MIT for details.
+
+
+## Changelog
+* v1.1.0 updated to use go1.13's standard-library errors.Is method instead of == in errors.Is
+* v1.2.0 added `errors.As` from the standard library.
+* v1.3.0 *BREAKING* updated error methods to return `error` instead of `*Error`.
+> Code that needs access to the underlying `*Error` can use the new errors.AsError(e)
+> ```
+> // before
+> errors.New(err).ErrorStack()
+> // after
+> errors.AsError(errors.Wrap(err)).ErrorStack()
+> ```
+* v1.4.0 *BREAKING* v1.4.0 reverted all changes from v1.3.0 and is identical to v1.2.0
+* v1.4.1 no code change, but now without an unnecessary cover.out file.
+* v1.4.2 performance improvement to ErrorStack() to avoid unnecessary work https://github.com/go-errors/errors/pull/40
diff --git a/vendor/github.com/go-errors/errors/error.go b/vendor/github.com/go-errors/errors/error.go
new file mode 100644
index 000000000..ccbc2e427
--- /dev/null
+++ b/vendor/github.com/go-errors/errors/error.go
@@ -0,0 +1,209 @@
+// Package errors provides errors that have stack-traces.
+//
+// This is particularly useful when you want to understand the
+// state of execution when an error was returned unexpectedly.
+//
+// It provides the type *Error which implements the standard
+// golang error interface, so you can use this library interchangeably
+// with code that is expecting a normal error return.
+//
+// For example:
+//
+//	package crashy
+//
+//	import "github.com/go-errors/errors"
+//
+//	var Crashed = errors.Errorf("oh dear")
+//
+//	func Crash() error {
+//		return errors.New(Crashed)
+//	}
+//
+// This can be called as follows:
+//
+//	package main
+//
+//	import (
+//		"crashy"
+//		"fmt"
+//		"github.com/go-errors/errors"
+//	)
+//
+//	func main() {
+//		err := crashy.Crash()
+//		if err != nil {
+//			if errors.Is(err, crashy.Crashed) {
+//				fmt.Println(err.(*errors.Error).ErrorStack())
+//			} else {
+//				panic(err)
+//			}
+//		}
+//	}
+//
+// This package was originally written to allow reporting to Bugsnag,
+// but after I found similar packages by Facebook and Dropbox, it
+// was moved to one canonical location so everyone can benefit.
+package errors
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"runtime"
+)
+
+// The maximum number of stackframes on any error.
+var MaxStackDepth = 50
+
+// Error is an error with an attached stacktrace. It can be used
+// wherever the builtin error interface is expected.
+type Error struct {
+	Err    error
+	stack  []uintptr
+	frames []StackFrame
+	prefix string
+}
+
+// New makes an Error from the given value. If that value is already an
+// error then it will be used directly, if not, it will be passed to
+// fmt.Errorf("%v"). The stacktrace will point to the line of code that
+// called New.
+func New(e interface{}) *Error {
+	var err error
+
+	switch e := e.(type) {
+	case error:
+		err = e
+	default:
+		err = fmt.Errorf("%v", e)
+	}
+
+	stack := make([]uintptr, MaxStackDepth)
+	length := runtime.Callers(2, stack[:])
+	return &Error{
+		Err:   err,
+		stack: stack[:length],
+	}
+}
+
+// Wrap makes an Error from the given value. If that value is already an
+// error then it will be used directly, if not, it will be passed to
+// fmt.Errorf("%v"). The skip parameter indicates how far up the stack
+// to start the stacktrace. 0 is from the current call, 1 from its caller, etc.
+func Wrap(e interface{}, skip int) *Error {
+	if e == nil {
+		return nil
+	}
+
+	var err error
+
+	switch e := e.(type) {
+	case *Error:
+		return e
+	case error:
+		err = e
+	default:
+		err = fmt.Errorf("%v", e)
+	}
+
+	stack := make([]uintptr, MaxStackDepth)
+	length := runtime.Callers(2+skip, stack[:])
+	return &Error{
+		Err:   err,
+		stack: stack[:length],
+	}
+}
+
+// WrapPrefix makes an Error from the given value. If that value is already an
+// error then it will be used directly, if not, it will be passed to
+// fmt.Errorf("%v"). The prefix parameter is used to add a prefix to the
+// error message when calling Error(). The skip parameter indicates how far
+// up the stack to start the stacktrace. 0 is from the current call,
+// 1 from its caller, etc.
+func WrapPrefix(e interface{}, prefix string, skip int) *Error {
+	if e == nil {
+		return nil
+	}
+
+	err := Wrap(e, 1+skip)
+
+	if err.prefix != "" {
+		prefix = fmt.Sprintf("%s: %s", prefix, err.prefix)
+	}
+
+	return &Error{
+		Err:    err.Err,
+		stack:  err.stack,
+		prefix: prefix,
+	}
+
+}
+
+// Errorf creates a new error with the given message. You can use it
+// as a drop-in replacement for fmt.Errorf() to provide descriptive
+// errors in return values.
+func Errorf(format string, a ...interface{}) *Error {
+	return Wrap(fmt.Errorf(format, a...), 1)
+}
+
+// Error returns the underlying error's message.
+func (err *Error) Error() string {
+
+	msg := err.Err.Error()
+	if err.prefix != "" {
+		msg = fmt.Sprintf("%s: %s", err.prefix, msg)
+	}
+
+	return msg
+}
+
+// Stack returns the callstack formatted the same way that go does
+// in runtime/debug.Stack()
+func (err *Error) Stack() []byte {
+	buf := bytes.Buffer{}
+
+	for _, frame := range err.StackFrames() {
+		buf.WriteString(frame.String())
+	}
+
+	return buf.Bytes()
+}
+
+// Callers satisfies the bugsnag ErrorWithCallerS() interface
+// so that the stack can be read out.
+func (err *Error) Callers() []uintptr {
+	return err.stack
+}
+
+// ErrorStack returns a string that contains both the
+// error message and the callstack.
+func (err *Error) ErrorStack() string {
+	return err.TypeName() + " " + err.Error() + "\n" + string(err.Stack())
+}
+
+// StackFrames returns an array of frames containing information about the
+// stack.
+func (err *Error) StackFrames() []StackFrame {
+	if err.frames == nil {
+		err.frames = make([]StackFrame, len(err.stack))
+
+		for i, pc := range err.stack {
+			err.frames[i] = NewStackFrame(pc)
+		}
+	}
+
+	return err.frames
+}
+
+// TypeName returns the type of this error. e.g. *errors.stringError.
+func (err *Error) TypeName() string { + if _, ok := err.Err.(uncaughtPanic); ok { + return "panic" + } + return reflect.TypeOf(err.Err).String() +} + +// Return the wrapped error (implements api for As function). +func (err *Error) Unwrap() error { + return err.Err +} diff --git a/vendor/github.com/go-errors/errors/error_1_13.go b/vendor/github.com/go-errors/errors/error_1_13.go new file mode 100644 index 000000000..0af2fc806 --- /dev/null +++ b/vendor/github.com/go-errors/errors/error_1_13.go @@ -0,0 +1,31 @@ +// +build go1.13 + +package errors + +import ( + baseErrors "errors" +) + +// find error in any wrapped error +func As(err error, target interface{}) bool { + return baseErrors.As(err, target) +} + +// Is detects whether the error is equal to a given error. Errors +// are considered equal by this function if they are matched by errors.Is +// or if their contained errors are matched through errors.Is +func Is(e error, original error) bool { + if baseErrors.Is(e, original) { + return true + } + + if e, ok := e.(*Error); ok { + return Is(e.Err, original) + } + + if original, ok := original.(*Error); ok { + return Is(e, original.Err) + } + + return false +} diff --git a/vendor/github.com/go-errors/errors/error_backward.go b/vendor/github.com/go-errors/errors/error_backward.go new file mode 100644 index 000000000..80b0695e7 --- /dev/null +++ b/vendor/github.com/go-errors/errors/error_backward.go @@ -0,0 +1,57 @@ +// +build !go1.13 + +package errors + +import ( + "reflect" +) + +type unwrapper interface { + Unwrap() error +} + +// As assigns error or any wrapped error to the value target points +// to. If there is no value of the target type of target As returns +// false. +func As(err error, target interface{}) bool { + targetType := reflect.TypeOf(target) + + for { + errType := reflect.TypeOf(err) + + if errType == nil { + return false + } + + if reflect.PtrTo(errType) == targetType { + reflect.ValueOf(target).Elem().Set(reflect.ValueOf(err)) + return true + } + + wrapped, ok := err.(unwrapper) + if ok { + err = wrapped.Unwrap() + } else { + return false + } + } +} + +// Is detects whether the error is equal to a given error. Errors +// are considered equal by this function if they are the same object, +// or if they both contain the same error inside an errors.Error. +func Is(e error, original error) bool { + if e == original { + return true + } + + if e, ok := e.(*Error); ok { + return Is(e.Err, original) + } + + if original, ok := original.(*Error); ok { + return Is(e, original.Err) + } + + return false +} diff --git a/vendor/github.com/go-errors/errors/parse_panic.go b/vendor/github.com/go-errors/errors/parse_panic.go new file mode 100644 index 000000000..cc37052d7 --- /dev/null +++ b/vendor/github.com/go-errors/errors/parse_panic.go @@ -0,0 +1,127 @@ +package errors + +import ( + "strconv" + "strings" +) + +type uncaughtPanic struct{ message string } + +func (p uncaughtPanic) Error() string { + return p.message +} + +// ParsePanic allows you to get an error object from the output of a go program +// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap. 
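+// The text must begin with a "panic: " line followed by a goroutine stack
+// trace; any other input returns a parse error.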
+func ParsePanic(text string) (*Error, error) {
+	lines := strings.Split(text, "\n")
+
+	state := "start"
+
+	var message string
+	var stack []StackFrame
+
+	for i := 0; i < len(lines); i++ {
+		line := lines[i]
+
+		if state == "start" {
+			if strings.HasPrefix(line, "panic: ") {
+				message = strings.TrimPrefix(line, "panic: ")
+				state = "seek"
+			} else {
+				return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line)
+			}
+
+		} else if state == "seek" {
+			if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") {
+				state = "parsing"
+			}
+
+		} else if state == "parsing" {
+			if line == "" {
+				state = "done"
+				break
+			}
+			createdBy := false
+			if strings.HasPrefix(line, "created by ") {
+				line = strings.TrimPrefix(line, "created by ")
+				createdBy = true
+			}
+
+			i++
+
+			if i >= len(lines) {
+				return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line)
+			}
+
+			frame, err := parsePanicFrame(line, lines[i], createdBy)
+			if err != nil {
+				return nil, err
+			}
+
+			stack = append(stack, *frame)
+			if createdBy {
+				state = "done"
+				break
+			}
+		}
+	}
+
+	if state == "done" || state == "parsing" {
+		return &Error{Err: uncaughtPanic{message}, frames: stack}, nil
+	}
+	return nil, Errorf("could not parse panic: %v", text)
+}
+
+// The lines we're passing look like this:
+//
+// main.(*foo).destruct(0xc208067e98)
+// 	/0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151
+func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) {
+	idx := strings.LastIndex(name, "(")
+	if idx == -1 && !createdBy {
+		return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name)
+	}
+	if idx != -1 {
+		name = name[:idx]
+	}
+	pkg := ""
+
+	if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
+		pkg += name[:lastslash] + "/"
+		name = name[lastslash+1:]
+	}
+	if period := strings.Index(name, "."); period >= 0 {
+		pkg += name[:period]
+		name = name[period+1:]
+	}
+
+	name = strings.Replace(name, "·", ".", -1)
+
+	if !strings.HasPrefix(line, "\t") {
+		return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line)
+	}
+
+	idx = strings.LastIndex(line, ":")
+	if idx == -1 {
+		return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line)
+	}
+	file := line[1:idx]
+
+	number := line[idx+1:]
+	if idx = strings.Index(number, " +"); idx > -1 {
+		number = number[:idx]
+	}
+
+	lno, err := strconv.ParseInt(number, 10, 32)
+	if err != nil {
+		return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line)
+	}
+
+	return &StackFrame{
+		File:       file,
+		LineNumber: int(lno),
+		Package:    pkg,
+		Name:       name,
+	}, nil
+}
diff --git a/vendor/github.com/go-errors/errors/stackframe.go b/vendor/github.com/go-errors/errors/stackframe.go
new file mode 100644
index 000000000..ef4a8b3f3
--- /dev/null
+++ b/vendor/github.com/go-errors/errors/stackframe.go
@@ -0,0 +1,122 @@
+package errors
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+)
+
+// A StackFrame contains all necessary information needed to generate a line
+// in a callstack.
+type StackFrame struct {
+	// The path to the file containing this ProgramCounter
+	File string
+	// The LineNumber in that file
+	LineNumber int
+	// The Name of the function that contains this ProgramCounter
+	Name string
+	// The Package that contains this function
+	Package string
+	// The underlying ProgramCounter
+	ProgramCounter uintptr
+}
+
+// NewStackFrame populates a stack frame object from the program counter.
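+// If the program counter cannot be resolved to a function, only the
+// ProgramCounter field of the returned frame is populated.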
+func NewStackFrame(pc uintptr) (frame StackFrame) { + + frame = StackFrame{ProgramCounter: pc} + if frame.Func() == nil { + return + } + frame.Package, frame.Name = packageAndName(frame.Func()) + + // pc -1 because the program counters we use are usually return addresses, + // and we want to show the line that corresponds to the function call + frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1) + return + +} + +// Func returns the function that contained this frame. +func (frame *StackFrame) Func() *runtime.Func { + if frame.ProgramCounter == 0 { + return nil + } + return runtime.FuncForPC(frame.ProgramCounter) +} + +// String returns the stackframe formatted in the same way as go does +// in runtime/debug.Stack() +func (frame *StackFrame) String() string { + str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter) + + source, err := frame.sourceLine() + if err != nil { + return str + } + + return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source) +} + +// SourceLine gets the line of code (from File and Line) of the original source if possible. +func (frame *StackFrame) SourceLine() (string, error) { + source, err := frame.sourceLine() + if err != nil { + return source, New(err) + } + return source, err +} + +func (frame *StackFrame) sourceLine() (string, error) { + if frame.LineNumber <= 0 { + return "???", nil + } + + file, err := os.Open(frame.File) + if err != nil { + return "", err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + currentLine := 1 + for scanner.Scan() { + if currentLine == frame.LineNumber { + return string(bytes.Trim(scanner.Bytes(), " \t")), nil + } + currentLine++ + } + if err := scanner.Err(); err != nil { + return "", err + } + + return "???", nil +} + +func packageAndName(fn *runtime.Func) (string, string) { + name := fn.Name() + pkg := "" + + // The name includes the path name to the package, which is unnecessary + // since the file name is already included. Plus, it has center dots. + // That is, we see + // runtime/debug.*T·ptrmethod + // and want + // *T.ptrmethod + // Since the package path might contains dots (e.g. code.google.com/...), + // we first remove the path prefix if there is one. 
+ if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { + pkg += name[:lastslash] + "/" + name = name[lastslash+1:] + } + if period := strings.Index(name, "."); period >= 0 { + pkg += name[:period] + name = name[period+1:] + } + + name = strings.Replace(name, "·", ".", -1) + return pkg, name +} diff --git a/vendor/github.com/go-gorp/gorp/v3/.gitignore b/vendor/github.com/go-gorp/gorp/v3/.gitignore new file mode 100644 index 000000000..a96e5fec5 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/.gitignore @@ -0,0 +1,11 @@ +_test +*.test +_testmain.go +_obj +*~ +*.6 +6.out +gorptest.bin +tmp +.idea +coverage.out diff --git a/vendor/github.com/go-gorp/gorp/v3/.travis.yml b/vendor/github.com/go-gorp/gorp/v3/.travis.yml new file mode 100644 index 000000000..958d260ac --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/.travis.yml @@ -0,0 +1,36 @@ +language: go +go: +- "1.15.x" +- "1.16.x" +- tip + +matrix: + allow_failures: + - go: tip + +services: +- mysql +- postgresql +- sqlite3 + +env: + global: + - secure: RriLxF6+2yMl67hdVv8ImXlu0h62mhcpqjaOgYNU+IEbUQ7hx96CKY6gkpYubW3BgApvF5RH6j3+HKvh2kGp0XhDOYOQCODfBSaSipZ5Aa5RKjsEYLtuVIobvJ80awR9hUeql69+WXs0/s72WThG0qTbOUY4pqHWfteeY235hWM= + +install: + - go get -t -d + - go get -t -d -tags integration + +before_script: +- mysql -e "CREATE DATABASE gorptest;" +- mysql -u root -e "GRANT ALL ON gorptest.* TO gorptest@localhost IDENTIFIED BY 'gorptest'" +- psql -c "CREATE DATABASE gorptest;" -U postgres +- psql -c "CREATE USER "gorptest" WITH SUPERUSER PASSWORD 'gorptest';" -U postgres +- go get github.com/lib/pq +- go get github.com/mattn/go-sqlite3 +- go get github.com/ziutek/mymysql/godrv +- go get github.com/go-sql-driver/mysql +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls + +script: ./test_all.sh diff --git a/vendor/github.com/go-gorp/gorp/v3/CONTRIBUTING.md b/vendor/github.com/go-gorp/gorp/v3/CONTRIBUTING.md new file mode 100644 index 000000000..7bc145fd7 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# Contributions are very welcome! + +## First: Create an Issue + +Even if your fix is simple, we'd like to have an issue to relate to +the PR. Discussion about the architecture and value can go on the +issue, leaving PR comments exclusively for coding style. + +## Second: Make Your PR + +- Fork the `master` branch +- Make your change +- Make a PR against the `master` branch + +You don't need to wait for comments on the issue before making your +PR. If you do wait for comments, you'll have a better chance of +getting your PR accepted the first time around, but it's not +necessary. + +## Third: Be Patient + +- If your change breaks backward compatibility, this becomes + especially true. + +We all have lives and jobs, and many of us are no longer on projects +that make use of `gorp`. We will get back to you, but it might take a +while. + +## Fourth: Consider Becoming a Maintainer + +We really do need help. We will likely ask you for help after a good +PR, but if we don't, please create an issue requesting maintainership. +Considering how few of us are currently active, we are unlikely to +refuse good help. 
diff --git a/vendor/github.com/go-gorp/gorp/v3/LICENSE b/vendor/github.com/go-gorp/gorp/v3/LICENSE new file mode 100644 index 000000000..b661111d0 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2012 James Cooper + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-gorp/gorp/v3/README.md b/vendor/github.com/go-gorp/gorp/v3/README.md new file mode 100644 index 000000000..983fe4343 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/README.md @@ -0,0 +1,809 @@ +# Go Relational Persistence + +[![build status](https://github.com/go-gorp/gorp/actions/workflows/go.yml/badge.svg)](https://github.com/go-gorp/gorp/actions) +[![issues](https://img.shields.io/github/issues/go-gorp/gorp.svg)](https://github.com/go-gorp/gorp/issues) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-gorp/gorp/v3.svg)](https://pkg.go.dev/github.com/go-gorp/gorp/v3) + +### Update 2016-11-13: Future versions + +As many of the maintainers have become busy with other projects, +progress toward the ever-elusive v2 has slowed to the point that we're +only occasionally making progress outside of merging pull requests. +In the interest of continuing to release, I'd like to lean toward a +more maintainable path forward. + +For the moment, I am releasing a v2 tag with the current feature set +from master, as some of those features have been actively used and +relied on by more than one project. Our next goal is to continue +cleaning up the code base with non-breaking changes as much as +possible, but if/when a breaking change is needed, we'll just release +new versions. This allows us to continue development at whatever pace +we're capable of, without delaying the release of features or refusing +PRs. + +## Introduction + +I hesitate to call gorp an ORM. Go doesn't really have objects, at +least not in the classic Smalltalk/Java sense. There goes the "O". +gorp doesn't know anything about the relationships between your +structs (at least not yet). So the "R" is questionable too (but I use +it in the name because, well, it seemed more clever). + +The "M" is alive and well. Given some Go structs and a database, gorp +should remove a fair amount of boilerplate busy-work from your code. + +I hope that gorp saves you time, minimizes the drudgery of getting +data in and out of your database, and helps your code focus on +algorithms, not infrastructure. 
+ +* Bind struct fields to table columns via API or tag +* Support for embedded structs +* Support for transactions +* Forward engineer db schema from structs (great for unit tests) +* Pre/post insert/update/delete hooks +* Automatically generate insert/update/delete statements for a struct +* Automatic binding of auto increment PKs back to struct after insert +* Delete by primary key(s) +* Select by primary key(s) +* Optional trace sql logging +* Bind arbitrary SQL queries to a struct +* Bind slice to SELECT query results without type assertions +* Use positional or named bind parameters in custom SELECT queries +* Optional optimistic locking using a version column (for + update/deletes) + +## Installation + +Use `go get` or your favorite vendoring tool, using whichever import +path you'd like. + +## Versioning + +We use semantic version tags. Feel free to import through `gopkg.in` +(e.g. `gopkg.in/gorp.v2`) to get the latest tag for a major version, +or check out the tag using your favorite vendoring tool. + +Development is not very active right now, but we have plans to +restructure `gorp` as we continue to move toward a more extensible +system. Whenever a breaking change is needed, the major version will +be bumped. + +The `master` branch is where all development is done, and breaking +changes may happen from time to time. That said, if you want to live +on the bleeding edge and are comfortable updating your code when we +make a breaking change, you may use `github.com/go-gorp/gorp` as your +import path. + +Check the version tags to see what's available. We'll make a good +faith effort to add badges for new versions, but we make no +guarantees. + +## Supported Go versions + +This package is guaranteed to be compatible with the latest 2 major +versions of Go. + +Any earlier versions are only supported on a best effort basis and can +be dropped any time. Go has a great compatibility promise. Upgrading +your program to a newer version of Go should never really be a +problem. + +## Migration guide + +#### Pre-v2 to v2 +Automatic mapping of the version column used in optimistic locking has +been removed as it could cause problems if the type was not int. The +version column must now explicitly be set with +`tablemap.SetVersionCol()`. + +## Help/Support + +Use our [`gitter` channel](https://gitter.im/go-gorp/gorp). We used +to use IRC, but with most of us being pulled in many directions, we +often need the email notifications from `gitter` to yell at us to sign +in. 
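+
+Following up on the migration guide above, here is a minimal sketch of the
+explicit version-column mapping (the struct and table names are illustrative,
+assuming a `Person` struct with an `int64` `Version` field):
+
+```go
+t := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id")
+// v2+ no longer auto-detects the version column; register it explicitly:
+t.SetVersionCol("Version")
+```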
+ +## Quickstart + +```go +package main + +import ( + "database/sql" + "gopkg.in/gorp.v1" + _ "github.com/mattn/go-sqlite3" + "log" + "time" +) + +func main() { + // initialize the DbMap + dbmap := initDb() + defer dbmap.Db.Close() + + // delete any existing rows + err := dbmap.TruncateTables() + checkErr(err, "TruncateTables failed") + + // create two posts + p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum") + p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum") + + // insert rows - auto increment PKs will be set properly after the insert + err = dbmap.Insert(&p1, &p2) + checkErr(err, "Insert failed") + + // use convenience SelectInt + count, err := dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Rows after inserting:", count) + + // update a row + p2.Title = "Go 1.2 is better than ever" + count, err = dbmap.Update(&p2) + checkErr(err, "Update failed") + log.Println("Rows updated:", count) + + // fetch one row - note use of "post_id" instead of "Id" since column is aliased + // + // Postgres users should use $1 instead of ? placeholders + // See 'Known Issues' below + // + err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id) + checkErr(err, "SelectOne failed") + log.Println("p2 row:", p2) + + // fetch all rows + var posts []Post + _, err = dbmap.Select(&posts, "select * from posts order by post_id") + checkErr(err, "Select failed") + log.Println("All rows:") + for x, p := range posts { + log.Printf(" %d: %v\n", x, p) + } + + // delete row by PK + count, err = dbmap.Delete(&p1) + checkErr(err, "Delete failed") + log.Println("Rows deleted:", count) + + // delete row manually via Exec + _, err = dbmap.Exec("delete from posts where post_id=?", p2.Id) + checkErr(err, "Exec failed") + + // confirm count is zero + count, err = dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Row count - should be zero:", count) + + log.Println("Done!") +} + +type Post struct { + // db tag lets you specify the column name if it differs from the struct field + Id int64 `db:"post_id"` + Created int64 + Title string `db:",size:50"` // Column size set to 50 + Body string `db:"article_body,size:1024"` // Set both column name and size +} + +func newPost(title, body string) Post { + return Post{ + Created: time.Now().UnixNano(), + Title: title, + Body: body, + } +} + +func initDb() *gorp.DbMap { + // connect to db using standard Go database/sql API + // use whatever database/sql driver you wish + db, err := sql.Open("sqlite3", "/tmp/post_db.bin") + checkErr(err, "sql.Open failed") + + // construct a gorp DbMap + dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}} + + // add a table, setting the table name to 'posts' and + // specifying that the Id property is an auto incrementing PK + dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id") + + // create the table. 
in a production system you'd generally + // use a migration tool, or create the tables via scripts + err = dbmap.CreateTablesIfNotExists() + checkErr(err, "Create tables failed") + + return dbmap +} + +func checkErr(err error, msg string) { + if err != nil { + log.Fatalln(msg, err) + } +} +``` + +## Examples + +### Mapping structs to tables + +First define some types: + +```go +type Invoice struct { + Id int64 + Created int64 + Updated int64 + Memo string + PersonId int64 +} + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string +} + +// Example of using tags to alias fields to column names +// The 'db' value is the column name +// +// A hyphen will cause gorp to skip this field, similar to the +// Go json package. +// +// This is equivalent to using the ColMap methods: +// +// table := dbmap.AddTableWithName(Product{}, "product") +// table.ColMap("Id").Rename("product_id") +// table.ColMap("Price").Rename("unit_price") +// table.ColMap("IgnoreMe").SetTransient(true) +// +// You can optionally declare the field to be a primary key and/or autoincrement +// +type Product struct { + Id int64 `db:"product_id, primarykey, autoincrement"` + Price int64 `db:"unit_price"` + IgnoreMe string `db:"-"` +} +``` + +Then create a mapper, typically you'd do this one time at app startup: + +```go +// connect to db using standard Go database/sql API +// use whatever database/sql driver you wish +db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword") + +// construct a gorp DbMap +dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}} + +// register the structs you wish to use with gorp +// you can also use the shorter dbmap.AddTable() if you +// don't want to override the table name +// +// SetKeys(true) means we have a auto increment primary key, which +// will get automatically bound to your struct post-insert +// +t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id") +t2 := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id") +t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id") +``` + +### Struct Embedding + +gorp supports embedding structs. For example: + +```go +type Names struct { + FirstName string + LastName string +} + +type WithEmbeddedStruct struct { + Id int64 + Names +} + +es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}} +err := dbmap.Insert(es) +``` + +See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example. + +### Create/Drop Tables ### + +Automatically create / drop registered tables. This is useful for unit tests +but is entirely optional. You can of course use gorp with tables created manually, +or with a separate migration tool (like [sql-migrate](https://github.com/rubenv/sql-migrate), [goose](https://bitbucket.org/liamstask/goose) or [migrate](https://github.com/mattes/migrate)). + +```go +// create all registered tables +dbmap.CreateTables() + +// same as above, but uses "if not exists" clause to skip tables that are +// already defined +dbmap.CreateTablesIfNotExists() + +// drop +dbmap.DropTables() +``` + +### SQL Logging + +Optionally you can pass in a logger to trace all SQL statements. +I recommend enabling this initially while you're getting the feel for what +gorp is doing on your behalf. + +Gorp defines a `GorpLogger` interface that Go's built in `log.Logger` satisfies. 
+However, you can write your own `GorpLogger` implementation, or use a package such +as `glog` if you want more control over how statements are logged. + +```go +// Will log all SQL statements + args as they are run +// The first arg is a string prefix to prepend to all log messages +dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds)) + +// Turn off tracing +dbmap.TraceOff() +``` + +### Insert + +```go +// Must declare as pointers so optional callback hooks +// can operate on your data, not copies +inv1 := &Invoice{0, 100, 200, "first order", 0} +inv2 := &Invoice{0, 100, 200, "second order", 0} + +// Insert your rows +err := dbmap.Insert(inv1, inv2) + +// Because we called SetKeys(true) on Invoice, the Id field +// will be populated after the Insert() automatically +fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id) +``` + +### Update + +Continuing the above example, use the `Update` method to modify an Invoice: + +```go +// count is the # of rows updated, which should be 1 in this example +count, err := dbmap.Update(inv1) +``` + +### Delete + +If you have primary key(s) defined for a struct, you can use the `Delete` +method to remove rows: + +```go +count, err := dbmap.Delete(inv1) +``` + +### Select by Key + +Use the `Get` method to fetch a single row by primary key. It returns +nil if no row is found. + +```go +// fetch Invoice with Id=99 +obj, err := dbmap.Get(Invoice{}, 99) +inv := obj.(*Invoice) +``` + +### Ad Hoc SQL + +#### SELECT + +`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice +or a single struct. + +```go +// Select a slice - first return value is not needed when a slice pointer is passed to Select() +var posts []Post +_, err := dbmap.Select(&posts, "select * from post order by id") + +// You can also use primitive types +var ids []string +_, err := dbmap.Select(&ids, "select id from post") + +// Select a single row. +// Returns an error if no row found, or if more than one row is found +var post Post +err := dbmap.SelectOne(&post, "select * from post where id=?", id) +``` + +Want to do joins? Just write the SQL and the struct. gorp will bind them: + +```go +// Define a type for your join +// It *must* contain all the columns in your SELECT statement +// +// The names here should match the aliased column names you specify +// in your SQL - no additional binding work required. simple. +// +type InvoicePersonView struct { + InvoiceId int64 + PersonId int64 + Memo string + FName string +} + +// Create some rows +p1 := &Person{0, 0, 0, "bob", "smith"} +err = dbmap.Insert(p1) +checkErr(err, "Insert failed") + +// notice how we can wire up p1.Id to the invoice easily +inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id} +err = dbmap.Insert(inv1) +checkErr(err, "Insert failed") + +// Run your query +query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + +// pass a slice to Select() +var list []InvoicePersonView +_, err := dbmap.Select(&list, query) + +// this should test true +expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName} +if reflect.DeepEqual(list[0], expected) { + fmt.Println("Woot! My join worked!") +} +``` + +#### SELECT string or int64 + +gorp provides a few convenience methods for selecting a single string or int64. + +```go +// select single int64 from db (use $1 instead of ? 
for postgresql) +i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal) + +// select single string from db: +s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal) + +``` + +#### Named bind parameters + +You may use a map or struct to bind parameters by name. This is currently +only supported in SELECT queries. + +```go +_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{ + "name": "Rob", + "age": 31, +}) +``` + +#### UPDATE / DELETE + +You can execute raw SQL if you wish. Particularly good for batch operations. + +```go +res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10) +``` + +### Transactions + +You can batch operations into a transaction: + +```go +func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error { + // Start a new transaction + trans, err := dbmap.Begin() + if err != nil { + return err + } + + err = trans.Insert(per) + checkErr(err, "Insert failed") + + inv.PersonId = per.Id + err = trans.Insert(inv) + checkErr(err, "Insert failed") + + // if the commit is successful, a nil error is returned + return trans.Commit() +} +``` + +### Hooks + +Use hooks to update data before/after saving to the db. Good for timestamps: + +```go +// implement the PreInsert and PreUpdate hooks +func (i *Invoice) PreInsert(s gorp.SqlExecutor) error { + i.Created = time.Now().UnixNano() + i.Updated = i.Created + return nil +} + +func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error { + i.Updated = time.Now().UnixNano() + return nil +} + +// You can use the SqlExecutor to cascade additional SQL +// Take care to avoid cycles. gorp won't prevent them. +// +// Here's an example of a cascading delete +// +func (p *Person) PreDelete(s gorp.SqlExecutor) error { + query := "delete from invoice_test where PersonId=?" + + _, err := s.Exec(query, p.Id) + + if err != nil { + return err + } + return nil +} +``` + +Full list of hooks that you can implement: + + PostGet + PreInsert + PostInsert + PreUpdate + PostUpdate + PreDelete + PostDelete + + All have the same signature. for example: + + func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error + +### Optimistic Locking + +#### Note that this behaviour has changed in v2. See [Migration Guide](#migration-guide). + +gorp provides a simple optimistic locking feature, similar to Java's +JPA, that will raise an error if you try to update/delete a row whose +`version` column has a value different than the one in memory. This +provides a safe way to do "select then update" style operations +without explicit read and write locks. 
+ +```go +// Version is an auto-incremented number, managed by gorp +// If this property is present on your struct, update +// operations will be constrained +// +// For example, say we defined Person as: + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string + + // automatically used as the Version col + // use table.SetVersionCol("columnName") to map a different + // struct field as the version field + Version int64 +} + +p1 := &Person{0, 0, 0, "Bob", "Smith", 0} +err = dbmap.Insert(p1) // Version is now 1 +checkErr(err, "Insert failed") + +obj, err := dbmap.Get(Person{}, p1.Id) +p2 := obj.(*Person) +p2.LName = "Edwards" +_,err = dbmap.Update(p2) // Version is now 2 +checkErr(err, "Update failed") + +p1.LName = "Howard" + +// Raises error because p1.Version == 1, which is out of date +count, err := dbmap.Update(p1) +_, ok := err.(gorp.OptimisticLockError) +if ok { + // should reach this statement + + // in a real app you might reload the row and retry, or + // you might propegate this to the user, depending on the desired + // semantics + fmt.Printf("Tried to update row with stale data: %v\n", err) +} else { + // some other db error occurred - log or return up the stack + fmt.Printf("Unknown db err: %v\n", err) +} +``` +### Adding INDEX(es) on column(s) beyond the primary key ### + +Indexes are frequently critical for performance. Here is how to add +them to your tables. + +NB: SqlServer and Oracle need testing and possible adjustment to the +CreateIndexSuffix() and DropIndexSuffix() methods to make AddIndex() +work for them. + +In the example below we put an index both on the Id field, and on the +AcctId field. + +``` +type Account struct { + Id int64 + AcctId string // e.g. this might be a long uuid for portability +} + +// indexType (the 2nd param to AddIndex call) is "Btree" or "Hash" for MySQL. +// demonstrate adding a second index on AcctId, and constrain that field to have unique values. +dbm.AddTable(iptab.Account{}).SetKeys(true, "Id").AddIndex("AcctIdIndex", "Btree", []string{"AcctId"}).SetUnique(true) + +err = dbm.CreateTablesIfNotExists() +checkErr(err, "CreateTablesIfNotExists failed") + +err = dbm.CreateIndex() +checkErr(err, "CreateIndex failed") + +``` +Check the effect of the CreateIndex() call in mysql: +``` +$ mysql + +MariaDB [test]> show create table Account; ++---------+--------------------------+ +| Account | CREATE TABLE `Account` ( + `Id` bigint(20) NOT NULL AUTO_INCREMENT, + `AcctId` varchar(255) DEFAULT NULL, + PRIMARY KEY (`Id`), + UNIQUE KEY `AcctIdIndex` (`AcctId`) USING BTREE <<<--- yes! index added. +) ENGINE=InnoDB DEFAULT CHARSET=utf8 ++---------+--------------------------+ + +``` + + +## Database Drivers + +gorp uses the Go 1 `database/sql` package. A full list of compliant +drivers is available here: + +http://code.google.com/p/go-wiki/wiki/SQLDrivers + +Sadly, SQL databases differ on various issues. gorp provides a Dialect +interface that should be implemented per database vendor. Dialects +are provided for: + +* MySQL +* PostgreSQL +* sqlite3 + +Each of these three databases pass the test suite. See `gorp_test.go` +for example DSNs for these three databases. + +Support is also provided for: + +* Oracle (contributed by @klaidliadon) +* SQL Server (contributed by @qrawl) - use driver: + github.com/denisenkom/go-mssqldb + +Note that these databases are not covered by CI and I (@coopernurse) +have no good way to test them locally. 
So please try them and send
+patches as needed, but expect a bit more unpredictability.
+
+## Sqlite3 Extensions
+
+In order to use sqlite3 extensions you need to first register a custom driver:
+
+```go
+import (
+	"database/sql"
+
+	// use whatever database/sql driver you wish
+	sqlite "github.com/mattn/go-sqlite3"
+)
+
+func customDriver() (*sql.DB, error) {
+
+	// create custom driver with extensions defined
+	sql.Register("sqlite3-custom", &sqlite.SQLiteDriver{
+		Extensions: []string{
+			"mod_spatialite",
+		},
+	})
+
+	// you can then connect using the 'sqlite3-custom' driver instead of 'sqlite3'
+	return sql.Open("sqlite3-custom", "/tmp/post_db.bin")
+}
+```
+
+## Known Issues
+
+### SQL placeholder portability
+
+Different databases use different strings to indicate variable
+placeholders in prepared SQL statements. Unlike some database
+abstraction layers (such as JDBC), Go's `database/sql` does not
+standardize this.
+
+SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get`
+methods delegates to a Dialect implementation for each database, and
+will generate portable SQL.
+
+Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`,
+etc. will not be parsed. Consequently you may have portability issues
+if you write a query like this:
+
+```go
+// works on MySQL and Sqlite3, but not with Postgresql
+err := dbmap.SelectOne(&val, "select * from foo where id = ?", 30)
+```
+
+In `Select` and `SelectOne` you can use named parameters to work
+around this. The following is portable:
+
+```go
+err := dbmap.SelectOne(&val, "select * from foo where id = :id",
+	map[string]interface{}{"id": 30})
+```
+
+Additionally, when using Postgres as your database, you should use `$1`
+instead of `?` placeholders, as `?` placeholders will result in
+`pq: operator does not exist` errors when querying Postgres.
+Alternatively, use `dbMap.Dialect.BindVar(varIdx)` to get the
+proper variable binding for your dialect.
+
+### time.Time and time zones
+
+gorp will pass `time.Time` fields through to the `database/sql`
+driver, but note that the behavior of this type varies across database
+drivers.
+
+MySQL users should be especially cautious. See:
+https://github.com/ziutek/mymysql/pull/77
+
+To avoid any potential issues with timezone/DST, consider:
+
+- Using an integer field for time data and storing UNIX time.
+- Using a custom time type that implements some SQL types:
+  - [`"database/sql".Scanner`](https://golang.org/pkg/database/sql/#Scanner)
+  - [`"database/sql/driver".Valuer`](https://golang.org/pkg/database/sql/driver/#Valuer)
+
+## Running the tests
+
+The included tests may be run against MySQL, Postgresql, or sqlite3.
+You must set two environment variables so the test code knows which
+driver to use, and how to connect to your database.
+
+```sh
+# MySQL example:
+export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123
+export GORP_TEST_DIALECT=mysql
+
+# run the tests
+go test
+
+# run the tests and benchmarks
+go test -bench="Bench" -benchtime 10s
+```
+
+Valid `GORP_TEST_DIALECT` values are: "mysql" (for mymysql),
+"gomysql" (for go-sql-driver), "postgres", and "sqlite". See the
+`test_all.sh` script for examples of all 3 databases. This is the
+script I run locally to test the library.
+
+## Performance
+
+gorp uses reflection to construct SQL queries and bind parameters.
+See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a
+simple perf test. On my MacBook Pro gorp is about 2-3% slower than
+hand-written SQL.
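+
+As a footnote to the placeholder-portability issue above, a minimal sketch of
+building a dialect-agnostic query with `BindVar` (the query, table, and
+variables are illustrative):
+
+```go
+// Dialect.BindVar(0) yields "?" on MySQL/sqlite3 and "$1" on PostgreSQL,
+// so the same code runs unchanged against either database.
+query := fmt.Sprintf("select * from foo where id = %s", dbmap.Dialect.BindVar(0))
+err := dbmap.SelectOne(&val, query, 30)
+```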
+ + +## Contributors + +* matthias-margush - column aliasing via tags +* Rob Figueiredo - @robfig +* Quinn Slack - @sqs diff --git a/vendor/github.com/go-gorp/gorp/v3/column.go b/vendor/github.com/go-gorp/gorp/v3/column.go new file mode 100644 index 000000000..383e9efb6 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/column.go @@ -0,0 +1,76 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import "reflect" + +// ColumnMap represents a mapping between a Go struct field and a single +// column in a table. +// Unique and MaxSize only inform the +// CreateTables() function and are not used by Insert/Update/Delete/Get. +type ColumnMap struct { + // Column name in db table + ColumnName string + + // If true, this column is skipped in generated SQL statements + Transient bool + + // If true, " unique" is added to create table statements. + // Not used elsewhere + Unique bool + + // Query used for getting generated id after insert + GeneratedIdQuery string + + // Passed to Dialect.ToSqlType() to assist in informing the + // correct column type to map to in CreateTables() + MaxSize int + + DefaultValue string + + fieldName string + gotype reflect.Type + isPK bool + isAutoIncr bool + isNotNull bool +} + +// Rename allows you to specify the column name in the table +// +// Example: table.ColMap("Updated").Rename("date_updated") +// +func (c *ColumnMap) Rename(colname string) *ColumnMap { + c.ColumnName = colname + return c +} + +// SetTransient allows you to mark the column as transient. If true +// this column will be skipped when SQL statements are generated +func (c *ColumnMap) SetTransient(b bool) *ColumnMap { + c.Transient = b + return c +} + +// SetUnique adds "unique" to the create table statements for this +// column, if b is true. +func (c *ColumnMap) SetUnique(b bool) *ColumnMap { + c.Unique = b + return c +} + +// SetNotNull adds "not null" to the create table statements for this +// column, if nn is true. +func (c *ColumnMap) SetNotNull(nn bool) *ColumnMap { + c.isNotNull = nn + return c +} + +// SetMaxSize specifies the max length of values of this column. This is +// passed to the dialect.ToSqlType() function, which can use the value +// to alter the generated type for "create table" statements +func (c *ColumnMap) SetMaxSize(size int) *ColumnMap { + c.MaxSize = size + return c +} diff --git a/vendor/github.com/go-gorp/gorp/v3/db.go b/vendor/github.com/go-gorp/gorp/v3/db.go new file mode 100644 index 000000000..d78062e5b --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/db.go @@ -0,0 +1,985 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "bytes" + "context" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "log" + "reflect" + "strconv" + "strings" + "time" +) + +// DbMap is the root gorp mapping object. Create one of these for each +// database schema you wish to map. Each DbMap contains a list of +// mapped tables. 
+// +// Example: +// +// dialect := gorp.MySQLDialect{"InnoDB", "UTF8"} +// dbmap := &gorp.DbMap{Db: db, Dialect: dialect} +// +type DbMap struct { + ctx context.Context + + // Db handle to use with this map + Db *sql.DB + + // Dialect implementation to use with this map + Dialect Dialect + + TypeConverter TypeConverter + + // ExpandSlices when enabled will convert slice arguments in mappers into flat + // values. It will modify the query, adding more placeholders, and the mapper, + // adding each item of the slice as a new unique entry in the mapper. For + // example, given the scenario bellow: + // + // dbmap.Select(&output, "SELECT 1 FROM example WHERE id IN (:IDs)", map[string]interface{}{ + // "IDs": []int64{1, 2, 3}, + // }) + // + // The executed query would be: + // + // SELECT 1 FROM example WHERE id IN (:IDs0,:IDs1,:IDs2) + // + // With the mapper: + // + // map[string]interface{}{ + // "IDs": []int64{1, 2, 3}, + // "IDs0": int64(1), + // "IDs1": int64(2), + // "IDs2": int64(3), + // } + // + // It is also flexible for custom slice types. The value just need to + // implement stringer or numberer interfaces. + // + // type CustomValue string + // + // const ( + // CustomValueHey CustomValue = "hey" + // CustomValueOh CustomValue = "oh" + // ) + // + // type CustomValues []CustomValue + // + // func (c CustomValues) ToStringSlice() []string { + // values := make([]string, len(c)) + // for i := range c { + // values[i] = string(c[i]) + // } + // return values + // } + // + // func query() { + // // ... + // result, err := dbmap.Select(&output, "SELECT 1 FROM example WHERE value IN (:Values)", map[string]interface{}{ + // "Values": CustomValues([]CustomValue{CustomValueHey}), + // }) + // // ... + // } + ExpandSliceArgs bool + + tables []*TableMap + tablesDynamic map[string]*TableMap // tables that use same go-struct and different db table names + logger GorpLogger + logPrefix string +} + +func (m *DbMap) dynamicTableAdd(tableName string, tbl *TableMap) { + if m.tablesDynamic == nil { + m.tablesDynamic = make(map[string]*TableMap) + } + m.tablesDynamic[tableName] = tbl +} + +func (m *DbMap) dynamicTableFind(tableName string) (*TableMap, bool) { + if m.tablesDynamic == nil { + return nil, false + } + tbl, found := m.tablesDynamic[tableName] + return tbl, found +} + +func (m *DbMap) dynamicTableMap() map[string]*TableMap { + if m.tablesDynamic == nil { + m.tablesDynamic = make(map[string]*TableMap) + } + return m.tablesDynamic +} + +func (m *DbMap) WithContext(ctx context.Context) SqlExecutor { + copy := &DbMap{} + *copy = *m + copy.ctx = ctx + return copy +} + +func (m *DbMap) CreateIndex() error { + var err error + dialect := reflect.TypeOf(m.Dialect) + for _, table := range m.tables { + for _, index := range table.indexes { + err = m.createIndexImpl(dialect, table, index) + if err != nil { + break + } + } + } + + for _, table := range m.dynamicTableMap() { + for _, index := range table.indexes { + err = m.createIndexImpl(dialect, table, index) + if err != nil { + break + } + } + } + + return err +} + +func (m *DbMap) createIndexImpl(dialect reflect.Type, + table *TableMap, + index *IndexMap) error { + s := bytes.Buffer{} + s.WriteString("create") + if index.Unique { + s.WriteString(" unique") + } + s.WriteString(" index") + s.WriteString(fmt.Sprintf(" %s on %s", index.IndexName, table.TableName)) + if dname := dialect.Name(); dname == "PostgresDialect" && index.IndexType != "" { + s.WriteString(fmt.Sprintf(" %s %s", m.Dialect.CreateIndexSuffix(), index.IndexType)) + } + 
s.WriteString(" (") + for x, col := range index.columns { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(m.Dialect.QuoteField(col)) + } + s.WriteString(")") + + if dname := dialect.Name(); dname == "MySQLDialect" && index.IndexType != "" { + s.WriteString(fmt.Sprintf(" %s %s", m.Dialect.CreateIndexSuffix(), index.IndexType)) + } + s.WriteString(";") + _, err := m.Exec(s.String()) + return err +} + +func (t *TableMap) DropIndex(name string) error { + + var err error + dialect := reflect.TypeOf(t.dbmap.Dialect) + for _, idx := range t.indexes { + if idx.IndexName == name { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("DROP INDEX %s", idx.IndexName)) + + if dname := dialect.Name(); dname == "MySQLDialect" { + s.WriteString(fmt.Sprintf(" %s %s", t.dbmap.Dialect.DropIndexSuffix(), t.TableName)) + } + s.WriteString(";") + _, e := t.dbmap.Exec(s.String()) + if e != nil { + err = e + } + break + } + } + t.ResetSql() + return err +} + +// AddTable registers the given interface type with gorp. The table name +// will be given the name of the TypeOf(i). You must call this function, +// or AddTableWithName, for any struct type you wish to persist with +// the given DbMap. +// +// This operation is idempotent. If i's type is already mapped, the +// existing *TableMap is returned +func (m *DbMap) AddTable(i interface{}) *TableMap { + return m.AddTableWithName(i, "") +} + +// AddTableWithName has the same behavior as AddTable, but sets +// table.TableName to name. +func (m *DbMap) AddTableWithName(i interface{}, name string) *TableMap { + return m.AddTableWithNameAndSchema(i, "", name) +} + +// AddTableWithNameAndSchema has the same behavior as AddTable, but sets +// table.TableName to name. +func (m *DbMap) AddTableWithNameAndSchema(i interface{}, schema string, name string) *TableMap { + t := reflect.TypeOf(i) + if name == "" { + name = t.Name() + } + + // check if we have a table for this type already + // if so, update the name and return the existing pointer + for i := range m.tables { + table := m.tables[i] + if table.gotype == t { + table.TableName = name + return table + } + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + var primaryKey []*ColumnMap + tmap.Columns, primaryKey = m.readStructColumns(t) + m.tables = append(m.tables, tmap) + if len(primaryKey) > 0 { + tmap.keys = append(tmap.keys, primaryKey...) + } + + return tmap +} + +// AddTableDynamic registers the given interface type with gorp. +// The table name will be dynamically determined at runtime by +// using the GetTableName method on DynamicTable interface +func (m *DbMap) AddTableDynamic(inp DynamicTable, schema string) *TableMap { + + val := reflect.ValueOf(inp) + elm := val.Elem() + t := elm.Type() + name := inp.TableName() + if name == "" { + panic("Missing table name in DynamicTable instance") + } + + // Check if there is another dynamic table with the same name + if _, found := m.dynamicTableFind(name); found { + panic(fmt.Sprintf("A table with the same name %v already exists", name)) + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + var primaryKey []*ColumnMap + tmap.Columns, primaryKey = m.readStructColumns(t) + if len(primaryKey) > 0 { + tmap.keys = append(tmap.keys, primaryKey...) 
+ } + + m.dynamicTableAdd(name, tmap) + + return tmap +} + +func (m *DbMap) readStructColumns(t reflect.Type) (cols []*ColumnMap, primaryKey []*ColumnMap) { + primaryKey = make([]*ColumnMap, 0) + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Anonymous && f.Type.Kind() == reflect.Struct { + // Recursively add nested fields in embedded structs. + subcols, subpk := m.readStructColumns(f.Type) + // Don't append nested fields that have the same field + // name as an already-mapped field. + for _, subcol := range subcols { + shouldAppend := true + for _, col := range cols { + if !subcol.Transient && subcol.fieldName == col.fieldName { + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, subcol) + } + } + if subpk != nil { + primaryKey = append(primaryKey, subpk...) + } + } else { + // Tag = Name { ',' Option } + // Option = OptionKey [ ':' OptionValue ] + cArguments := strings.Split(f.Tag.Get("db"), ",") + columnName := cArguments[0] + var maxSize int + var defaultValue string + var isAuto bool + var isPK bool + var isNotNull bool + for _, argString := range cArguments[1:] { + argString = strings.TrimSpace(argString) + arg := strings.SplitN(argString, ":", 2) + + // check mandatory/unexpected option values + switch arg[0] { + case "size", "default": + // options requiring value + if len(arg) == 1 { + panic(fmt.Sprintf("missing option value for option %v on field %v", arg[0], f.Name)) + } + default: + // options where value is invalid (currently all other options) + if len(arg) == 2 { + panic(fmt.Sprintf("unexpected option value for option %v on field %v", arg[0], f.Name)) + } + } + + switch arg[0] { + case "size": + maxSize, _ = strconv.Atoi(arg[1]) + case "default": + defaultValue = arg[1] + case "primarykey": + isPK = true + case "autoincrement": + isAuto = true + case "notnull": + isNotNull = true + default: + panic(fmt.Sprintf("Unrecognized tag option for field %v: %v", f.Name, arg)) + } + } + if columnName == "" { + columnName = f.Name + } + + gotype := f.Type + valueType := gotype + if valueType.Kind() == reflect.Ptr { + valueType = valueType.Elem() + } + value := reflect.New(valueType).Interface() + if m.TypeConverter != nil { + // Make a new pointer to a value of type gotype and + // pass it to the TypeConverter's FromDb method to see + // if a different type should be used for the column + // type during table creation. + scanner, useHolder := m.TypeConverter.FromDb(value) + if useHolder { + value = scanner.Holder + gotype = reflect.TypeOf(value) + } + } + if typer, ok := value.(SqlTyper); ok { + gotype = reflect.TypeOf(typer.SqlType()) + } else if typer, ok := value.(legacySqlTyper); ok { + log.Printf("Deprecation Warning: update your SqlType methods to return a driver.Value") + gotype = reflect.TypeOf(typer.SqlType()) + } else if valuer, ok := value.(driver.Valuer); ok { + // Only check for driver.Valuer if SqlTyper wasn't + // found. + v, err := valuer.Value() + if err == nil && v != nil { + gotype = reflect.TypeOf(v) + } + } + cm := &ColumnMap{ + ColumnName: columnName, + DefaultValue: defaultValue, + Transient: columnName == "-", + fieldName: f.Name, + gotype: gotype, + isPK: isPK, + isAutoIncr: isAuto, + isNotNull: isNotNull, + MaxSize: maxSize, + } + if isPK { + primaryKey = append(primaryKey, cm) + } + // Check for nested fields of the same field name and + // override them. 
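+			// (a field declared on the outer struct always wins over an
+			// embedded field of the same name)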
+ shouldAppend := true + for index, col := range cols { + if !col.Transient && col.fieldName == cm.fieldName { + cols[index] = cm + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, cm) + } + } + + } + return +} + +// CreateTables iterates through TableMaps registered to this DbMap and +// executes "create table" statements against the database for each. +// +// This is particularly useful in unit tests where you want to create +// and destroy the schema automatically. +func (m *DbMap) CreateTables() error { + return m.createTables(false) +} + +// CreateTablesIfNotExists is similar to CreateTables, but starts +// each statement with "create table if not exists" so that existing +// tables do not raise errors +func (m *DbMap) CreateTablesIfNotExists() error { + return m.createTables(true) +} + +func (m *DbMap) createTables(ifNotExists bool) error { + var err error + for i := range m.tables { + table := m.tables[i] + sql := table.SqlForCreate(ifNotExists) + _, err = m.Exec(sql) + if err != nil { + return err + } + } + + for _, tbl := range m.dynamicTableMap() { + sql := tbl.SqlForCreate(ifNotExists) + _, err = m.Exec(sql) + if err != nil { + return err + } + } + + return err +} + +// DropTable drops an individual table. +// Returns an error when the table does not exist. +func (m *DbMap) DropTable(table interface{}) error { + t := reflect.TypeOf(table) + + tableName := "" + if dyn, ok := table.(DynamicTable); ok { + tableName = dyn.TableName() + } + + return m.dropTable(t, tableName, false) +} + +// DropTableIfExists drops an individual table when the table exists. +func (m *DbMap) DropTableIfExists(table interface{}) error { + t := reflect.TypeOf(table) + + tableName := "" + if dyn, ok := table.(DynamicTable); ok { + tableName = dyn.TableName() + } + + return m.dropTable(t, tableName, true) +} + +// DropTables iterates through TableMaps registered to this DbMap and +// executes "drop table" statements against the database for each. +func (m *DbMap) DropTables() error { + return m.dropTables(false) +} + +// DropTablesIfExists is the same as DropTables, but uses the "if exists" clause to +// avoid errors for tables that do not exist. +func (m *DbMap) DropTablesIfExists() error { + return m.dropTables(true) +} + +// Goes through all the registered tables, dropping them one by one. +// If an error is encountered, then it is returned and the rest of +// the tables are not dropped. +func (m *DbMap) dropTables(addIfExists bool) (err error) { + for _, table := range m.tables { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return err + } + } + + for _, table := range m.dynamicTableMap() { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return err + } + } + + return err +} + +// Implementation of dropping a single table. 
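+// Returns an error if the type (or dynamic table name) was never
+// registered with AddTable or AddTableDynamic.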
+func (m *DbMap) dropTable(t reflect.Type, name string, addIfExists bool) error {
+	table := tableOrNil(m, t, name)
+	if table == nil {
+		return fmt.Errorf("table %s was not registered", t.Name())
+	}
+
+	return m.dropTableImpl(table, addIfExists)
+}
+
+func (m *DbMap) dropTableImpl(table *TableMap, ifExists bool) (err error) {
+	tableDrop := "drop table"
+	if ifExists {
+		tableDrop = m.Dialect.IfTableExists(tableDrop, table.SchemaName, table.TableName)
+	}
+	_, err = m.Exec(fmt.Sprintf("%s %s;", tableDrop, m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName)))
+	return err
+}
+
+// TruncateTables iterates through TableMaps registered to this DbMap and
+// executes "truncate table" statements against the database for each, or in the case of
+// sqlite, a "delete from" with no "where" clause, which uses the truncate optimization
+// (http://www.sqlite.org/lang_delete.html)
+func (m *DbMap) TruncateTables() error {
+	var err error
+	for i := range m.tables {
+		table := m.tables[i]
+		_, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName)))
+		if e != nil {
+			err = e
+		}
+	}
+
+	for _, table := range m.dynamicTableMap() {
+		_, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName)))
+		if e != nil {
+			err = e
+		}
+	}
+
+	return err
+}
+
+// Insert runs a SQL INSERT statement for each element in list. List
+// items must be pointers.
+//
+// Any interface whose TableMap has an auto-increment primary key will
+// have its last insert id bound to the PK field on the struct.
+//
+// The hook functions PreInsert() and/or PostInsert() will be executed
+// before/after the INSERT statement if the interface defines them.
+//
+// Panics if any interface in the list has not been registered with AddTable
+func (m *DbMap) Insert(list ...interface{}) error {
+	return insert(m, m, list...)
+}
+
+// Update runs a SQL UPDATE statement for each element in list. List
+// items must be pointers.
+//
+// The hook functions PreUpdate() and/or PostUpdate() will be executed
+// before/after the UPDATE statement if the interface defines them.
+//
+// Returns the number of rows updated.
+//
+// Returns an error if SetKeys has not been called on the TableMap
+// Panics if any interface in the list has not been registered with AddTable
+func (m *DbMap) Update(list ...interface{}) (int64, error) {
+	return update(m, m, nil, list...)
+}
+
+// UpdateColumns runs a SQL UPDATE statement for each element in list. List
+// items must be pointers.
+//
+// Only the columns accepted by filter are included in the UPDATE.
+//
+// The hook functions PreUpdate() and/or PostUpdate() will be executed
+// before/after the UPDATE statement if the interface defines them.
+//
+// Returns the number of rows updated.
+//
+// Returns an error if SetKeys has not been called on the TableMap
+// Panics if any interface in the list has not been registered with AddTable
+func (m *DbMap) UpdateColumns(filter ColumnFilter, list ...interface{}) (int64, error) {
+	return update(m, m, filter, list...)
+}
+
+// Delete runs a SQL DELETE statement for each element in list. List
+// items must be pointers.
+//
+// The hook functions PreDelete() and/or PostDelete() will be executed
+// before/after the DELETE statement if the interface defines them.
+//
+// Returns the number of rows deleted. 
+//
+// Returns an error if SetKeys has not been called on the TableMap.
+// Panics if any interface in the list has not been registered with AddTable
+func (m *DbMap) Delete(list ...interface{}) (int64, error) {
+	return delete(m, m, list...)
+}
+
+// Get runs a SQL SELECT to fetch a single row from the table based on the
+// primary key(s)
+//
+// i should be an empty value for the struct to load. keys should be
+// the primary key value(s) for the row to load. If multiple keys
+// exist on the table, the order should match the column order
+// specified in SetKeys() when the table mapping was defined.
+//
+// The hook function PostGet() will be executed after the SELECT
+// statement if the interface defines it.
+//
+// Returns a pointer to a struct that matches or nil if no row is found.
+//
+// Returns an error if SetKeys has not been called on the TableMap.
+// Panics if any interface in the list has not been registered with AddTable
+func (m *DbMap) Get(i interface{}, keys ...interface{}) (interface{}, error) {
+	return get(m, m, i, keys...)
+}
+
+// Select runs an arbitrary SQL query, binding the columns in the result
+// to fields on the struct specified by i. args represent the bind
+// parameters for the SQL statement.
+//
+// Column names on the SELECT statement should be aliased to the field names
+// on the struct i. Returns an error if one or more columns in the result
+// do not match. It is OK if fields on i are not part of the SQL
+// statement.
+//
+// The hook function PostGet() will be executed after the SELECT
+// statement if the interface defines it.
+//
+// Values are returned in one of two ways:
+// 1. If i is a struct or a pointer to a struct, returns a slice of pointers to
+// matching rows of type i.
+// 2. If i is a pointer to a slice, the results will be appended to that slice
+// and nil returned.
+//
+// i does NOT need to be registered with AddTable()
+func (m *DbMap) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) {
+	if m.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return hookedselect(m, m, i, query, args...)
+}
+
+// Exec runs an arbitrary SQL statement. args represent the bind parameters.
+// This is equivalent to calling Exec() on the underlying database/sql handle.
+func (m *DbMap) Exec(query string, args ...interface{}) (sql.Result, error) {
+	if m.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	if m.logger != nil {
+		now := time.Now()
+		defer m.trace(now, query, args...)
+	}
+	return maybeExpandNamedQueryAndExec(m, query, args...)
+}
+
+// SelectInt is a convenience wrapper around the gorp.SelectInt function
+func (m *DbMap) SelectInt(query string, args ...interface{}) (int64, error) {
+	if m.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectInt(m, query, args...)
+}
+
+// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function
+func (m *DbMap) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) {
+	if m.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectNullInt(m, query, args...)
+}
+
+// SelectFloat is a convenience wrapper around the gorp.SelectFloat function
+func (m *DbMap) SelectFloat(query string, args ...interface{}) (float64, error) {
+	if m.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectFloat(m, query, args...)
+} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function +func (m *DbMap) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectNullFloat(m, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function +func (m *DbMap) SelectStr(query string, args ...interface{}) (string, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectStr(m, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function +func (m *DbMap) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectNullStr(m, query, args...) +} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function +func (m *DbMap) SelectOne(holder interface{}, query string, args ...interface{}) error { + if m.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + return SelectOne(m, m, holder, query, args...) +} + +// Begin starts a gorp Transaction +func (m *DbMap) Begin() (*Transaction, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, "begin;") + } + tx, err := begin(m) + if err != nil { + return nil, err + } + return &Transaction{ + dbmap: m, + tx: tx, + closed: false, + }, nil +} + +// TableFor returns the *TableMap corresponding to the given Go Type +// If no table is mapped to that type an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) TableFor(t reflect.Type, checkPK bool) (*TableMap, error) { + table := tableOrNil(m, t, "") + if table == nil { + return nil, fmt.Errorf("no table found for type: %v", t.Name()) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: no keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// DynamicTableFor returns the *TableMap for the dynamic table corresponding +// to the input tablename +// If no table is mapped to that tablename an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) DynamicTableFor(tableName string, checkPK bool) (*TableMap, error) { + table, found := m.dynamicTableFind(tableName) + if !found { + return nil, fmt.Errorf("gorp: no table found for name: %v", tableName) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: no keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// Prepare creates a prepared statement for later queries or executions. +// Multiple queries or executions may be run concurrently from the returned statement. 
+// This is equivalent to calling Prepare() on the underlying database/sql handle.
+func (m *DbMap) Prepare(query string) (*sql.Stmt, error) {
+	if m.logger != nil {
+		now := time.Now()
+		defer m.trace(now, query, nil)
+	}
+	return prepare(m, query)
+}
+
+func tableOrNil(m *DbMap, t reflect.Type, name string) *TableMap {
+	if name != "" {
+		// Search by table name (dynamic tables)
+		if table, found := m.dynamicTableFind(name); found {
+			return table
+		}
+		return nil
+	}
+
+	for i := range m.tables {
+		table := m.tables[i]
+		if table.gotype == t {
+			return table
+		}
+	}
+	return nil
+}
+
+func (m *DbMap) tableForPointer(ptr interface{}, checkPK bool) (*TableMap, reflect.Value, error) {
+	ptrv := reflect.ValueOf(ptr)
+	if ptrv.Kind() != reflect.Ptr {
+		e := fmt.Sprintf("gorp: passed non-pointer: %v (kind=%v)", ptr,
+			ptrv.Kind())
+		return nil, reflect.Value{}, errors.New(e)
+	}
+	elem := ptrv.Elem()
+	ifc := elem.Interface()
+	var t *TableMap
+	var err error
+	tableName := ""
+	if dyn, isDyn := ptr.(DynamicTable); isDyn {
+		tableName = dyn.TableName()
+		t, err = m.DynamicTableFor(tableName, checkPK)
+	} else {
+		etype := reflect.TypeOf(ifc)
+		t, err = m.TableFor(etype, checkPK)
+	}
+
+	if err != nil {
+		return nil, reflect.Value{}, err
+	}
+
+	return t, elem, nil
+}
+
+func (m *DbMap) QueryRow(query string, args ...interface{}) *sql.Row {
+	if m.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	if m.logger != nil {
+		now := time.Now()
+		defer m.trace(now, query, args...)
+	}
+	return queryRow(m, query, args...)
+}
+
+func (m *DbMap) Query(q string, args ...interface{}) (*sql.Rows, error) {
+	if m.ExpandSliceArgs {
+		expandSliceArgs(&q, args...)
+	}
+
+	if m.logger != nil {
+		now := time.Now()
+		defer m.trace(now, q, args...)
+	}
+	return query(m, q, args...)
+}
+
+func (m *DbMap) trace(started time.Time, query string, args ...interface{}) {
+	if m.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	if m.logger != nil {
+		var margs = argsString(args...)
+		m.logger.Printf("%s%s [%s] (%v)", m.logPrefix, query, margs, time.Since(started))
+	}
+}
+
+type stringer interface {
+	ToStringSlice() []string
+}
+
+type numberer interface {
+	ToInt64Slice() []int64
+}
+
+func expandSliceArgs(query *string, args ...interface{}) {
+	for _, arg := range args {
+		mapper, ok := arg.(map[string]interface{})
+		if !ok {
+			continue
+		}
+
+		for key, value := range mapper {
+			var replacements []string
+
+			// add flexibility for any custom type to be converted to one of the
+			// acceptable formats.
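+			// For instance, a caller-defined type can satisfy the unexported
+			// stringer interface so its values expand below (an illustrative
+			// sketch, not part of gorp):
+			//
+			//	type names []string
+			//	func (n names) ToStringSlice() []string { return n }
+			//
+			// With args containing map[string]interface{}{"ids": names{"a", "b"}},
+			// a ":ids" placeholder expands to ":ids0,:ids1".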
+ if v, ok := value.(stringer); ok { + value = v.ToStringSlice() + } + if v, ok := value.(numberer); ok { + value = v.ToInt64Slice() + } + + switch v := value.(type) { + case []string: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint8: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint16: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint32: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []uint64: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int8: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int16: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int32: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []int64: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []float32: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + case []float64: + for id, replace := range v { + mapper[fmt.Sprintf("%s%d", key, id)] = replace + replacements = append(replacements, fmt.Sprintf(":%s%d", key, id)) + } + default: + continue + } + + if len(replacements) == 0 { + continue + } + + *query = strings.Replace(*query, fmt.Sprintf(":%s", key), strings.Join(replacements, ","), -1) + } + } +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect.go b/vendor/github.com/go-gorp/gorp/v3/dialect.go new file mode 100644 index 000000000..fdea2b203 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect.go @@ -0,0 +1,105 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "reflect" +) + +// The Dialect interface encapsulates behaviors that differ across +// SQL databases. At present the Dialect is only used by CreateTables() +// but this could change in the future +type Dialect interface { + // adds a suffix to any query, usually ";" + QuerySuffix() string + + // ToSqlType returns the SQL column type to use when creating a + // table of the given Go Type. maxsize can be used to switch based on + // size. 
For example, in MySQL []byte could map to BLOB, MEDIUMBLOB,
+	// or LONGBLOB depending on the maxsize
+	ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string
+
+	// string to append to primary key column definitions
+	AutoIncrStr() string
+
+	// string to bind autoincrement columns to. Empty string will
+	// remove reference to those columns in the INSERT statement.
+	AutoIncrBindValue() string
+
+	AutoIncrInsertSuffix(col *ColumnMap) string
+
+	// string to append to "create table" statement for vendor specific
+	// table attributes
+	CreateTableSuffix() string
+
+	// string to append to "create index" statement
+	CreateIndexSuffix() string
+
+	// string to append to "drop index" statement
+	DropIndexSuffix() string
+
+	// string to truncate tables
+	TruncateClause() string
+
+	// bind variable string to use when forming SQL statements
+	// in many dbs it is "?", but Postgres appears to use $1
+	//
+	// i is a zero-based index of the bind variable in this statement
+	//
+	BindVar(i int) string
+
+	// Handles quoting of a field name to ensure that it doesn't raise any
+	// SQL parsing exceptions by using a reserved word as a field name.
+	QuoteField(field string) string
+
+	// Handles building up of a schema.table string that is compatible with
+	// the given dialect
+	//
+	// schema - The schema that the table lives in
+	// table - The table name
+	QuotedTableForQuery(schema string, table string) string
+
+	// Existence clause for table creation / deletion
+	IfSchemaNotExists(command, schema string) string
+	IfTableExists(command, schema, table string) string
+	IfTableNotExists(command, schema, table string) string
+}
+
+// IntegerAutoIncrInserter is implemented by dialects that can perform
+// inserts with automatically incremented integer primary keys. If
+// the dialect can handle automatic assignment of more than just
+// integers, see TargetedAutoIncrInserter.
+type IntegerAutoIncrInserter interface {
+	InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error)
+}
+
+// TargetedAutoIncrInserter is implemented by dialects that can
+// perform automatic assignment of any primary key type (i.e. strings
+// for uuids, integers for serials, etc).
+type TargetedAutoIncrInserter interface {
+	// InsertAutoIncrToTarget runs an insert operation and assigns the
+	// automatically generated primary key directly to the passed in
+	// target. The target should be a pointer to the primary key
+	// field of the value being inserted.
+	InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error
+}
+
+// TargetQueryInserter is implemented by dialects that can perform
+// assignment of integer primary key type by executing a query
+// like "select sequence.currval from dual".
+type TargetQueryInserter interface {
+	// TargetQueryInserter runs an insert operation and assigns the
+	// automatically generated primary key retrieved by the query
+	// extracted from the GeneratedIdQuery field of the id column.
+	InsertQueryToTarget(exec SqlExecutor, insertSql, idSql string, target interface{}, params ...interface{}) error
+}
+
+func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
+	res, err := exec.Exec(insertSql, params...)
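+	// Note: LastInsertId support depends on the driver; the PostgreSQL
+	// driver, for example, does not implement it, which is why
+	// PostgresDialect satisfies TargetedAutoIncrInserter with a
+	// "returning" clause instead of using this helper.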
+	if err != nil {
+		return 0, err
+	}
+	return res.LastInsertId()
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_mysql.go b/vendor/github.com/go-gorp/gorp/v3/dialect_mysql.go
new file mode 100644
index 000000000..d068ebe85
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/dialect_mysql.go
@@ -0,0 +1,169 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+// Implementation of Dialect for MySQL databases.
+type MySQLDialect struct {
+
+	// Engine is the storage engine to use, "InnoDB" vs "MyISAM" for example
+	Engine string
+
+	// Encoding is the character encoding to use for created tables
+	Encoding string
+}
+
+func (d MySQLDialect) QuerySuffix() string { return ";" }
+
+func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
+	switch val.Kind() {
+	case reflect.Ptr:
+		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
+	case reflect.Bool:
+		return "boolean"
+	case reflect.Int8:
+		return "tinyint"
+	case reflect.Uint8:
+		return "tinyint unsigned"
+	case reflect.Int16:
+		return "smallint"
+	case reflect.Uint16:
+		return "smallint unsigned"
+	case reflect.Int, reflect.Int32:
+		return "int"
+	case reflect.Uint, reflect.Uint32:
+		return "int unsigned"
+	case reflect.Int64:
+		return "bigint"
+	case reflect.Uint64:
+		return "bigint unsigned"
+	case reflect.Float64, reflect.Float32:
+		return "double"
+	case reflect.Slice:
+		if val.Elem().Kind() == reflect.Uint8 {
+			return "mediumblob"
+		}
+	}
+
+	switch val.Name() {
+	case "NullInt64":
+		return "bigint"
+	case "NullFloat64":
+		return "double"
+	case "NullBool":
+		return "tinyint"
+	case "Time":
+		return "datetime"
+	}
+
+	if maxsize < 1 {
+		maxsize = 255
+	}
+
+	/* == About varchar(N) ==
+	 * N is the number of characters.
+	 * A varchar column can store up to 65535 bytes.
+	 * Remember that a character can take up to 3 bytes in the utf-8 charset.
+	 * Also remember that each row can store up to 65535 bytes,
+	 * and you have some overheads, so it's not really possible for a
+	 * varchar column to hold 65535/3 characters.
+	 * So it would be better to use the 'text' type instead of a
+	 * large varchar type.
+	 */
+	if maxsize < 256 {
+		return fmt.Sprintf("varchar(%d)", maxsize)
+	} else {
+		return "text"
+	}
+}
+
+// Returns auto_increment
+func (d MySQLDialect) AutoIncrStr() string {
+	return "auto_increment"
+}
+
+func (d MySQLDialect) AutoIncrBindValue() string {
+	return "null"
+}
+
+func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
+	return ""
+}
+
+// Returns engine=%s charset=%s based on values stored on struct
+func (d MySQLDialect) CreateTableSuffix() string {
+	if d.Engine == "" || d.Encoding == "" {
+		msg := "gorp - undefined"
+
+		if d.Engine == "" {
+			msg += " MySQLDialect.Engine"
+		}
+		if d.Engine == "" && d.Encoding == "" {
+			msg += ","
+		}
+		if d.Encoding == "" {
+			msg += " MySQLDialect.Encoding"
+		}
+		msg += ". Check that your MySQLDialect was correctly initialized when declared."
+		panic(msg)
+	}
+
+	return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding)
+}
+
+func (d MySQLDialect) CreateIndexSuffix() string {
+	return "using"
+}
+
+func (d MySQLDialect) DropIndexSuffix() string {
+	return "on"
+}
+
+func (d MySQLDialect) TruncateClause() string {
+	return "truncate"
+}
+
+func (d MySQLDialect) SleepClause(s time.Duration) string {
+	return fmt.Sprintf("sleep(%f)", s.Seconds())
+}
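+// A DbMap using this dialect must set both Engine and Encoding, since
+// CreateTableSuffix panics otherwise. A construction sketch (values are
+// illustrative, not defaults):
+//
+//	dialect := MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"}
+//	dbmap := &DbMap{Db: db, Dialect: dialect}
+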
+// Returns "?"
+func (d MySQLDialect) BindVar(i int) string {
+	return "?"
+}
+
+func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
+	return standardInsertAutoIncr(exec, insertSql, params...)
+}
+
+func (d MySQLDialect) QuoteField(f string) string {
+	return "`" + f + "`"
+}
+
+func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string {
+	if strings.TrimSpace(schema) == "" {
+		return d.QuoteField(table)
+	}
+
+	return schema + "." + d.QuoteField(table)
+}
+
+func (d MySQLDialect) IfSchemaNotExists(command, schema string) string {
+	return fmt.Sprintf("%s if not exists", command)
+}
+
+func (d MySQLDialect) IfTableExists(command, schema, table string) string {
+	return fmt.Sprintf("%s if exists", command)
+}
+
+func (d MySQLDialect) IfTableNotExists(command, schema, table string) string {
+	return fmt.Sprintf("%s if not exists", command)
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_oracle.go b/vendor/github.com/go-gorp/gorp/v3/dialect_oracle.go
new file mode 100644
index 000000000..01a99b8a1
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/dialect_oracle.go
@@ -0,0 +1,139 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// Implementation of Dialect for Oracle databases.
+type OracleDialect struct{}
+
+func (d OracleDialect) QuerySuffix() string { return "" }
+
+func (d OracleDialect) CreateIndexSuffix() string { return "" }
+
+func (d OracleDialect) DropIndexSuffix() string { return "" }
+
+func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
+	switch val.Kind() {
+	case reflect.Ptr:
+		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
+	case reflect.Bool:
+		return "boolean"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+		if isAutoIncr {
+			return "serial"
+		}
+		return "integer"
+	case reflect.Int64, reflect.Uint64:
+		if isAutoIncr {
+			return "bigserial"
+		}
+		return "bigint"
+	case reflect.Float64:
+		return "double precision"
+	case reflect.Float32:
+		return "real"
+	case reflect.Slice:
+		if val.Elem().Kind() == reflect.Uint8 {
+			return "bytea"
+		}
+	}
+
+	switch val.Name() {
+	case "NullInt64":
+		return "bigint"
+	case "NullFloat64":
+		return "double precision"
+	case "NullBool":
+		return "boolean"
+	case "NullTime", "Time":
+		return "timestamp with time zone"
+	}
+
+	if maxsize > 0 {
+		return fmt.Sprintf("varchar(%d)", maxsize)
+	} else {
+		return "text"
+	}
+}
+
+// Returns empty string
+func (d OracleDialect) AutoIncrStr() string {
+	return ""
+}
+
+func (d OracleDialect) AutoIncrBindValue() string {
+	return "NULL"
+}
+
+func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
+	return ""
+}
+
+// Returns suffix
+func (d OracleDialect) CreateTableSuffix() string {
+	return ""
+}
+
+func (d OracleDialect) TruncateClause() string {
+	return "truncate"
+}
+
+// Returns ":N", where N is i+1
+func (d OracleDialect) BindVar(i int) string {
+	return fmt.Sprintf(":%d", i+1)
+}
+
+// After executing the insert, this uses the ColMap IdQuery to get the generated id
+func (d OracleDialect) InsertQueryToTarget(exec SqlExecutor, insertSql, idSql string, target interface{}, params ...interface{}) error {
+	_, err := exec.Exec(insertSql, params...)
+	if err != nil {
+		return err
+	}
+	id, err := exec.SelectInt(idSql)
+	if err != nil {
+		return err
+	}
+	switch target.(type) {
+	case *int64:
+		*(target.(*int64)) = id
+	case *int32:
+		*(target.(*int32)) = int32(id)
+	case *int:
+		*(target.(*int)) = int(id)
+	default:
+		return fmt.Errorf("gorp: id target must be a *int, *int32 or *int64")
+	}
+	return nil
+}
+
+func (d OracleDialect) QuoteField(f string) string {
+	return `"` + strings.ToUpper(f) + `"`
+}
+
+func (d OracleDialect) QuotedTableForQuery(schema string, table string) string {
+	if strings.TrimSpace(schema) == "" {
+		return d.QuoteField(table)
+	}
+
+	return schema + "." + d.QuoteField(table)
+}
+
+func (d OracleDialect) IfSchemaNotExists(command, schema string) string {
+	return fmt.Sprintf("%s if not exists", command)
+}
+
+func (d OracleDialect) IfTableExists(command, schema, table string) string {
+	return fmt.Sprintf("%s if exists", command)
+}
+
+func (d OracleDialect) IfTableNotExists(command, schema, table string) string {
+	return fmt.Sprintf("%s if not exists", command)
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_postgres.go b/vendor/github.com/go-gorp/gorp/v3/dialect_postgres.go
new file mode 100644
index 000000000..7a9c50bf8
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/dialect_postgres.go
@@ -0,0 +1,149 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+type PostgresDialect struct {
+	suffix          string
+	LowercaseFields bool
+}
+
+func (d PostgresDialect) QuerySuffix() string { return ";" }
+
+func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
+	switch val.Kind() {
+	case reflect.Ptr:
+		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
+	case reflect.Bool:
+		return "boolean"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+		if isAutoIncr {
+			return "serial"
+		}
+		return "integer"
+	case reflect.Int64, reflect.Uint64:
+		if isAutoIncr {
+			return "bigserial"
+		}
+		return "bigint"
+	case reflect.Float64:
+		return "double precision"
+	case reflect.Float32:
+		return "real"
+	case reflect.Slice:
+		if val.Elem().Kind() == reflect.Uint8 {
+			return "bytea"
+		}
+	}
+
+	switch val.Name() {
+	case "NullInt64":
+		return "bigint"
+	case "NullFloat64":
+		return "double precision"
+	case "NullBool":
+		return "boolean"
+	case "Time", "NullTime":
+		return "timestamp with time zone"
+	}
+
+	if maxsize > 0 {
+		return fmt.Sprintf("varchar(%d)", maxsize)
+	} else {
+		return "text"
+	}
+}
+
+// Returns empty string
+func (d PostgresDialect) AutoIncrStr() string {
+	return ""
+}
+
+func (d PostgresDialect) AutoIncrBindValue() string {
+	return "default"
+}
+
+func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
+	return " returning " + d.QuoteField(col.ColumnName)
+}
+
+// Returns suffix
+func (d PostgresDialect) CreateTableSuffix() string {
+	return d.suffix
+}
+
+func (d PostgresDialect) CreateIndexSuffix() string {
+	return "using"
+}
+
+func (d PostgresDialect) DropIndexSuffix() string {
+	return ""
+}
+
+func (d PostgresDialect) TruncateClause() string {
+	return "truncate"
+}
+
+func (d PostgresDialect) SleepClause(s time.Duration) string {
+	return fmt.Sprintf("pg_sleep(%f)", s.Seconds())
+}
+
+// Returns "$(i+1)"
+func (d PostgresDialect) BindVar(i int) string {
+	return fmt.Sprintf("$%d", i+1)
+}
+
+func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error {
+	rows, err := exec.Query(insertSql, params...)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	if !rows.Next() {
+		return fmt.Errorf("no serial value returned for insert: %s; encountered error: %v", insertSql, rows.Err())
+	}
+	if err := rows.Scan(target); err != nil {
+		return err
+	}
+	if rows.Next() {
+		return fmt.Errorf("more than one serial value returned for insert: %s", insertSql)
+	}
+	return rows.Err()
+}
+
+func (d PostgresDialect) QuoteField(f string) string {
+	if d.LowercaseFields {
+		return `"` + strings.ToLower(f) + `"`
+	}
+	return `"` + f + `"`
+}
+
+func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string {
+	if strings.TrimSpace(schema) == "" {
+		return d.QuoteField(table)
+	}
+
+	return schema + "." + d.QuoteField(table)
+}
+
+func (d PostgresDialect) IfSchemaNotExists(command, schema string) string {
+	return fmt.Sprintf("%s if not exists", command)
+}
+
+func (d PostgresDialect) IfTableExists(command, schema, table string) string {
+	return fmt.Sprintf("%s if exists", command)
+}
+
+func (d PostgresDialect) IfTableNotExists(command, schema, table string) string {
+	return fmt.Sprintf("%s if not exists", command)
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_snowflake.go b/vendor/github.com/go-gorp/gorp/v3/dialect_snowflake.go
new file mode 100644
index 000000000..2e2cb8952
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/dialect_snowflake.go
@@ -0,0 +1,152 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+type SnowflakeDialect struct {
+	suffix          string
+	LowercaseFields bool
+}
+
+func (d SnowflakeDialect) QuerySuffix() string { return ";" }
+
+func (d SnowflakeDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
+	switch val.Kind() {
+	case reflect.Ptr:
+		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
+	case reflect.Bool:
+		return "boolean"
+	case reflect.Int,
+		reflect.Int8,
+		reflect.Int16,
+		reflect.Int32,
+		reflect.Uint,
+		reflect.Uint8,
+		reflect.Uint16,
+		reflect.Uint32:
+
+		if isAutoIncr {
+			return "serial"
+		}
+		return "integer"
+	case reflect.Int64, reflect.Uint64:
+		if isAutoIncr {
+			return "bigserial"
+		}
+		return "bigint"
+	case reflect.Float64:
+		return "double precision"
+	case reflect.Float32:
+		return "real"
+	case reflect.Slice:
+		if val.Elem().Kind() == reflect.Uint8 {
+			return "binary"
+		}
+	}
+
+	switch val.Name() {
+	case "NullInt64":
+		return "bigint"
+	case "NullFloat64":
+		return "double precision"
+	case "NullBool":
+		return "boolean"
+	case "Time", "NullTime":
+		return "timestamp with time zone"
+	}
+
+	if maxsize > 0 {
+		return fmt.Sprintf("varchar(%d)", maxsize)
+	} else {
+		return "text"
+	}
+}
+
+// Returns empty string
+func (d SnowflakeDialect) AutoIncrStr() string {
+	return ""
+}
+
+func (d SnowflakeDialect) AutoIncrBindValue() string {
+	return "default"
+}
+
+func (d SnowflakeDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
+	return ""
+}
+
+// Returns suffix
+func (d SnowflakeDialect) CreateTableSuffix() string {
+	return d.suffix
+}
+
+func (d SnowflakeDialect) CreateIndexSuffix() string {
+	return ""
+}
+
+func (d SnowflakeDialect) DropIndexSuffix() string {
+	return ""
+}
+
+func (d SnowflakeDialect) TruncateClause() string {
+	return "truncate"
+}
+
+// Returns "?"
+func (d SnowflakeDialect) BindVar(i int) string {
+	return "?"
+}
+
+func (d SnowflakeDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error {
+	rows, err := exec.Query(insertSql, params...)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	if !rows.Next() {
+		return fmt.Errorf("no serial value returned for insert: %s; encountered error: %v", insertSql, rows.Err())
+	}
+	if err := rows.Scan(target); err != nil {
+		return err
+	}
+	if rows.Next() {
+		return fmt.Errorf("more than one serial value returned for insert: %s", insertSql)
+	}
+	return rows.Err()
+}
+
+func (d SnowflakeDialect) QuoteField(f string) string {
+	if d.LowercaseFields {
+		return `"` + strings.ToLower(f) + `"`
+	}
+	return `"` + f + `"`
+}
+
+func (d SnowflakeDialect) QuotedTableForQuery(schema string, table string) string {
+	if strings.TrimSpace(schema) == "" {
+		return d.QuoteField(table)
+	}
+
+	return schema + "." + d.QuoteField(table)
+}
+
+func (d SnowflakeDialect) IfSchemaNotExists(command, schema string) string {
+	return fmt.Sprintf("%s if not exists", command)
+}
+
+func (d SnowflakeDialect) IfTableExists(command, schema, table string) string {
+	return fmt.Sprintf("%s if exists", command)
+}
+
+func (d SnowflakeDialect) IfTableNotExists(command, schema, table string) string {
+	return fmt.Sprintf("%s if not exists", command)
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_sqlite.go b/vendor/github.com/go-gorp/gorp/v3/dialect_sqlite.go
new file mode 100644
index 000000000..2296275b9
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/dialect_sqlite.go
@@ -0,0 +1,112 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+ +package gorp + +import ( + "fmt" + "reflect" +) + +type SqliteDialect struct { + suffix string +} + +func (d SqliteDialect) QuerySuffix() string { return ";" } + +func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "integer" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return "integer" + case reflect.Float64, reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "blob" + } + } + + switch val.Name() { + case "NullInt64": + return "integer" + case "NullFloat64": + return "real" + case "NullBool": + return "integer" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns autoincrement +func (d SqliteDialect) AutoIncrStr() string { + return "autoincrement" +} + +func (d SqliteDialect) AutoIncrBindValue() string { + return "null" +} + +func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqliteDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d SqliteDialect) CreateIndexSuffix() string { + return "" +} + +func (d SqliteDialect) DropIndexSuffix() string { + return "" +} + +// With sqlite, there technically isn't a TRUNCATE statement, +// but a DELETE FROM uses a truncate optimization: +// http://www.sqlite.org/lang_delete.html +func (d SqliteDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" +func (d SqliteDialect) BindVar(i int) string { + return "?" +} + +func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqliteDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +// sqlite does not have schemas like PostgreSQL does, so just escape it like normal +func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string { + return d.QuoteField(table) +} + +func (d SqliteDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d SqliteDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d SqliteDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/v3/dialect_sqlserver.go b/vendor/github.com/go-gorp/gorp/v3/dialect_sqlserver.go new file mode 100644 index 000000000..ec06aed66 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/dialect_sqlserver.go @@ -0,0 +1,145 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "fmt" + "reflect" + "strings" +) + +// Implementation of Dialect for Microsoft SQL Server databases. +// Use gorp.SqlServerDialect{"2005"} for legacy datatypes. 
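+// Setting Version selects between type mappings in ToSqlType: a sketch,
+// gorp.SqlServerDialect{Version: "2005"} maps time values to datetime and
+// unsized strings to nvarchar(255), while an empty Version uses datetime2
+// and nvarchar(max).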
+// Tested with driver: github.com/denisenkom/go-mssqldb
+
+type SqlServerDialect struct {
+
+	// If set to "2005", legacy datatypes will be used
+	Version string
+}
+
+func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
+	switch val.Kind() {
+	case reflect.Ptr:
+		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
+	case reflect.Bool:
+		return "bit"
+	case reflect.Int8:
+		return "tinyint"
+	case reflect.Uint8:
+		return "smallint"
+	case reflect.Int16:
+		return "smallint"
+	case reflect.Uint16:
+		return "int"
+	case reflect.Int, reflect.Int32:
+		return "int"
+	case reflect.Uint, reflect.Uint32:
+		return "bigint"
+	case reflect.Int64:
+		return "bigint"
+	case reflect.Uint64:
+		return "numeric(20,0)"
+	case reflect.Float32:
+		return "float(24)"
+	case reflect.Float64:
+		return "float(53)"
+	case reflect.Slice:
+		if val.Elem().Kind() == reflect.Uint8 {
+			return "varbinary"
+		}
+	}
+
+	switch val.Name() {
+	case "NullInt64":
+		return "bigint"
+	case "NullFloat64":
+		return "float(53)"
+	case "NullBool":
+		return "bit"
+	case "NullTime", "Time":
+		if d.Version == "2005" {
+			return "datetime"
+		}
+		return "datetime2"
+	}
+
+	if maxsize < 1 {
+		if d.Version == "2005" {
+			maxsize = 255
+		} else {
+			return "nvarchar(max)"
+		}
+	}
+	return fmt.Sprintf("nvarchar(%d)", maxsize)
+}
+
+// Returns "identity(0,1)"
+func (d SqlServerDialect) AutoIncrStr() string {
+	return "identity(0,1)"
+}
+
+// Empty string removes autoincrement columns from the INSERT statements.
+func (d SqlServerDialect) AutoIncrBindValue() string {
+	return ""
+}
+
+func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
+	return ""
+}
+
+func (d SqlServerDialect) CreateTableSuffix() string { return ";" }
+
+func (d SqlServerDialect) TruncateClause() string {
+	return "truncate table"
+}
+
+// Returns "?"
+func (d SqlServerDialect) BindVar(i int) string {
+	return "?"
+}
+
+func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) {
+	return standardInsertAutoIncr(exec, insertSql, params...)
+}
+
+func (d SqlServerDialect) QuoteField(f string) string {
+	return "[" + strings.Replace(f, "]", "]]", -1) + "]"
+}
+
+func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string {
+	if strings.TrimSpace(schema) == "" {
+		return d.QuoteField(table)
+	}
+	return d.QuoteField(schema) + "." + d.QuoteField(table)
+}
+
+func (d SqlServerDialect) QuerySuffix() string { return ";" }
+
+func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string {
+	s := fmt.Sprintf("if schema_id(N'%s') is null %s", schema, command)
+	return s
+}
+
+func (d SqlServerDialect) IfTableExists(command, schema, table string) string {
+	var schema_clause string
+	if strings.TrimSpace(schema) != "" {
+		schema_clause = fmt.Sprintf("%s.", d.QuoteField(schema))
+	}
+	s := fmt.Sprintf("if object_id('%s%s') is not null %s", schema_clause, d.QuoteField(table), command)
+	return s
+}
+
+func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string {
+	var schema_clause string
+	if strings.TrimSpace(schema) != "" {
+		schema_clause = fmt.Sprintf("%s.", schema)
+	}
+	s := fmt.Sprintf("if object_id('%s%s') is null %s", schema_clause, table, command)
+	return s
+}
+
+func (d SqlServerDialect) CreateIndexSuffix() string { return "" }
+func (d SqlServerDialect) DropIndexSuffix() string   { return "" }
diff --git a/vendor/github.com/go-gorp/gorp/v3/doc.go b/vendor/github.com/go-gorp/gorp/v3/doc.go
new file mode 100644
index 000000000..593f1c3c1
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp
+package gorp
diff --git a/vendor/github.com/go-gorp/gorp/v3/errors.go b/vendor/github.com/go-gorp/gorp/v3/errors.go
new file mode 100644
index 000000000..752ad1bca
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/errors.go
@@ -0,0 +1,31 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"fmt"
+)
+
+// NoFieldInTypeError is a non-fatal error, returned when a select query
+// returns columns that do not exist as fields in the struct it is being mapped to.
+// TODO: discuss whether this needs to be an error. encoding/json silently ignores missing fields
+type NoFieldInTypeError struct {
+	TypeName        string
+	MissingColNames []string
+}
+
+func (err *NoFieldInTypeError) Error() string {
+	return fmt.Sprintf("gorp: no fields %+v in type %s", err.MissingColNames, err.TypeName)
+}
+
+// NonFatalError returns true if the error is non-fatal (i.e., we shouldn't
+// immediately return).
+func NonFatalError(err error) bool {
+	switch err.(type) {
+	case *NoFieldInTypeError:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/gorp.go b/vendor/github.com/go-gorp/gorp/v3/gorp.go
new file mode 100644
index 000000000..fc6545676
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/gorp.go
@@ -0,0 +1,672 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// OracleString (empty string is null)
+// TODO: move to dialect/oracle?, rename to String?
+type OracleString struct {
+	sql.NullString
+}
+
+// Scan implements the Scanner interface.
+func (os *OracleString) Scan(value interface{}) error {
+	if value == nil {
+		os.String, os.Valid = "", false
+		return nil
+	}
+	os.Valid = true
+	return os.NullString.Scan(value)
+}
+
+// Value implements the driver Valuer interface.
+func (os OracleString) Value() (driver.Value, error) {
+	if !os.Valid || os.String == "" {
+		return nil, nil
+	}
+	return os.String, nil
+}
+
+// SqlTyper is a type that returns its database type. Most of the
+// time, the type can just use "database/sql/driver".Valuer; but when
+// it returns nil for its empty value, it needs to implement SqlTyper
+// to have its column type detected properly during table creation.
+type SqlTyper interface {
+	SqlType() driver.Value
+}
+
+// legacySqlTyper prevents breaking clients who depended on the previous
+// SqlTyper interface
+type legacySqlTyper interface {
+	SqlType() driver.Valuer
+}
+
+// dummyField is used for fields that exist in the DB table but do not exist
+// in the struct.
+type dummyField struct{}
+
+// Scan implements the Scanner interface.
+func (nt *dummyField) Scan(value interface{}) error {
+	return nil
+}
+
+var zeroVal reflect.Value
+var versFieldConst = "[gorp_ver_field]"
+
+// The TypeConverter interface provides a way to map a value of one
+// type to another type when persisting to, or loading from, a database.
+//
+// Example use cases: Implement type converter to convert bool types to "y"/"n" strings,
+// or serialize a struct member as a JSON blob.
+type TypeConverter interface {
+	// ToDb converts val to another type. Called before INSERT/UPDATE operations
+	ToDb(val interface{}) (interface{}, error)
+
+	// FromDb returns a CustomScanner appropriate for this type. This will be used
+	// to hold values returned from SELECT queries.
+	//
+	// In particular the CustomScanner returned should implement a Binder
+	// function appropriate for the Go type you wish to convert the db value to
+	//
+	// If bool==false, then no custom scanner will be used for this field.
+	FromDb(target interface{}) (CustomScanner, bool)
+}
+
+// SqlExecutor exposes gorp operations that can be run from Pre/Post
+// hooks. This hides whether the current operation that triggered the
+// hook is in a transaction.
+//
+// See the DbMap function docs for each of the functions below for more
+// information.
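+//
+// For example, a hook can issue follow-up statements through the executor
+// it receives (a sketch; AuditLog and Invoice are hypothetical types):
+//
+//	func (i *Invoice) PostInsert(s SqlExecutor) error {
+//		return s.Insert(&AuditLog{Msg: "invoice created"})
+//	}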
+type SqlExecutor interface { + WithContext(ctx context.Context) SqlExecutor + Get(i interface{}, keys ...interface{}) (interface{}, error) + Insert(list ...interface{}) error + Update(list ...interface{}) (int64, error) + Delete(list ...interface{}) (int64, error) + Exec(query string, args ...interface{}) (sql.Result, error) + Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) + SelectInt(query string, args ...interface{}) (int64, error) + SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) + SelectFloat(query string, args ...interface{}) (float64, error) + SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) + SelectStr(query string, args ...interface{}) (string, error) + SelectNullStr(query string, args ...interface{}) (sql.NullString, error) + SelectOne(holder interface{}, query string, args ...interface{}) error + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row +} + +// DynamicTable allows the users of gorp to dynamically +// use different database table names during runtime +// while sharing the same golang struct for in-memory data +type DynamicTable interface { + TableName() string + SetTableName(string) +} + +// Compile-time check that DbMap and Transaction implement the SqlExecutor +// interface. +var _, _ SqlExecutor = &DbMap{}, &Transaction{} + +func argValue(a interface{}) interface{} { + v, ok := a.(driver.Valuer) + if !ok { + return a + } + vV := reflect.ValueOf(v) + if vV.Kind() == reflect.Ptr && vV.IsNil() { + return nil + } + ret, err := v.Value() + if err != nil { + return a + } + return ret +} + +func argsString(args ...interface{}) string { + var margs string + for i, a := range args { + v := argValue(a) + switch v.(type) { + case string: + v = fmt.Sprintf("%q", v) + default: + v = fmt.Sprintf("%v", v) + } + margs += fmt.Sprintf("%d:%s", i+1, v) + if i+1 < len(args) { + margs += " " + } + } + return margs +} + +// Calls the Exec function on the executor, but attempts to expand any eligible named +// query arguments first. +func maybeExpandNamedQueryAndExec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) { + dbMap := extractDbMap(e) + + if len(args) == 1 { + query, args = maybeExpandNamedQuery(dbMap, query, args) + } + + return exec(e, query, args...) +} + +func extractDbMap(e SqlExecutor) *DbMap { + switch m := e.(type) { + case *DbMap: + return m + case *Transaction: + return m.dbmap + } + return nil +} + +// executor exposes the sql.DB and sql.Tx functions so that it can be used +// on internal functions that need to be agnostic to the underlying object. 
+type executor interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Prepare(query string) (*sql.Stmt, error) + QueryRow(query string, args ...interface{}) *sql.Row + Query(query string, args ...interface{}) (*sql.Rows, error) + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +func extractExecutorAndContext(e SqlExecutor) (executor, context.Context) { + switch m := e.(type) { + case *DbMap: + return m.Db, m.ctx + case *Transaction: + return m.tx, m.ctx + } + return nil, nil +} + +// maybeExpandNamedQuery checks the given arg to see if it's eligible to be used +// as input to a named query. If so, it rewrites the query to use +// dialect-dependent bindvars and instantiates the corresponding slice of +// parameters by extracting data from the map / struct. +// If not, returns the input values unchanged. +func maybeExpandNamedQuery(m *DbMap, query string, args []interface{}) (string, []interface{}) { + var ( + arg = args[0] + argval = reflect.ValueOf(arg) + ) + if argval.Kind() == reflect.Ptr { + argval = argval.Elem() + } + + if argval.Kind() == reflect.Map && argval.Type().Key().Kind() == reflect.String { + return expandNamedQuery(m, query, func(key string) reflect.Value { + return argval.MapIndex(reflect.ValueOf(key)) + }) + } + if argval.Kind() != reflect.Struct { + return query, args + } + if _, ok := arg.(time.Time); ok { + // time.Time is driver.Value + return query, args + } + if _, ok := arg.(driver.Valuer); ok { + // driver.Valuer will be converted to driver.Value. + return query, args + } + + return expandNamedQuery(m, query, argval.FieldByName) +} + +var keyRegexp = regexp.MustCompile(`:[[:word:]]+`) + +// expandNamedQuery accepts a query with placeholders of the form ":key", and a +// single arg of Kind Struct or Map[string]. It returns the query with the +// dialect's placeholders, and a slice of args ready for positional insertion +// into the query. +func expandNamedQuery(m *DbMap, query string, keyGetter func(key string) reflect.Value) (string, []interface{}) { + var ( + n int + args []interface{} + ) + return keyRegexp.ReplaceAllStringFunc(query, func(key string) string { + val := keyGetter(key[1:]) + if !val.IsValid() { + return key + } + args = append(args, val.Interface()) + newVar := m.Dialect.BindVar(n) + n++ + return newVar + }), args +} + +func columnToFieldIndex(m *DbMap, t reflect.Type, name string, cols []string) ([][]int, error) { + colToFieldIndex := make([][]int, len(cols)) + + // check if type t is a mapped table - if so we'll + // check the table for column aliasing below + tableMapped := false + table := tableOrNil(m, t, name) + if table != nil { + tableMapped = true + } + + // Loop over column names and find field in i to bind to + // based on column name. 
all returned columns must match + // a field in the i struct + missingColNames := []string{} + for x := range cols { + colName := strings.ToLower(cols[x]) + field, found := t.FieldByNameFunc(func(fieldName string) bool { + field, _ := t.FieldByName(fieldName) + cArguments := strings.Split(field.Tag.Get("db"), ",") + fieldName = cArguments[0] + + if fieldName == "-" { + return false + } else if fieldName == "" { + fieldName = field.Name + } + if tableMapped { + colMap := colMapOrNil(table, fieldName) + if colMap != nil { + fieldName = colMap.ColumnName + } + } + return colName == strings.ToLower(fieldName) + }) + if found { + colToFieldIndex[x] = field.Index + } + if colToFieldIndex[x] == nil { + missingColNames = append(missingColNames, colName) + } + } + if len(missingColNames) > 0 { + return colToFieldIndex, &NoFieldInTypeError{ + TypeName: t.Name(), + MissingColNames: missingColNames, + } + } + return colToFieldIndex, nil +} + +func fieldByName(val reflect.Value, fieldName string) *reflect.Value { + // try to find field by exact match + f := val.FieldByName(fieldName) + + if f != zeroVal { + return &f + } + + // try to find by case insensitive match - only the Postgres driver + // seems to require this - in the case where columns are aliased in the sql + fieldNameL := strings.ToLower(fieldName) + fieldCount := val.NumField() + t := val.Type() + for i := 0; i < fieldCount; i++ { + sf := t.Field(i) + if strings.ToLower(sf.Name) == fieldNameL { + f := val.Field(i) + return &f + } + } + + return nil +} + +// toSliceType returns the element type of the given object, if the object is a +// "*[]*Element" or "*[]Element". If not, returns nil. +// err is returned if the user was trying to pass a pointer-to-slice but failed. +func toSliceType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + if t.Kind() != reflect.Ptr { + // If it's a slice, return a more helpful error message + if t.Kind() == reflect.Slice { + return nil, fmt.Errorf("gorp: cannot SELECT into a non-pointer slice: %v", t) + } + return nil, nil + } + if t = t.Elem(); t.Kind() != reflect.Slice { + return nil, nil + } + return t.Elem(), nil +} + +func toType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + + // If a Pointer to a type, follow + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("gorp: cannot SELECT into this type: %v", reflect.TypeOf(i)) + } + return t, nil +} + +type foundTable struct { + table *TableMap + dynName *string +} + +func tableFor(m *DbMap, t reflect.Type, i interface{}) (*foundTable, error) { + if dyn, isDynamic := i.(DynamicTable); isDynamic { + tableName := dyn.TableName() + table, err := m.DynamicTableFor(tableName, true) + if err != nil { + return nil, err + } + return &foundTable{ + table: table, + dynName: &tableName, + }, nil + } + table, err := m.TableFor(t, true) + if err != nil { + return nil, err + } + return &foundTable{table: table}, nil +} + +func get(m *DbMap, exec SqlExecutor, i interface{}, + keys ...interface{}) (interface{}, error) { + + t, err := toType(i) + if err != nil { + return nil, err + } + + foundTable, err := tableFor(m, t, i) + if err != nil { + return nil, err + } + table := foundTable.table + + plan := table.bindGet() + + v := reflect.New(t) + if foundTable.dynName != nil { + retDyn := v.Interface().(DynamicTable) + retDyn.SetTableName(*foundTable.dynName) + } + + dest := make([]interface{}, len(plan.argFields)) + + conv := m.TypeConverter + custScan := make([]CustomScanner, 0) + + 
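+	// Build one scan destination per bound field, routing each through the
+	// optional TypeConverter so custom holders can be bound back onto the
+	// struct after row.Scan.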
for x, fieldName := range plan.argFields { + f := v.Elem().FieldByName(fieldName) + target := f.Addr().Interface() + if conv != nil { + scanner, ok := conv.FromDb(target) + if ok { + target = scanner.Holder + custScan = append(custScan, scanner) + } + } + dest[x] = target + } + + row := exec.QueryRow(plan.query, keys...) + err = row.Scan(dest...) + if err != nil { + if err == sql.ErrNoRows { + err = nil + } + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if v, ok := v.Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + + return v.Interface(), nil +} + +func delete(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreDelete); ok { + err = v.PreDelete(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindDelete(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + count += rows + + if v, ok := eval.(HasPostDelete); ok { + err := v.PostDelete(exec) + if err != nil { + return -1, err + } + } + } + + return count, nil +} + +func update(m *DbMap, exec SqlExecutor, colFilter ColumnFilter, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreUpdate); ok { + err = v.PreUpdate(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindUpdate(elem, colFilter) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + if bi.versField != "" { + elem.FieldByName(bi.versField).SetInt(bi.existingVersion + 1) + } + + count += rows + + if v, ok := eval.(HasPostUpdate); ok { + err = v.PostUpdate(exec) + if err != nil { + return -1, err + } + } + } + return count, nil +} + +func insert(m *DbMap, exec SqlExecutor, list ...interface{}) error { + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, false) + if err != nil { + return err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreInsert); ok { + err := v.PreInsert(exec) + if err != nil { + return err + } + } + + bi, err := table.bindInsert(elem) + if err != nil { + return err + } + + if bi.autoIncrIdx > -1 { + f := elem.FieldByName(bi.autoIncrFieldName) + switch inserter := m.Dialect.(type) { + case IntegerAutoIncrInserter: + id, err := inserter.InsertAutoIncr(exec, bi.query, bi.args...) 
+ if err != nil { + return err + } + k := f.Kind() + if (k == reflect.Int) || (k == reflect.Int16) || (k == reflect.Int32) || (k == reflect.Int64) { + f.SetInt(id) + } else if (k == reflect.Uint) || (k == reflect.Uint16) || (k == reflect.Uint32) || (k == reflect.Uint64) { + f.SetUint(uint64(id)) + } else { + return fmt.Errorf("gorp: cannot set autoincrement value on non-Int field. SQL=%s autoIncrIdx=%d autoIncrFieldName=%s", bi.query, bi.autoIncrIdx, bi.autoIncrFieldName) + } + case TargetedAutoIncrInserter: + err := inserter.InsertAutoIncrToTarget(exec, bi.query, f.Addr().Interface(), bi.args...) + if err != nil { + return err + } + case TargetQueryInserter: + var idQuery = table.ColMap(bi.autoIncrFieldName).GeneratedIdQuery + if idQuery == "" { + return fmt.Errorf("gorp: cannot set %s value if its ColumnMap.GeneratedIdQuery is empty", bi.autoIncrFieldName) + } + err := inserter.InsertQueryToTarget(exec, bi.query, idQuery, f.Addr().Interface(), bi.args...) + if err != nil { + return err + } + default: + return fmt.Errorf("gorp: cannot use autoincrement fields on dialects that do not implement an autoincrementing interface") + } + } else { + _, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return err + } + } + + if v, ok := eval.(HasPostInsert); ok { + err := v.PostInsert(exec) + if err != nil { + return err + } + } + } + return nil +} + +func exec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.ExecContext(ctx, query, args...) + } + + return executor.Exec(query, args...) +} + +func prepare(e SqlExecutor, query string) (*sql.Stmt, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.PrepareContext(ctx, query) + } + + return executor.Prepare(query) +} + +func queryRow(e SqlExecutor, query string, args ...interface{}) *sql.Row { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.QueryRowContext(ctx, query, args...) + } + + return executor.QueryRow(query, args...) +} + +func query(e SqlExecutor, query string, args ...interface{}) (*sql.Rows, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.QueryContext(ctx, query, args...) + } + + return executor.Query(query, args...) +} + +func begin(m *DbMap) (*sql.Tx, error) { + if m.ctx != nil { + return m.Db.BeginTx(m.ctx, nil) + } + + return m.Db.Begin() +} diff --git a/vendor/github.com/go-gorp/gorp/v3/hooks.go b/vendor/github.com/go-gorp/gorp/v3/hooks.go new file mode 100644 index 000000000..1e80bca7e --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/hooks.go @@ -0,0 +1,42 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +//++ TODO v2-phase3: HasPostGet => PostGetter, HasPostDelete => PostDeleter, etc. + +// HasPostGet provides PostGet() which will be executed after the GET statement. 
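+//
+// A typical hook implementation sketch (Invoice and its Created field are
+// hypothetical):
+//
+//	func (i *Invoice) PreInsert(s SqlExecutor) error {
+//		i.Created = time.Now().UnixNano()
+//		return nil
+//	}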
+type HasPostGet interface {
+	PostGet(SqlExecutor) error
+}
+
+// HasPostDelete provides PostDelete() which will be executed after the DELETE statement.
+type HasPostDelete interface {
+	PostDelete(SqlExecutor) error
+}
+
+// HasPostUpdate provides PostUpdate() which will be executed after the UPDATE statement.
+type HasPostUpdate interface {
+	PostUpdate(SqlExecutor) error
+}
+
+// HasPostInsert provides PostInsert() which will be executed after the INSERT statement.
+type HasPostInsert interface {
+	PostInsert(SqlExecutor) error
+}
+
+// HasPreDelete provides PreDelete() which will be executed before the DELETE statement.
+type HasPreDelete interface {
+	PreDelete(SqlExecutor) error
+}
+
+// HasPreUpdate provides PreUpdate() which will be executed before the UPDATE statement.
+type HasPreUpdate interface {
+	PreUpdate(SqlExecutor) error
+}
+
+// HasPreInsert provides PreInsert() which will be executed before the INSERT statement.
+type HasPreInsert interface {
+	PreInsert(SqlExecutor) error
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/index.go b/vendor/github.com/go-gorp/gorp/v3/index.go
new file mode 100644
index 000000000..df1cf5520
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/index.go
@@ -0,0 +1,49 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+// IndexMap represents a mapping between a Go struct field and a single
+// index in a table.
+// Unique and MaxSize only inform the
+// CreateTables() function and are not used by Insert/Update/Delete/Get.
+type IndexMap struct {
+	// Index name in db table
+	IndexName string
+
+	// If true, " unique" is added to create index statements.
+	// Not used elsewhere
+	Unique bool
+
+	// Index type supported by Dialect
+	// Postgres: B-tree, Hash, GiST and GIN.
+	// Mysql: Btree, Hash.
+	// Sqlite: nil.
+	IndexType string
+
+	// Column names for single and multi-column indexes
+	columns []string
+}
+
+// Rename allows you to specify the index name in the table
+//
+// Example: table.IdxMap("customer_test_idx").Rename("customer_idx")
+//
+func (idx *IndexMap) Rename(indname string) *IndexMap {
+	idx.IndexName = indname
+	return idx
+}
+
+// SetUnique adds "unique" to the create index statements for this
+// index, if b is true.
+func (idx *IndexMap) SetUnique(b bool) *IndexMap {
+	idx.Unique = b
+	return idx
+}
+
+// SetIndexType specifies the index type supported by the chosen SQL Dialect
+func (idx *IndexMap) SetIndexType(indtype string) *IndexMap {
+	idx.IndexType = indtype
+	return idx
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/lockerror.go b/vendor/github.com/go-gorp/gorp/v3/lockerror.go
new file mode 100644
index 000000000..7e81891e2
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/lockerror.go
@@ -0,0 +1,56 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// OptimisticLockError is returned by Update() or Delete() if the
+// struct being modified has a Version field and the value is not equal to
+// the current value in the database
+type OptimisticLockError struct {
+	// Table name where the lock error occurred
+	TableName string
+
+	// Primary key values of the row being updated/deleted
+	Keys []interface{}
+
+	// true if a row was found with those keys, indicating the
+	// LocalVersion is stale.
false if no value was found with those + // keys, suggesting the row has been deleted since loaded, or + // was never inserted to begin with + RowExists bool + + // Version value on the struct passed to Update/Delete. This value is + // out of sync with the database. + LocalVersion int64 +} + +// Error returns a description of the cause of the lock error +func (e OptimisticLockError) Error() string { + if e.RowExists { + return fmt.Sprintf("gorp: OptimisticLockError table=%s keys=%v out of date version=%d", e.TableName, e.Keys, e.LocalVersion) + } + + return fmt.Sprintf("gorp: OptimisticLockError no row found for table=%s keys=%v", e.TableName, e.Keys) +} + +func lockError(m *DbMap, exec SqlExecutor, tableName string, + existingVer int64, elem reflect.Value, + keys ...interface{}) (int64, error) { + + existing, err := get(m, exec, elem.Interface(), keys...) + if err != nil { + return -1, err + } + + ole := OptimisticLockError{tableName, keys, true, existingVer} + if existing == nil { + ole.RowExists = false + } + return -1, ole +} diff --git a/vendor/github.com/go-gorp/gorp/v3/logging.go b/vendor/github.com/go-gorp/gorp/v3/logging.go new file mode 100644 index 000000000..e8cba3db2 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/logging.go @@ -0,0 +1,42 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import "fmt" + +// GorpLogger is a deprecated alias of Logger. +type GorpLogger = Logger + +// Logger is the type that gorp uses to log SQL statements. +// See DbMap.TraceOn. +type Logger interface { + Printf(format string, v ...interface{}) +} + +// TraceOn turns on SQL statement logging for this DbMap. After this is +// called, all SQL statements will be sent to the logger. If prefix is +// a non-empty string, it will be written to the front of all logged +// strings, which can aid in filtering log lines. +// +// Use TraceOn if you want to spy on the SQL statements that gorp +// generates. +// +// Note that the base log.Logger type satisfies Logger, but adapters can +// easily be written for other logging packages (e.g., the golang-sanctioned +// glog framework). +func (m *DbMap) TraceOn(prefix string, logger Logger) { + m.logger = logger + if prefix == "" { + m.logPrefix = prefix + } else { + m.logPrefix = fmt.Sprintf("%s ", prefix) + } +} + +// TraceOff turns off tracing. It is idempotent. +func (m *DbMap) TraceOff() { + m.logger = nil + m.logPrefix = "" +} diff --git a/vendor/github.com/go-gorp/gorp/v3/nulltypes.go b/vendor/github.com/go-gorp/gorp/v3/nulltypes.go new file mode 100644 index 000000000..87b21f83f --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/nulltypes.go @@ -0,0 +1,65 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "database/sql/driver" + "log" + "time" +) + +// A nullable Time value +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. 
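A brief sketch of wiring the TraceOn method above to the standard library logger; dbmap is an assumed, already-configured *gorp.DbMap:

	// dbmap is assumed to be an existing *gorp.DbMap.
	dbmap.TraceOn("[gorp]", log.New(os.Stdout, "sql: ", log.Lmicroseconds))
	// ... every generated statement is now logged with the "[gorp]" prefix ...
	dbmap.TraceOff()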
+func (nt *NullTime) Scan(value interface{}) error { + log.Printf("Time scan value is: %#v", value) + switch t := value.(type) { + case time.Time: + nt.Time, nt.Valid = t, true + case []byte: + v := strToTime(string(t)) + if v != nil { + nt.Valid = true + nt.Time = *v + } + case string: + v := strToTime(t) + if v != nil { + nt.Valid = true + nt.Time = *v + } + } + return nil +} + +func strToTime(v string) *time.Time { + for _, dtfmt := range []string{ + "2006-01-02 15:04:05.999999999", + "2006-01-02T15:04:05.999999999", + "2006-01-02 15:04:05", + "2006-01-02T15:04:05", + "2006-01-02 15:04", + "2006-01-02T15:04", + "2006-01-02", + "2006-01-02 15:04:05-07:00", + } { + if t, err := time.Parse(dtfmt, v); err == nil { + return &t + } + } + return nil +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/go-gorp/gorp/v3/select.go b/vendor/github.com/go-gorp/gorp/v3/select.go new file mode 100644 index 000000000..2d2d59618 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/select.go @@ -0,0 +1,359 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "database/sql" + "fmt" + "reflect" +) + +// SelectInt executes the given query, which should be a SELECT statement for a single +// integer column, and returns the value of the first row returned. If no rows are +// found, zero is returned. +func SelectInt(e SqlExecutor, query string, args ...interface{}) (int64, error) { + var h int64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return 0, err + } + return h, nil +} + +// SelectNullInt executes the given query, which should be a SELECT statement for a single +// integer column, and returns the value of the first row returned. If no rows are +// found, the empty sql.NullInt64 value is returned. +func SelectNullInt(e SqlExecutor, query string, args ...interface{}) (sql.NullInt64, error) { + var h sql.NullInt64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return h, err + } + return h, nil +} + +// SelectFloat executes the given query, which should be a SELECT statement for a single +// float column, and returns the value of the first row returned. If no rows are +// found, zero is returned. +func SelectFloat(e SqlExecutor, query string, args ...interface{}) (float64, error) { + var h float64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return 0, err + } + return h, nil +} + +// SelectNullFloat executes the given query, which should be a SELECT statement for a single +// float column, and returns the value of the first row returned. If no rows are +// found, the empty sql.NullInt64 value is returned. +func SelectNullFloat(e SqlExecutor, query string, args ...interface{}) (sql.NullFloat64, error) { + var h sql.NullFloat64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return h, err + } + return h, nil +} + +// SelectStr executes the given query, which should be a SELECT statement for a single +// char/varchar column, and returns the value of the first row returned. If no rows are +// found, an empty string is returned. +func SelectStr(e SqlExecutor, query string, args ...interface{}) (string, error) { + var h string + err := selectVal(e, &h, query, args...) 
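A sketch of the single-value helpers above in use; the invoice table and dbmap carry over as assumptions from the earlier sketches:

	// dbmap (an assumed *gorp.DbMap) satisfies SqlExecutor, so it can be
	// passed directly; per the docs above, a missing row yields 0, not an error.
	count, err := gorp.SelectInt(dbmap, "select count(*) from invoice")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("invoices:", count)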
+	if err != nil && err != sql.ErrNoRows {
+		return "", err
+	}
+	return h, nil
+}
+
+// SelectNullStr executes the given query, which should be a SELECT
+// statement for a single char/varchar column, and returns the value
+// of the first row returned. If no rows are found, the empty
+// sql.NullString is returned.
+func SelectNullStr(e SqlExecutor, query string, args ...interface{}) (sql.NullString, error) {
+	var h sql.NullString
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return h, err
+	}
+	return h, nil
+}
+
+// SelectOne executes the given query (which should be a SELECT statement)
+// and binds the result to holder, which must be a pointer.
+//
+// If no row is found, an error (sql.ErrNoRows specifically) will be returned
+//
+// If more than one row is found, an error will be returned.
+//
+func SelectOne(m *DbMap, e SqlExecutor, holder interface{}, query string, args ...interface{}) error {
+	t := reflect.TypeOf(holder)
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	} else {
+		return fmt.Errorf("gorp: SelectOne holder must be a pointer, but got: %T", holder)
+	}
+
+	// Handle pointer to pointer
+	isptr := false
+	if t.Kind() == reflect.Ptr {
+		isptr = true
+		t = t.Elem()
+	}
+
+	if t.Kind() == reflect.Struct {
+		var nonFatalErr error
+
+		list, err := hookedselect(m, e, holder, query, args...)
+		if err != nil {
+			if !NonFatalError(err) {
+				return err
+			}
+			nonFatalErr = err
+		}
+
+		dest := reflect.ValueOf(holder)
+		if isptr {
+			dest = dest.Elem()
+		}
+
+		if len(list) == 0 {
+			// No rows found, return a proper error.
+			return sql.ErrNoRows
+		}
+		// check for multiple rows
+		if len(list) > 1 {
+			return fmt.Errorf("gorp: multiple rows returned for: %s - %v", query, args)
+		}
+
+		// Initialize if nil
+		if dest.IsNil() {
+			dest.Set(reflect.New(t))
+		}
+
+		// only one row found
+		src := reflect.ValueOf(list[0])
+		dest.Elem().Set(src.Elem())
+
+		return nonFatalErr
+	}
+
+	return selectVal(e, holder, query, args...)
+}
+
+func selectVal(e SqlExecutor, holder interface{}, query string, args ...interface{}) error {
+	if len(args) == 1 {
+		switch m := e.(type) {
+		case *DbMap:
+			query, args = maybeExpandNamedQuery(m, query, args)
+		case *Transaction:
+			query, args = maybeExpandNamedQuery(m.dbmap, query, args)
+		}
+	}
+	rows, err := e.Query(query, args...)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	if !rows.Next() {
+		if err := rows.Err(); err != nil {
+			return err
+		}
+		return sql.ErrNoRows
+	}
+
+	return rows.Scan(holder)
+}
+
+func hookedselect(m *DbMap, exec SqlExecutor, i interface{}, query string,
+	args ...interface{}) ([]interface{}, error) {
+
+	var nonFatalErr error
+
+	list, err := rawselect(m, exec, i, query, args...)
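A sketch of SelectOne's no-row contract; Invoice and dbmap are assumed as above, and the DbMap.SelectOne convenience method (defined elsewhere in this package) delegates to the function shown here:

	var inv Invoice
	err := dbmap.SelectOne(&inv, "select * from invoice where id=?", 42)
	switch {
	case err == sql.ErrNoRows:
		fmt.Println("no such invoice") // absent rows surface as sql.ErrNoRows
	case err != nil:
		log.Fatal(err)
	}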
+	if err != nil {
+		if !NonFatalError(err) {
+			return nil, err
+		}
+		nonFatalErr = err
+	}
+
+	// Determine where the results are: written to i, or returned in list
+	if t, _ := toSliceType(i); t == nil {
+		for _, v := range list {
+			if v, ok := v.(HasPostGet); ok {
+				err := v.PostGet(exec)
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+	} else {
+		resultsValue := reflect.Indirect(reflect.ValueOf(i))
+		for i := 0; i < resultsValue.Len(); i++ {
+			if v, ok := resultsValue.Index(i).Interface().(HasPostGet); ok {
+				err := v.PostGet(exec)
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+	return list, nonFatalErr
+}
+
+func rawselect(m *DbMap, exec SqlExecutor, i interface{}, query string,
+	args ...interface{}) ([]interface{}, error) {
+	var (
+		appendToSlice   = false // Write results to i directly?
+		intoStruct      = true  // Selecting into a struct?
+		pointerElements = true  // Are the slice elements pointers (vs values)?
+	)
+
+	var nonFatalErr error
+
+	tableName := ""
+	var dynObj DynamicTable
+	isDynamic := false
+	if dynObj, isDynamic = i.(DynamicTable); isDynamic {
+		tableName = dynObj.TableName()
+	}
+
+	// get type for i, verifying it's a supported destination
+	t, err := toType(i)
+	if err != nil {
+		var err2 error
+		if t, err2 = toSliceType(i); t == nil {
+			if err2 != nil {
+				return nil, err2
+			}
+			return nil, err
+		}
+		pointerElements = t.Kind() == reflect.Ptr
+		if pointerElements {
+			t = t.Elem()
+		}
+		appendToSlice = true
+		intoStruct = t.Kind() == reflect.Struct
+	}
+
+	// If the caller supplied a single struct/map argument, assume a "named
+	// parameter" query. Extract the named arguments from the struct/map, create
+	// the flat arg slice, and rewrite the query to use the dialect's placeholder.
+	if len(args) == 1 {
+		query, args = maybeExpandNamedQuery(m, query, args)
+	}
+
+	// Run the query
+	rows, err := exec.Query(query, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	// Fetch the column names as returned from db
+	cols, err := rows.Columns()
+	if err != nil {
+		return nil, err
+	}
+
+	if !intoStruct && len(cols) > 1 {
+		return nil, fmt.Errorf("gorp: select into non-struct slice requires 1 column, got %d", len(cols))
+	}
+
+	var colToFieldIndex [][]int
+	if intoStruct {
+		colToFieldIndex, err = columnToFieldIndex(m, t, tableName, cols)
+		if err != nil {
+			if !NonFatalError(err) {
+				return nil, err
+			}
+			nonFatalErr = err
+		}
+	}
+
+	conv := m.TypeConverter
+
+	// Add results to one of these two slices.
+	var (
+		list       = make([]interface{}, 0)
+		sliceValue = reflect.Indirect(reflect.ValueOf(i))
+	)
+
+	for {
+		if !rows.Next() {
+			// if an error occurred, return it
+			if rows.Err() != nil {
+				return nil, rows.Err()
+			}
+			// time to exit from outer "for" loop
+			break
+		}
+		v := reflect.New(t)
+
+		if isDynamic {
+			v.Interface().(DynamicTable).SetTableName(tableName)
+		}
+
+		dest := make([]interface{}, len(cols))
+
+		custScan := make([]CustomScanner, 0)
+
+		for x := range cols {
+			f := v.Elem()
+			if intoStruct {
+				index := colToFieldIndex[x]
+				if index == nil {
+					// this field is not present in the struct, so create a dummy
+					// value for rows.Scan to scan into
+					var dummy dummyField
+					dest[x] = &dummy
+					continue
+				}
+				f = f.FieldByIndex(index)
+			}
+			target := f.Addr().Interface()
+			if conv != nil {
+				scanner, ok := conv.FromDb(target)
+				if ok {
+					target = scanner.Holder
+					custScan = append(custScan, scanner)
+				}
+			}
+			dest[x] = target
+		}
+
+		err = rows.Scan(dest...)
+ if err != nil { + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if appendToSlice { + if !pointerElements { + v = v.Elem() + } + sliceValue.Set(reflect.Append(sliceValue, v)) + } else { + list = append(list, v.Interface()) + } + } + + if appendToSlice && sliceValue.IsNil() { + sliceValue.Set(reflect.MakeSlice(sliceValue.Type(), 0, 0)) + } + + return list, nonFatalErr +} diff --git a/vendor/github.com/go-gorp/gorp/v3/table.go b/vendor/github.com/go-gorp/gorp/v3/table.go new file mode 100644 index 000000000..5931b2d9e --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/v3/table.go @@ -0,0 +1,258 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package gorp + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// TableMap represents a mapping between a Go struct and a database table +// Use dbmap.AddTable() or dbmap.AddTableWithName() to create these +type TableMap struct { + // Name of database table. + TableName string + SchemaName string + gotype reflect.Type + Columns []*ColumnMap + keys []*ColumnMap + indexes []*IndexMap + uniqueTogether [][]string + version *ColumnMap + insertPlan bindPlan + updatePlan bindPlan + deletePlan bindPlan + getPlan bindPlan + dbmap *DbMap +} + +// ResetSql removes cached insert/update/select/delete SQL strings +// associated with this TableMap. Call this if you've modified +// any column names or the table name itself. +func (t *TableMap) ResetSql() { + t.insertPlan = bindPlan{} + t.updatePlan = bindPlan{} + t.deletePlan = bindPlan{} + t.getPlan = bindPlan{} +} + +// SetKeys lets you specify the fields on a struct that map to primary +// key columns on the table. If isAutoIncr is set, result.LastInsertId() +// will be used after INSERT to bind the generated id to the Go struct. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if isAutoIncr is true, and fieldNames length != 1 +// +func (t *TableMap) SetKeys(isAutoIncr bool, fieldNames ...string) *TableMap { + if isAutoIncr && len(fieldNames) != 1 { + panic(fmt.Sprintf( + "gorp: SetKeys: fieldNames length must be 1 if key is auto-increment. (Saw %v fieldNames)", + len(fieldNames))) + } + t.keys = make([]*ColumnMap, 0) + for _, name := range fieldNames { + colmap := t.ColMap(name) + colmap.isPK = true + colmap.isAutoIncr = isAutoIncr + t.keys = append(t.keys, colmap) + } + t.ResetSql() + + return t +} + +// SetUniqueTogether lets you specify uniqueness constraints across multiple +// columns on the table. Each call adds an additional constraint for the +// specified columns. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if fieldNames length < 2. +// +func (t *TableMap) SetUniqueTogether(fieldNames ...string) *TableMap { + if len(fieldNames) < 2 { + panic(fmt.Sprintf( + "gorp: SetUniqueTogether: must provide at least two fieldNames to set uniqueness constraint.")) + } + + columns := make([]string, 0, len(fieldNames)) + for _, name := range fieldNames { + columns = append(columns, name) + } + + for _, existingColumns := range t.uniqueTogether { + if equal(existingColumns, columns) { + return t + } + } + t.uniqueTogether = append(t.uniqueTogether, columns) + t.ResetSql() + + return t +} + +// ColMap returns the ColumnMap pointer matching the given struct field +// name. 
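A sketch of the mapping calls that feed the cached bind plans above; the *sql.DB handle and the dialect choice are assumptions, and Invoice carries over from the earlier sketches:

	// db is an assumed *sql.DB; SqliteDialect is one of the dialects this
	// package ships.
	dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}}
	t := dbmap.AddTableWithName(Invoice{}, "invoice")
	t.SetKeys(true, "Id") // auto-increment primary key; exactly one field allowed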
It panics if the struct does not contain a field matching this
+// name.
+func (t *TableMap) ColMap(field string) *ColumnMap {
+	col := colMapOrNil(t, field)
+	if col == nil {
+		e := fmt.Sprintf("No ColumnMap in table %s type %s with field %s",
+			t.TableName, t.gotype.Name(), field)
+
+		panic(e)
+	}
+	return col
+}
+
+func colMapOrNil(t *TableMap, field string) *ColumnMap {
+	for _, col := range t.Columns {
+		if col.fieldName == field || col.ColumnName == field {
+			return col
+		}
+	}
+	return nil
+}
+
+// IdxMap returns the IndexMap pointer matching the given index name.
+func (t *TableMap) IdxMap(field string) *IndexMap {
+	for _, idx := range t.indexes {
+		if idx.IndexName == field {
+			return idx
+		}
+	}
+	return nil
+}
+
+// AddIndex registers the index with gorp for the specified table with the given parameters.
+// This operation is idempotent. If the index is already mapped, the
+// existing *IndexMap is returned.
+// It panics if one of the given index columns does not exist.
+//
+// Automatically calls ResetSql() to ensure SQL statements are regenerated.
+//
+func (t *TableMap) AddIndex(name string, idxtype string, columns []string) *IndexMap {
+	// check if we have an index with this name already
+	for _, idx := range t.indexes {
+		if idx.IndexName == name {
+			return idx
+		}
+	}
+	for _, icol := range columns {
+		if res := t.ColMap(icol); res == nil {
+			e := fmt.Sprintf("No ColumnName in table %s to create index on", t.TableName)
+			panic(e)
+		}
+	}
+
+	idx := &IndexMap{IndexName: name, Unique: false, IndexType: idxtype, columns: columns}
+	t.indexes = append(t.indexes, idx)
+	t.ResetSql()
+	return idx
+}
+
+// SetVersionCol sets the column to use as the Version field. By default
+// the "Version" field is used. Returns the column found, or panics
+// if the struct does not contain a field matching this name.
+//
+// Automatically calls ResetSql() to ensure SQL statements are regenerated.
+func (t *TableMap) SetVersionCol(field string) *ColumnMap {
+	c := t.ColMap(field)
+	t.version = c
+	t.ResetSql()
+	return c
+}
+
+// SqlForCreate gets a sequence of SQL commands that will create
+// the specified table and any associated schema
+func (t *TableMap) SqlForCreate(ifNotExists bool) string {
+	s := bytes.Buffer{}
+	dialect := t.dbmap.Dialect
+
+	if strings.TrimSpace(t.SchemaName) != "" {
+		schemaCreate := "create schema"
+		if ifNotExists {
+			s.WriteString(dialect.IfSchemaNotExists(schemaCreate, t.SchemaName))
+		} else {
+			s.WriteString(schemaCreate)
+		}
+		s.WriteString(fmt.Sprintf(" %s;", t.SchemaName))
+	}
+
+	tableCreate := "create table"
+	if ifNotExists {
+		s.WriteString(dialect.IfTableNotExists(tableCreate, t.SchemaName, t.TableName))
+	} else {
+		s.WriteString(tableCreate)
+	}
+	s.WriteString(fmt.Sprintf(" %s (", dialect.QuotedTableForQuery(t.SchemaName, t.TableName)))
+
+	x := 0
+	for _, col := range t.Columns {
+		if !col.Transient {
+			if x > 0 {
+				s.WriteString(", ")
+			}
+			stype := dialect.ToSqlType(col.gotype, col.MaxSize, col.isAutoIncr)
+			s.WriteString(fmt.Sprintf("%s %s", dialect.QuoteField(col.ColumnName), stype))
+
+			if col.isPK || col.isNotNull {
+				s.WriteString(" not null")
+			}
+			if col.isPK && len(t.keys) == 1 {
+				s.WriteString(" primary key")
+			}
+			if col.Unique {
+				s.WriteString(" unique")
+			}
+			if col.isAutoIncr {
+				s.WriteString(fmt.Sprintf(" %s", dialect.AutoIncrStr()))
+			}
+
+			x++
+		}
+	}
+	if len(t.keys) > 1 {
+		s.WriteString(", primary key (")
+		for x := range t.keys {
+			if x > 0 {
+				s.WriteString(", ")
+			}
+			s.WriteString(dialect.QuoteField(t.keys[x].ColumnName))
+		}
+		s.WriteString(")")
+	}
+	if len(t.uniqueTogether) > 0 {
+		for _, columns := range t.uniqueTogether {
+			s.WriteString(", unique (")
+			for i, column := range columns {
+				if i > 0 {
+					s.WriteString(", ")
+				}
+				s.WriteString(dialect.QuoteField(column))
+			}
+			s.WriteString(")")
+		}
+	}
+	s.WriteString(") ")
+	s.WriteString(dialect.CreateTableSuffix())
+	s.WriteString(dialect.QuerySuffix())
+	return s.String()
+}
+
+func equal(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/go-gorp/gorp/v3/table_bindings.go b/vendor/github.com/go-gorp/gorp/v3/table_bindings.go
new file mode 100644
index 000000000..43c0b3dd4
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/table_bindings.go
@@ -0,0 +1,305 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"sync"
+)
+
+// CustomScanner binds a database column value to a Go type
+type CustomScanner struct {
+	// After a row is scanned, Holder will contain the value from the database column.
+	// Initialize the CustomScanner with the concrete Go type you wish the database
+	// driver to scan the raw column into.
+	Holder interface{}
+	// Target typically holds a pointer to the target struct field to bind the Holder
+	// value to.
+	Target interface{}
+	// Binder is a custom function that converts the holder value to the target type
+	// and sets target accordingly. This function should return error if a problem
+	// occurs converting the holder to the target.
+ Binder func(holder interface{}, target interface{}) error +} + +// Used to filter columns when selectively updating +type ColumnFilter func(*ColumnMap) bool + +func acceptAllFilter(col *ColumnMap) bool { + return true +} + +// Bind is called automatically by gorp after Scan() +func (me CustomScanner) Bind() error { + return me.Binder(me.Holder, me.Target) +} + +type bindPlan struct { + query string + argFields []string + keyFields []string + versField string + autoIncrIdx int + autoIncrFieldName string + once sync.Once +} + +func (plan *bindPlan) createBindInstance(elem reflect.Value, conv TypeConverter) (bindInstance, error) { + bi := bindInstance{query: plan.query, autoIncrIdx: plan.autoIncrIdx, autoIncrFieldName: plan.autoIncrFieldName, versField: plan.versField} + if plan.versField != "" { + bi.existingVersion = elem.FieldByName(plan.versField).Int() + } + + var err error + + for i := 0; i < len(plan.argFields); i++ { + k := plan.argFields[i] + if k == versFieldConst { + newVer := bi.existingVersion + 1 + bi.args = append(bi.args, newVer) + if bi.existingVersion == 0 { + elem.FieldByName(plan.versField).SetInt(int64(newVer)) + } + } else { + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.args = append(bi.args, val) + } + } + + for i := 0; i < len(plan.keyFields); i++ { + k := plan.keyFields[i] + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.keys = append(bi.keys, val) + } + + return bi, nil +} + +type bindInstance struct { + query string + args []interface{} + keys []interface{} + existingVersion int64 + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (t *TableMap) bindInsert(elem reflect.Value) (bindInstance, error) { + plan := &t.insertPlan + plan.once.Do(func() { + plan.autoIncrIdx = -1 + + s := bytes.Buffer{} + s2 := bytes.Buffer{} + s.WriteString(fmt.Sprintf("insert into %s (", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + first := true + for y := range t.Columns { + col := t.Columns[y] + if !(col.isAutoIncr && t.dbmap.Dialect.AutoIncrBindValue() == "") { + if !col.Transient { + if !first { + s.WriteString(",") + s2.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + + if col.isAutoIncr { + s2.WriteString(t.dbmap.Dialect.AutoIncrBindValue()) + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } else { + if col.DefaultValue == "" { + s2.WriteString(t.dbmap.Dialect.BindVar(x)) + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } else { + s2.WriteString(col.DefaultValue) + } + } + first = false + } + } else { + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } + } + s.WriteString(") values (") + s.WriteString(s2.String()) + s.WriteString(")") + if plan.autoIncrIdx > -1 { + s.WriteString(t.dbmap.Dialect.AutoIncrInsertSuffix(t.Columns[plan.autoIncrIdx])) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindUpdate(elem reflect.Value, colFilter ColumnFilter) (bindInstance, error) { + if colFilter == nil { + colFilter = acceptAllFilter + } + + plan := &t.updatePlan + plan.once.Do(func() { + s := bytes.Buffer{} + 
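For context, a sketch of the TypeConverter side that produces these CustomScanners; the csvConverter type and the comma-separated []string storage convention are illustrative assumptions:

	type csvConverter struct{}

	// ToDb is consulted on the bind path when arguments are collected.
	func (csvConverter) ToDb(val interface{}) (interface{}, error) {
		if s, ok := val.([]string); ok {
			return strings.Join(s, ","), nil // assumed storage format
		}
		return val, nil
	}

	// FromDb hands the select path a CustomScanner whose Bind() splits the text.
	func (csvConverter) FromDb(target interface{}) (gorp.CustomScanner, bool) {
		if _, ok := target.(*[]string); ok {
			binder := func(holder, target interface{}) error {
				*(target.(*[]string)) = strings.Split(*(holder.(*string)), ",")
				return nil
			}
			return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true
		}
		return gorp.CustomScanner{}, false
	}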
s.WriteString(fmt.Sprintf("update %s set ", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + x := 0 + + for y := range t.Columns { + col := t.Columns[y] + if !col.isAutoIncr && !col.Transient && colFilter(col) { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } + } + + s.WriteString(" where ") + for y := range t.keys { + col := t.keys[y] + if y > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.argFields = append(plan.argFields, col.fieldName) + plan.keyFields = append(plan.keyFields, col.fieldName) + x++ + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindDelete(elem reflect.Value) (bindInstance, error) { + plan := &t.deletePlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("delete from %s", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + for y := range t.Columns { + col := t.Columns[y] + if !col.Transient { + if col == t.version { + plan.versField = col.fieldName + } + } + } + + s.WriteString(" where ") + for x := range t.keys { + k := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(k.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, k.fieldName) + plan.argFields = append(plan.argFields, k.fieldName) + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(len(plan.argFields))) + + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindGet() *bindPlan { + plan := &t.getPlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString("select ") + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + plan.argFields = append(plan.argFields, col.fieldName) + x++ + } + } + s.WriteString(" from ") + s.WriteString(t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName)) + s.WriteString(" where ") + for x := range t.keys { + col := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, col.fieldName) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan +} diff --git a/vendor/github.com/go-gorp/gorp/v3/test_all.sh b/vendor/github.com/go-gorp/gorp/v3/test_all.sh new file mode 100644 index 
000000000..91007d645
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/test_all.sh
@@ -0,0 +1,23 @@
+#!/bin/bash -ex
+
+# on macs, you may need to:
+# export GOBUILDFLAG=-ldflags -linkmode=external
+
+echo "Running unit tests"
+go test -race
+
+echo "Testing against postgres"
+export GORP_TEST_DSN="host=postgres user=gorptest password=gorptest dbname=gorptest sslmode=disable"
+export GORP_TEST_DIALECT=postgres
+go test -tags integration $GOBUILDFLAG $@ .
+
+echo "Testing against sqlite"
+export GORP_TEST_DSN=/tmp/gorptest.bin
+export GORP_TEST_DIALECT=sqlite
+go test -tags integration $GOBUILDFLAG $@ .
+rm -f /tmp/gorptest.bin
+
+echo "Testing against mysql"
+export GORP_TEST_DSN="gorptest:gorptest@tcp(mysql)/gorptest"
+export GORP_TEST_DIALECT=mysql
+go test -tags integration $GOBUILDFLAG $@ .
diff --git a/vendor/github.com/go-gorp/gorp/v3/transaction.go b/vendor/github.com/go-gorp/gorp/v3/transaction.go
new file mode 100644
index 000000000..d505d94c9
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/v3/transaction.go
@@ -0,0 +1,239 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package gorp
+
+import (
+	"context"
+	"database/sql"
+	"time"
+)
+
+// Transaction represents a database transaction.
+// Insert/Update/Delete/Get/Exec operations will be run in the context
+// of that transaction. Transactions should be terminated with
+// a call to Commit() or Rollback()
+type Transaction struct {
+	ctx    context.Context
+	dbmap  *DbMap
+	tx     *sql.Tx
+	closed bool
+}
+
+func (t *Transaction) WithContext(ctx context.Context) SqlExecutor {
+	copy := &Transaction{}
+	*copy = *t
+	copy.ctx = ctx
+	return copy
+}
+
+// Insert has the same behavior as DbMap.Insert(), but runs in a transaction.
+func (t *Transaction) Insert(list ...interface{}) error {
+	return insert(t.dbmap, t, list...)
+}
+
+// Update has the same behavior as DbMap.Update(), but runs in a transaction.
+func (t *Transaction) Update(list ...interface{}) (int64, error) {
+	return update(t.dbmap, t, nil, list...)
+}
+
+// UpdateColumns has the same behavior as DbMap.UpdateColumns(), but runs in a transaction.
+func (t *Transaction) UpdateColumns(filter ColumnFilter, list ...interface{}) (int64, error) {
+	return update(t.dbmap, t, filter, list...)
+}
+
+// Delete has the same behavior as DbMap.Delete(), but runs in a transaction.
+func (t *Transaction) Delete(list ...interface{}) (int64, error) {
+	return delete(t.dbmap, t, list...)
+}
+
+// Get has the same behavior as DbMap.Get(), but runs in a transaction.
+func (t *Transaction) Get(i interface{}, keys ...interface{}) (interface{}, error) {
+	return get(t.dbmap, t, i, keys...)
+}
+
+// Select has the same behavior as DbMap.Select(), but runs in a transaction.
+func (t *Transaction) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return hookedselect(t.dbmap, t, i, query, args...)
+}
+
+// Exec has the same behavior as DbMap.Exec(), but runs in a transaction.
+func (t *Transaction) Exec(query string, args ...interface{}) (sql.Result, error) {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, args...)
+	}
+	return maybeExpandNamedQueryAndExec(t, query, args...)
+}
+
+// SelectInt is a convenience wrapper around the gorp.SelectInt function.
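A sketch of the transaction lifecycle these methods support; dbmap and inv carry over as assumptions from the earlier sketches:

	tx, err := dbmap.Begin() // Begin returns a *gorp.Transaction
	if err != nil {
		log.Fatal(err)
	}
	if err := tx.Insert(&inv); err != nil {
		_ = tx.Rollback() // a second Commit/Rollback would return sql.ErrTxDone
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}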
+func (t *Transaction) SelectInt(query string, args ...interface{}) (int64, error) {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectInt(t, query, args...)
+}
+
+// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function.
+func (t *Transaction) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectNullInt(t, query, args...)
+}
+
+// SelectFloat is a convenience wrapper around the gorp.SelectFloat function.
+func (t *Transaction) SelectFloat(query string, args ...interface{}) (float64, error) {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectFloat(t, query, args...)
+}
+
+// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function.
+func (t *Transaction) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectNullFloat(t, query, args...)
+}
+
+// SelectStr is a convenience wrapper around the gorp.SelectStr function.
+func (t *Transaction) SelectStr(query string, args ...interface{}) (string, error) {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectStr(t, query, args...)
+}
+
+// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function.
+func (t *Transaction) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectNullStr(t, query, args...)
+}
+
+// SelectOne is a convenience wrapper around the gorp.SelectOne function.
+func (t *Transaction) SelectOne(holder interface{}, query string, args ...interface{}) error {
+	if t.dbmap.ExpandSliceArgs {
+		expandSliceArgs(&query, args...)
+	}
+
+	return SelectOne(t.dbmap, t, holder, query, args...)
+}
+
+// Commit commits the underlying database transaction.
+func (t *Transaction) Commit() error {
+	if !t.closed {
+		t.closed = true
+		if t.dbmap.logger != nil {
+			now := time.Now()
+			defer t.dbmap.trace(now, "commit;")
+		}
+		return t.tx.Commit()
+	}
+
+	return sql.ErrTxDone
+}
+
+// Rollback rolls back the underlying database transaction.
+func (t *Transaction) Rollback() error {
+	if !t.closed {
+		t.closed = true
+		if t.dbmap.logger != nil {
+			now := time.Now()
+			defer t.dbmap.trace(now, "rollback;")
+		}
+		return t.tx.Rollback()
+	}
+
+	return sql.ErrTxDone
+}
+
+// Savepoint creates a savepoint with the given name. The name is interpolated
+// directly into the SQL SAVEPOINT statement, so you must sanitize it if it is
+// derived from user input.
+func (t *Transaction) Savepoint(name string) error {
+	query := "savepoint " + t.dbmap.Dialect.QuoteField(name)
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, nil)
+	}
+	_, err := exec(t, query)
+	return err
+}
+
+// RollbackToSavepoint rolls back to the savepoint with the given name. The
+// name is interpolated directly into the SQL SAVEPOINT statement, so you must
+// sanitize it if it is derived from user input.
+func (t *Transaction) RollbackToSavepoint(savepoint string) error {
+	query := "rollback to savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, nil)
+	}
+	_, err := exec(t, query)
+	return err
+}
+
+// ReleaseSavepoint releases the savepoint with the given name.
The name is +// interpolated directly into the SQL SAVEPOINT statement, so you must sanitize +// it if it is derived from user input. +func (t *Transaction) ReleaseSavepoint(savepoint string) error { + query := "release savepoint " + t.dbmap.Dialect.QuoteField(savepoint) + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + _, err := exec(t, query) + return err +} + +// Prepare has the same behavior as DbMap.Prepare(), but runs in a transaction. +func (t *Transaction) Prepare(query string) (*sql.Stmt, error) { + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + return prepare(t, query) +} + +func (t *Transaction) QueryRow(query string, args ...interface{}) *sql.Row { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&query, args...) + } + + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, args...) + } + return queryRow(t, query, args...) +} + +func (t *Transaction) Query(q string, args ...interface{}) (*sql.Rows, error) { + if t.dbmap.ExpandSliceArgs { + expandSliceArgs(&q, args...) + } + + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, q, args...) + } + return query(t, q, args...) +} diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 000000000..515866789
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 000000000..93a8aab51
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+	old := globalVerbosity
+	globalVerbosity = v
+	return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+	return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+	if std == nil {
+		// Go's log.Default() is only available in 1.16 and higher.
+		std = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if opts.Depth < 0 {
+		opts.Depth = 0
+	}
+
+	fopts := funcr.Options{
+		LogCaller: funcr.MessageClass(opts.LogCaller),
+	}
+
+	sl := &logger{
+		Formatter: funcr.NewFormatter(fopts),
+		std:       std,
+	}
+
+	// For skipping our own logger.Info/Error.
+	sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+	return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// Depth biases the assumed number of call frames to the "true" caller.
+	// This is useful when the calling code calls a function which then calls
+	// stdr (e.g. a logging shim to another API). Values less than zero will
+	// be treated as zero.
+	Depth int
+
+	// LogCaller tells stdr to add a "caller" key to some or all log lines.
+	// Go's log package has options to log this natively, too.
+	LogCaller MessageClass
+
+	// TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+	// Output is the same as log.Output and log.Logger.Output.
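A sketch of constructing this adapter and exercising the verbosity gate described above; the error value and key/value pairs are illustrative:

	stdr.SetVerbosity(1) // global gate; not concurrent-safe, set once at startup
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
	logger.V(0).Info("always emitted", "key", "value")
	logger.V(2).Info("suppressed until SetVerbosity(2) or higher")
	logger.Error(errors.New("assumed error"), "operation failed")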
+	Output(calldepth int, logline string) error
+}
+
+type logger struct {
+	funcr.Formatter
+	std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+	return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to
+// test type conversion.
+type Underlier interface {
+	GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+	return l.std
+}
diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/shlex/COPYING
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README new file mode 100644 index 000000000..c86bcc066 --- /dev/null +++ b/vendor/github.com/google/shlex/README @@ -0,0 +1,2 @@ +go-shlex is a simple lexer for go that supports shell-style quoting, +commenting, and escaping. diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go new file mode 100644 index 000000000..d98308bce --- /dev/null +++ b/vendor/github.com/google/shlex/shlex.go @@ -0,0 +1,416 @@ +/* +Copyright 2012 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+    shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+    l := NewLexer(os.Stdin)
+    for {
+        token, err := l.Next()
+        if err != nil {
+            break
+        }
+        // process token
+    }
+
+To access the raw token stream (which includes tokens for comments):
+
+    t := NewTokenizer(os.Stdin)
+    for {
+        token, err := t.Next()
+        if err != nil {
+            break
+        }
+        // process token
+    }
+
+*/
+package shlex
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// TokenType is a top-level token classification: a word, space, comment, or unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: a quote, space, or escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+	tokenType TokenType
+	value     string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.tokenType != b.tokenType {
+		return false
+	}
+	return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+	spaceRunes            = " \t\r\n"
+	escapingQuoteRunes    = `"`
+	nonEscapingQuoteRunes = "'"
+	escapeRunes           = `\`
+	commentRunes          = "#"
+)
+
+// Classes of rune token
+const (
+	unknownRuneClass runeTokenClass = iota
+	spaceRuneClass
+	escapingQuoteRuneClass
+	nonEscapingQuoteRuneClass
+	escapeRuneClass
+	commentRuneClass
+	eofRuneClass
+)
+
+// Classes of lexical token
+const (
+	UnknownToken TokenType = iota
+	WordToken
+	SpaceToken
+	CommentToken
+)
+
+// Lexer state machine states
+const (
+	startState           lexerState = iota // no runes have been seen
+	inWordState                            // processing regular runes in a word
+	escapingState                          // we have just consumed an escape rune; the next rune is literal
+	escapingQuotedState                    // we have just consumed an escape rune within a quoted string
+	quotingEscapingState                   // we are within a quoted string that supports escaping ("...")
+	quotingState                           // we are within a string that does not support escaping ('...')
+	commentState                           // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+	for _, runeChar := range runes {
+		typeMap[runeChar] = tokenType
+	}
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
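+// Runes that are absent from the map classify as the zero value,
+// unknownRuneClass, which the state machine treats as ordinary word runes.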
+func newDefaultClassifier() tokenClassifier {
+	t := tokenClassifier{}
+	t.addRuneClass(spaceRunes, spaceRuneClass)
+	t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+	t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+	t.addRuneClass(escapeRunes, escapeRuneClass)
+	t.addRuneClass(commentRunes, commentRuneClass)
+	return t
+}
+
+// ClassifyRune classifies a rune.
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+	return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+	return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+	for {
+		token, err := (*Tokenizer)(l).Next()
+		if err != nil {
+			return "", err
+		}
+		switch token.tokenType {
+		case WordToken:
+			return token.value, nil
+		case CommentToken:
+			// skip comments
+		default:
+			return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+		}
+	}
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens.
+type Tokenizer struct {
+	input      bufio.Reader
+	classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+	input := bufio.NewReader(r)
+	classifier := newDefaultClassifier()
+	return &Tokenizer{
+		input:      *input,
+		classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It returns an error if it reaches a state it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) {
+	state := startState
+	var tokenType TokenType
+	var value []rune
+	var nextRune rune
+	var nextRuneType runeTokenClass
+	var err error
+
+	for {
+		nextRune, _, err = t.input.ReadRune()
+		nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+		if err == io.EOF {
+			nextRuneType = eofRuneClass
+			err = nil
+		} else if err != nil {
+			return nil, err
+		}
+
+		switch state {
+		case startState: // no runes read yet
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						return nil, io.EOF
+					}
+				case spaceRuneClass:
+					{
+					}
+				case escapingQuoteRuneClass:
+					{
+						tokenType = WordToken
+						state = quotingEscapingState
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						tokenType = WordToken
+						state = quotingState
+					}
+				case escapeRuneClass:
+					{
+						tokenType = WordToken
+						state = escapingState
+					}
+				case commentRuneClass:
+					{
+						tokenType = CommentToken
+						state = commentState
+					}
+				default:
+					{
+						tokenType = WordToken
+						value = append(value, nextRune)
+						state = inWordState
+					}
+				}
+			}
+		case inWordState: // in a regular word
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case spaceRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case escapingQuoteRuneClass:
+					{
+						state = quotingEscapingState
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						state = quotingState
+					}
+				case escapeRuneClass:
+					{
+						state = escapingState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case escapingState: // the rune after an escape character
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found after escape character")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+
return token, err + } + default: + { + state = inWordState + value = append(value, nextRune) + } + } + } + case escapingQuotedState: // the next rune after an escape character, in double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = quotingEscapingState + value = append(value, nextRune) + } + } + } + case quotingEscapingState: // in escaping double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = inWordState + } + case escapeRuneClass: + { + state = escapingQuotedState + } + default: + { + value = append(value, nextRune) + } + } + } + case quotingState: // in non-escaping single quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case nonEscapingQuoteRuneClass: + { + state = inWordState + } + default: + { + value = append(value, nextRune) + } + } + } + case commentState: // in a comment + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + if nextRune == '\n' { + state = startState + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } else { + value = append(value, nextRune) + } + } + default: + { + value = append(value, nextRune) + } + } + } + default: + { + return nil, fmt.Errorf("Unexpected state: %v", state) + } + } + } +} + +// Next returns the next token in the stream. +func (t *Tokenizer) Next() (*Token, error) { + return t.scanStream() +} + +// Split partitions a string into a slice of strings. +func Split(s string) ([]string, error) { + l := NewLexer(strings.NewReader(s)) + subStrings := make([]string, 0) + for { + word, err := l.Next() + if err != nil { + if err == io.EOF { + return subStrings, nil + } + return subStrings, err + } + subStrings = append(subStrings, word) + } +} diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS new file mode 100644 index 000000000..b722392ee --- /dev/null +++ b/vendor/github.com/gorilla/mux/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of gorilla/mux authors for copyright purposes. +# +# Please keep the list sorted. + +Google LLC (https://opensource.google.com/) +Kamil Kisielk +Matt Silverlock +Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE new file mode 100644 index 000000000..6903df638 --- /dev/null +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md new file mode 100644 index 000000000..35eea9f10 --- /dev/null +++ b/vendor/github.com/gorilla/mux/README.md @@ -0,0 +1,805 @@ +# gorilla/mux + +[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) +[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) + +![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) + +https://www.gorillatoolkit.org/pkg/mux + +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. + +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: + +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts, paths and query values can have variables with an optional regular expression. +* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. + +--- + +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Static Files](#static-files) +* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) 
+* [Registered URLs](#registered-urls) +* [Walking Routes](#walking-routes) +* [Graceful Shutdown](#graceful-shutdown) +* [Middleware](#middleware) +* [Handling CORS Requests](#handling-cors-requests) +* [Testing Handlers](#testing-handlers) +* [Full Example](#full-example) + +--- + +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` + +## Examples + +Let's start registering a couple of URL paths and handlers: + +```go +func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: + +```go +func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Category: %v\n", vars["category"]) +} +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. + +### Matching Routes + +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.example.com") +``` + +There are several other matchers that can be added. To match path prefixes: + +```go +r.PathPrefix("/products/") +``` + +...or HTTP methods: + +```go +r.Methods("GET", "POST") +``` + +...or URL schemes: + +```go +r.Schemes("https") +``` + +...or header values: + +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` + +...or query values: + +```go +r.Queries("key", "value") +``` + +...or to use a custom matcher function: + +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` + +...and finally, it is possible to combine several matchers in a single route: + +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") +``` + +Routes are tested in the order they were added to the router. If two routes match, the first one wins: + +```go +r := mux.NewRouter() +r.HandleFunc("/specific", specificHandler) +r.PathPrefix("/").Handler(catchAllHandler) +``` + +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". + +For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it:

```go
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
```

Then register routes in the subrouter:

```go
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```

The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.

Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.

There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:

```go
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```


### Static Files

Note that the path provided to `PathPrefix()` represents a "wildcard": calling
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
request that matches "/static/\*". This makes it easy to serve static files with mux:

```go
func main() {
	var dir string

	flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
	flag.Parse()
	r := mux.NewRouter()

	// This will serve files under http://localhost:8000/static/
	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))

	srv := &http.Server{
		Handler: r,
		Addr:    "127.0.0.1:8000",
		// Good practice: enforce timeouts for servers you create!
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}

	log.Fatal(srv.ListenAndServe())
}
```

### Serving Single Page Applications

Most of the time it makes sense to serve your SPA on a separate web server from your API,
but sometimes it's desirable to serve them both from one place. It's possible to write a simple
handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
mux's powerful routing for your API endpoints.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"github.com/gorilla/mux"
)

// spaHandler implements the http.Handler interface, so we can use it
// to respond to HTTP requests. The path to the static directory and
// path to the index file within that static directory are used to
// serve the SPA in the given static directory.
type spaHandler struct {
	staticPath string
	indexPath  string
}

// ServeHTTP inspects the URL path to locate a file within the static dir
// on the SPA handler. If a file is found, it will be served. If not, the
// file located at the index path on the SPA handler will be served. This
// is suitable behavior for serving an SPA (single page application).
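// The request path is made absolute (and therefore cleaned) before being
// joined to staticPath, so ".." segments cannot escape the static directory.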
+func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // get the absolute path to prevent directory traversal + path, err := filepath.Abs(r.URL.Path) + if err != nil { + // if we failed to get the absolute path respond with a 400 bad request + // and stop + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // prepend the path with the path to the static directory + path = filepath.Join(h.staticPath, path) + + // check whether a file exists at the given path + _, err = os.Stat(path) + if os.IsNotExist(err) { + // file does not exist, serve index.html + http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) + return + } else if err != nil { + // if we got an error (that wasn't that the file doesn't exist) stating the + // file, return a 500 internal server error and stop + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // otherwise, use http.FileServer to serve the static dir + http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) +} + +func main() { + router := mux.NewRouter() + + router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { + // an example API handler + json.NewEncoder(w).Encode(map[string]bool{"ok": true}) + }) + + spa := spaHandler{staticPath: "build", indexPath: "index.html"} + router.PathPrefix("/").Handler(spa) + + srv := &http.Server{ + Handler: router, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Registered URLs + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") +``` + +To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: + +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` + +...and the result will be a `url.URL` with the following path: + +``` +"/articles/technology/42" +``` + +This also works for host and query value variables: + +```go +r := mux.NewRouter() +r.Host("{subdomain}.example.com"). + Path("/articles/{category}/{id:[0-9]+}"). + Queries("filter", "{filter}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42", + "filter", "gorilla") +``` + +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` + +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` + +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. 
For the previous route, we would do: + +```go +// "http://news.example.com/" +host, err := r.Get("article").URLHost("subdomain", "news") + +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` + +And if you use subrouters, host and path defined separately can be built as well: + +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.example.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.example.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +### Walking Routes + +The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, +the following prints all of the registered routes: + +```go +package main + +import ( + "fmt" + "net/http" + "strings" + + "github.com/gorilla/mux" +) + +func handler(w http.ResponseWriter, r *http.Request) { + return +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.HandleFunc("/products", handler).Methods("POST") + r.HandleFunc("/articles", handler).Methods("GET") + r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") + r.HandleFunc("/authors", handler).Queries("surname", "{surname}") + err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + pathTemplate, err := route.GetPathTemplate() + if err == nil { + fmt.Println("ROUTE:", pathTemplate) + } + pathRegexp, err := route.GetPathRegexp() + if err == nil { + fmt.Println("Path regexp:", pathRegexp) + } + queriesTemplates, err := route.GetQueriesTemplates() + if err == nil { + fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) + } + queriesRegexps, err := route.GetQueriesRegexp() + if err == nil { + fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) + } + methods, err := route.GetMethods() + if err == nil { + fmt.Println("Methods:", strings.Join(methods, ",")) + } + fmt.Println() + return nil + }) + + if err != nil { + fmt.Println(err) + } + + http.Handle("/", r) +} +``` + +### Graceful Shutdown + +Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: + +```go +package main + +import ( + "context" + "flag" + "log" + "net/http" + "os" + "os/signal" + "time" + + "github.com/gorilla/mux" +) + +func main() { + var wait time.Duration + flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m") + flag.Parse() + + r := mux.NewRouter() + // Add your routes as needed + + srv := &http.Server{ + Addr: "0.0.0.0:8080", + // Good practice to set timeouts to avoid Slowloris attacks. + WriteTimeout: time.Second * 15, + ReadTimeout: time.Second * 15, + IdleTimeout: time.Second * 60, + Handler: r, // Pass our instance of gorilla/mux in. + } + + // Run our server in a goroutine so that it doesn't block. + go func() { + if err := srv.ListenAndServe(); err != nil { + log.Println(err) + } + }() + + c := make(chan os.Signal, 1) + // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) + // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. + signal.Notify(c, os.Interrupt) + + // Block until we receive our signal. + <-c + + // Create a deadline to wait for. 
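	// The wait value comes from the -graceful-timeout flag parsed above and
	// bounds how long Shutdown will wait for in-flight requests.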
+	ctx, cancel := context.WithTimeout(context.Background(), wait)
+	defer cancel()
+	// Doesn't block if no connections, but will otherwise wait
+	// until the timeout deadline.
+	srv.Shutdown(ctx)
+	// Optionally, you could run srv.Shutdown in a goroutine and block on
+	// <-ctx.Done() if your application should wait for other services
+	// to finalize based on context cancellation.
+	log.Println("shutting down")
+	os.Exit(0)
+}
+```

+### Middleware
+
+Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
+Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
+
+Mux middlewares are defined using the de facto standard type:
+
+```go
+type MiddlewareFunc func(http.Handler) http.Handler
+```
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+```go
+func loggingMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Do stuff here
+		log.Println(r.RequestURI)
+		// Call the next handler, which can be another middleware in the chain, or the final handler.
+		next.ServeHTTP(w, r)
+	})
+}
+```
+
+Middlewares can be added to a router using `Router.Use()`:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.Use(loggingMiddleware)
+```
+
+A more complex authentication middleware, which maps session tokens to users, could be written as:
+
+```go
+// Define our struct
+type authenticationMiddleware struct {
+	tokenUsers map[string]string
+}
+
+// Initialize it somewhere
+func (amw *authenticationMiddleware) Populate() {
+	amw.tokenUsers["00000000"] = "user0"
+	amw.tokenUsers["aaaaaaaa"] = "userA"
+	amw.tokenUsers["05f717e5"] = "randomUser"
+	amw.tokenUsers["deadbeef"] = "user0"
+}
+
+// Middleware function, which will be called for each request
+func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		token := r.Header.Get("X-Session-Token")
+
+		if user, found := amw.tokenUsers[token]; found {
+			// We found the token in our map
+			log.Printf("Authenticated user %s\n", user)
+			// Pass down the request to the next middleware (or final handler)
+			next.ServeHTTP(w, r)
+		} else {
+			// Write an error and stop the handler chain
+			http.Error(w, "Forbidden", http.StatusForbidden)
+		}
+	})
+}
+```
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+
+amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
+amw.Populate()
+
+r.Use(amw.Middleware)
+```
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
+
+### Handling CORS Requests
+
+[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
+
+* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
+* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
+* If you do not specify any methods, then:
+> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
+
+Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
+
+```go
+package main
+
+import (
+	"net/http"
+	"github.com/gorilla/mux"
+)
+
+func main() {
+	r := mux.NewRouter()
+
+	// IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
+	r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
+	r.Use(mux.CORSMethodMiddleware(r))
+
+	http.ListenAndServe(":8080", r)
+}
+
+func fooHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	if r.Method == http.MethodOptions {
+		return
+	}
+
+	w.Write([]byte("foo"))
+}
+```
+
+And a request to `/foo` using something like:
+
+```bash
+curl localhost:8080/foo -v
+```
+
+Would look like:
+
+```bash
+* Trying ::1...
+* TCP_NODELAY set
+* Connected to localhost (::1) port 8080 (#0)
+> GET /foo HTTP/1.1
+> Host: localhost:8080
+> User-Agent: curl/7.59.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
+< Access-Control-Allow-Origin: *
+< Date: Fri, 28 Jun 2019 20:13:30 GMT
+< Content-Length: 3
+< Content-Type: text/plain; charset=utf-8
+<
+* Connection #0 to host localhost left intact
+foo
+```
+
+### Testing Handlers
+
+Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
+
+First, our simple HTTP handler:
+
+```go
+// endpoints.go
+package main
+
+func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
+	// A very simple health check.
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+
+	// In the future we could report back on the status of our DB, or our cache
+	// (e.g. Redis) by performing a simple PING, and include them in the response.
+	io.WriteString(w, `{"alive": true}`)
+}
+
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/health", HealthCheckHandler)
+
+	log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test code:
+
+```go
+// endpoints_test.go
+package main
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+func TestHealthCheckHandler(t *testing.T) {
+	// Create a request to pass to our handler. We don't have any query parameters for now, so we'll
+	// pass 'nil' as the third parameter.
+	req, err := http.NewRequest("GET", "/health", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
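+	// The recorder's Code and Body fields are what the assertions below inspect.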
+ rr := httptest.NewRecorder() + handler := http.HandlerFunc(HealthCheckHandler) + + // Our handlers satisfy http.Handler, so we can call their ServeHTTP method + // directly and pass in our Request and ResponseRecorder. + handler.ServeHTTP(rr, req) + + // Check the status code is what we expect. + if status := rr.Code; status != http.StatusOK { + t.Errorf("handler returned wrong status code: got %v want %v", + status, http.StatusOK) + } + + // Check the response body is what we expect. + expected := `{"alive": true}` + if rr.Body.String() != expected { + t.Errorf("handler returned unexpected body: got %v want %v", + rr.Body.String(), expected) + } +} +``` + +In the case that our routes have [variables](#examples), we can pass those in the request. We could write +[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple +possible route variables as needed. + +```go +// endpoints.go +func main() { + r := mux.NewRouter() + // A route with a route variable: + r.HandleFunc("/metrics/{type}", MetricsHandler) + + log.Fatal(http.ListenAndServe("localhost:8080", r)) +} +``` + +Our test file, with a table-driven test of `routeVariables`: + +```go +// endpoints_test.go +func TestMetricsHandler(t *testing.T) { + tt := []struct{ + routeVariable string + shouldPass bool + }{ + {"goroutines", true}, + {"heap", true}, + {"counters", true}, + {"queries", true}, + {"adhadaeqm3k", false}, + } + + for _, tc := range tt { + path := fmt.Sprintf("/metrics/%s", tc.routeVariable) + req, err := http.NewRequest("GET", path, nil) + if err != nil { + t.Fatal(err) + } + + rr := httptest.NewRecorder() + + // Need to create a router that we can pass the request through so that the vars will be added to the context + router := mux.NewRouter() + router.HandleFunc("/metrics/{type}", MetricsHandler) + router.ServeHTTP(rr, req) + + // In this case, our MetricsHandler returns a non-200 response + // for a route variable it doesn't know about. + if rr.Code == http.StatusOK && !tc.shouldPass { + t.Errorf("handler should have failed on routeVariable %s: got %v want %v", + tc.routeVariable, rr.Code, http.StatusOK) + } + } +} +``` + +## Full Example + +Here's a complete, runnable example of a small `mux` based server: + +```go +package main + +import ( + "net/http" + "log" + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + log.Fatal(http.ListenAndServe(":8000", r)) +} +``` + +## License + +BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go new file mode 100644 index 000000000..bd5a38b55 --- /dev/null +++ b/vendor/github.com/gorilla/mux/doc.go @@ -0,0 +1,306 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. 
The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts, paths and query values can have variables with an optional + regular expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: + + r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +Note that if any capturing groups are present, mux will panic() during parsing. To prevent +this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to +"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably +when capturing groups were present. + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.example.com". + r.Host("www.example.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). 
+		Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler: r,
+			Addr:    "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+		Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host and query value variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+		Path("/articles/{category}/{id:[0-9]+}").
+		Queries("filter", "{filter}").
+		HandlerFunc(ArticleHandler).
+		Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+	url, err := r.Get("article").URL("subdomain", "news",
+		"category", "technology",
+		"id", "42",
+		"filter", "gorilla")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns.
These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. + + type MiddlewareFunc func(http.Handler) http.Handler + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). + +A very basic middleware which logs the URI of the request being handled could be written as: + + func simpleMw(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. 
+ next.ServeHTTP(w, r) + }) + } + +Middlewares can be added to a router using `Router.Use()`: + + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.Use(simpleMw) + +A more complex authentication middleware, which maps session token to users, could be written as: + + // Define our struct + type authenticationMiddleware struct { + tokenUsers map[string]string + } + + // Initialize it somewhere + func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" + } + + // Middleware function, which will be called for each request + func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + next.ServeHTTP(w, r) + } else { + http.Error(w, "Forbidden", http.StatusForbidden) + } + }) + } + + r := mux.NewRouter() + r.HandleFunc("/", handler) + + amw := authenticationMiddleware{tokenUsers: make(map[string]string)} + amw.Populate() + + r.Use(amw.Middleware) + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. + +*/ +package mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go new file mode 100644 index 000000000..cb51c565e --- /dev/null +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -0,0 +1,74 @@ +package mux + +import ( + "net/http" + "strings" +) + +// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. +// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed +// to it, and then calls the handler passed as parameter to the MiddlewareFunc. +type MiddlewareFunc func(http.Handler) http.Handler + +// middleware interface is anything which implements a MiddlewareFunc named Middleware. +type middleware interface { + Middleware(handler http.Handler) http.Handler +} + +// Middleware allows MiddlewareFunc to implement the middleware interface. +func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { + return mw(handler) +} + +// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) Use(mwf ...MiddlewareFunc) { + for _, fn := range mwf { + r.middlewares = append(r.middlewares, fn) + } +} + +// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) useInterface(mw middleware) { + r.middlewares = append(r.middlewares, mw) +} + +// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header +// on requests for routes that have an OPTIONS method matcher to all the method matchers on +// the route. Routes that do not explicitly handle OPTIONS requests will not be processed +// by the middleware. See examples for usage. 
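+//
+// A minimal wiring sketch (the route and handler are illustrative):
+//
+//	r := mux.NewRouter()
+//	r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodOptions)
+//	r.Use(CORSMethodMiddleware(r))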
+func CORSMethodMiddleware(r *Router) MiddlewareFunc { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + allMethods, err := getAllMethodsForRoute(r, req) + if err == nil { + for _, v := range allMethods { + if v == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) + } + } + } + + next.ServeHTTP(w, req) + }) + } +} + +// getAllMethodsForRoute returns all the methods from method matchers matching a given +// request. +func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { + var allMethods []string + + for _, route := range r.routes { + var match RouteMatch + if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { + methods, err := route.GetMethods() + if err != nil { + return nil, err + } + + allMethods = append(allMethods, methods...) + } + } + + return allMethods, nil +} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go new file mode 100644 index 000000000..782a34b22 --- /dev/null +++ b/vendor/github.com/gorilla/mux/mux.go @@ -0,0 +1,606 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "context" + "errors" + "fmt" + "net/http" + "path" + "regexp" +) + +var ( + // ErrMethodMismatch is returned when the method in the request does not match + // the method defined against the route. + ErrMethodMismatch = errors.New("method is not allowed") + // ErrNotFound is returned when no route match is found. + ErrNotFound = errors.New("no matching route was found") +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route)} +} + +// Router registers routes to be matched and dispatches a handler. +// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + + // Configurable Handler to be used when the request method does not match the route. + MethodNotAllowedHandler http.Handler + + // Routes to be matched, in order. + routes []*Route + + // Routes by name for URL building. + namedRoutes map[string]*Route + + // If true, do not clear the request context after handling the request. + // + // Deprecated: No effect, since the context is stored on the request itself. + KeepContext bool + + // Slice of middlewares to be called after a match is found + middlewares []middleware + + // configuration shared with `Route` + routeConf +} + +// common route configuration shared between `Router` and `Route` +type routeConf struct { + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool + + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + + // Manager for the variables from host and path. + regexp routeRegexpGroup + + // List of matchers. 
+ matchers []matcher + + // The scheme used when building URLs. + buildScheme string + + buildVarsFunc BuildVarsFunc +} + +// returns an effective deep copy of `routeConf` +func copyRouteConf(r routeConf) routeConf { + c := r + + if r.regexp.path != nil { + c.regexp.path = copyRouteRegexp(r.regexp.path) + } + + if r.regexp.host != nil { + c.regexp.host = copyRouteRegexp(r.regexp.host) + } + + c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) + for _, q := range r.regexp.queries { + c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) + } + + c.matchers = make([]matcher, len(r.matchers)) + copy(c.matchers, r.matchers) + + return c +} + +func copyRouteRegexp(r *routeRegexp) *routeRegexp { + c := *r + return &c +} + +// Match attempts to match the given request against the router's registered routes. +// +// If the request matches a route of this router or one of its subrouters the Route, +// Handler, and Vars fields of the the match argument are filled and this function +// returns true. +// +// If the request does not match any of this router's or its subrouters' routes +// then this function returns false. If available, a reason for the match failure +// will be filled in the match argument's MatchErr field. If the match failure type +// (eg: not found) has a registered handler, the handler is assigned to the Handler +// field of the match argument. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + // Build middleware chain if no error was found + if match.MatchErr == nil { + for i := len(r.middlewares) - 1; i >= 0; i-- { + match.Handler = r.middlewares[i].Middleware(match.Handler) + } + } + return true + } + } + + if match.MatchErr == ErrMethodMismatch { + if r.MethodNotAllowedHandler != nil { + match.Handler = r.MethodNotAllowedHandler + return true + } + + return false + } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + match.MatchErr = ErrNotFound + return true + } + + match.MatchErr = ErrNotFound + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if !r.skipClean { + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Clean path to canonical form and redirect. + if p := cleanPath(path); p != path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + req = requestWithVars(req, match.Vars) + req = requestWithRoute(req, match.Route) + } + + if handler == nil && match.MatchErr == ErrMethodMismatch { + handler = methodNotAllowedHandler() + } + + if handler == nil { + handler = http.NotFoundHandler() + } + + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.namedRoutes[name] +} + +// GetRoute returns a route registered with the given name. 
This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.namedRoutes[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will perform a redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for +// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed +// request will be made as a GET by most clients. Use middleware or client settings +// to modify this behaviour as needed. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r +} + +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// +// If not called, the router will match the unencoded path to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.useEncodedPath = true + return r +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + // initialize a route with a copy of the parent router's configuration + route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} + r.routes = append(r.routes, route) + return route +} + +// Name registers a new route with a name. +// See Route.Name(). +func (r *Router) Name(name string) *Route { + return r.NewRoute().Name(name) +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) 
+} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + if err != nil { + return err + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. 
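+//
+// A RouteMatch is filled in by Router.Match, which can also be called
+// directly when a lookup without dispatching is needed, e.g.:
+//
+//	var match mux.RouteMatch
+//	if r.Match(req, &match) {
+//		// match.Route, match.Handler and match.Vars are now populated.
+//	}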
+type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string + + // MatchErr is set to appropriate matching error + // It is set to ErrMethodMismatch if there is a mismatch in + // the request method and route method + MatchErr error +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := r.Context().Value(varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns. +func CurrentRoute(r *http.Request) *Route { + if rv := r.Context().Value(routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func requestWithVars(r *http.Request, vars map[string]string) *http.Request { + ctx := context.WithValue(r.Context(), varsKey, vars) + return r.WithContext(ctx) +} + +func requestWithRoute(r *http.Request, route *Route) *http.Request { + ctx := context.WithValue(r.Context(), routeKey, route) + return r.WithContext(ctx) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// mapFromPairsToRegex converts variadic string parameters to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. 
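+//
+// For example, matchInArray([]string{"GET", "POST"}, "POST") returns true.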
+func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// methodNotAllowed replies to the request with an HTTP status code 405. +func methodNotAllowed(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusMethodNotAllowed) +} + +// methodNotAllowedHandler returns a simple request handler +// that replies to each request with a status code 405. +func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go new file mode 100644 index 000000000..0144842bb --- /dev/null +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -0,0 +1,388 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" +) + +type routeRegexpOptions struct { + strictSlash bool + useEncodedPath bool +} + +type regexpType int + +const ( + regexpTypePath regexpType = 0 + regexpTypeHost regexpType = 1 + regexpTypePrefix regexpType = 2 + regexpTypeQuery regexpType = 3 +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. 
+ defaultPattern := "[^/]+" + if typ == regexpTypeQuery { + defaultPattern = ".*" + } else if typ == regexpTypeHost { + defaultPattern = "[^.]+" + } + // Only match strict slash if not matching + if typ != regexpTypePath { + options.strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if options.strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if options.strictSlash { + pattern.WriteString("[/]?") + } + if typ == regexpTypeQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if typ != regexpTypePrefix { + pattern.WriteByte('$') + } + + var wildcardHostPort bool + if typ == regexpTypeHost { + if !strings.Contains(pattern.String(), ":") { + wildcardHostPort = true + } + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + + // Check for capturing groups which used to work in older versions + if reg.NumSubexp() != len(idxs)/2 { + panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + + "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") + } + + // Done! + return &routeRegexp{ + template: template, + regexpType: typ, + options: options, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + wildcardHostPort: wildcardHostPort, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // The type of match + regexpType regexpType + // Options for matching + options routeRegexpOptions + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp + // Wildcard host-port (no strict port match in hostname) + wildcardHostPort bool +} + +// Match matches the regexp against the URL host or path. 
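+//
+// Host templates without an explicit port set wildcardHostPort, so e.g. the
+// template "example.com" also matches a request host of "example.com:8080".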
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if r.regexpType == regexpTypeHost { + host := getHost(req) + if r.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } + return r.regexp.MatchString(host) + } + + if r.regexpType == regexpTypeQuery { + return r.matchQueryString(req) + } + path := req.URL.Path + if r.options.useEncodedPath { + path = req.URL.EscapedPath() + } + return r.regexp.MatchString(path) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + if r.regexpType == regexpTypeQuery { + value = url.QueryEscape(value) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getURLQuery(req *http.Request) string { + if r.regexpType != regexpTypeQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) + if ok { + return templateKey + "=" + val + } + return "" +} + +// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. +// If key was not found, empty string and false is returned. +func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { + query := []byte(rawQuery) + for len(query) > 0 { + foundKey := query + if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { + foundKey, query = foundKey[:i], foundKey[i+1:] + } else { + query = query[:0] + } + if len(foundKey) == 0 { + continue + } + var value []byte + if i := bytes.IndexByte(foundKey, '='); i >= 0 { + foundKey, value = foundKey[:i], foundKey[i+1:] + } + if len(foundKey) < len(key) { + // Cannot possibly be key. + continue + } + keyString, err := url.QueryUnescape(string(foundKey)) + if err != nil { + continue + } + if keyString != key { + continue + } + valueString, err := url.QueryUnescape(string(value)) + if err != nil { + continue + } + return valueString, true + } + return "", false +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. 
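+//
+// Each returned pair is the half-open [start, end) range of one brace group,
+// e.g. braceIndices("/{a}/{b}") returns []int{1, 4, 5, 8}.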
+func braceIndices(s string) ([]int, error) { + var level, idx int + var idxs []int + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + host := getHost(req) + if v.host.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) + } + } + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Store path variables. + if v.path != nil { + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) + // Check if we should redirect. + if v.path.options.strictSlash { + p1 := strings.HasSuffix(path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) + } + } +} + +// getHost tries its best to return the request host. +// According to section 14.23 of RFC 2616 the Host header +// can include the port number if the default value of 80 is not used. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + return r.Host +} + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] + } +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go new file mode 100644 index 000000000..750afe570 --- /dev/null +++ b/vendor/github.com/gorilla/mux/route.go @@ -0,0 +1,736 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Request handler for the route. + handler http.Handler + // If true, this route never matches: it is only used to build URLs. 
+ buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + // "global" reference to all named routes + namedRoutes map[string]*Route + + // config possibly passed in from `Router` + routeConf +} + +// SkipClean reports whether path cleaning is enabled for this route via +// Router.SkipClean. +func (r *Route) SkipClean() bool { + return r.skipClean +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + + var matchErr error + + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + if _, ok := m.(methodMatcher); ok { + matchErr = ErrMethodMismatch + continue + } + + // Ignore ErrNotFound errors. These errors arise from match call + // to Subrouters. + // + // This prevents subsequent matching subrouters from failing to + // run middleware. If not ignored, the middleware would see a + // non-nil MatchErr and be skipped, even when there was a + // matching route. + if match.MatchErr == ErrNotFound { + match.MatchErr = nil + } + + matchErr = nil + return false + } + } + + if matchErr != nil { + match.MatchErr = matchErr + return false + } + + if match.MatchErr == ErrMethodMismatch && r.handler != nil { + // We found a route which matches request method, clear MatchErr + match.MatchErr = nil + // Then override the mis-matched handler + match.Handler = r.handler + } + + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + + // Set variables. + r.regexp.setMatch(req, match, r) + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// It is an error to call Name more than once on a route. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.namedRoutes[name] = r + } + return r +} + +// GetName returns the name for the route, if any. 
+func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. +func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { + if r.err != nil { + return r.err + } + if typ == regexpTypePath || typ == regexpTypePrefix { + if len(tpl) > 0 && tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ + strictSlash: r.strictSlash, + useEncodedPath: r.useEncodedPath, + }) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if typ == regexpTypeHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if typ == regexpTypeQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithString(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairsToString(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// If the value is an empty string, it will match any value if the key is set. +// Use the start and end of string anchors (^ and $) to match an exact value. 
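+//
+// For instance, to accept only an exact JSON content type:
+//
+//	r.HeadersRegexp("Content-Type", "^application/json$")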
+func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypeHost) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +// Match returns the match for a given request. +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". +func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypePath) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. 
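+//
+// A common pattern is to combine PathPrefix with a subrouter, so that all
+// inner routes share the prefix, e.g.:
+//
+//	r := mux.NewRouter()
+//	api := r.PathPrefix("/api/").Subrouter()
+//	api.HandleFunc("/products", ProductsHandler) // matches "/api/products"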
+func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// If the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + scheme := r.URL.Scheme + // https://golang.org/pkg/net/http/#Request + // "For [most] server requests, fields other than Path and RawQuery will be + // empty." + // Since we're an http muxer, the scheme is either going to be http or https + // though, so we can just set it based on the tls termination state. + if scheme == "" { + if r.TLS == nil { + scheme = "http" + } else { + scheme = "https" + } + } + return matchInArray(m, scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +// If the request's URL has a scheme set, it will be matched against. +// Generally, the URL scheme will only be set if a previous handler set it, +// such as the ProxyHeaders handler from gorilla/handlers. +// If unset, the scheme will be determined based on the request's TLS +// termination state. +// The first argument to Schemes will be used when constructing a route URL. +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + if len(schemes) > 0 { + r.buildScheme = schemes[0] + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + if r.buildVarsFunc != nil { + // compose the old and new functions + old := r.buildVarsFunc + r.buildVarsFunc = func(m map[string]string) map[string]string { + return f(old(m)) + } + } else { + r.buildVarsFunc = f + } + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. 
For example: +// +// r := mux.NewRouter() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + // initialize a subrouter with a copy of the parent route's configuration + router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// The scheme of the resulting url will be the first argument that was passed to Schemes: +// +// // url.String() will be "https://example.com" +// r := mux.NewRouter() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. +func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + var scheme, host, path string + queries := make([]string, 0, len(r.regexp.queries)) + if r.regexp.host != nil { + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + scheme = "http" + if r.buildScheme != "" { + scheme = r.buildScheme + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + for _, q := range r.regexp.queries { + var query string + if query, err = q.url(values); err != nil { + return nil, err + } + queries = append(queries, query) + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + RawQuery: strings.Join(queries, "&"), + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + u := &url.URL{ + Scheme: "http", + Host: host, + } + if r.buildScheme != "" { + u.Scheme = r.buildScheme + } + return u, nil +} + +// URLPath builds the path part of the URL for a route. 
See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetPathRegexp returns the expanded regular expression used to match route path. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathRegexp() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route does not have a path") + } + return r.regexp.path.regexp.String(), nil +} + +// GetQueriesRegexp returns the expanded regular expressions used to match the +// route queries. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not have queries. +func (r *Route) GetQueriesRegexp() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + queries := make([]string, 0, len(r.regexp.queries)) + for _, query := range r.regexp.queries { + queries = append(queries, query.regexp.String()) + } + return queries, nil +} + +// GetQueriesTemplates returns the templates used to build the +// query matching. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define queries. +func (r *Route) GetQueriesTemplates() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + queries := make([]string, 0, len(r.regexp.queries)) + for _, query := range r.regexp.queries { + queries = append(queries, query.template) + } + return queries, nil +} + +// GetMethods returns the methods the route matches against +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if route does not have methods. +func (r *Route) GetMethods() ([]string, error) { + if r.err != nil { + return nil, r.err + } + for _, m := range r.matchers { + if methods, ok := m.(methodMatcher); ok { + return []string(methods), nil + } + } + return nil, errors.New("mux: route doesn't have methods") +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. 
+func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go new file mode 100644 index 000000000..5f5c496de --- /dev/null +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -0,0 +1,19 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import "net/http" + +// SetURLVars sets the URL variables for the given request, to be accessed via +// mux.Vars for testing route behaviour. Arguments are not modified, a shallow +// copy is returned. +// +// This API should only be used for testing purposes; it provides a way to +// inject variables into the request context. Alternatively, URL variables +// can be set by making a route that captures the required variables, +// starting a server and sending the request to that server. +func SetURLVars(r *http.Request, val map[string]string) *http.Request { + return requestWithVars(r, val) +} diff --git a/vendor/github.com/gosuri/uitable/.travis.yml b/vendor/github.com/gosuri/uitable/.travis.yml new file mode 100644 index 000000000..26fc3b438 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/.travis.yml @@ -0,0 +1,6 @@ +language: go +sudo: false +install: + - go get ./... +go: + - tip diff --git a/vendor/github.com/gosuri/uitable/LICENSE b/vendor/github.com/gosuri/uitable/LICENSE new file mode 100644 index 000000000..e436d9043 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/LICENSE @@ -0,0 +1,10 @@ +MIT License +=========== + +Copyright (c) 2015, Greg Osuri + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/gosuri/uitable/Makefile b/vendor/github.com/gosuri/uitable/Makefile new file mode 100644 index 000000000..60246a8a9 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/Makefile @@ -0,0 +1,4 @@ +test: + @go test ./... + +.PHONY: test diff --git a/vendor/github.com/gosuri/uitable/README.md b/vendor/github.com/gosuri/uitable/README.md new file mode 100644 index 000000000..683b538d1 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/README.md @@ -0,0 +1,67 @@ +# uitable [![GoDoc](https://godoc.org/github.com/gosuri/uitable?status.svg)](https://godoc.org/github.com/gosuri/uitable) [![Build Status](https://travis-ci.org/gosuri/uitable.svg?branch=master)](https://travis-ci.org/gosuri/uitable) + +uitable is a go library for representing data as tables for terminal applications. It provides primitives for sizing and wrapping columns to improve readability. + +## Example Usage + +Full source code for the example is available at [example/main.go](example/main.go) + +```go +table := uitable.New() +table.MaxColWidth = 50 + +table.AddRow("NAME", "BIRTHDAY", "BIO") +for _, hacker := range hackers { + table.AddRow(hacker.Name, hacker.Birthday, hacker.Bio) +} +fmt.Println(table) +``` + +Will render the data as: + +```sh +NAME BIRTHDAY BIO +Ada Lovelace December 10, 1815 Ada was a British mathematician and writer, chi... +Alan Turing June 23, 1912 Alan was a British pioneering computer scientis... +``` + +For wrapping in two columns: + +```go +table = uitable.New() +table.MaxColWidth = 80 +table.Wrap = true // wrap columns + +for _, hacker := range hackers { + table.AddRow("Name:", hacker.Name) + table.AddRow("Birthday:", hacker.Birthday) + table.AddRow("Bio:", hacker.Bio) + table.AddRow("") // blank +} +fmt.Println(table) +``` + +Will render the data as: + +``` +Name: Ada Lovelace +Birthday: December 10, 1815 +Bio: Ada was a British mathematician and writer, chiefly known for her work on + Charles Babbage's early mechanical general-purpose computer, the Analytical + Engine + +Name: Alan Turing +Birthday: June 23, 1912 +Bio: Alan was a British pioneering computer scientist, mathematician, logician, + cryptanalyst and theoretical biologist +``` + +## Installation + +``` +$ go get -v github.com/gosuri/uitable +``` + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/gosuri/uitable/trend.png)](https://bitdeli.com/free "Bitdeli Badge") + diff --git a/vendor/github.com/gosuri/uitable/table.go b/vendor/github.com/gosuri/uitable/table.go new file mode 100644 index 000000000..b764e5150 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/table.go @@ -0,0 +1,205 @@ +// Package uitable provides a decorator for formating data as a table +package uitable + +import ( + "fmt" + "strings" + "sync" + + "github.com/fatih/color" + "github.com/gosuri/uitable/util/strutil" + "github.com/gosuri/uitable/util/wordwrap" +) + +// Separator is the default column seperator +var Separator = "\t" + +// Table represents a decorator that renders the data in formatted in a table +type Table struct { + // Rows is the collection of rows in the table + Rows []*Row + + // MaxColWidth is the maximum allowed width for cells in the table + MaxColWidth uint + + // Wrap when set to true wraps the contents of the columns when the length exceeds the MaxColWidth + Wrap bool + + // Separator is the seperator for columns in the table. 
+	Separator string
+
+	mtx        *sync.RWMutex
+	rightAlign map[int]bool
+}
+
+// New returns a new Table with default values
+func New() *Table {
+	return &Table{
+		Separator:  Separator,
+		mtx:        new(sync.RWMutex),
+		rightAlign: map[int]bool{},
+	}
+}
+
+// AddRow adds a new row to the table
+func (t *Table) AddRow(data ...interface{}) *Table {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+	r := NewRow(data...)
+	t.Rows = append(t.Rows, r)
+	return t
+}
+
+// Bytes returns the []byte value of table
+func (t *Table) Bytes() []byte {
+	return []byte(t.String())
+}
+
+func (t *Table) RightAlign(col int) {
+	t.mtx.Lock()
+	t.rightAlign[col] = true
+	t.mtx.Unlock()
+}
+
+// String returns the string value of table
+func (t *Table) String() string {
+	t.mtx.RLock()
+	defer t.mtx.RUnlock()
+
+	if len(t.Rows) == 0 {
+		return ""
+	}
+
+	// determine the width for each column (cell in a row)
+	var colwidths []uint
+	for _, row := range t.Rows {
+		for i, cell := range row.Cells {
+			// resize colwidth array
+			if i+1 > len(colwidths) {
+				colwidths = append(colwidths, 0)
+			}
+			cellwidth := cell.LineWidth()
+			if t.MaxColWidth != 0 && cellwidth > t.MaxColWidth {
+				cellwidth = t.MaxColWidth
+			}
+
+			if cellwidth > colwidths[i] {
+				colwidths[i] = cellwidth
+			}
+		}
+	}
+
+	var lines []string
+	for _, row := range t.Rows {
+		row.Separator = t.Separator
+		for i, cell := range row.Cells {
+			cell.Width = colwidths[i]
+			cell.Wrap = t.Wrap
+			cell.RightAlign = t.rightAlign[i]
+		}
+		lines = append(lines, row.String())
+	}
+	return strutil.Join(lines, "\n")
+}
+
+// Row represents a row in a table
+type Row struct {
+	// Cells is the group of cells for the row
+	Cells []*Cell
+
+	// Separator for tabular columns
+	Separator string
+}
+
+// NewRow returns a new Row and adds the data to the row
+func NewRow(data ...interface{}) *Row {
+	r := &Row{Cells: make([]*Cell, len(data))}
+	for i, d := range data {
+		r.Cells[i] = &Cell{Data: d}
+	}
+	return r
+}
+
+// String returns the string representation of the row
+func (r *Row) String() string {
+	// get the max number of lines for each cell
+	var lc int // line count
+	for _, cell := range r.Cells {
+		if clc := len(strings.Split(cell.String(), "\n")); clc > lc {
+			lc = clc
+		}
+	}
+
+	// allocate a two-dimensional array of cells for each line and size them
+	cells := make([][]*Cell, lc)
+	for x := 0; x < lc; x++ {
+		cells[x] = make([]*Cell, len(r.Cells))
+		for y := 0; y < len(r.Cells); y++ {
+			cells[x][y] = &Cell{Width: r.Cells[y].Width}
+		}
+	}
+
+	// insert each line in a cell as new cell in the cells array
+	for y, cell := range r.Cells {
+		lines := strings.Split(cell.String(), "\n")
+		for x, line := range lines {
+			cells[x][y].Data = line
+		}
+	}
+
+	// format each line
+	lines := make([]string, lc)
+	for x := range lines {
+		line := make([]string, len(cells[x]))
+		for y := range cells[x] {
+			line[y] = cells[x][y].String()
+		}
+		lines[x] = strutil.Join(line, r.Separator)
+	}
+	return strings.Join(lines, "\n")
+}
+
+// Cell represents a column in a row
+type Cell struct {
+	// Width is the width of the cell
+	Width uint
+
+	// Wrap when true wraps the contents of the cell when the length exceeds the width
+	Wrap bool
+
+	// RightAlign when true aligns contents to the right
+	RightAlign bool
+
+	// Data is the cell data
+	Data interface{}
+}
+
+// LineWidth returns the max width of all the lines in a cell
+func (c *Cell) LineWidth() uint {
+	width := 0
+	for _, s := range strings.Split(c.String(), "\n") {
+		w := strutil.StringWidth(s)
+		if w > width {
+			width = w
} + } + return uint(width) +} + +// String returns the string formated representation of the cell +func (c *Cell) String() string { + if c.Data == nil { + return strutil.PadLeft(" ", int(c.Width), ' ') + } + col := color.New(color.FgBlack) + col.DisableColor() + s := fmt.Sprintf("%v", col.Sprint(c.Data)) + if c.Width > 0 { + if c.Wrap && uint(len(s)) > c.Width { + return wordwrap.WrapString(s, c.Width) + } else { + return strutil.Resize(s, c.Width, c.RightAlign) + } + } + return s +} diff --git a/vendor/github.com/gosuri/uitable/util/strutil/strutil.go b/vendor/github.com/gosuri/uitable/util/strutil/strutil.go new file mode 100644 index 000000000..53f30a8fd --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/strutil/strutil.go @@ -0,0 +1,101 @@ +// Package strutil provides various utilities for manipulating strings +package strutil + +import ( + "bytes" + "regexp" + + "github.com/mattn/go-runewidth" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var re = regexp.MustCompile(ansi) + +// PadRight returns a new string of a specified length in which the end of the current string is padded with spaces or with a specified Unicode character. +func PadRight(str string, length int, pad byte) string { + slen := StringWidth(str) + if slen >= length { + return str + } + buf := bytes.NewBufferString(str) + for i := 0; i < length-slen; i++ { + buf.WriteByte(pad) + } + return buf.String() +} + +// PadLeft returns a new string of a specified length in which the beginning of the current string is padded with spaces or with a specified Unicode character. +func PadLeft(str string, length int, pad byte) string { + slen := StringWidth(str) + if slen >= length { + return str + } + var buf bytes.Buffer + for i := 0; i < length-slen; i++ { + buf.WriteByte(pad) + } + buf.WriteString(str) + return buf.String() +} + +// Resize resizes the string with the given length. It ellipses with '...' when the string's length exceeds +// the desired length or pads spaces to the right of the string when length is smaller than desired +func Resize(s string, length uint, rightAlign bool) string { + slen := StringWidth(s) + n := int(length) + if slen == n { + return s + } + // Pads only when length of the string smaller than len needed + if rightAlign { + s = PadLeft(s, n, ' ') + } else { + s = PadRight(s, n, ' ') + } + if slen > n { + rs := []rune(s) + var buf bytes.Buffer + w := 0 + for _, r := range rs { + buf.WriteRune(r) + rw := RuneWidth(r) + if w+rw >= n-3 { + break + } + w += rw + } + buf.WriteString("...") + s = buf.String() + } + return s +} + +// Join joins the list of the string with the delim provided. 
+// Returns an empty string for empty list +func Join(list []string, delim string) string { + if len(list) == 0 { + return "" + } + var buf bytes.Buffer + for i := 0; i < len(list)-1; i++ { + buf.WriteString(list[i] + delim) + } + buf.WriteString(list[len(list)-1]) + return buf.String() +} + +// Strip strips the string of all colors +func Strip(s string) string { + return re.ReplaceAllString(s, "") +} + +// StringWidth returns the actual width of the string without colors +func StringWidth(s string) int { + return runewidth.StringWidth(Strip(s)) +} + +// RuneWidth returns the actual width of the rune +func RuneWidth(s rune) int { + return runewidth.RuneWidth(s) +} diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md b/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/README.md b/vendor/github.com/gosuri/uitable/util/wordwrap/README.md new file mode 100644 index 000000000..60ae31170 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: whenever there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. 
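As the README above notes, wrapping is naive and happens only at whitespace. A minimal sketch of the overlong-word case it warns about (using the vendored import path under gosuri/uitable):

```go
package main

import (
	"fmt"

	"github.com/gosuri/uitable/util/wordwrap"
)

func main() {
	// Breaks only at whitespace: "hello" and "world" fit within the
	// 10-column limit, but the oversized middle word is never split,
	// so it lands on its own line and reaches past the limit.
	fmt.Println(wordwrap.WrapString("hello incomprehensibilities world", 10))
}
```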
diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go b/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go new file mode 100644 index 000000000..b2c63b879 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go @@ -0,0 +1,85 @@ +// Package wordwrap provides methods for wrapping the contents of a string +package wordwrap + +import ( + "bytes" + "unicode" + + "github.com/gosuri/uitable/util/strutil" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + var wordWidth, spaceWidth int + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceWidth) > lim { + current = 0 + } else { + current += uint(spaceWidth) + spaceBuf.WriteTo(buf) + spaceWidth += strutil.StringWidth(buf.String()) + } + spaceBuf.Reset() + spaceWidth = 0 + } else { + current += uint(spaceWidth + wordWidth) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + spaceWidth = 0 + wordWidth = 0 + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceWidth + wordWidth) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + spaceWidth = 0 + wordWidth = 0 + } + + spaceBuf.WriteRune(char) + spaceWidth += strutil.RuneWidth(char) + } else { + wordBuf.WriteRune(char) + wordWidth += strutil.RuneWidth(char) + + if current+uint(spaceWidth+wordWidth) > lim && uint(wordWidth) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + spaceWidth = 0 + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceWidth) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml new file mode 100644 index 000000000..b5ffbe03d --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet . + - go test -v -race ./... 
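The gregjones/httpcache package vendored in the following hunks exposes its cache as an `http.RoundTripper`. A minimal sketch of client-side wiring (the URL is illustrative, and whether the second request is actually served from the cache depends on the server's caching headers):

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	// NewMemoryCacheTransport wraps the default transport with an
	// in-memory Cache; cache hits are marked with the X-From-Cache header.
	client := httpcache.NewMemoryCacheTransport().Client()

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/") // illustrative URL
		if err != nil {
			panic(err)
		}
		// Drain the body: GET responses are written to the cache on EOF.
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
		// Empty on a network fetch; "1" on a cache hit (assuming the
		// response was cacheable in the first place).
		fmt.Println(resp.Header.Get(httpcache.XFromCache))
	}
}
```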
diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt
new file mode 100644
index 000000000..81316beb0
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt
@@ -0,0 +1,7 @@
+Copyright © 2012 Greg Jones (greg.jones@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md
new file mode 100644
index 000000000..09c9e7c17
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/README.md
@@ -0,0 +1,25 @@
+httpcache
+=========
+
+[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)
+
+Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses.
+
+It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
+
+Cache Backends
+--------------
+
+- The built-in 'memory' cache stores responses in an in-memory map.
+- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
+- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
+- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
+- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
+- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
+- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
+- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork).
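Any type implementing the three-method `Cache` interface (defined in `httpcache.go` below) can serve as one of these backends. A minimal sketch with a hypothetical no-op backend:

```go
package main

import "github.com/gregjones/httpcache"

// noopCache is a hypothetical backend: it satisfies httpcache.Cache
// but never retains anything, so every request goes to the network.
type noopCache struct{}

func (noopCache) Get(key string) ([]byte, bool)        { return nil, false }
func (noopCache) Set(key string, responseBytes []byte) {}
func (noopCache) Delete(key string)                    {}

func main() {
	// Wire the backend into a caching RoundTripper.
	transport := httpcache.NewTransport(noopCache{})
	_ = transport.Client()
}
```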
+
+License
+-------
+
+- [MIT License](LICENSE.txt)
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go
new file mode 100644
index 000000000..f6a2ec4a5
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/httpcache.go
@@ -0,0 +1,551 @@
+// Package httpcache provides a http.RoundTripper implementation that works as a
+// mostly RFC-compliant cache for http responses.
+//
+// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
+// and not for a shared proxy).
+//
+package httpcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httputil"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	stale = iota
+	fresh
+	transparent
+	// XFromCache is the header added to responses that are returned from the cache
+	XFromCache = "X-From-Cache"
+)
+
+// A Cache interface is used by the Transport to store and retrieve responses.
+type Cache interface {
+	// Get returns the []byte representation of a cached response and a bool
+	// set to true if the value isn't empty
+	Get(key string) (responseBytes []byte, ok bool)
+	// Set stores the []byte representation of a response against a key
+	Set(key string, responseBytes []byte)
+	// Delete removes the value associated with the key
+	Delete(key string)
+}
+
+// cacheKey returns the cache key for req.
+func cacheKey(req *http.Request) string {
+	if req.Method == http.MethodGet {
+		return req.URL.String()
+	} else {
+		return req.Method + " " + req.URL.String()
+	}
+}
+
+// CachedResponse returns the cached http.Response for req if present, and nil
+// otherwise.
+func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
+	cachedVal, ok := c.Get(cacheKey(req))
+	if !ok {
+		return
+	}
+
+	b := bytes.NewBuffer(cachedVal)
+	return http.ReadResponse(bufio.NewReader(b), req)
+}
+
+// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
+type MemoryCache struct { + mu sync.RWMutex + items map[string][]byte +} + +// Get returns the []byte representation of the response and true if present, false if not +func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { + c.mu.RLock() + resp, ok = c.items[key] + c.mu.RUnlock() + return resp, ok +} + +// Set saves response resp to the cache with key +func (c *MemoryCache) Set(key string, resp []byte) { + c.mu.Lock() + c.items[key] = resp + c.mu.Unlock() +} + +// Delete removes key from the cache +func (c *MemoryCache) Delete(key string) { + c.mu.Lock() + delete(c.items, key) + c.mu.Unlock() +} + +// NewMemoryCache returns a new Cache that will store items in an in-memory map +func NewMemoryCache() *MemoryCache { + c := &MemoryCache{items: map[string][]byte{}} + return c +} + +// Transport is an implementation of http.RoundTripper that will return values from a cache +// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) +// to repeated requests allowing servers to return 304 / Not Modified +type Transport struct { + // The RoundTripper interface actually used to make requests + // If nil, http.DefaultTransport is used + Transport http.RoundTripper + Cache Cache + // If true, responses returned from the cache will be given an extra header, X-From-Cache + MarkCachedResponses bool +} + +// NewTransport returns a new Transport with the +// provided Cache implementation and MarkCachedResponses set to true +func NewTransport(c Cache) *Transport { + return &Transport{Cache: c, MarkCachedResponses: true} +} + +// Client returns an *http.Client that caches responses. +func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// varyMatches will return false unless all of the cached values for the headers listed in Vary +// match the new request +func varyMatches(cachedResp *http.Response, req *http.Request) bool { + for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { + header = http.CanonicalHeaderKey(header) + if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { + return false + } + } + return true +} + +// RoundTrip takes a Request and returns a Response +// +// If there is a fresh Response already in cache, then it will be returned without connecting to +// the server. +// +// If there is a stale Response, then any validators it contains will be set on the new request +// to give the server a chance to respond with NotModified. If this happens, then the cached Response +// will be returned. 
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. + resp.Body = &cachingReadCloser{ + R: resp.Body, + OnEOF: func(r io.Reader) { + resp := *resp + resp.Body = ioutil.NopCloser(r) + respBytes, err := httputil.DumpResponse(&resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + }, + } + default: + respBytes, err := httputil.DumpResponse(resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + } + } else { + t.Cache.Delete(cacheKey) + } + return resp, nil +} + +// ErrNoDateHeader indicates that the HTTP headers contained no Date header. +var ErrNoDateHeader = errors.New("no Date header") + +// Date parses and returns the value of the Date header. 
+func Date(respHeaders http.Header) (date time.Time, err error) { + dateHeader := respHeaders.Get("date") + if dateHeader == "" { + err = ErrNoDateHeader + return + } + + return time.Parse(time.RFC1123, dateHeader) +} + +type realClock struct{} + +func (c *realClock) since(d time.Time) time.Duration { + return time.Since(d) +} + +type timer interface { + since(d time.Time) time.Duration +} + +var clock timer = &realClock{} + +// getFreshness will return one of fresh/stale/transparent based on the cache-control +// values of the request and the response +// +// fresh indicates the response can be returned +// stale indicates that the response needs validating before it is returned +// transparent indicates the response should not be used to fulfil the request +// +// Because this is only a private cache, 'public' and 'private' in cache-control aren't +// signficant. Similarly, smax-age isn't used. +func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + if _, ok := reqCacheControl["no-cache"]; ok { + return transparent + } + if _, ok := respCacheControl["no-cache"]; ok { + return stale + } + if _, ok := reqCacheControl["only-if-cached"]; ok { + return fresh + } + + date, err := Date(respHeaders) + if err != nil { + return stale + } + currentAge := clock.since(date) + + var lifetime time.Duration + var zeroDuration time.Duration + + // If a response includes both an Expires header and a max-age directive, + // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. + if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } else { + expiresHeader := respHeaders.Get("Expires") + if expiresHeader != "" { + expires, err := time.Parse(time.RFC1123, expiresHeader) + if err != nil { + lifetime = zeroDuration + } else { + lifetime = expires.Sub(date) + } + } + } + + if maxAge, ok := reqCacheControl["max-age"]; ok { + // the client is willing to accept a response whose age is no greater than the specified time in seconds + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } + if minfresh, ok := reqCacheControl["min-fresh"]; ok { + // the client wants a response that will still be fresh for at least the specified number of seconds. + minfreshDuration, err := time.ParseDuration(minfresh + "s") + if err == nil { + currentAge = time.Duration(currentAge + minfreshDuration) + } + } + + if maxstale, ok := reqCacheControl["max-stale"]; ok { + // Indicates that the client is willing to accept a response that has exceeded its expiration time. + // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded + // its expiration time by no more than the specified number of seconds. + // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. + // + // Responses served only because of a max-stale value are supposed to have a Warning header added to them, + // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different + // return-value available here. 
+ if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader, _ := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. +func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor†+ + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version†+ + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution†+ + means Covered Software of a particular Contributor. + +1.4. “Covered Software†+ + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. 
“Incompatible With Secondary Licenses†+ means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form†+ + means any form of the work other than Source Code Form. + +1.7. “Larger Work†+ + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License†+ + means this document. + +1.9. “Licensable†+ + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications†+ + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims†of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License†+ + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form†+ + means the form of the work preferred for making modifications. + +1.14. “You†(or “Yourâ€) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You†includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control†means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. 
for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is†basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses†Notice + + This Source Code Form is “Incompatible + With Secondary Licensesâ€, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 000000000..444df08f8 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. This would return nil if that specific error doesn't + // exist. + perr := errwrap.GetType(err, new(os.PathError)) +} +``` + +#### Custom Types + +If you're already making custom types that properly wrap errors, then +you can get all the functionality of `errwraps.Contains` and such by +implementing the `Wrapper` interface with just one function. 
Example:

```go
type AppError struct {
	Code ErrorCode
	Err  error
}

func (e *AppError) WrappedErrors() []error {
	return []error{e.Err}
}
```

Now this works:

```go
err := &AppError{Err: fmt.Errorf("an error")}
if errwrap.ContainsType(err, fmt.Errorf("")) {
	// This will work!
}
```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 000000000..44e368e56
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,178 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+	WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+	return &wrappedError{
+		Outer: outer,
+		Inner: inner,
+	}
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
+//
+// Deprecated: Use fmt.Errorf()
+func Wrapf(format string, err error) error {
+	outerMsg := ""
+	if err != nil {
+		outerMsg = err.Error()
+	}
+
+	outer := errors.New(strings.Replace(
+		format, "{{err}}", outerMsg, -1))
+
+	return Wrap(outer, err)
+}
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+	return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+	return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+	es := GetAll(err, msg)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error {
+	es := GetAllType(err, v)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetAll gets all the errors that might be wrapped in err with the
+// given message. The order of the errors is such that the outermost
+// matching error (the most recent wrap) is index zero, and so on.
+func GetAll(err error, msg string) []error {
+ var result []error
+
+ Walk(err, func(err error) {
+ if err.Error() == msg {
+ result = append(result, err)
+ }
+ })
+
+ return result
+}
+
+// GetAllType gets all the errors that are the same type as v.
+//
+// The order of the return value is the same as described in GetAll.
+func GetAllType(err error, v interface{}) []error {
+ var result []error
+
+ var search string
+ if v != nil {
+ search = reflect.TypeOf(v).String()
+ }
+ Walk(err, func(err error) {
+ var needle string
+ if err != nil {
+ needle = reflect.TypeOf(err).String()
+ }
+
+ if needle == search {
+ result = append(result, err)
+ }
+ })
+
+ return result
+}
+
+// Walk walks all the wrapped errors in err and calls the callback. If
+// err isn't a wrapped error, this will be called once for err. If err
+// is a wrapped error, the callback will be called for both the wrapper
+// that implements error as well as the wrapped error itself.
+func Walk(err error, cb WalkFunc) {
+ if err == nil {
+ return
+ }
+
+ switch e := err.(type) {
+ case *wrappedError:
+ cb(e.Outer)
+ Walk(e.Inner, cb)
+ case Wrapper:
+ cb(err)
+
+ for _, err := range e.WrappedErrors() {
+ Walk(err, cb)
+ }
+ case interface{ Unwrap() error }:
+ cb(err)
+ Walk(e.Unwrap(), cb)
+ default:
+ cb(err)
+ }
+}
+
+// wrappedError is an implementation of error that has both the
+// outer and inner errors.
+type wrappedError struct {
+ Outer error
+ Inner error
+}
+
+func (w *wrappedError) Error() string {
+ return w.Outer.Error()
+}
+
+func (w *wrappedError) WrappedErrors() []error {
+ return []error{w.Outer, w.Inner}
+}
+
+func (w *wrappedError) Unwrap() error {
+ return w.Inner
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
new file mode 100644
index 000000000..82b4de97c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile
new file mode 100644
index 000000000..b97cd6ed0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/Makefile
@@ -0,0 +1,31 @@
+TEST?=./...
+
+default: test
+
+# test runs the test suite and vets the code.
+test: generate
+ @echo "==> Running tests..."
+ @go list $(TEST) \
+ | grep -v "/vendor/" \
+ | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
+
+# testrace runs the race checker
+testrace: generate
+ @echo "==> Running tests (race)..."
+ @go list $(TEST) \
+ | grep -v "/vendor/" \
+ | xargs -n1 go test -timeout=60s -race ${TESTARGS}
+
+# updatedeps installs all the dependencies needed to run and build.
+updatedeps:
+ @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
+
+# generate runs `go generate` to build the dynamically generated source files.
+generate:
+ @echo "==> Generating..."
+ @find . -type f -name '.DS_Store' -delete
+ @go list ./... \
+ | grep -v "/vendor/" \
+ | xargs -n1 go generate
+
+.PHONY: default test testrace updatedeps generate
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
new file mode 100644
index 000000000..71dd308ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -0,0 +1,150 @@
+# go-multierror
+
+[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror)
+[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror)
+![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror)
+
+[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror
+[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror
+
+`go-multierror` is a package for Go that provides a mechanism for
+representing a list of `error` values as a single `error`.
+
+This allows a function in Go to return an `error` that might actually
+be a list of errors. If the caller knows this, they can unwrap the
+list and access the errors. If the caller doesn't know, the error
+formats to a nice human-readable format.
+
+`go-multierror` is fully compatible with the Go standard library
+[errors](https://golang.org/pkg/errors/) package, including the
+functions `As`, `Is`, and `Unwrap`. This provides a standardized approach
+for introspecting on error values.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-multierror`.
+
+Full documentation is available at
+https://pkg.go.dev/github.com/hashicorp/go-multierror
+
+### Requires go version 1.13 or newer
+
+`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced
+[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which
+this library takes advantage of.
+
+If you need to use an earlier version of go, you can use the
+[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0)
+tag, which doesn't rely on features in go 1.13.
+ +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 000000000..3e2589bfd --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,43 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. 
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+// Any nil errors within errs will be ignored. If err is nil, a new
+// *Error will be returned.
+func Append(err error, errs ...error) *Error {
+ switch err := err.(type) {
+ case *Error:
+ // Typed nils can reach here, so initialize if we are nil
+ if err == nil {
+ err = new(Error)
+ }
+
+ // Go through each error and flatten
+ for _, e := range errs {
+ switch e := e.(type) {
+ case *Error:
+ if e != nil {
+ err.Errors = append(err.Errors, e.Errors...)
+ }
+ default:
+ if e != nil {
+ err.Errors = append(err.Errors, e)
+ }
+ }
+ }
+
+ return err
+ default:
+ newErrs := make([]error, 0, len(errs)+1)
+ if err != nil {
+ newErrs = append(newErrs, err)
+ }
+ newErrs = append(newErrs, errs...)
+
+ return Append(&Error{}, newErrs...)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 000000000..aab8e9abe
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
+func Flatten(err error) error {
+ // If it isn't an *Error, just return the error as-is
+ if _, ok := err.(*Error); !ok {
+ return err
+ }
+
+ // Otherwise, make the result and flatten away!
+ flatErr := new(Error)
+ flatten(err, flatErr)
+ return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+ switch err := err.(type) {
+ case *Error:
+ for _, e := range err.Errors {
+ flatten(e, flatErr)
+ }
+ default:
+ flatErr.Errors = append(flatErr.Errors, err)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 000000000..47f13c49a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
+ }
+
+ points := make([]string, len(es))
+ for i, err := range es {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ return fmt.Sprintf(
+ "%d errors occurred:\n\t%s\n\n",
+ len(es), strings.Join(points, "\n\t"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go
new file mode 100644
index 000000000..9c29efb7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/group.go
@@ -0,0 +1,38 @@
+package multierror
+
+import "sync"
+
+// Group is a collection of goroutines which return errors that need to be
+// coalesced.
+type Group struct {
+ mutex sync.Mutex
+ err *Error
+ wg sync.WaitGroup
+}
+
+// Go calls the given function in a new goroutine.
+//
+// If the function returns an error it is added to the group multierror which
+// is returned by Wait.
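+//
+// A minimal usage sketch (step1 and step2 are hypothetical functions
+// returning error):
+//
+//     var g multierror.Group
+//     g.Go(func() error { return step1() })
+//     g.Go(func() error { return step2() })
+//     if err := g.Wait().ErrorOrNil(); err != nil {
+//         // err aggregates every non-nil error returned above
+//     }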
+func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 000000000..f54574326 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,121 @@ +package multierror + +import ( + "errors" + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. +// +// This method is not safe to be called concurrently. Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + return e.Errors +} + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. 
Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. +func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 000000000..5c477abe4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. +func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 000000000..fecb14e81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md index e809c79ab..750c3c7eb 100644 --- a/vendor/github.com/huandu/xstrings/README.md +++ b/vendor/github.com/huandu/xstrings/README.md @@ -39,8 +39,8 @@ _Keep this table sorted by Function in ascending order._ | [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | | [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | | [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | -| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| 
[FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | | [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | | [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | | [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | @@ -50,15 +50,14 @@ _Keep this table sorted by Function in ascending order._ | [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | | [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | | [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | -| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | | [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | | [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | | [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | | [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | | [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | | [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | -| [ToPascalCase](https://godoc.org/github.com/huandu/xstrings#ToPascalCase) | - | [#1](https://github.com/huandu/xstrings/issues/1) | | [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | | [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in 
PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | | [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go index 5d8cfee47..cba0d0725 100644 --- a/vendor/github.com/huandu/xstrings/convert.go +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -13,37 +13,17 @@ import ( // // Some samples. // -// "some_words" => "someWords" -// "http_server" => "httpServer" -// "no_https" => "noHttps" -// "_complex__case_" => "_complex_Case_" -// "some words" => "someWords" -// "GOLANG_IS_GREAT" => "golangIsGreat" -func ToCamelCase(str string) string { - return toCamelCase(str, false) -} - -// ToPascalCase is to convert words separated by space, underscore and hyphen to pascal case. -// -// Some samples. -// // "some_words" => "SomeWords" // "http_server" => "HttpServer" // "no_https" => "NoHttps" // "_complex__case_" => "_Complex_Case_" // "some words" => "SomeWords" -// "GOLANG_IS_GREAT" => "GolangIsGreat" -func ToPascalCase(str string) string { - return toCamelCase(str, true) -} - -func toCamelCase(str string, isBig bool) string { +func ToCamelCase(str string) string { if len(str) == 0 { return "" } buf := &stringBuilder{} - var isFirstRuneUpper bool var r0, r1 rune var size int @@ -53,14 +33,7 @@ func toCamelCase(str string, isBig bool) string { str = str[size:] if !isConnector(r0) { - isFirstRuneUpper = unicode.IsUpper(r0) - - if isBig { - r0 = unicode.ToUpper(r0) - } else { - r0 = unicode.ToLower(r0) - } - + r0 = unicode.ToUpper(r0) break } @@ -87,25 +60,12 @@ func toCamelCase(str string, isBig bool) string { } if isConnector(r1) { - isFirstRuneUpper = unicode.IsUpper(r0) r0 = unicode.ToUpper(r0) } else { - if isFirstRuneUpper { - if unicode.IsUpper(r0) { - r0 = unicode.ToLower(r0) - } else { - isFirstRuneUpper = false - } - } - buf.WriteRune(r1) } } - if isFirstRuneUpper && !isBig { - r0 = unicode.ToLower(r0) - } - buf.WriteRune(r0) return buf.String() } diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index dad29725f..d324c43ba 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 876abb500..7e6f7aeee 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -8,8 +8,7 @@ [![Coverage Status][9]][10] [![Sourcegraph][11]][12] [![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] +[![Become my sponsor][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -25,8 +24,8 @@ [12]: https://sourcegraph.com/github.com/imdario/mergo?badge [13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. 
@@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. @@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) Donate using Liberapay +Become my sponsor ### Mergo in the wild +- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) @@ -97,7 +96,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [janoszen/containerssh](https://github.com/janoszen/containerssh) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) ## Install @@ -168,7 +169,7 @@ func main() { Note: if test are failing due missing package, please execute: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 ### Transformers @@ -218,7 +219,6 @@ func main() { } ``` - ## Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) @@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). 
-## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index afa84a1e2..8b4e2f47a 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co visited[h] = &visit{addr, typ, seen} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return @@ -95,13 +95,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } if src.Kind() != reflect.Map { diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 3cc926c7f..9fe362d47 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -17,7 +17,7 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ErrNonPointerAgument = errors.New("dst must be a pointer") @@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore new file mode 100644 index 000000000..b2be23c87 --- /dev/null +++ 
b/vendor/github.com/jmoiron/sqlx/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.idea + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +tags +environ diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml new file mode 100644 index 000000000..1cfa28cb3 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.travis.yml @@ -0,0 +1,26 @@ +# vim: ft=yaml sw=2 ts=2 + +language: go + +# enable database services +services: + - mysql + - postgresql + +# create test database +before_install: + - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;' + - psql -c 'create database sqlxtest;' -U postgres + - go get github.com/mattn/goveralls + - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true" + - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable" + - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db" + +# go versions to test +go: + - "1.15.x" + - "1.16.x" + +# run tests w/ coverage +script: + - travis_retry $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE new file mode 100644 index 000000000..0d31edfa7 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/LICENSE @@ -0,0 +1,23 @@ + Copyright (c) 2013, Jason Moiron + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md new file mode 100644 index 000000000..0d7159290 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/README.md @@ -0,0 +1,213 @@ +# sqlx + +[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) + +sqlx is a library which provides a set of extensions on go's standard +`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`, +et al. 
all leave the underlying interfaces untouched, so that their interfaces
+are a superset of the standard ones. This makes it relatively painless to
+integrate existing codebases using database/sql with sqlx.
+
+Major additional concepts are:
+
+* Marshal rows into structs (with embedded struct support), maps, and slices
+* Named parameter support including prepared statements
+* `Get` and `Select` to go quickly from query to struct/slice
+
+In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
+there is also some [user documentation](http://jmoiron.github.io/sqlx/) that
+explains how to use `database/sql` along with sqlx.
+
+## Recent Changes
+
+1.3.0:
+
+* `sqlx.DB.Connx(context.Context) *sqlx.Conn`
+* `sqlx.BindDriver(driverName, bindType)`
+* support for `[]map[string]interface{}` to do "batch" insertions
+* allocation & perf improvements for `sqlx.In`
+
+DB.Connx returns an `sqlx.Conn`, which is an `sql.Conn`-alike consistent with
+sqlx's wrapping of other types.
+
+`BindDriver` allows users to control the bindvars that sqlx will use for drivers,
+and add new drivers at runtime. This results in a very slight performance hit
+when resolving the driver into a bind type (~40ns per call), but it allows users
+to specify what bindtype their driver uses even when sqlx has not been updated
+to know about it by default.
+
+### Backwards Compatibility
+
+Compatibility with the most recent two versions of Go is a requirement for any
+new changes. Compatibility beyond that is not guaranteed.
+
+Versioning is done with Go modules. Breaking changes (e.g. removing deprecated API)
+will get major version number bumps.
+
+## install
+
+    go get github.com/jmoiron/sqlx
+
+## issues
+
+Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
+`Columns()` does not fully qualify column names in queries like:
+
+```sql
+SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
+```
+
+making a struct or map destination ambiguous. Use `AS` in your queries
+to give columns distinct names, `rows.Scan` to scan them manually, or
+`SliceScan` to get a slice of results.
+
+## usage
+
+Below is an example which shows some common use cases for sqlx. Check
+[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
+usage.
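+
+First, a smaller illustrative sketch of the bindvar helpers this package
+ships (`In`, `Rebind`, and `BindType`; see bind.go). The table and driver
+name here are assumptions for illustration, and `db` stands for a *sqlx.DB
+opened as in the full example that follows:
+
+```go
+// Expand the slice into one "?" bindvar per element, then rewrite the
+// placeholders for the postgres driver ($1, $2, ...).
+query, args, err := sqlx.In("SELECT * FROM place WHERE telcode IN (?)", []int{852, 65})
+if err != nil {
+    log.Fatalln(err)
+}
+query = sqlx.Rebind(sqlx.BindType("postgres"), query)
+rows, err := db.Queryx(query, args...)
+```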
+ + +```go +package main + +import ( + "database/sql" + "fmt" + "log" + + _ "github.com/lib/pq" + "github.com/jmoiron/sqlx" +) + +var schema = ` +CREATE TABLE person ( + first_name text, + last_name text, + email text +); + +CREATE TABLE place ( + country text, + city text NULL, + telcode integer +)` + +type Person struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string +} + +type Place struct { + Country string + City sql.NullString + TelCode int +} + +func main() { + // this Pings the database trying to connect + // use sqlx.Open() for sql.Open() semantics + db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") + if err != nil { + log.Fatalln(err) + } + + // exec the schema or fail; multi-statement Exec behavior varies between + // database drivers; pq will exec them all, sqlite3 won't, ymmv + db.MustExec(schema) + + tx := db.MustBegin() + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") + tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") + // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person + tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) + tx.Commit() + + // Query the database, storing results in a []Person (wrapped in []interface{}) + people := []Person{} + db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") + jason, john := people[0], people[1] + + fmt.Printf("%#v\n%#v", jason, john) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} + + // You can also get a single result, a la QueryRow + jason = Person{} + err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") + fmt.Printf("%#v\n", jason) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + + // if you have null fields and use SELECT *, you must use sql.Null* in your struct + places := []Place{} + err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + fmt.Println(err) + return + } + usa, singsing, honkers := places[0], places[1], places[2] + + fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + + // Loop through rows using only one struct + place := Place{} + rows, err := db.Queryx("SELECT * FROM place") + for rows.Next() { + err := rows.StructScan(&place) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("%#v\n", place) + } + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + // Place{Country:"Singapore", 
City:sql.NullString{String:"", Valid:false}, TelCode:65} + + // Named queries, using `:name` as the bindvar. Automatic bindvar support + // which takes into account the dbtype based on the driverName on sqlx.Open/Connect + _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, + map[string]interface{}{ + "first": "Bin", + "last": "Smuth", + "email": "bensmith@allblacks.nz", + }) + + // Selects Mr. Smith from the database + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) + + // Named queries can also use structs. Their bind names follow the same rules + // as the name -> db mapping, so struct fields are lowercased and the `db` tag + // is taken into consideration. + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) + + + // batch insert + + // batch insert with structs + personStructs := []Person{ + {FirstName: "Ardie", LastName: "Savea", Email: "asavea@ab.co.nz"}, + {FirstName: "Sonny Bill", LastName: "Williams", Email: "sbw@ab.co.nz"}, + {FirstName: "Ngani", LastName: "Laumape", Email: "nlaumape@ab.co.nz"}, + } + + _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`, personStructs) + + // batch insert with maps + personMaps := []map[string]interface{}{ + {"first_name": "Ardie", "last_name": "Savea", "email": "asavea@ab.co.nz"}, + {"first_name": "Sonny Bill", "last_name": "Williams", "email": "sbw@ab.co.nz"}, + {"first_name": "Ngani", "last_name": "Laumape", "email": "nlaumape@ab.co.nz"}, + } + + _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`, personMaps) +} +``` diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go new file mode 100644 index 000000000..ec0da4e72 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/bind.go @@ -0,0 +1,265 @@ +package sqlx + +import ( + "bytes" + "database/sql/driver" + "errors" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Bindvar types supported by Rebind, BindMap and BindStruct. +const ( + UNKNOWN = iota + QUESTION + DOLLAR + NAMED + AT +) + +var defaultBinds = map[int][]string{ + DOLLAR: []string{"postgres", "pgx", "pq-timeouts", "cloudsqlpostgres", "ql", "nrpostgres", "cockroach"}, + QUESTION: []string{"mysql", "sqlite3", "nrmysql", "nrsqlite3"}, + NAMED: []string{"oci8", "ora", "goracle", "godror"}, + AT: []string{"sqlserver"}, +} + +var binds sync.Map + +func init() { + for bind, drivers := range defaultBinds { + for _, driver := range drivers { + BindDriver(driver, bind) + } + } + +} + +// BindType returns the bindtype for a given database given a drivername. +func BindType(driverName string) int { + itype, ok := binds.Load(driverName) + if !ok { + return UNKNOWN + } + return itype.(int) +} + +// BindDriver sets the BindType for driverName to bindType. +func BindDriver(driverName string, bindType int) { + binds.Store(driverName, bindType) +} + +// FIXME: this should be able to be tolerant of escaped ?'s in queries without +// losing much speed, and should be to avoid confusion. + +// Rebind a query from the default bindtype (QUESTION) to the target bindtype. 
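+//
+// An illustrative call (hypothetical query):
+//
+//     Rebind(DOLLAR, "SELECT * FROM t WHERE a = ? AND b = ?")
+//
+// returns "SELECT * FROM t WHERE a = $1 AND b = $2".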
+func Rebind(bindType int, query string) string { + switch bindType { + case QUESTION, UNKNOWN: + return query + } + + // Add space enough for 10 params before we have to allocate + rqb := make([]byte, 0, len(query)+10) + + var i, j int + + for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { + rqb = append(rqb, query[:i]...) + + switch bindType { + case DOLLAR: + rqb = append(rqb, '$') + case NAMED: + rqb = append(rqb, ':', 'a', 'r', 'g') + case AT: + rqb = append(rqb, '@', 'p') + } + + j++ + rqb = strconv.AppendInt(rqb, int64(j), 10) + + query = query[i+1:] + } + + return string(append(rqb, query...)) +} + +// Experimental implementation of Rebind which uses a bytes.Buffer. The code is +// much simpler and should be more resistant to odd unicode, but it is twice as +// slow. Kept here for benchmarking purposes and to possibly replace Rebind if +// problems arise with its somewhat naive handling of unicode. +func rebindBuff(bindType int, query string) string { + if bindType != DOLLAR { + return query + } + + b := make([]byte, 0, len(query)) + rqb := bytes.NewBuffer(b) + j := 1 + for _, r := range query { + if r == '?' { + rqb.WriteRune('$') + rqb.WriteString(strconv.Itoa(j)) + j++ + } else { + rqb.WriteRune(r) + } + } + + return rqb.String() +} + +func asSliceForIn(i interface{}) (v reflect.Value, ok bool) { + if i == nil { + return reflect.Value{}, false + } + + v = reflect.ValueOf(i) + t := reflectx.Deref(v.Type()) + + // Only expand slices + if t.Kind() != reflect.Slice { + return reflect.Value{}, false + } + + // []byte is a driver.Value type so it should not be expanded + if t == reflect.TypeOf([]byte{}) { + return reflect.Value{}, false + + } + + return v, true +} + +// In expands slice values in args, returning the modified query string +// and a new arg list that can be executed by a database. The `query` should +// use the `?` bindVar. The return value uses the `?` bindVar. +func In(query string, args ...interface{}) (string, []interface{}, error) { + // argMeta stores reflect.Value and length for slices and + // the value itself for non-slice arguments + type argMeta struct { + v reflect.Value + i interface{} + length int + } + + var flatArgsCount int + var anySlices bool + + var stackMeta [32]argMeta + + var meta []argMeta + if len(args) <= len(stackMeta) { + meta = stackMeta[:len(args)] + } else { + meta = make([]argMeta, len(args)) + } + + for i, arg := range args { + if a, ok := arg.(driver.Valuer); ok { + var err error + arg, err = a.Value() + if err != nil { + return "", nil, err + } + } + + if v, ok := asSliceForIn(arg); ok { + meta[i].length = v.Len() + meta[i].v = v + + anySlices = true + flatArgsCount += meta[i].length + + if meta[i].length == 0 { + return "", nil, errors.New("empty slice passed to 'in' query") + } + } else { + meta[i].i = arg + flatArgsCount++ + } + } + + // don't do any parsing if there aren't any slices; note that this means + // some errors that we might have caught below will not be returned. 
+ if !anySlices { + return query, args, nil + } + + newArgs := make([]interface{}, 0, flatArgsCount) + + var buf strings.Builder + buf.Grow(len(query) + len(", ?")*flatArgsCount) + + var arg, offset int + + for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { + if arg >= len(meta) { + // if an argument wasn't passed, lets return an error; this is + // not actually how database/sql Exec/Query works, but since we are + // creating an argument list programmatically, we want to be able + // to catch these programmer errors earlier. + return "", nil, errors.New("number of bindVars exceeds arguments") + } + + argMeta := meta[arg] + arg++ + + // not a slice, continue. + // our questionmark will either be written before the next expansion + // of a slice or after the loop when writing the rest of the query + if argMeta.length == 0 { + offset = offset + i + 1 + newArgs = append(newArgs, argMeta.i) + continue + } + + // write everything up to and including our ? character + buf.WriteString(query[:offset+i+1]) + + for si := 1; si < argMeta.length; si++ { + buf.WriteString(", ?") + } + + newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) + + // slice the query and reset the offset. this avoids some bookkeeping for + // the write after the loop + query = query[offset+i+1:] + offset = 0 + } + + buf.WriteString(query) + + if arg < len(meta) { + return "", nil, errors.New("number of bindVars less than number arguments") + } + + return buf.String(), newArgs, nil +} + +func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { + switch val := v.Interface().(type) { + case []interface{}: + args = append(args, val...) + case []int: + for i := range val { + args = append(args, val[i]) + } + case []string: + for i := range val { + args = append(args, val[i]) + } + default: + for si := 0; si < vlen; si++ { + args = append(args, v.Index(si).Interface()) + } + } + + return args +} diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go new file mode 100644 index 000000000..e2b4e60b2 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/doc.go @@ -0,0 +1,12 @@ +// Package sqlx provides general purpose extensions to database/sql. +// +// It is intended to seamlessly wrap database/sql and provide convenience +// methods which are useful in the development of database driven applications. +// None of the underlying database/sql methods are changed. Instead all extended +// behavior is implemented through new methods defined on wrapper types. +// +// Additions include scanning into structs, named query support, rebinding +// queries for different drivers, convenient shorthands for common error handling +// and more. 
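`In` expands each slice argument into one bindvar per element, but it always emits `?`; the result still has to be rebound for the target driver. A short sketch, assuming `db` is an open `*sqlx.DB` over the `person` schema from the README example above:

```go
names := []string{"Jason", "John"}

// Expand the single '?' into one bindvar per slice element.
query, args, err := sqlx.In("SELECT * FROM person WHERE first_name IN (?)", names)
if err != nil {
	log.Fatalln(err) // an empty slice, for example, is rejected
}
// query == "SELECT * FROM person WHERE first_name IN (?, ?)"

// In always produces '?' bindvars; rebind them for the actual driver.
rows, err := db.Queryx(db.Rebind(query), args...)
```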
+//
+package sqlx
diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go
new file mode 100644
index 000000000..728aa04d0
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named.go
@@ -0,0 +1,458 @@
+package sqlx
+
+// Named Query Support
+//
+// * BindMap - bind query bindvars to map/struct args
+// * NamedExec, NamedQuery - named query w/ struct or map
+// * NamedStmt - a pre-compiled named query which is a prepared statement
+//
+// Internal Interfaces:
+//
+// * compileNamedQuery - rebind a named query, returning a query and list of names
+// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
+//
+import (
+	"bytes"
+	"database/sql"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"unicode"
+
+	"github.com/jmoiron/sqlx/reflectx"
+)
+
+// NamedStmt is a prepared statement that executes named queries. Prepare it
+// how you would execute a NamedQuery, but pass in a struct or map when executing.
+type NamedStmt struct {
+	Params      []string
+	QueryString string
+	Stmt        *Stmt
+}
+
+// Close closes the named statement.
+func (n *NamedStmt) Close() error {
+	return n.Stmt.Close()
+}
+
+// Exec executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return *new(sql.Result), err
+	}
+	return n.Stmt.Exec(args...)
+}
+
+// Query executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return nil, err
+	}
+	return n.Stmt.Query(args...)
+}
+
+// QueryRow executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRow(arg interface{}) *Row {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return n.Stmt.QueryRowx(args...)
+}
+
+// MustExec execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
+	res, err := n.Exec(arg)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// Queryx using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
+	r, err := n.Query(arg)
+	if err != nil {
+		return nil, err
+	}
+	return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
+	return n.QueryRow(arg)
+}
+
+// Select using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
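A `NamedStmt` compiles the named query once and can then be executed repeatedly with different struct or map arguments. A minimal sketch, assuming the `db` handle and `Person` type from the README example above:

```go
stmt, err := db.PrepareNamed("SELECT * FROM person WHERE first_name = :first_name")
if err != nil {
	log.Fatalln(err)
}
defer stmt.Close()

// The same prepared statement serves map and struct arguments alike.
var p Person
if err := stmt.Get(&p, map[string]interface{}{"first_name": "Jason"}); err != nil {
	log.Fatalln(err)
}
```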
+func (n *NamedStmt) Select(dest interface{}, arg interface{}) error { + rows, err := n.Queryx(arg) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Get(dest interface{}, arg interface{}) error { + r := n.QueryRowx(arg) + return r.scanAny(dest, false) +} + +// Unsafe creates an unsafe version of the NamedStmt +func (n *NamedStmt) Unsafe() *NamedStmt { + r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString} + r.Stmt.unsafe = true + return r +} + +// A union interface of preparer and binder, required to be able to prepare +// named statements (as the bindtype must be determined). +type namedPreparer interface { + Preparer + binder +} + +func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) { + bindType := BindType(p.DriverName()) + q, args, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return nil, err + } + stmt, err := Preparex(p, q) + if err != nil { + return nil, err + } + return &NamedStmt{ + QueryString: q, + Params: args, + Stmt: stmt, + }, nil +} + +// convertMapStringInterface attempts to convert v to map[string]interface{}. +// Unlike v.(map[string]interface{}), this function works on named types that +// are convertible to map[string]interface{} as well. +func convertMapStringInterface(v interface{}) (map[string]interface{}, bool) { + var m map[string]interface{} + mtype := reflect.TypeOf(m) + t := reflect.TypeOf(v) + if !t.ConvertibleTo(mtype) { + return nil, false + } + return reflect.ValueOf(v).Convert(mtype).Interface().(map[string]interface{}), true + +} + +func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + if maparg, ok := convertMapStringInterface(arg); ok { + return bindMapArgs(names, maparg) + } + return bindArgs(names, arg, m) +} + +// private interface to generate a list of interfaces from a given struct +// type, given a list of names to pull out of the struct. Used by public +// BindStruct interface. +func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + // grab the indirected value of arg + v := reflect.ValueOf(arg) + for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { + v = v.Elem() + } + + err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error { + if len(t) == 0 { + return fmt.Errorf("could not find name %s in %#v", names[i], arg) + } + + val := reflectx.FieldByIndexesReadOnly(v, t) + arglist = append(arglist, val.Interface()) + + return nil + }) + + return arglist, err +} + +// like bindArgs, but for maps. +func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + for _, name := range names { + val, ok := arg[name] + if !ok { + return arglist, fmt.Errorf("could not find name %s in %#v", name, arg) + } + arglist = append(arglist, val) + } + return arglist, nil +} + +// bindStruct binds a named parameter query with fields from a struct argument. +// The rules for binding field names to parameter names follow the same +// conventions as for StructScan, including obeying the `db` struct tags. 
+func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+	bound, names, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	arglist, err := bindAnyArgs(names, arg, m)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	return bound, arglist, nil
+}
+
+var valuesReg = regexp.MustCompile(`\)\s*(?i)VALUES\s*\(`)
+
+func findMatchingClosingBracketIndex(s string) int {
+	count := 0
+	for i, ch := range s {
+		if ch == '(' {
+			count++
+		}
+		if ch == ')' {
+			count--
+			if count == 0 {
+				return i
+			}
+		}
+	}
+	return 0
+}
+
+func fixBound(bound string, loop int) string {
+	loc := valuesReg.FindStringIndex(bound)
+	// defensive guard when "VALUES (...)" not found
+	if len(loc) < 2 {
+		return bound
+	}
+
+	openingBracketIndex := loc[1] - 1
+	index := findMatchingClosingBracketIndex(bound[openingBracketIndex:])
+	// defensive guard. must have closing bracket
+	if index == 0 {
+		return bound
+	}
+	closingBracketIndex := openingBracketIndex + index + 1
+
+	var buffer bytes.Buffer
+
+	buffer.WriteString(bound[0:closingBracketIndex])
+	for i := 0; i < loop-1; i++ {
+		buffer.WriteString(",")
+		buffer.WriteString(bound[openingBracketIndex:closingBracketIndex])
+	}
+	buffer.WriteString(bound[closingBracketIndex:])
+	return buffer.String()
+}
+
+// bindArray binds a named parameter query with fields from an array or slice of
+// structs argument.
+func bindArray(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+	// do the initial binding with QUESTION; if bindType is not question,
+	// we can rebind it at the end.
+	bound, names, err := compileNamedQuery([]byte(query), QUESTION)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+	arrayValue := reflect.ValueOf(arg)
+	arrayLen := arrayValue.Len()
+	if arrayLen == 0 {
+		return "", []interface{}{}, fmt.Errorf("length of array is 0: %#v", arg)
+	}
+	var arglist = make([]interface{}, 0, len(names)*arrayLen)
+	for i := 0; i < arrayLen; i++ {
+		elemArglist, err := bindAnyArgs(names, arrayValue.Index(i).Interface(), m)
+		if err != nil {
+			return "", []interface{}{}, err
+		}
+		arglist = append(arglist, elemArglist...)
+	}
+	if arrayLen > 1 {
+		bound = fixBound(bound, arrayLen)
+	}
+	// adjust binding type if we weren't on question
+	if bindType != QUESTION {
+		bound = Rebind(bindType, bound)
+	}
+	return bound, arglist, nil
+}
+
+// bindMap binds a named parameter query with a map of arguments.
+func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
+	bound, names, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	arglist, err := bindMapArgs(names, args)
+	return bound, arglist, err
+}
+
+// -- Compilation of Named Queries
+
+// Allow digits and letters in bind params; additionally runes are
+// checked against underscores, meaning that bind params can be
+// alphanumeric with underscores. Mind the difference between unicode
+// digits and numbers, where '5' is a digit but '五' is not.
+var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
+
+// FIXME: this function isn't safe for unicode named params, as a failing test
+// can testify. This is not a regression but a failure of the original code
+// as well. It should be modified to range over runes in a string rather than
+// bytes, even though this is less convenient and slower.
Hopefully the +// addition of the prepared NamedStmt (which will only do this once) will make +// up for the slightly slower ad-hoc NamedExec/NamedQuery. + +// compile a NamedQuery into an unbound query (using the '?' bindvar) and +// a list of names. +func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) { + names = make([]string, 0, 10) + rebound := make([]byte, 0, len(qs)) + + inName := false + last := len(qs) - 1 + currentVar := 1 + name := make([]byte, 0, 10) + + for i, b := range qs { + // a ':' while we're in a name is an error + if b == ':' { + // if this is the second ':' in a '::' escape sequence, append a ':' + if inName && i > 0 && qs[i-1] == ':' { + rebound = append(rebound, ':') + inName = false + continue + } else if inName { + err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i)) + return query, names, err + } + inName = true + name = []byte{} + } else if inName && i > 0 && b == '=' && len(name) == 0 { + rebound = append(rebound, ':', '=') + inName = false + continue + // if we're in a name, and this is an allowed character, continue + } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { + // append the byte to the name if we are in a name and not on the last byte + name = append(name, b) + // if we're in a name and it's not an allowed character, the name is done + } else if inName { + inName = false + // if this is the final byte of the string and it is part of the name, then + // make sure to add it to the name + if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) { + name = append(name, b) + } + // add the string representation to the names list + names = append(names, string(name)) + // add a proper bindvar for the bindType + switch bindType { + // oracle only supports named type bind vars even for positional + case NAMED: + rebound = append(rebound, ':') + rebound = append(rebound, name...) + case QUESTION, UNKNOWN: + rebound = append(rebound, '?') + case DOLLAR: + rebound = append(rebound, '$') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + case AT: + rebound = append(rebound, '@', 'p') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + } + // add this byte to string unless it was not part of the name + if i != last { + rebound = append(rebound, b) + } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) { + rebound = append(rebound, b) + } + } else { + // this is a normal byte and should just go onto the rebound query + rebound = append(rebound, b) + } + } + + return string(rebound), names, err +} + +// BindNamed binds a struct or a map to a query with named parameters. +// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future. +func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(bindType, query, arg, mapper()) +} + +// Named takes a query using named parameters and an argument and +// returns a new query with a list of args that can be executed by +// a database. The return value uses the `?` bindvar. 
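`Named` is the exported entry point to this compilation step: it rewrites `:name` parameters into `?` bindvars and pulls the matching values out of a struct or map. A small sketch using the `Person` type from the README example above:

```go
p := Person{FirstName: "Jane", LastName: "Citizen", Email: "jane@example.com"}

// Compile the named query and extract args via the usual db-tag mapping.
q, args, err := sqlx.Named("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", p)
if err != nil {
	log.Fatalln(err)
}
// q    == "INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"
// args == []interface{}{"Jane", "Citizen", "jane@example.com"}
```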
+func Named(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(QUESTION, query, arg, mapper()) +} + +func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + t := reflect.TypeOf(arg) + k := t.Kind() + switch { + case k == reflect.Map && t.Key().Kind() == reflect.String: + m, ok := convertMapStringInterface(arg) + if !ok { + return "", nil, fmt.Errorf("sqlx.bindNamedMapper: unsupported map type: %T", arg) + } + return bindMap(bindType, query, m) + case k == reflect.Array || k == reflect.Slice: + return bindArray(bindType, query, arg, m) + default: + return bindStruct(bindType, query, arg, m) + } +} + +// NamedQuery binds a named query and then runs Query on the result using the +// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with +// map[string]interface{} types. +func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.Queryx(q, args...) +} + +// NamedExec uses BindStruct to get a query executable by the driver and +// then runs Exec on the result. Returns an error from the binding +// or the query execution itself. +func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.Exec(q, args...) +} diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go new file mode 100644 index 000000000..07ad2165d --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named_context.go @@ -0,0 +1,132 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" +) + +// A union interface of contextPreparer and binder, required to be able to +// prepare named statements with context (as the bindtype must be determined). +type namedPreparerContext interface { + PreparerContext + binder +} + +func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) { + bindType := BindType(p.DriverName()) + q, args, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return nil, err + } + stmt, err := PreparexContext(ctx, p, q) + if err != nil { + return nil, err + } + return &NamedStmt{ + QueryString: q, + Params: args, + Stmt: stmt, + }, nil +} + +// ExecContext executes a named statement using the struct passed. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return *new(sql.Result), err + } + return n.Stmt.ExecContext(ctx, args...) +} + +// QueryContext executes a named statement using the struct argument, returning rows. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return nil, err + } + return n.Stmt.QueryContext(ctx, args...) +} + +// QueryRowContext executes a named statement against the database. Because sqlx cannot +// create a *sql.Row with an error condition pre-set for binding errors, sqlx +// returns a *sqlx.Row instead. 
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return n.Stmt.QueryRowxContext(ctx, args...)
+}
+
+// MustExecContext execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
+	res, err := n.ExecContext(ctx, arg)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// QueryxContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
+	r, err := n.QueryContext(ctx, arg)
+	if err != nil {
+		return nil, err
+	}
+	return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
+	return n.QueryRowContext(ctx, arg)
+}
+
+// SelectContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
+	rows, err := n.QueryxContext(ctx, arg)
+	if err != nil {
+		return err
+	}
+	// if something happens here, we want to make sure the rows are Closed
+	defer rows.Close()
+	return scanAll(rows, dest, false)
+}
+
+// GetContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
+	r := n.QueryRowxContext(ctx, arg)
+	return r.scanAny(dest, false)
+}
+
+// NamedQueryContext binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.QueryxContext(ctx, q, args...)
+}
+
+// NamedExecContext uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.ExecContext(ctx, q, args...)
+}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
new file mode 100644
index 000000000..f01d3d1f0
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
@@ -0,0 +1,17 @@
+# reflectx
+
+The sqlx package has special reflect needs. In particular, it needs to:
+
+* be able to map a name to a field
+* understand embedded structs
+* understand mapping names to fields by a particular tag
+* support user-specified name -> field mapping functions
+
+These behaviors mimic those of the standard library marshallers and also the
+behavior of standard Go accessors.
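A minimal, self-contained sketch of the `Mapper` on its own (the `Account` type is purely illustrative):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/jmoiron/sqlx/reflectx"
)

type Account struct {
	Owner string `db:"owner_name"`
	ID    int
}

func main() {
	// Map by the db tag, falling back to the lowercased field name.
	m := reflectx.NewMapperFunc("db", strings.ToLower)

	a := Account{Owner: "jane", ID: 7}
	v := reflect.ValueOf(a)

	// The tagged field resolves through its tag, the untagged one
	// through the map function.
	fmt.Println(m.FieldByName(v, "owner_name")) // jane
	fmt.Println(m.FieldByName(v, "id"))         // 7
}
```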
+ +The first two are amply taken care of by `Reflect.Value.FieldByName`, and the third is +addressed by `Reflect.Value.FieldByNameFunc`, but these don't quite understand struct +tags in the ways that are vital to most marshallers, and they are slow. + +This reflectx package extends reflect to achieve these goals. diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go new file mode 100644 index 000000000..0b1099428 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go @@ -0,0 +1,444 @@ +// Package reflectx implements extensions to the standard reflect lib suitable +// for implementing marshalling and unmarshalling packages. The main Mapper type +// allows for Go-compatible named attribute access, including accessing embedded +// struct attributes and the ability to use functions and struct tags to +// customize field names. +// +package reflectx + +import ( + "reflect" + "runtime" + "strings" + "sync" +) + +// A FieldInfo is metadata for a struct field. +type FieldInfo struct { + Index []int + Path string + Field reflect.StructField + Zero reflect.Value + Name string + Options map[string]string + Embedded bool + Children []*FieldInfo + Parent *FieldInfo +} + +// A StructMap is an index of field metadata for a struct. +type StructMap struct { + Tree *FieldInfo + Index []*FieldInfo + Paths map[string]*FieldInfo + Names map[string]*FieldInfo +} + +// GetByPath returns a *FieldInfo for a given string path. +func (f StructMap) GetByPath(path string) *FieldInfo { + return f.Paths[path] +} + +// GetByTraversal returns a *FieldInfo for a given integer path. It is +// analogous to reflect.FieldByIndex, but using the cached traversal +// rather than re-executing the reflect machinery each time. +func (f StructMap) GetByTraversal(index []int) *FieldInfo { + if len(index) == 0 { + return nil + } + + tree := f.Tree + for _, i := range index { + if i >= len(tree.Children) || tree.Children[i] == nil { + return nil + } + tree = tree.Children[i] + } + return tree +} + +// Mapper is a general purpose mapper of names to struct fields. A Mapper +// behaves like most marshallers in the standard library, obeying a field tag +// for name mapping but also providing a basic transform function. +type Mapper struct { + cache map[reflect.Type]*StructMap + tagName string + tagMapFunc func(string) string + mapFunc func(string) string + mutex sync.Mutex +} + +// NewMapper returns a new mapper using the tagName as its struct field tag. +// If tagName is the empty string, it is ignored. +func NewMapper(tagName string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + } +} + +// NewMapperTagFunc returns a new mapper which contains a mapper for field names +// AND a mapper for tag values. This is useful for tags like json which can +// have values like "name,omitempty". +func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: mapFunc, + tagMapFunc: tagMapFunc, + } +} + +// NewMapperFunc returns a new mapper which optionally obeys a field tag and +// a struct field name mapper func given by f. 
Tags will take precedence, but +// for any other field, the mapped name will be f(field.Name) +func NewMapperFunc(tagName string, f func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: f, + } +} + +// TypeMap returns a mapping of field strings to int slices representing +// the traversal down the struct to reach the field. +func (m *Mapper) TypeMap(t reflect.Type) *StructMap { + m.mutex.Lock() + mapping, ok := m.cache[t] + if !ok { + mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) + m.cache[t] = mapping + } + m.mutex.Unlock() + return mapping +} + +// FieldMap returns the mapper's mapping of field names to reflect values. Panics +// if v's Kind is not Struct, or v is not Indirectable to a struct kind. +func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + r := map[string]reflect.Value{} + tm := m.TypeMap(v.Type()) + for tagName, fi := range tm.Names { + r[tagName] = FieldByIndexes(v, fi.Index) + } + return r +} + +// FieldByName returns a field by its mapped name as a reflect.Value. +// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. +// Returns zero Value if the name is not found. +func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + fi, ok := tm.Names[name] + if !ok { + return v + } + return FieldByIndexes(v, fi.Index) +} + +// FieldsByName returns a slice of values corresponding to the slice of names +// for the value. Panics if v's Kind is not Struct or v is not Indirectable +// to a struct Kind. Returns zero Value for each name not found. +func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + vals := make([]reflect.Value, 0, len(names)) + for _, name := range names { + fi, ok := tm.Names[name] + if !ok { + vals = append(vals, *new(reflect.Value)) + } else { + vals = append(vals, FieldByIndexes(v, fi.Index)) + } + } + return vals +} + +// TraversalsByName returns a slice of int slices which represent the struct +// traversals for each mapped name. Panics if t is not a struct or Indirectable +// to a struct. Returns empty int slice for each name not found. +func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int { + r := make([][]int, 0, len(names)) + m.TraversalsByNameFunc(t, names, func(_ int, i []int) error { + if i == nil { + r = append(r, []int{}) + } else { + r = append(r, i) + } + + return nil + }) + return r +} + +// TraversalsByNameFunc traverses the mapped names and calls fn with the index of +// each name and the struct traversal represented by that name. Panics if t is not +// a struct or Indirectable to a struct. Returns the first error returned by fn or nil. +func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error { + t = Deref(t) + mustBe(t, reflect.Struct) + tm := m.TypeMap(t) + for i, name := range names { + fi, ok := tm.Names[name] + if !ok { + if err := fn(i, nil); err != nil { + return err + } + } else { + if err := fn(i, fi.Index); err != nil { + return err + } + } + } + return nil +} + +// FieldByIndexes returns a value for the field given by the struct traversal +// for the given value. 
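`TraversalsByName` is how sqlx lines result columns up with struct fields, including fields of embedded structs, and the `FieldByIndexes` helpers then walk those index paths. An illustrative, function-body sketch with hypothetical types (assumes `reflect`, `strings`, `fmt`, and `reflectx` are imported):

```go
type Timestamps struct {
	Created string `db:"created_at"`
}

type Post struct {
	Title string `db:"title"`
	Timestamps
}

m := reflectx.NewMapperFunc("db", strings.ToLower)

// One index path per name; the embedded field yields a two-step path.
paths := m.TraversalsByName(reflect.TypeOf(Post{}), []string{"title", "created_at"})
// paths == [][]int{{0}, {1, 0}}

v := reflect.ValueOf(Post{Title: "hi", Timestamps: Timestamps{Created: "now"}})
for _, path := range paths {
	fmt.Println(reflectx.FieldByIndexesReadOnly(v, path)) // hi, then now
}
```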
+func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value { + for _, i := range indexes { + v = reflect.Indirect(v).Field(i) + // if this is a pointer and it's nil, allocate a new value and set it + if v.Kind() == reflect.Ptr && v.IsNil() { + alloc := reflect.New(Deref(v.Type())) + v.Set(alloc) + } + if v.Kind() == reflect.Map && v.IsNil() { + v.Set(reflect.MakeMap(v.Type())) + } + } + return v +} + +// FieldByIndexesReadOnly returns a value for a particular struct traversal, +// but is not concerned with allocating nil pointers because the value is +// going to be used for reading and not setting. +func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value { + for _, i := range indexes { + v = reflect.Indirect(v).Field(i) + } + return v +} + +// Deref is Indirect for reflect.Types +func Deref(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +// -- helpers & utilities -- + +type kinder interface { + Kind() reflect.Kind +} + +// mustBe checks a value against a kind, panicing with a reflect.ValueError +// if the kind isn't that which is required. +func mustBe(v kinder, expected reflect.Kind) { + if k := v.Kind(); k != expected { + panic(&reflect.ValueError{Method: methodName(), Kind: k}) + } +} + +// methodName returns the caller of the function calling methodName +func methodName() string { + pc, _, _, _ := runtime.Caller(2) + f := runtime.FuncForPC(pc) + if f == nil { + return "unknown method" + } + return f.Name() +} + +type typeQueue struct { + t reflect.Type + fi *FieldInfo + pp string // Parent path +} + +// A copying append that creates a new slice each time. +func apnd(is []int, i int) []int { + x := make([]int, len(is)+1) + copy(x, is) + x[len(x)-1] = i + return x +} + +type mapf func(string) string + +// parseName parses the tag and the target name for the given field using +// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the +// field's name to a target name, and tagMapFunc for mapping the tag to +// a target name. +func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) { + // first, set the fieldName to the field's name + fieldName = field.Name + // if a mapFunc is set, use that to override the fieldName + if mapFunc != nil { + fieldName = mapFunc(fieldName) + } + + // if there's no tag to look for, return the field name + if tagName == "" { + return "", fieldName + } + + // if this tag is not set using the normal convention in the tag, + // then return the fieldname.. this check is done because according + // to the reflect documentation: + // If the tag does not have the conventional format, + // the value returned by Get is unspecified. + // which doesn't sound great. 
+ if !strings.Contains(string(field.Tag), tagName+":") { + return "", fieldName + } + + // at this point we're fairly sure that we have a tag, so lets pull it out + tag = field.Tag.Get(tagName) + + // if we have a mapper function, call it on the whole tag + // XXX: this is a change from the old version, which pulled out the name + // before the tagMapFunc could be run, but I think this is the right way + if tagMapFunc != nil { + tag = tagMapFunc(tag) + } + + // finally, split the options from the name + parts := strings.Split(tag, ",") + fieldName = parts[0] + + return tag, fieldName +} + +// parseOptions parses options out of a tag string, skipping the name +func parseOptions(tag string) map[string]string { + parts := strings.Split(tag, ",") + options := make(map[string]string, len(parts)) + if len(parts) > 1 { + for _, opt := range parts[1:] { + // short circuit potentially expensive split op + if strings.Contains(opt, "=") { + kv := strings.Split(opt, "=") + options[kv[0]] = kv[1] + continue + } + options[opt] = "" + } + } + return options +} + +// getMapping returns a mapping for the t type, using the tagName, mapFunc and +// tagMapFunc to determine the canonical names of fields. +func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { + m := []*FieldInfo{} + + root := &FieldInfo{} + queue := []typeQueue{} + queue = append(queue, typeQueue{Deref(t), root, ""}) + +QueueLoop: + for len(queue) != 0 { + // pop the first item off of the queue + tq := queue[0] + queue = queue[1:] + + // ignore recursive field + for p := tq.fi.Parent; p != nil; p = p.Parent { + if tq.fi.Field.Type == p.Field.Type { + continue QueueLoop + } + } + + nChildren := 0 + if tq.t.Kind() == reflect.Struct { + nChildren = tq.t.NumField() + } + tq.fi.Children = make([]*FieldInfo, nChildren) + + // iterate through all of its fields + for fieldPos := 0; fieldPos < nChildren; fieldPos++ { + + f := tq.t.Field(fieldPos) + + // parse the tag and the target name using the mapping options for this field + tag, name := parseName(f, tagName, mapFunc, tagMapFunc) + + // if the name is "-", disabled via a tag, skip it + if name == "-" { + continue + } + + fi := FieldInfo{ + Field: f, + Name: name, + Zero: reflect.New(f.Type).Elem(), + Options: parseOptions(tag), + } + + // if the path is empty this path is just the name + if tq.pp == "" { + fi.Path = fi.Name + } else { + fi.Path = tq.pp + "." 
+ fi.Name + } + + // skip unexported fields + if len(f.PkgPath) != 0 && !f.Anonymous { + continue + } + + // bfs search of anonymous embedded structs + if f.Anonymous { + pp := tq.pp + if tag != "" { + pp = fi.Path + } + + fi.Embedded = true + fi.Index = apnd(tq.fi.Index, fieldPos) + nChildren := 0 + ft := Deref(f.Type) + if ft.Kind() == reflect.Struct { + nChildren = ft.NumField() + } + fi.Children = make([]*FieldInfo, nChildren) + queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) + } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) + queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) + } + + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Parent = tq.fi + tq.fi.Children[fieldPos] = &fi + m = append(m, &fi) + } + } + + flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} + for _, fi := range flds.Index { + // check if nothing has already been pushed with the same path + // sometimes you can choose to override a type using embedded struct + fld, ok := flds.Paths[fi.Path] + if !ok || fld.Embedded { + flds.Paths[fi.Path] = fi + if fi.Name != "" && !fi.Embedded { + flds.Names[fi.Path] = fi + } + } + } + + return flds +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go new file mode 100644 index 000000000..f7b287683 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx.go @@ -0,0 +1,1051 @@ +package sqlx + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Although the NameMapper is convenient, in practice it should not +// be relied on except for application code. If you are writing a library +// that uses sqlx, you should be aware that the name mappings you expect +// can be overridden by your user's application. + +// NameMapper is used to map column names to struct field names. By default, +// it uses strings.ToLower to lowercase struct field names. It can be set +// to whatever you want, but it is encouraged to be set before sqlx is used +// as name-to-field mappings are cached after first use on a type. +var NameMapper = strings.ToLower +var origMapper = reflect.ValueOf(NameMapper) + +// Rather than creating on init, this is created when necessary so that +// importers have time to customize the NameMapper. +var mpr *reflectx.Mapper + +// mprMu protects mpr. +var mprMu sync.Mutex + +// mapper returns a valid mapper using the configured NameMapper func. +func mapper() *reflectx.Mapper { + mprMu.Lock() + defer mprMu.Unlock() + + if mpr == nil { + mpr = reflectx.NewMapperFunc("db", NameMapper) + } else if origMapper != reflect.ValueOf(NameMapper) { + // if NameMapper has changed, create a new mapper + mpr = reflectx.NewMapperFunc("db", NameMapper) + origMapper = reflect.ValueOf(NameMapper) + } + return mpr +} + +// isScannable takes the reflect.Type and the actual dest value and returns +// whether or not it's Scannable. 
Something is scannable if: +// * it is not a struct +// * it implements sql.Scanner +// * it has no exported fields +func isScannable(t reflect.Type) bool { + if reflect.PtrTo(t).Implements(_scannerInterface) { + return true + } + if t.Kind() != reflect.Struct { + return true + } + + // it's not important that we use the right mapper for this particular object, + // we're only concerned on how many exported fields this struct has + return len(mapper().TypeMap(t).Index) == 0 +} + +// ColScanner is an interface used by MapScan and SliceScan +type ColScanner interface { + Columns() ([]string, error) + Scan(dest ...interface{}) error + Err() error +} + +// Queryer is an interface used by Get and Select +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) + Queryx(query string, args ...interface{}) (*Rows, error) + QueryRowx(query string, args ...interface{}) *Row +} + +// Execer is an interface used by MustExec and LoadFile +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Binder is an interface for something which can bind queries (Tx, DB) +type binder interface { + DriverName() string + Rebind(string) string + BindNamed(string, interface{}) (string, []interface{}, error) +} + +// Ext is a union interface which can bind, query, and exec, used by +// NamedQuery and NamedExec. +type Ext interface { + binder + Queryer + Execer +} + +// Preparer is an interface used by Preparex. +type Preparer interface { + Prepare(query string) (*sql.Stmt, error) +} + +// determine if any of our extensions are unsafe +func isUnsafe(i interface{}) bool { + switch v := i.(type) { + case Row: + return v.unsafe + case *Row: + return v.unsafe + case Rows: + return v.unsafe + case *Rows: + return v.unsafe + case NamedStmt: + return v.Stmt.unsafe + case *NamedStmt: + return v.Stmt.unsafe + case Stmt: + return v.unsafe + case *Stmt: + return v.unsafe + case qStmt: + return v.unsafe + case *qStmt: + return v.unsafe + case DB: + return v.unsafe + case *DB: + return v.unsafe + case Tx: + return v.unsafe + case *Tx: + return v.unsafe + case sql.Rows, *sql.Rows: + return false + default: + return false + } +} + +func mapperFor(i interface{}) *reflectx.Mapper { + switch i := i.(type) { + case DB: + return i.Mapper + case *DB: + return i.Mapper + case Tx: + return i.Mapper + case *Tx: + return i.Mapper + default: + return mapper() + } +} + +var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() +var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// Row is a reimplementation of sql.Row in order to gain access to the underlying +// sql.Rows.Columns() data, necessary for StructScan. +type Row struct { + err error + unsafe bool + rows *sql.Rows + Mapper *reflectx.Mapper +} + +// Scan is a fixed implementation of sql.Row.Scan, which does not discard the +// underlying error from the internal rows object if it exists. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + + // TODO(bradfitz): for now we need to defensively clone all + // []byte that the driver returned (not permitting + // *RawBytes in Rows.Scan), since we're about to close + // the Rows in our defer, when we return from this function. + // the contract with the driver.Next(...) interface is that it + // can return slices into read-only temporary memory that's + // only valid until the next Scan/Close. But the TODO is that + // for a lot of drivers, this copy will be unnecessary. 
We + // should provide an optional interface for drivers to + // implement to say, "don't worry, the []bytes that I return + // from Next will not be modified again." (for instance, if + // they were obtained from the network anyway) But for now we + // don't care. + defer r.rows.Close() + for _, dp := range dest { + if _, ok := dp.(*sql.RawBytes); ok { + return errors.New("sql: RawBytes isn't allowed on Row.Scan") + } + } + + if !r.rows.Next() { + if err := r.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := r.rows.Scan(dest...) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + if err := r.rows.Close(); err != nil { + return err + } + return nil +} + +// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually +// returned by Row.Scan() +func (r *Row) Columns() ([]string, error) { + if r.err != nil { + return []string{}, r.err + } + return r.rows.Columns() +} + +// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error +func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { + if r.err != nil { + return []*sql.ColumnType{}, r.err + } + return r.rows.ColumnTypes() +} + +// Err returns the error encountered while scanning. +func (r *Row) Err() error { + return r.err +} + +// DB is a wrapper around sql.DB which keeps track of the driverName upon Open, +// used mostly to automatically bind named queries using the right bindvars. +type DB struct { + *sql.DB + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The +// driverName of the original database is required for named query support. +func NewDb(db *sql.DB, driverName string) *DB { + return &DB{DB: db, driverName: driverName, Mapper: mapper()} +} + +// DriverName returns the driverName passed to the Open function for this DB. +func (db *DB) DriverName() string { + return db.driverName +} + +// Open is the same as sql.Open, but returns an *sqlx.DB instead. +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err +} + +// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. +func MustOpen(driverName, dataSourceName string) *DB { + db, err := Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// MapperFunc sets a new mapper for this db using the default sqlx struct tag +// and the provided mapper function. +func (db *DB) MapperFunc(mf func(string) string) { + db.Mapper = reflectx.NewMapperFunc("db", mf) +} + +// Rebind transforms a query from QUESTION to the DB driver's bindvar type. +func (db *DB) Rebind(query string) string { + return Rebind(BindType(db.driverName), query) +} + +// Unsafe returns a version of DB which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its +// safety behavior. +func (db *DB) Unsafe() *DB { + return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} +} + +// BindNamed binds a query using the DB driver's bindvar type. +func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) +} + +// NamedQuery using this DB. 
+// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(db, query, arg) +} + +// NamedExec using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(db, query, arg) +} + +// Select using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { + return Select(db, dest, query, args...) +} + +// Get using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { + return Get(db, dest, query, args...) +} + +// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +func (db *DB) MustBegin() *Tx { + tx, err := db.Beginx() + if err != nil { + panic(err) + } + return tx +} + +// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. +func (db *DB) Beginx() (*Tx, error) { + tx, err := db.DB.Begin() + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Queryx queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowx queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowx(query string, args ...interface{}) *Row { + rows, err := db.DB.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustExec (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(db, query, args...) +} + +// Preparex returns an sqlx.Stmt instead of a sql.Stmt +func (db *DB) Preparex(query string) (*Stmt, error) { + return Preparex(db, query) +} + +// PrepareNamed returns an sqlx.NamedStmt +func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(db, query) +} + +// Conn is a wrapper around sql.Conn with extra functionality +type Conn struct { + *sql.Conn + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// Tx is an sqlx wrapper around sql.Tx with extra functionality +type Tx struct { + *sql.Tx + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// DriverName returns the driverName used by the DB which began this transaction. +func (tx *Tx) DriverName() string { + return tx.driverName +} + +// Rebind a query within a transaction's bindvar type. +func (tx *Tx) Rebind(query string) string { + return Rebind(BindType(tx.driverName), query) +} + +// Unsafe returns a version of Tx which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (tx *Tx) Unsafe() *Tx { + return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} +} + +// BindNamed binds a query within a transaction's bindvar type. 
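Transactions expose the same extended surface as `DB`. A short sketch, assuming the `db` handle, schema, and `Person` type from the README example above:

```go
tx, err := db.Beginx()
if err != nil {
	log.Fatalln(err)
}

// The bindvar and name-mapping machinery works inside the tx as well.
_, err = tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)",
	&Person{FirstName: "Ada", LastName: "Lovelace", Email: "ada@example.com"})
if err != nil {
	tx.Rollback() // don't leave the transaction dangling on failure
	log.Fatalln(err)
}
if err := tx.Commit(); err != nil {
	log.Fatalln(err)
}
```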
+func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) +} + +// NamedQuery within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(tx, query, arg) +} + +// NamedExec a named query within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(tx, query, arg) +} + +// Select within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { + return Select(tx, dest, query, args...) +} + +// Queryx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// QueryRowx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { + rows, err := tx.Tx.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// Get within a transaction. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { + return Get(tx, dest, query, args...) +} + +// MustExec runs MustExec within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(tx, query, args...) +} + +// Preparex a statement within a transaction. +func (tx *Tx) Preparex(query string) (*Stmt, error) { + return Preparex(tx, query) +} + +// Stmtx returns a version of the prepared statement which runs within a transaction. Provided +// stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) Stmtx(stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} +} + +// NamedStmt returns a version of the prepared statement which runs within a transaction. +func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.Stmtx(stmt.Stmt), + } +} + +// PrepareNamed returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(tx, query) +} + +// Stmt is an sqlx wrapper around sql.Stmt with extra functionality +type Stmt struct { + *sql.Stmt + unsafe bool + Mapper *reflectx.Mapper +} + +// Unsafe returns a version of Stmt which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (s *Stmt) Unsafe() *Stmt { + return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} +} + +// Select using the prepared statement. +// Any placeholder parameters are replaced with supplied args. 
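Prepared statements keep the x-style helpers as well. A minimal sketch, assuming the postgres-backed `db` and `Person` type from the README example:

```go
stmt, err := db.Preparex("SELECT * FROM person WHERE first_name = $1")
if err != nil {
	log.Fatalln(err)
}
defer stmt.Close()

// Get binds the positional args and scans the single row into the struct.
var p Person
if err := stmt.Get(&p, "Jason"); err != nil {
	log.Fatalln(err)
}
```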
+func (s *Stmt) Select(dest interface{}, args ...interface{}) error { + return Select(&qStmt{s}, dest, "", args...) +} + +// Get using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) Get(dest interface{}, args ...interface{}) error { + return Get(&qStmt{s}, dest, "", args...) +} + +// MustExec (panic) using this statement. Note that the query portion of the error +// output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExec(args ...interface{}) sql.Result { + return MustExec(&qStmt{s}, "", args...) +} + +// QueryRowx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowx(args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowx("", args...) +} + +// Queryx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.Queryx("", args...) +} + +// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by +// implementing those interfaces and ignoring the `query` argument. +type qStmt struct{ *Stmt } + +func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.Query(args...) +} + +func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.Query(args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { + rows, err := q.Stmt.Query(args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.Exec(args...) +} + +// Rows is a wrapper around sql.Rows which caches costly reflect operations +// during a looped StructScan +type Rows struct { + *sql.Rows + unsafe bool + Mapper *reflectx.Mapper + // these fields cache memory use for a rows during iteration w/ structScan + started bool + fields [][]int + values []interface{} +} + +// SliceScan using this Rows. +func (r *Rows) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Rows) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. +// Use this and iterate over Rows manually when the memory load of Select() might be +// prohibitive. *Rows.StructScan caches the reflect work of matching up column +// positions to fields to avoid that overhead per scan, which means it is not safe +// to run StructScan on the same Rows instance with different struct types. 
+func (r *Rows) StructScan(dest interface{}) error { + v := reflect.ValueOf(dest) + + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + + v = v.Elem() + + if !r.started { + columns, err := r.Columns() + if err != nil { + return err + } + m := r.Mapper + + r.fields = m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(r.fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + r.values = make([]interface{}, len(columns)) + r.started = true + } + + err := fieldsByTraversal(v, r.fields, r.values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + err = r.Scan(r.values...) + if err != nil { + return err + } + return r.Err() +} + +// Connect to a database and verify with a ping. +func Connect(driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + err = db.Ping() + if err != nil { + db.Close() + return nil, err + } + return db, nil +} + +// MustConnect connects to a database and panics on error. +func MustConnect(driverName, dataSourceName string) *DB { + db, err := Connect(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// Preparex prepares a statement. +func Preparex(p Preparer, query string) (*Stmt, error) { + s, err := p.Prepare(query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// Select executes a query using the provided Queryer, and StructScans each row +// into dest, which must be a slice. If the slice elements are scannable, then +// the result set must have only one column. Otherwise, StructScan is used. +// The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { + rows, err := q.Queryx(query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get does a QueryRow using the provided Queryer, and scans the resulting row +// to dest. If dest is scannable, the result must only have one column. Otherwise, +// StructScan is used. Get will return sql.ErrNoRows like row.Scan would. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func Get(q Queryer, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowx(query, args...) + return r.scanAny(dest, false) +} + +// LoadFile exec's every statement in a file (as a single call to Exec). +// LoadFile may return a nil *sql.Result if errors are encountered locating or +// reading the file at path. LoadFile reads the entire file into memory, so it +// is not suitable for loading large data dumps, but can be useful for initializing +// schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. 
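A short sketch of `Connect` and `Get` under the same assumptions (hypothetical DSN and table); note how `Get` surfaces `sql.ErrNoRows` just as `row.Scan` would:

```go
package main

import (
	"database/sql"
	"errors"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

type Person struct {
	FirstName string `db:"first_name"`
	LastName  string `db:"last_name"`
}

func main() {
	// Connect opens the pool and verifies it with a Ping.
	db, err := sqlx.Connect("postgres", "user=test dbname=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	var one Person
	err = db.Get(&one, `SELECT first_name, last_name FROM person WHERE first_name = $1`, "Ada")
	if errors.Is(err, sql.ErrNoRows) {
		log.Println("no such person") // empty result set is an error for Get
	} else if err != nil {
		log.Fatal(err)
	}
}
```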
+func LoadFile(e Execer, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.Exec(string(contents)) + return &res, err +} + +// MustExec execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExec(e Execer, query string, args ...interface{}) sql.Result { + res, err := e.Exec(query, args...) + if err != nil { + panic(err) + } + return res +} + +// SliceScan using this Rows. +func (r *Row) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Row) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +func (r *Row) scanAny(dest interface{}, structOnly bool) error { + if r.err != nil { + return r.err + } + if r.rows == nil { + r.err = sql.ErrNoRows + return r.err + } + defer r.rows.Close() + + v := reflect.ValueOf(dest) + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if v.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + + base := reflectx.Deref(v.Type()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := r.Columns() + if err != nil { + return err + } + + if scannable && len(columns) > 1 { + return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns)) + } + + if scannable { + return r.Scan(dest) + } + + m := r.Mapper + + fields := m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values := make([]interface{}, len(columns)) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + return r.Scan(values...) +} + +// StructScan a single Row into dest. +func (r *Row) StructScan(dest interface{}) error { + return r.scanAny(dest, true) +} + +// SliceScan a row, returning a []interface{} with values similar to MapScan. +// This function is primarily intended for use where the number of columns +// is not known. Because you can pass an []interface{} directly to Scan, +// it's recommended that you do that as it will not have to allocate new +// slices per row. +func SliceScan(r ColScanner) ([]interface{}, error) { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return []interface{}{}, err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + + if err != nil { + return values, err + } + + for i := range columns { + values[i] = *(values[i].(*interface{})) + } + + return values, r.Err() +} + +// MapScan scans a single Row into the dest map[string]interface{}. +// Use this to get results for SQL that might not be under your control +// (for instance, if you're building an interface for an SQL server that +// executes SQL from input). Please do not use this as a primary interface! +// This will modify the map sent to it in place, so reuse the same map with +// care. 
Columns which occur more than once in the result will overwrite +// each other! +func MapScan(r ColScanner, dest map[string]interface{}) error { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + if err != nil { + return err + } + + for i, column := range columns { + dest[column] = *(values[i].(*interface{})) + } + + return r.Err() +} + +type rowsi interface { + Close() error + Columns() ([]string, error) + Err() error + Next() bool + Scan(...interface{}) error +} + +// structOnlyError returns an error appropriate for type when a non-scannable +// struct is expected but something else is given +func structOnlyError(t reflect.Type) error { + isStruct := t.Kind() == reflect.Struct + isScanner := reflect.PtrTo(t).Implements(_scannerInterface) + if !isStruct { + return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind()) + } + if isScanner { + return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name()) + } + return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name()) +} + +// scanAll scans all rows into a destination, which must be a slice of any +// type. It resets the slice length to zero before appending each element to +// the slice. If the destination slice type is a Struct, then StructScan will +// be used on each row. If the destination is some other kind of base type, +// then each row must only have one column which can scan into that type. This +// allows you to do something like: +// +// rows, _ := db.Query("select id from people;") +// var ids []int +// scanAll(rows, &ids, false) +// +// and ids will be a list of the id results. I realize that this is a desirable +// interface to expose to users, but for now it will only be exposed via changes +// to `Get` and `Select`. The reason that this has been implemented like this is +// this is the only way to not duplicate reflect work in the new API while +// maintaining backwards compatibility. 
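The base-type branch of `scanAll` described above is reachable through the exported `Select`; a minimal sketch, assuming a hypothetical `people` table with an integer `id` column:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
)

// ids exercises the non-struct path: a one-column result scanned
// straight into a plain []int rather than a slice of structs.
func ids(db *sqlx.DB) []int {
	var out []int
	if err := db.Select(&out, `SELECT id FROM people`); err != nil {
		log.Fatal(err)
	}
	return out
}
```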
+func scanAll(rows rowsi, dest interface{}, structOnly bool) error { + var v, vp reflect.Value + + value := reflect.ValueOf(dest) + + // json.Unmarshal returns errors for these + if value.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if value.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + direct := reflect.Indirect(value) + + slice, err := baseType(value.Type(), reflect.Slice) + if err != nil { + return err + } + direct.SetLen(0) + + isPtr := slice.Elem().Kind() == reflect.Ptr + base := reflectx.Deref(slice.Elem()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := rows.Columns() + if err != nil { + return err + } + + // if it's a base type make sure it only has 1 column; if not return an error + if scannable && len(columns) > 1 { + return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) + } + + if !scannable { + var values []interface{} + var m *reflectx.Mapper + + switch rows.(type) { + case *Rows: + m = rows.(*Rows).Mapper + default: + m = mapper() + } + + fields := m.TraversalsByName(base, columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values = make([]interface{}, len(columns)) + + for rows.Next() { + // create a new struct type (which returns PtrTo) and indirect it + vp = reflect.New(base) + v = reflect.Indirect(vp) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + + // scan into the struct field pointers and append to our results + err = rows.Scan(values...) + if err != nil { + return err + } + + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, v)) + } + } + } else { + for rows.Next() { + vp = reflect.New(base) + err = rows.Scan(vp.Interface()) + if err != nil { + return err + } + // append + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, reflect.Indirect(vp))) + } + } + } + + return rows.Err() +} + +// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately +// it doesn't really feel like it's named properly. There is an incongruency +// between this and the way that StructScan (which might better be ScanStruct +// anyway) works on a rows object. + +// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. +// StructScan will scan in the entire rows result, so if you do not want to +// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. +// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. +func StructScan(rows rowsi, dest interface{}) error { + return scanAll(rows, dest, true) + +} + +// reflect helpers + +func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { + t = reflectx.Deref(t) + if t.Kind() != expected { + return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) + } + return t, nil +} + +// fieldsByName fills a values interface with fields from the passed value based +// on the traversals in int. If ptrs is true, return addresses instead of values. +// We write this instead of using FieldsByName to save allocations and map lookups +// when iterating over many rows. Empty traversals will get an interface pointer. 
+// Because of the necessity of requesting ptrs or values, it's considered a bit too +// specialized for inclusion in reflectx itself. +func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return errors.New("argument not a struct") + } + + for i, traversal := range traversals { + if len(traversal) == 0 { + values[i] = new(interface{}) + continue + } + f := reflectx.FieldByIndexes(v, traversal) + if ptrs { + values[i] = f.Addr().Interface() + } else { + values[i] = f.Interface() + } + } + return nil +} + +func missingFields(transversals [][]int) (field int, err error) { + for i, t := range transversals { + if len(t) == 0 { + return i, errors.New("missing field") + } + } + return 0, nil +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go new file mode 100644 index 000000000..7aa4dd01e --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go @@ -0,0 +1,414 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" +) + +// ConnectContext to a database and verify with a ping. +func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return db, err + } + err = db.PingContext(ctx) + return db, err +} + +// QueryerContext is an interface used by GetContext and SelectContext +type QueryerContext interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) + QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row +} + +// PreparerContext is an interface used by PreparexContext. +type PreparerContext interface { + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) +} + +// ExecerContext is an interface used by MustExecContext and LoadFileContext +type ExecerContext interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// ExtContext is a union interface which can bind, query, and exec, with Context +// used by NamedQueryContext and NamedExecContext. +type ExtContext interface { + binder + QueryerContext + ExecerContext +} + +// SelectContext executes a query using the provided Queryer, and StructScans +// each row into dest, which must be a slice. If the slice elements are +// scannable, then the result set must have only one column. Otherwise, +// StructScan is used. The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + rows, err := q.QueryxContext(ctx, query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// PreparexContext prepares a statement. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. 
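A hedged sketch of the context-aware variants, bounding a query with a deadline; the table and column names are assumed for illustration:

```go
package main

import (
	"context"
	"time"

	"github.com/jmoiron/sqlx"
)

type Person struct {
	FirstName string `db:"first_name"`
}

func listNames(db *sqlx.DB) ([]Person, error) {
	// If the deadline expires mid-query, the driver cancels the work
	// and SelectContext returns the context error.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	var people []Person
	err := sqlx.SelectContext(ctx, db, &people, `SELECT first_name FROM person`)
	return people, err
}
```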
+func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { + s, err := p.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// GetContext does a QueryRow using the provided Queryer, and scans the +// resulting row to dest. If dest is scannable, the result must only have one +// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like +// row.Scan would. Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowxContext(ctx, query, args...) + return r.scanAny(dest, false) +} + +// LoadFileContext exec's every statement in a file (as a single call to Exec). +// LoadFileContext may return a nil *sql.Result if errors are encountered +// locating or reading the file at path. LoadFile reads the entire file into +// memory, so it is not suitable for loading large data dumps, but can be useful +// for initializing schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. +func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.ExecContext(ctx, string(contents)) + return &res, err +} + +// MustExecContext execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { + res, err := e.ExecContext(ctx, query, args...) + if err != nil { + panic(err) + } + return res +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, db, query) +} + +// NamedQueryContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { + return NamedQueryContext(ctx, db, query, arg) +} + +// NamedExecContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, db, query, arg) +} + +// SelectContext using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, db, dest, query, args...) +} + +// GetContext using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. 
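A sketch of preparing a named statement once and reusing it, under the same hypothetical schema; `:last` binds from the map key:

```go
package main

import (
	"context"

	"github.com/jmoiron/sqlx"
)

type Person struct {
	FirstName string `db:"first_name"`
	LastName  string `db:"last_name"`
}

func findByLastName(ctx context.Context, db *sqlx.DB, last string) ([]Person, error) {
	// PrepareNamedContext compiles the named query once; the statement
	// can then run repeatedly with different bind maps or structs.
	stmt, err := db.PrepareNamedContext(ctx,
		`SELECT first_name, last_name FROM person WHERE last_name = :last`)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	var people []Person
	err = stmt.SelectContext(ctx, &people, map[string]interface{}{"last": last})
	return people, err
}
```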
+func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, db, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, db, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.DB.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// MustBeginContext is canceled. +func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { + tx, err := db.BeginTxx(ctx, opts) + if err != nil { + panic(err) + } + return tx +} + +// MustExecContext (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, db, query, args...) +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. +func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := db.DB.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Connx returns an *sqlx.Conn instead of an *sql.Conn. +func (db *DB) Connx(ctx context.Context) (*Conn, error) { + conn, err := db.DB.Conn(ctx) + if err != nil { + return nil, err + } + + return &Conn{Conn: conn, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, nil +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. 
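A minimal sketch of `BeginTxx` with explicit `sql.TxOptions`; per the comment above, cancelling the context rolls the transaction back (the query is illustrative):

```go
package main

import (
	"context"
	"database/sql"

	"github.com/jmoiron/sqlx"
)

func countPeople(ctx context.Context, db *sqlx.DB) (int, error) {
	// A read-only transaction tied to ctx.
	tx, err := db.BeginTxx(ctx, &sql.TxOptions{ReadOnly: true})
	if err != nil {
		return 0, err
	}
	defer tx.Rollback() // returns ErrTxDone after a successful Commit; ignored here

	var n int
	if err := tx.GetContext(ctx, &n, `SELECT COUNT(*) FROM person`); err != nil {
		return 0, err
	}
	return n, tx.Commit()
}
```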
+func (c *Conn) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := c.Conn.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: c.driverName, unsafe: c.unsafe, Mapper: c.Mapper}, err +} + +// SelectContext using this Conn. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, c, dest, query, args...) +} + +// GetContext using this Conn. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (c *Conn) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, c, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (c *Conn) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, c, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := c.Conn.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: c.unsafe, Mapper: c.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := c.Conn.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: c.unsafe, Mapper: c.Mapper} +} + +// Rebind a query within a Conn's bindvar type. +func (c *Conn) Rebind(query string) string { + return Rebind(BindType(c.driverName), query) +} + +// StmtxContext returns a version of the prepared statement which runs within a +// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} +} + +// NamedStmtContext returns a version of the prepared statement which runs +// within a transaction. +func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.StmtxContext(ctx, stmt.Stmt), + } +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, tx, query) +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, tx, query) +} + +// MustExecContext runs MustExecContext within a transaction. +// Any placeholder parameters are replaced with supplied args. 
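A sketch of `Connx`, which pins work to a single session; that matters for session state such as `SET` commands, temp tables, or advisory locks. The statements shown are illustrative:

```go
package main

import (
	"context"

	"github.com/jmoiron/sqlx"
)

func usePinnedConn(ctx context.Context, db *sqlx.DB) error {
	conn, err := db.Connx(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	// Both statements run on the same underlying session, so the
	// SET below actually applies to the query that follows.
	if _, err := conn.ExecContext(ctx, `SET statement_timeout = 1000`); err != nil {
		return err
	}
	var now string
	return conn.GetContext(ctx, &now, `SELECT now()::text`)
}
```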
+func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, tx, query, args...) +} + +// QueryxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// SelectContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, tx, dest, query, args...) +} + +// GetContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, tx, dest, query, args...) +} + +// QueryRowxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.Tx.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// NamedExecContext using this Tx. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, tx, query, arg) +} + +// SelectContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return SelectContext(ctx, &qStmt{s}, dest, "", args...) +} + +// GetContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return GetContext(ctx, &qStmt{s}, dest, "", args...) +} + +// MustExecContext (panic) using this statement. Note that the query portion of +// the error output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { + return MustExecContext(ctx, &qStmt{s}, "", args...) +} + +// QueryRowxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowxContext(ctx, "", args...) +} + +// QueryxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.QueryxContext(ctx, "", args...) +} + +func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.QueryContext(ctx, args...) +} + +func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.QueryContext(ctx, args...) 
+ if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := q.Stmt.QueryContext(ctx, args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.ExecContext(ctx, args...) +} diff --git a/vendor/github.com/lann/builder/.gitignore b/vendor/github.com/lann/builder/.gitignore new file mode 100644 index 000000000..f54eb28f6 --- /dev/null +++ b/vendor/github.com/lann/builder/.gitignore @@ -0,0 +1,2 @@ +*~ +\#*# diff --git a/vendor/github.com/lann/builder/.travis.yml b/vendor/github.com/lann/builder/.travis.yml new file mode 100644 index 000000000..c8860f69b --- /dev/null +++ b/vendor/github.com/lann/builder/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - '1.8' + - '1.9' + - '1.10' + - tip diff --git a/vendor/github.com/lann/builder/LICENSE b/vendor/github.com/lann/builder/LICENSE new file mode 100644 index 000000000..a109e8051 --- /dev/null +++ b/vendor/github.com/lann/builder/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014-2015 Lann Martin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lann/builder/README.md b/vendor/github.com/lann/builder/README.md new file mode 100644 index 000000000..3b18550bb --- /dev/null +++ b/vendor/github.com/lann/builder/README.md @@ -0,0 +1,68 @@ +# Builder - fluent immutable builders for Go + +[![GoDoc](https://godoc.org/github.com/lann/builder?status.png)](https://godoc.org/github.com/lann/builder) +[![Build Status](https://travis-ci.org/lann/builder.png?branch=master)](https://travis-ci.org/lann/builder) + +Builder was originally written for +[Squirrel](https://github.com/lann/squirrel), a fluent SQL generator. It +is probably the best example of Builder in action. + +Builder helps you write **fluent** DSLs for your libraries with method chaining: + +```go +resp := ReqBuilder. + Url("http://golang.org"). + Header("User-Agent", "Builder"). 
+ Get() +``` + +Builder uses **immutable** persistent data structures +([these](https://github.com/mndrix/ps), specifically) +so that each step in your method chain can be reused: + +```go +build := WordBuilder.AddLetters("Build") +builder := build.AddLetters("er") +building := build.AddLetters("ing") +``` + +Builder makes it easy to **build** structs using the **builder** pattern +(*surprise!*): + +```go +import "github.com/lann/builder" + +type Muppet struct { + Name string + Friends []string +} + +type muppetBuilder builder.Builder + +func (b muppetBuilder) Name(name string) muppetBuilder { + return builder.Set(b, "Name", name).(muppetBuilder) +} + +func (b muppetBuilder) AddFriend(friend string) muppetBuilder { + return builder.Append(b, "Friends", friend).(muppetBuilder) +} + +func (b muppetBuilder) Build() Muppet { + return builder.GetStruct(b).(Muppet) +} + +var MuppetBuilder = builder.Register(muppetBuilder{}, Muppet{}).(muppetBuilder) +``` +```go +MuppetBuilder. + Name("Beaker"). + AddFriend("Dr. Honeydew"). + Build() + +=> Muppet{Name:"Beaker", Friends:[]string{"Dr. Honeydew"}} +``` + +## License + +Builder is released under the +[MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/lann/builder/builder.go b/vendor/github.com/lann/builder/builder.go new file mode 100644 index 000000000..ff621406a --- /dev/null +++ b/vendor/github.com/lann/builder/builder.go @@ -0,0 +1,225 @@ +// Package builder provides a method for writing fluent immutable builders. +package builder + +import ( + "github.com/lann/ps" + "go/ast" + "reflect" +) + +// Builder stores a set of named values. +// +// New types can be declared with underlying type Builder and used with the +// functions in this package. See example. +// +// Instances of Builder should be treated as immutable. It is up to the +// implementor to ensure mutable values set on a Builder are not mutated while +// the Builder is in use. +type Builder struct { + builderMap ps.Map +} + +var ( + EmptyBuilder = Builder{ps.NewMap()} + emptyBuilderValue = reflect.ValueOf(EmptyBuilder) +) + +func getBuilderMap(builder interface{}) ps.Map { + b := convert(builder, Builder{}).(Builder) + + if b.builderMap == nil { + return ps.NewMap() + } + + return b.builderMap +} + +// Set returns a copy of the given builder with a new value set for the given +// name. +// +// Set (and all other functions taking a builder in this package) will panic if +// the given builder's underlying type is not Builder. +func Set(builder interface{}, name string, v interface{}) interface{} { + b := Builder{getBuilderMap(builder).Set(name, v)} + return convert(b, builder) +} + +// Delete returns a copy of the given builder with the given named value unset. +func Delete(builder interface{}, name string) interface{} { + b := Builder{getBuilderMap(builder).Delete(name)} + return convert(b, builder) +} + +// Append returns a copy of the given builder with new value(s) appended to the +// named list. If the value was previously unset or set with Set (even to a e.g. +// slice values), the new value(s) will be appended to an empty list. +func Append(builder interface{}, name string, vs ...interface{}) interface{} { + return Extend(builder, name, vs) +} + +// Extend behaves like Append, except it takes a single slice or array value +// which will be concatenated to the named list. +// +// Unlike a variadic call to Append - which requires a []interface{} value - +// Extend accepts slices or arrays of any type. 
+// +// Extend will panic if the given value is not a slice, array, or nil. +func Extend(builder interface{}, name string, vs interface{}) interface{} { + if vs == nil { + return builder + } + + maybeList, ok := getBuilderMap(builder).Lookup(name) + + var list ps.List + if ok { + list, ok = maybeList.(ps.List) + } + if !ok { + list = ps.NewList() + } + + forEach(vs, func(v interface{}) { + list = list.Cons(v) + }) + + return Set(builder, name, list) +} + +func listToSlice(list ps.List, arrayType reflect.Type) reflect.Value { + size := list.Size() + slice := reflect.MakeSlice(arrayType, size, size) + for i := size - 1; i >= 0; i-- { + val := reflect.ValueOf(list.Head()) + slice.Index(i).Set(val) + list = list.Tail() + } + return slice +} + +var anyArrayType = reflect.TypeOf([]interface{}{}) + +// Get retrieves a single named value from the given builder. +// If the value has not been set, it returns (nil, false). Otherwise, it will +// return (value, true). +// +// If the named value was last set with Append or Extend, the returned value +// will be a slice. If the given Builder has been registered with Register or +// RegisterType and the given name is an exported field of the registered +// struct, the returned slice will have the same type as that field. Otherwise +// the slice will have type []interface{}. It will panic if the given name is a +// registered struct's exported field and the value set on the Builder is not +// assignable to the field. +func Get(builder interface{}, name string) (interface{}, bool) { + val, ok := getBuilderMap(builder).Lookup(name) + if !ok { + return nil, false + } + + list, isList := val.(ps.List) + if isList { + arrayType := anyArrayType + + if ast.IsExported(name) { + structType := getBuilderStructType(reflect.TypeOf(builder)) + if structType != nil { + field, ok := (*structType).FieldByName(name) + if ok { + arrayType = field.Type + } + } + } + + val = listToSlice(list, arrayType).Interface() + } + + return val, true +} + +// GetMap returns a map[string]interface{} of the values set in the given +// builder. +// +// See notes on Get regarding returned slices. +func GetMap(builder interface{}) map[string]interface{} { + m := getBuilderMap(builder) + structType := getBuilderStructType(reflect.TypeOf(builder)) + + ret := make(map[string]interface{}, m.Size()) + + m.ForEach(func(name string, val ps.Any) { + list, isList := val.(ps.List) + if isList { + arrayType := anyArrayType + + if structType != nil { + field, ok := (*structType).FieldByName(name) + if ok { + arrayType = field.Type + } + } + + val = listToSlice(list, arrayType).Interface() + } + + ret[name] = val + }) + + return ret +} + +// GetStruct builds a new struct from the given registered builder. +// It will return nil if the given builder's type has not been registered with +// Register or RegisterValue. +// +// All values set on the builder with names that start with an uppercase letter +// (i.e. which would be exported if they were identifiers) are assigned to the +// corresponding exported fields of the struct. +// +// GetStruct will panic if any of these "exported" values are not assignable to +// their corresponding struct fields. +func GetStruct(builder interface{}) interface{} { + structVal := newBuilderStruct(reflect.TypeOf(builder)) + if structVal == nil { + return nil + } + return scanStruct(builder, structVal) +} + +// GetStructLike builds a new struct from the given builder with the same type +// as the given struct. 
+// +// All values set on the builder with names that start with an uppercase letter +// (i.e. which would be exported if they were identifiers) are assigned to the +// corresponding exported fields of the struct. +// +// ScanStruct will panic if any of these "exported" values are not assignable to +// their corresponding struct fields. +func GetStructLike(builder interface{}, strct interface{}) interface{} { + structVal := reflect.New(reflect.TypeOf(strct)).Elem() + return scanStruct(builder, &structVal) +} + +func scanStruct(builder interface{}, structVal *reflect.Value) interface{} { + getBuilderMap(builder).ForEach(func(name string, val ps.Any) { + if ast.IsExported(name) { + field := structVal.FieldByName(name) + + var value reflect.Value + switch v := val.(type) { + case nil: + switch field.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + value = reflect.Zero(field.Type()) + } + // nil is not valid for this Type; Set will panic + case ps.List: + value = listToSlice(v, field.Type()) + default: + value = reflect.ValueOf(val) + } + field.Set(value) + } + }) + + return structVal.Interface() +} diff --git a/vendor/github.com/lann/builder/reflect.go b/vendor/github.com/lann/builder/reflect.go new file mode 100644 index 000000000..3b236e791 --- /dev/null +++ b/vendor/github.com/lann/builder/reflect.go @@ -0,0 +1,24 @@ +package builder + +import "reflect" + +func convert(from interface{}, to interface{}) interface{} { + return reflect. + ValueOf(from). + Convert(reflect.TypeOf(to)). + Interface() +} + +func forEach(s interface{}, f func(interface{})) { + val := reflect.ValueOf(s) + + kind := val.Kind() + if kind != reflect.Slice && kind != reflect.Array { + panic(&reflect.ValueError{Method: "builder.forEach", Kind: kind}) + } + + l := val.Len() + for i := 0; i < l; i++ { + f(val.Index(i).Interface()) + } +} diff --git a/vendor/github.com/lann/builder/registry.go b/vendor/github.com/lann/builder/registry.go new file mode 100644 index 000000000..612845418 --- /dev/null +++ b/vendor/github.com/lann/builder/registry.go @@ -0,0 +1,59 @@ +package builder + +import ( + "reflect" + "sync" +) + +var ( + registry = make(map[reflect.Type]reflect.Type) + registryMux sync.RWMutex +) + +// RegisterType maps the given builderType to a structType. +// This mapping affects the type of slices returned by Get and is required for +// GetStruct to work. +// +// Returns a Value containing an empty instance of the registered builderType. +// +// RegisterType will panic if builderType's underlying type is not Builder or +// if structType's Kind is not Struct. +func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Value { + registryMux.Lock() + defer registryMux.Unlock() + structType.NumField() // Panic if structType is not a struct + registry[builderType] = structType + emptyValue := emptyBuilderValue.Convert(builderType) + return &emptyValue +} + +// Register wraps RegisterType, taking instances instead of Types. +// +// Returns an empty instance of the registered builder type which can be used +// as the initial value for builder expressions. See example. 
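Beyond the `GetStruct` flow shown in the README, values can also be read back without registering a struct at all; a small sketch using `Get` and `GetMap` (the builder type and field names are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/lann/builder"
)

type confBuilder builder.Builder

func main() {
	// Each Set returns a new immutable builder; b1 is unaffected by b2.
	b1 := builder.Set(confBuilder{}, "Host", "localhost").(confBuilder)
	b2 := builder.Set(b1, "Port", 5432).(confBuilder)

	host, ok := builder.Get(b2, "Host")
	fmt.Println(host, ok) // localhost true

	fmt.Println(builder.GetMap(b2)) // map[Host:localhost Port:5432]
}
```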
+func Register(builderProto, structProto interface{}) interface{} { + empty := RegisterType( + reflect.TypeOf(builderProto), + reflect.TypeOf(structProto), + ).Interface() + return empty +} + +func getBuilderStructType(builderType reflect.Type) *reflect.Type { + registryMux.RLock() + defer registryMux.RUnlock() + structType, ok := registry[builderType] + if !ok { + return nil + } + return &structType +} + +func newBuilderStruct(builderType reflect.Type) *reflect.Value { + structType := getBuilderStructType(builderType) + if structType == nil { + return nil + } + newStruct := reflect.New(*structType).Elem() + return &newStruct +} diff --git a/vendor/github.com/lann/ps/LICENSE b/vendor/github.com/lann/ps/LICENSE new file mode 100644 index 000000000..69f9ae8d5 --- /dev/null +++ b/vendor/github.com/lann/ps/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2013 Michael Hendricks + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lann/ps/README.md b/vendor/github.com/lann/ps/README.md new file mode 100644 index 000000000..325a5b354 --- /dev/null +++ b/vendor/github.com/lann/ps/README.md @@ -0,0 +1,10 @@ +**This is a stable fork of https://github.com/mndrix/ps; it will not introduce breaking changes.** + +ps +== + +Persistent data structures for Go. See the [full package documentation](http://godoc.org/github.com/lann/ps) + +Install with + + go get github.com/lann/ps diff --git a/vendor/github.com/lann/ps/list.go b/vendor/github.com/lann/ps/list.go new file mode 100644 index 000000000..48a1cebac --- /dev/null +++ b/vendor/github.com/lann/ps/list.go @@ -0,0 +1,93 @@ +package ps + +// List is a persistent list of possibly heterogenous values. +type List interface { + // IsNil returns true if the list is empty + IsNil() bool + + // Cons returns a new list with val as the head + Cons(val Any) List + + // Head returns the first element of the list; + // panics if the list is empty + Head() Any + + // Tail returns a list with all elements except the head; + // panics if the list is empty + Tail() List + + // Size returns the list's length. This takes O(1) time. + Size() int + + // ForEach executes a callback for each value in the list. + ForEach(f func(Any)) + + // Reverse returns a list whose elements are in the opposite order as + // the original list. + Reverse() List +} + +// Immutable (i.e. 
persistent) list +type list struct { + depth int // the number of nodes after, and including, this one + value Any + tail *list +} + +// An empty list shared by all lists +var nilList = &list{} + +// NewList returns a new, empty list. The result is a singly linked +// list implementation. All lists share an empty tail, so allocating +// empty lists is efficient in time and memory. +func NewList() List { + return nilList +} + +func (self *list) IsNil() bool { + return self == nilList +} + +func (self *list) Size() int { + return self.depth +} + +func (tail *list) Cons(val Any) List { + var xs list + xs.depth = tail.depth + 1 + xs.value = val + xs.tail = tail + return &xs +} + +func (self *list) Head() Any { + if self.IsNil() { + panic("Called Head() on an empty list") + } + + return self.value +} + +func (self *list) Tail() List { + if self.IsNil() { + panic("Called Tail() on an empty list") + } + + return self.tail +} + +// ForEach executes a callback for each value in the list +func (self *list) ForEach(f func(Any)) { + if self.IsNil() { + return + } + f(self.Head()) + self.Tail().ForEach(f) +} + +// Reverse returns a list with elements in opposite order as this list +func (self *list) Reverse() List { + reversed := NewList() + self.ForEach(func(v Any) { reversed = reversed.Cons(v) }) + return reversed +} diff --git a/vendor/github.com/lann/ps/map.go b/vendor/github.com/lann/ps/map.go new file mode 100644 index 000000000..192daec8f --- /dev/null +++ b/vendor/github.com/lann/ps/map.go @@ -0,0 +1,311 @@ +// Fully persistent data structures. A persistent data structure is a data +// structure that always preserves the previous version of itself when +// it is modified. Such data structures are effectively immutable, +// as their operations do not update the structure in-place, but instead +// always yield a new structure. +// +// Persistent +// data structures typically share structure among themselves. This allows +// operations to avoid copying the entire data structure. +package ps + +import ( + "bytes" + "fmt" +) + +// Any is a shorthand for Go's verbose interface{} type. +type Any interface{} + +// A Map associates unique keys (type string) with values (type Any). +type Map interface { + // IsNil returns true if the Map is empty + IsNil() bool + + // Set returns a new map in which key and value are associated. + // If the key didn't exist before, it's created; otherwise, the + // associated value is changed. + // This operation is O(log N) in the number of keys. + Set(key string, value Any) Map + + // Delete returns a new map with the association for key, if any, removed. + // This operation is O(log N) in the number of keys. + Delete(key string) Map + + // Lookup returns the value associated with a key, if any. If the key + // exists, the second return value is true; otherwise, false. + // This operation is O(log N) in the number of keys. + Lookup(key string) (Any, bool) + + // Size returns the number of key value pairs in the map. + // This takes O(1) time. + Size() int + + // ForEach executes a callback on each key value pair in the map. + ForEach(f func(key string, val Any)) + + // Keys returns a slice with all keys in this map. + // This operation is O(N) in the number of keys. + Keys() []string + + String() string +} + +// Immutable (i.e. 
persistent) associative array +const childCount = 8 +const shiftSize = 3 + +type tree struct { + count int + hash uint64 // hash of the key (used for tree balancing) + key string + value Any + children [childCount]*tree +} + +var nilMap = &tree{} + +// Recursively set nilMap's subtrees to point at itself. +// This eliminates all nil pointers in the map structure. +// All map nodes are created by cloning this structure so +// they avoid the problem too. +func init() { + for i := range nilMap.children { + nilMap.children[i] = nilMap + } +} + +// NewMap allocates a new, persistent map from strings to values of +// any type. +// This is currently implemented as a path-copying binary tree. +func NewMap() Map { + return nilMap +} + +func (self *tree) IsNil() bool { + return self == nilMap +} + +// clone returns an exact duplicate of a tree node +func (self *tree) clone() *tree { + var m tree + m = *self + return &m +} + +// constants for FNV-1a hash algorithm +const ( + offset64 uint64 = 14695981039346656037 + prime64 uint64 = 1099511628211 +) + +// hashKey returns a hash code for a given string +func hashKey(key string) uint64 { + hash := offset64 + for _, codepoint := range key { + hash ^= uint64(codepoint) + hash *= prime64 + } + return hash +} + +// Set returns a new map similar to this one but with key and value +// associated. If the key didn't exist, it's created; otherwise, the +// associated value is changed. +func (self *tree) Set(key string, value Any) Map { + hash := hashKey(key) + return setLowLevel(self, hash, hash, key, value) +} + +func setLowLevel(self *tree, partialHash, hash uint64, key string, value Any) *tree { + if self.IsNil() { // an empty tree is easy + m := self.clone() + m.count = 1 + m.hash = hash + m.key = key + m.value = value + return m + } + + if hash != self.hash { + m := self.clone() + i := partialHash % childCount + m.children[i] = setLowLevel(self.children[i], partialHash>>shiftSize, hash, key, value) + recalculateCount(m) + return m + } + + // replacing a key's previous value + m := self.clone() + m.value = value + return m +} + +// modifies a map by recalculating its key count based on the counts +// of its subtrees +func recalculateCount(m *tree) { + count := 0 + for _, t := range m.children { + count += t.Size() + } + m.count = count + 1 // add one to count ourself +} + +func (m *tree) Delete(key string) Map { + hash := hashKey(key) + newMap, _ := deleteLowLevel(m, hash, hash) + return newMap +} + +func deleteLowLevel(self *tree, partialHash, hash uint64) (*tree, bool) { + // empty trees are easy + if self.IsNil() { + return self, false + } + + if hash != self.hash { + i := partialHash % childCount + child, found := deleteLowLevel(self.children[i], partialHash>>shiftSize, hash) + if !found { + return self, false + } + newMap := self.clone() + newMap.children[i] = child + recalculateCount(newMap) + return newMap, true // ? 
this wasn't in the original code + } + + // we must delete our own node + if self.isLeaf() { // we have no children + return nilMap, true + } + /* + if self.subtreeCount() == 1 { // only one subtree + for _, t := range self.children { + if t != nilMap { + return t, true + } + } + panic("Tree with 1 subtree actually had no subtrees") + } + */ + + // find a node to replace us + i := -1 + size := -1 + for j, t := range self.children { + if t.Size() > size { + i = j + size = t.Size() + } + } + + // make chosen leaf smaller + replacement, child := self.children[i].deleteLeftmost() + newMap := replacement.clone() + for j := range self.children { + if j == i { + newMap.children[j] = child + } else { + newMap.children[j] = self.children[j] + } + } + recalculateCount(newMap) + return newMap, true +} + +// delete the leftmost node in a tree returning the node that +// was deleted and the tree left over after its deletion +func (m *tree) deleteLeftmost() (*tree, *tree) { + if m.isLeaf() { + return m, nilMap + } + + for i, t := range m.children { + if t != nilMap { + deleted, child := t.deleteLeftmost() + newMap := m.clone() + newMap.children[i] = child + recalculateCount(newMap) + return deleted, newMap + } + } + panic("Tree isn't a leaf but also had no children. How does that happen?") +} + +// isLeaf returns true if this is a leaf node +func (m *tree) isLeaf() bool { + return m.Size() == 1 +} + +// returns the number of child subtrees we have +func (m *tree) subtreeCount() int { + count := 0 + for _, t := range m.children { + if t != nilMap { + count++ + } + } + return count +} + +func (m *tree) Lookup(key string) (Any, bool) { + hash := hashKey(key) + return lookupLowLevel(m, hash, hash) +} + +func lookupLowLevel(self *tree, partialHash, hash uint64) (Any, bool) { + if self.IsNil() { // an empty tree is easy + return nil, false + } + + if hash != self.hash { + i := partialHash % childCount + return lookupLowLevel(self.children[i], partialHash>>shiftSize, hash) + } + + // we found it + return self.value, true +} + +func (m *tree) Size() int { + return m.count +} + +func (m *tree) ForEach(f func(key string, val Any)) { + if m.IsNil() { + return + } + + // ourself + f(m.key, m.value) + + // children + for _, t := range m.children { + if t != nilMap { + t.ForEach(f) + } + } +} + +func (m *tree) Keys() []string { + keys := make([]string, m.Size()) + i := 0 + m.ForEach(func(k string, v Any) { + keys[i] = k + i++ + }) + return keys +} + +// make it easier to display maps for debugging +func (m *tree) String() string { + keys := m.Keys() + buf := bytes.NewBufferString("{") + for _, key := range keys { + val, _ := m.Lookup(key) + fmt.Fprintf(buf, "%s: %s, ", key, val) + } + fmt.Fprintf(buf, "}\n") + return buf.String() +} diff --git a/vendor/github.com/lann/ps/profile.sh b/vendor/github.com/lann/ps/profile.sh new file mode 100644 index 000000000..f03df05a5 --- /dev/null +++ b/vendor/github.com/lann/ps/profile.sh @@ -0,0 +1,3 @@ +#!/bin/sh +go test -c +./ps.test -test.run=none -test.bench=$2 -test.$1profile=$1.profile diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore new file mode 100644 index 000000000..3243952a4 --- /dev/null +++ b/vendor/github.com/lib/pq/.gitignore @@ -0,0 +1,6 @@ +.db +*.test +*~ +*.swp +.idea +.vscode \ No newline at end of file diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md new file mode 100644 index 000000000..5773904a3 --- /dev/null +++ b/vendor/github.com/lib/pq/LICENSE.md @@ -0,0 +1,8 @@ +Copyright (c) 
2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md new file mode 100644 index 000000000..126ee5d35 --- /dev/null +++ b/vendor/github.com/lib/pq/README.md @@ -0,0 +1,36 @@ +# pq - A pure Go postgres driver for Go's database/sql package + +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) + +## Install + + go get github.com/lib/pq + +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. `bytea`) +* Package for `hstore` support +* COPY FROM support +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support +* Notifications: `LISTEN`/`NOTIFY` +* pgpass support +* GSS (Kerberos) auth + +## Tests + +`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. + +## Status + +This package is currently in maintenance mode, which means: +1. It generally does not accept new features. +2. It does accept bug fixes and version compatability changes provided by the community. +3. Maintainers usually do not resolve reported issues. +4. Community members are encouraged to help each other with reported issues. + +For users that require new features or reliable resolution of reported bugs, we recommend using [pgx](https://github.com/jackc/pgx) which is under active development. diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md new file mode 100644 index 000000000..f05021115 --- /dev/null +++ b/vendor/github.com/lib/pq/TESTS.md @@ -0,0 +1,33 @@ +# Tests + +## Running Tests + +`go test` is used for testing. A running PostgreSQL +server is required, with the ability to log in. The +database to connect to test with is "pqgotest," on +"localhost" but these can be overridden using [environment +variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). + +Example: + + PGHOST=/run/postgresql go test + +## Benchmarks + +A benchmark suite can be run as part of the tests: + + go test -bench . 
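For completeness, a minimal `database/sql` sketch against the vendored driver; the connection parameters are illustrative, and pq also honors the usual libpq environment variables:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // side-effect import registers the "postgres" driver
)

func main() {
	db, err := sql.Open("postgres", "host=localhost user=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var version string
	if err := db.QueryRow(`SELECT version()`).Scan(&version); err != nil {
		log.Fatal(err)
	}
	log.Println(version)
}
```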
+ +## Example setup (Docker) + +Run a postgres container: + +``` +docker run --expose 5432:5432 postgres +``` + +Run tests: + +``` +PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test +``` diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go new file mode 100644 index 000000000..39c8f7e2e --- /dev/null +++ b/vendor/github.com/lib/pq/array.go @@ -0,0 +1,895 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "encoding/hex" + "fmt" + "reflect" + "strconv" + "strings" +) + +var typeByteSlice = reflect.TypeOf([]byte{}) +var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +// Array returns the optimal driver.Valuer and sql.Scanner for an array or +// slice of any dimension. +// +// For example: +// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) +// +// var x []sql.NullInt64 +// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x)) +// +// Scanning multi-dimensional arrays is not supported. Arrays where the lower +// bound is not one (such as `[0:0]={1}') are not supported. +func Array(a interface{}) interface { + driver.Valuer + sql.Scanner +} { + switch a := a.(type) { + case []bool: + return (*BoolArray)(&a) + case []float64: + return (*Float64Array)(&a) + case []float32: + return (*Float32Array)(&a) + case []int64: + return (*Int64Array)(&a) + case []int32: + return (*Int32Array)(&a) + case []string: + return (*StringArray)(&a) + case [][]byte: + return (*ByteaArray)(&a) + + case *[]bool: + return (*BoolArray)(a) + case *[]float64: + return (*Float64Array)(a) + case *[]float32: + return (*Float32Array)(a) + case *[]int64: + return (*Int64Array)(a) + case *[]int32: + return (*Int32Array)(a) + case *[]string: + return (*StringArray)(a) + case *[][]byte: + return (*ByteaArray)(a) + } + + return GenericArray{a} +} + +// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner +// to override the array delimiter used by GenericArray. +type ArrayDelimiter interface { + // ArrayDelimiter returns the delimiter character(s) for this element's type. + ArrayDelimiter() string +} + +// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. +type BoolArray []bool + +// Scan implements the sql.Scanner interface. +func (a *BoolArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to BoolArray", src) +} + +func (a *BoolArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "BoolArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(BoolArray, len(elems)) + for i, v := range elems { + if len(v) != 1 { + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + switch v[0] { + case 't': + b[i] = true + case 'f': + b[i] = false + default: + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. 
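+		// For example, []bool{true, false, true} (n=3) encodes to the
+		// 7-byte literal {t,f,t}: 1 + 2*3 = 7.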
+ b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. +func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// Float32Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float32Array []float32 + +// Scan implements the sql.Scanner interface. 
+func (a *Float32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float32Array", src) +} + +func (a *Float32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float32Array, len(elems)) + for i, v := range elems { + var x float64 + if x, err = strconv.ParseFloat(string(v), 32); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = float32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. +type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSQLScanner) { + // dest is always addressable because it is an element of a slice. + assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. 
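+//
+// A minimal sketch scanning into a slice of sql.NullString via Array, which
+// falls back to GenericArray for element types it does not special-case
+// (db is assumed to be an open *sql.DB):
+//
+//	var vals []sql.NullString
+//	err := db.QueryRow(`SELECT ARRAY['x', NULL]::text[]`).Scan(Array(&vals))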
+func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. +func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. 
+func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// Int32Array represents a one-dimensional array of the PostgreSQL integer types. +type Int32Array []int32 + +// Scan implements the sql.Scanner interface. +func (a *Int32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int32Array", src) +} + +func (a *Int32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int32Array, len(elems)) + for i, v := range elems { + x, err := strconv.ParseInt(string(v), 10, 32) + if err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = int32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, int64(a[0]), 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, int64(a[i]), 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. 
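+//
+// A minimal usage sketch (db is assumed to be an open *sql.DB):
+//
+//	var tags StringArray
+//	err := db.QueryRow(`SELECT ARRAY['a', 'b']::text[]`).Scan(&tags)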
+func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. +// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) 
+ } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go new file mode 100644 index 000000000..4b0a0a8f7 --- /dev/null +++ b/vendor/github.com/lib/pq/buf.go @@ -0,0 +1,91 @@ +package pq + +import ( + "bytes" + "encoding/binary" + + "github.com/lib/pq/oid" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +// N.B: this is actually an unsigned 16-bit integer, unlike int32 +func (b *readBuf) int16() (n int) { + 
n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +func (b *readBuf) string() string { + i := bytes.IndexByte(*b, 0) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf struct { + buf []byte + pos int +} + +func (b *writeBuf) int32(n int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) string(s string) { + b.buf = append(append(b.buf, s...), '\000') +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 000000000..da4ff9de6 --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,2112 @@ +package pq + +import ( + "bufio" + "bytes" + "context" + "crypto/md5" + "crypto/sha256" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "github.com/lib/pq/oid" + "github.com/lib/pq/scram" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyUnknownOwnership = errors.New("pq: Could not get owner information for private key, may not be properly protected") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key has world access. Permissions should be u=rw,g=r (0640) if owned by root, or u=rw (0600), or less") + + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") +) + +// Compile time validation that our types implement the expected interfaces +var ( + _ driver.Driver = Driver{} +) + +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. 
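+//
+// Typical use goes through database/sql; the connection parameters shown
+// here are illustrative:
+//
+//	db, err := sql.Open("postgres", "host=localhost user=pqgotest sslmode=disable")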
+func (d Driver) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &Driver{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. +type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +// DialerContext is the context-aware dialer interface. +type DialerContext interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +type defaultDialer struct { + d net.Dialer +} + +func (d defaultDialer) Dial(network, address string) (net.Conn, error) { + return d.d.Dial(network, address) +} +func (d defaultDialer) DialTimeout( + network, address string, timeout time.Duration, +) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return d.DialContext(ctx, network, address) +} +func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return d.d.DialContext(ctx, network, address) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. + processID int + secretKey int + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If an error is set, this connection is bad and all public-facing + // functions should return the appropriate error by calling get() + // (ErrBadConn) or getForNext(). + err syncErr + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool + + // If not nil, notices will be synchronously sent here + noticeHandler func(*Error) + + // If not nil, notifications will be synchronously sent here + notificationHandler func(*Notification) + + // GSSAPI context + gss GSS +} + +type syncErr struct { + err error + sync.Mutex +} + +// Return ErrBadConn if connection is bad. +func (e *syncErr) get() error { + e.Lock() + defer e.Unlock() + if e.err != nil { + return driver.ErrBadConn + } + return nil +} + +// Return the error set on the connection. Currently only used by rows.Next. 
+func (e *syncErr) getForNext() error { + e.Lock() + defer e.Unlock() + return e.err +} + +// Set error, only if it isn't set yet. +func (e *syncErr) set(err error) { + if err == nil { + panic("attempt to set nil err") + } + e.Lock() + defer e.Unlock() + if e.err == nil { + e.err = err + } +} + +// Handle driver-side settings in parsed connection string. +func (cn *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value, ok := o[key]; ok { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) + if err != nil { + return err + } + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + // From: https://github.com/tg/pgpass/blob/master/reader.go + for scanner.Scan() { + if scanText(scanner.Text(), o) { + break + } + } +} + +// GetFields is a helper function for scanText. +func getFields(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) +} + +// ScanText assists HandlePgpass in it's objective. +func scanText(line string, o values) bool { + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + if len(line) == 0 || line[0] == '#' { + return false + } + split := getFields(line) + if len(split) != 5 { + return false + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return true + } + return false +} + +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b + return &writeBuf{ + buf: cn.scratch[:5], + pos: 1, + } +} + +// Open opens a new connection to the database. dsn is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func Open(dsn string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, dsn) +} + +// DialOpen opens a new connection to the database using a dialer. 
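+//
+// A minimal sketch, where myDialer is a hypothetical value implementing
+// the Dialer interface (for example, one wrapping a net.Dialer):
+//
+//	cn, err := DialOpen(myDialer, "user=pqgotest dbname=pqgotest sslmode=disable")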
+func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { + c, err := NewConnector(dsn) + if err != nil { + return nil, err + } + c.Dialer(d) + return c.open(context.Background()) +} + +func (c *Connector) open(ctx context.Context) (cn *conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. + defer errRecoverNoErrBadConn(&err) + + // Create a new values map (copy). This makes it so maps in different + // connections do not reference the same underlying data structure, so it + // is safe for multiple connections to concurrently write to their opts. + o := make(values) + for k, v := range c.opts { + o[k] = v + } + + cn = &conn{ + opts: o, + dialer: c.dialer, + } + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(ctx, c.dialer, o) + if err != nil { + return nil, err + } + + err = cn.ssl(o) + if err != nil { + if cn.c != nil { + cn.c.Close() + } + return nil, err + } + + // cn.startup panics on error. Make sure we don't leak cn.c. + panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + panicking = false + return cn, err +} + +func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { + network, address := network(o) + + // Zero or not specified means wait indefinitely. + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. + deadline := time.Now().Add(duration) + var conn net.Conn + if dctx, ok := d.(DialerContext); ok { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + conn, err = dctx.DialContext(ctx, network, address) + } else { + conn, err = d.DialTimeout(network, address, duration) + } + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + if dctx, ok := d.(DialerContext); ok { + return dctx.DialContext(ctx, network, address) + } + return d.Dial(network, address) +} + +func network(o values) (string, string) { + host := o["host"] + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o["port"]) +} + +type values map[string]string + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. 
+func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. +// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. + o[string(keyRunes)] = "" + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o[string(keyRunes)] = string(valRunes) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.err.set(driver.ErrBadConn) + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + return cn.begin("") +} + +func (cn *conn) begin(mode string) (_ driver.Tx, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN" + mode) + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.err.set(driver.ErrBadConn) + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.err.set(driver.ErrBadConn) + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) closeTxn() { + if finish := cn.txnFinish; finish != nil { + finish() + } +} + +func (cn *conn) Commit() (err error) { + defer cn.closeTxn() + if err := cn.err.get(); err != nil { + return err + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. 
Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. + if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.err.set(driver.ErrBadConn) + } + return err + } + if commandTag != "COMMIT" { + cn.err.set(driver.ErrBadConn) + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + defer cn.closeTxn() + if err := cn.err.get(); err != nil { + return err + } + defer cn.errRecover(&err) + return cn.rollback() +} + +func (cn *conn) rollback() (err error) { + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.err.set(driver.ErrBadConn) + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + // done + return + case 'E': + err = parseError(r) + case 'I': + res = emptyRows + case 'T', 'D': + // ignore any results + default: + cn.err.set(driver.ErrBadConn) + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' { + res.result, res.tag = cn.parseComplete(r.string()) + if res.colNames != nil { + return + } + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.rowsHeader = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. 
+ default: + cn.err.set(driver.ErrBadConn) + errorf("unknown response for simple query: %q", t) + } + } +} + +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. +func decideColumnFormats( + colTyps []fieldDesc, forceText bool, +) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, t := range colTyps { + switch t.OID { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + fallthrough + case oid.T_uuid: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + // Skip cn.bad return here because we always want to close a connection. + defer cn.errRecover(&err) + + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. 
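+	// 'X' is the Terminate message of the frontend/backend protocol; the
+	// backend closes the connection once it has been received.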
+ return cn.sendSimpleMessage('X') +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + if cn.inCopy { + return nil, errCopyInProgress + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.rowsHeader = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + rowsHeader: st.rowsHeader, + }, nil +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err +} + +type safeRetryError struct { + Err error +} + +func (se *safeRetryError) Error() string { + return se.Err.Error() +} + +func (cn *conn) send(m *writeBuf) { + n, err := cn.c.Write(m.wrap()) + if err != nil { + if n == 0 { + err = &safeRetryError{Err: err} + } + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.err.set(driver.ErrBadConn) + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
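+//
+// Every backend message is framed as a one-byte type tag followed by a
+// 4-byte big-endian length that counts itself but not the tag, which is
+// why 4 is subtracted from the length read below.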
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + switch t { + case 'E': + panic(parseError(r)) + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. +func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err + } + + if upgrade == nil { + // Nothing to do + return nil + } + + w := cn.writeBuf(0) + w.int32(80877103) + if err = cn.sendStartupPacket(w); err != nil { + return err + } + + b := cn.scratch[:1] + _, err = io.ReadFull(cn.c, b) + if err != nil { + return err + } + + if b[0] != 'S' { + return ErrSSLNotSupported + } + + cn.c, err = upgrade(cn.c) + return err +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert", "sslinline", "sslsni": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + case "krbsrvname": + return true + case "krbspn": + return true + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. 
Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". + if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } + + for { + t, r := cn.recv() + switch t { + case 'K': + cn.processBackendKeyData(r) + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o["password"]) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 7: // GSSAPI, startup + if newGss == nil { + errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") + } + cli, err := newGss() + if err != nil { + errorf("kerberos error: %s", err.Error()) + } + + var token []byte + + if spn, ok := o["krbspn"]; ok { + // Use the supplied SPN if provided.. + token, err = cli.GetInitTokenFromSpn(spn) + } else { + // Allow the kerberos service name to be overridden + service := "postgres" + if val, ok := o["krbsrvname"]; ok { + service = val + } + + token, err = cli.GetInitToken(o["host"], service) + } + + if err != nil { + errorf("failed to get Kerberos ticket: %q", err) + } + + w := cn.writeBuf('p') + w.bytes(token) + cn.send(w) + + // Store for GSSAPI continue message + cn.gss = cli + + case 8: // GSSAPI continue + + if cn.gss == nil { + errorf("GSSAPI protocol error") + } + + b := []byte(*r) + + done, tokOut, err := cn.gss.Continue(b) + if err == nil && !done { + w := cn.writeBuf('p') + w.bytes(tokOut) + cn.send(w) + } + + // Errors fall through and read the more detailed message + // from the server.. 
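+	// Authentication request code 10 is AuthenticationSASL. The exchange
+	// below implements SCRAM-SHA-256 and expects the server to answer with
+	// SASLContinue (code 11) and then SASLFinal (code 12).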
+ + case 10: + sc := scram.NewClient(sha256.New, o["user"], o["password"]) + sc.Step(nil) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + scOut := sc.Out() + + w := cn.writeBuf('p') + w.string("SCRAM-SHA-256") + w.int32(len(scOut)) + w.bytes(scOut) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 11 { + errorf("unexpected authentication response: %q", t) + } + + nextStep := r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + scOut = sc.Out() + w = cn.writeBuf('p') + w.bytes(scOut) + cn.send(w) + + t, r = cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 12 { + errorf("unexpected authentication response: %q", t) + } + + nextStep = r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). +var colFmtDataAllText = []byte{0, 0} + +type stmt struct { + cn *conn + name string + rowsHeader + colFmtData []byte + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if err := st.cn.err.get(); err != nil { + return err + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.err.set(driver.ErrBadConn) + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.err.set(driver.ErrBadConn) + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + return st.query(v) +} + +func (st *stmt) query(v []driver.Value) (r *rows, err error) { + if err := st.cn.err.get(); err != nil { + return nil, err + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + rowsHeader: st.rowsHeader, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if err := st.cn.err.get(); err != nil { + return nil, err + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// 
parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. + if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.err.set(driver.ErrBadConn) + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rowsHeader struct { + colNames []string + colTyps []fieldDesc + colFmts []format +} + +type rows struct { + cn *conn + finish func() + rowsHeader + done bool + rb readBuf + result driver.Result + tag string + + next *rowsHeader +} + +func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. 
+			if rs.done {
+				return nil
+			}
+		default:
+			return err
+		}
+	}
+}
+
+func (rs *rows) Columns() []string {
+	return rs.colNames
+}
+
+func (rs *rows) Result() driver.Result {
+	if rs.result == nil {
+		return emptyRows
+	}
+	return rs.result
+}
+
+func (rs *rows) Tag() string {
+	return rs.tag
+}
+
+func (rs *rows) Next(dest []driver.Value) (err error) {
+	if rs.done {
+		return io.EOF
+	}
+
+	conn := rs.cn
+	if err := conn.err.getForNext(); err != nil {
+		return err
+	}
+	defer conn.errRecover(&err)
+
+	for {
+		t := conn.recv1Buf(&rs.rb)
+		switch t {
+		case 'E':
+			err = parseError(&rs.rb)
+		case 'C', 'I':
+			if t == 'C' {
+				rs.result, rs.tag = conn.parseComplete(rs.rb.string())
+			}
+			continue
+		case 'Z':
+			conn.processReadyForQuery(&rs.rb)
+			rs.done = true
+			if err != nil {
+				return err
+			}
+			return io.EOF
+		case 'D':
+			n := rs.rb.int16()
+			if err != nil {
+				conn.err.set(driver.ErrBadConn)
+				errorf("unexpected DataRow after error %s", err)
+			}
+			if n < len(dest) {
+				dest = dest[:n]
+			}
+			for i := range dest {
+				l := rs.rb.int32()
+				if l == -1 {
+					dest[i] = nil
+					continue
+				}
+				dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
+			}
+			return
+		case 'T':
+			next := parsePortalRowDescribe(&rs.rb)
+			rs.next = &next
+			return io.EOF
+		default:
+			errorf("unexpected message after execute: %q", t)
+		}
+	}
+}
+
+func (rs *rows) HasNextResultSet() bool {
+	hasNext := rs.next != nil && !rs.done
+	return hasNext
+}
+
+func (rs *rows) NextResultSet() error {
+	if rs.next == nil {
+		return io.EOF
+	}
+	rs.rowsHeader = *rs.next
+	rs.next = nil
+	return nil
+}
+
+// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
+// used as part of an SQL statement. For example:
+//
+//	tblname := "my_table"
+//	data := "my_data"
+//	quoted := pq.QuoteIdentifier(tblname)
+//	_, err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
+//
+// Any double quotes in name will be escaped. The quoted identifier will be
+// case sensitive when used in a query. If the input string contains a zero
+// byte, the result will be truncated immediately before it.
+func QuoteIdentifier(name string) string {
+	end := strings.IndexRune(name, 0)
+	if end > -1 {
+		name = name[:end]
+	}
+	return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
+}
+
+// BufferQuoteIdentifier satisfies the same purpose as QuoteIdentifier, but is
+// backed by a byte buffer.
+func BufferQuoteIdentifier(name string, buffer *bytes.Buffer) {
+	end := strings.IndexRune(name, 0)
+	if end > -1 {
+		name = name[:end]
+	}
+	buffer.WriteRune('"')
+	buffer.WriteString(strings.Replace(name, `"`, `""`, -1))
+	buffer.WriteRune('"')
+}
+
+// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal
+// to DDL and other statements that do not accept parameters) to be used as part
+// of an SQL statement. For example:
+//
+//	exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z")
+//	_, err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date))
+//
+// Any single quotes in literal will be escaped. Any backslashes (i.e. "\") will be
+// replaced by two backslashes (i.e. "\\") and the C-style escape identifier
+// that PostgreSQL provides ('E') will be prepended to the string.
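+//
+// For example, under these escaping rules:
+//
+//	pq.QuoteLiteral(`it's`) // "'it''s'"
+//	pq.QuoteLiteral(`a\b`)  // " E'a\\b'" (note the leading space)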
+func QuoteLiteral(literal string) string { + // This follows the PostgreSQL internal algorithm for handling quoted literals + // from libpq, which can be found in the "PQEscapeStringInternal" function, + // which is found in the libpq/fe-exec.c source file: + // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c + // + // substitute any single-quotes (') with two single-quotes ('') + literal = strings.Replace(literal, `'`, `''`, -1) + // determine if the string has any backslashes (\) in it. + // if it does, replace any backslashes (\) with two backslashes (\\) + // then, we need to wrap the entire string with a PostgreSQL + // C-style escape. Per how "PQEscapeStringInternal" handles this case, we + // also add a space before the "E" + if strings.Contains(literal, `\`) { + literal = strings.Replace(literal, `\`, `\\`, -1) + literal = ` E'` + literal + `'` + } else { + // otherwise, we can just wrap the literal with a pair of single quotes + literal = `'` + literal + `'` + } + return literal +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. + var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (cn *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + _, err = fmt.Sscanf(r.string(), "%d.%d", &major1, &major2) + if err == nil { + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + } + + case "TimeZone": + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + cn.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + 
cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() ( + paramTyps []oid.Oid, + colNames []string, + colTyps []fieldDesc, +) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() rowsHeader { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return rowsHeader{} + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. 
+ for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse( + protocolState string, +) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } + // ignore any results + default: + cn.err.set(driver.ErrBadConn) + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) rowsHeader { + n := r.int16() + colNames := make([]string, n) + colFmts := make([]format, n) + colTyps := make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + colFmts[i] = format(r.int16()) + } + return rowsHeader{ + colNames: colNames, + colFmts: colFmts, + colTyps: colTyps, + } +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). 
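+		//
+		// For example (illustrative), an environment of
+		// []string{"PGHOST=db1", "PGPORT=6432"} yields
+		// map[string]string{"host": "db1", "port": "6432"}.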
+ switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGSSLSNI": + accrue("sslsni") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". +func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} + +// The database/sql/driver package says: +// All Conn implementations should implement the following interfaces: Pinger, SessionResetter, and Validator. +var _ driver.Pinger = &conn{} +var _ driver.SessionResetter = &conn{} + +func (cn *conn) ResetSession(ctx context.Context) error { + // Ensure bad connections are reported: From database/sql/driver: + // If a connection is never returned to the connection pool but immediately reused, then + // ResetSession is called prior to reuse but IsValid is not called. 
+ return cn.err.get() +} + +func (cn *conn) IsValid() bool { + return cn.err.get() == nil +} diff --git a/vendor/github.com/lib/pq/conn_go115.go b/vendor/github.com/lib/pq/conn_go115.go new file mode 100644 index 000000000..f4ef030f9 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go115.go @@ -0,0 +1,8 @@ +//go:build go1.15 +// +build go1.15 + +package pq + +import "database/sql/driver" + +var _ driver.Validator = &conn{} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 000000000..63d4ca6aa --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,247 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "time" +) + +const ( + watchCancelDialContextTimeout = time.Second * 10 +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnPrepareContext" interface +func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + return cn.Prepare(query) +} + +// Implement the "ConnBeginTx" interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if err != nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) Ping(ctx context.Context) error { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + rows, err := cn.simpleQuery(";") + if err != nil { + return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger + } + rows.Close() + return nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}, 1) + go func() { + select { + case <-done: + select { + case finished <- struct{}{}: + default: + // We raced with the finish func, let the next query handle this with the + // context. + return + } + + // Set the connection state to bad so it does not get reused. 
+ cn.err.set(ctx.Err()) + + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) + defer cancel() + + _ = cn.cancel(ctxCancel) + case <-finished: + } + }() + return func() { + select { + case <-finished: + cn.err.set(ctx.Err()) + cn.Close() + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel(ctx context.Context) error { + // Create a new values map (copy). This makes sure the connection created + // in this method cannot write to the same underlying data, which could + // cause a concurrent map write panic. This is necessary because cancel + // is called from a goroutine in watchCancel. + o := make(values) + for k, v := range cn.opts { + o[k] = v + } + + c, err := dial(ctx, cn.dialer, o) + if err != nil { + return err + } + defer c.Close() + + { + can := conn{ + c: c, + } + err = can.ssl(o) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. + { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} + +// Implement the "StmtQueryContext" interface +func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := st.watchCancel(ctx) + r, err := st.query(list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "StmtExecContext" interface +func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := st.watchCancel(ctx); finish != nil { + defer finish() + } + + return st.Exec(list) +} + +// watchCancel is implemented on stmt in order to not mark the parent conn as bad +func (st *stmt) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) + defer cancel() + + _ = st.cancel(ctxCancel) + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (st *stmt) cancel(ctx context.Context) error { + return st.cn.cancel(ctx) +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go new file mode 100644 index 000000000..1145e1225 --- /dev/null +++ b/vendor/github.com/lib/pq/connector.go @@ -0,0 +1,120 @@ +package pq + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" +) + +// Connector represents a fixed configuration for the pq driver with a given +// name. 
Connector satisfies the database/sql/driver Connector interface and
+// can be used to create any number of DB Conn's via the database/sql OpenDB
+// function.
+//
+// See https://golang.org/pkg/database/sql/driver/#Connector.
+// See https://golang.org/pkg/database/sql/#OpenDB.
+type Connector struct {
+	opts   values
+	dialer Dialer
+}
+
+// Connect returns a connection to the database using the fixed configuration
+// of this Connector. Context is not used.
+func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
+	return c.open(ctx)
+}
+
+// Dialer allows changing the dialer used to open connections.
+func (c *Connector) Dialer(dialer Dialer) {
+	c.dialer = dialer
+}
+
+// Driver returns the underlying driver of this Connector.
+func (c *Connector) Driver() driver.Driver {
+	return &Driver{}
+}
+
+// NewConnector returns a connector for the pq driver in a fixed configuration
+// with the given dsn. The returned connector can be used to create any number
+// of equivalent Conn's. The returned connector is intended to be used with
+// database/sql.OpenDB.
+//
+// See https://golang.org/pkg/database/sql/driver/#Connector.
+// See https://golang.org/pkg/database/sql/#OpenDB.
+func NewConnector(dsn string) (*Connector, error) {
+	var err error
+	o := make(values)
+
+	// A number of defaults are applied here, in this order:
+	//
+	// * Very low precedence defaults applied in every situation
+	// * Environment variables
+	// * Explicitly passed connection information
+	o["host"] = "localhost"
+	o["port"] = "5432"
+	// N.B.: Extra float digits should be set to 3, but that breaks
+	// Postgres 8.4 and older, where the max is 2.
+	o["extra_float_digits"] = "2"
+	for k, v := range parseEnviron(os.Environ()) {
+		o[k] = v
+	}
+
+	if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
+		dsn, err = ParseURL(dsn)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if err := parseOpts(dsn, o); err != nil {
+		return nil, err
+	}
+
+	// Use the "fallback" application name if necessary
+	if fallback, ok := o["fallback_application_name"]; ok {
+		if _, ok := o["application_name"]; !ok {
+			o["application_name"] = fallback
+		}
+	}
+
+	// We can't work with any client_encoding other than UTF-8 currently.
+	// However, we have historically allowed the user to set it to UTF-8
+	// explicitly, and there's no reason to break such programs, so allow that.
+	// Note that the "options" setting could also set client_encoding, but
+	// parsing its value is not worth it. Instead, we always explicitly send
+	// client_encoding as a separate run-time parameter, which should override
+	// anything set in options.
+	if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
+		return nil, errors.New("client_encoding must be absent or 'UTF8'")
+	}
+	o["client_encoding"] = "UTF8"
+	// DateStyle needs a similar treatment.
+	if datestyle, ok := o["datestyle"]; ok {
+		if datestyle != "ISO, MDY" {
+			return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle)
+		}
+	} else {
+		o["datestyle"] = "ISO, MDY"
+	}
+
+	// If a user is not provided by any other means, the last
+	// resort is to use the current operating system provided user
+	// name.
+	if _, ok := o["user"]; !ok {
+		u, err := userCurrent()
+		if err != nil {
+			return nil, err
+		}
+		o["user"] = u
+	}
+
+	// SSL is not necessary or supported over UNIX domain sockets
+	if network, _ := network(o); network == "unix" {
+		o["sslmode"] = "disable"
+	}
+
+	return &Connector{opts: o, dialer: defaultDialer{}}, nil
+}
diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go
new file mode 100644
index 000000000..a8f16b2b2
--- /dev/null
+++ b/vendor/github.com/lib/pq/copy.go
@@ -0,0 +1,348 @@
+package pq
+
+import (
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"sync"
+)
+
+var (
+	errCopyInClosed               = errors.New("pq: copyin statement has already been closed")
+	errBinaryCopyNotSupported     = errors.New("pq: only text format supported for COPY")
+	errCopyToNotSupported         = errors.New("pq: COPY TO is not supported")
+	errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
+	errCopyInProgress             = errors.New("pq: COPY in progress")
+)
+
+// CopyIn creates a COPY FROM statement which can be prepared with
+// Tx.Prepare(). The target table should be visible in search_path.
+func CopyIn(table string, columns ...string) string {
+	buffer := bytes.NewBufferString("COPY ")
+	BufferQuoteIdentifier(table, buffer)
+	buffer.WriteString(" (")
+	makeStmt(buffer, columns...)
+	return buffer.String()
+}
+
+// makeStmt builds the statement string for CopyIn and CopyInSchema.
+func makeStmt(buffer *bytes.Buffer, columns ...string) {
+	for i, col := range columns {
+		if i != 0 {
+			buffer.WriteString(", ")
+		}
+		BufferQuoteIdentifier(col, buffer)
+	}
+	buffer.WriteString(") FROM STDIN")
+}
+
+// CopyInSchema creates a COPY FROM statement which can be prepared with
+// Tx.Prepare().
+func CopyInSchema(schema, table string, columns ...string) string {
+	buffer := bytes.NewBufferString("COPY ")
+	BufferQuoteIdentifier(schema, buffer)
+	buffer.WriteRune('.')
+	BufferQuoteIdentifier(table, buffer)
+	buffer.WriteString(" (")
+	makeStmt(buffer, columns...)
+	return buffer.String()
+}
+
+type copyin struct {
+	cn      *conn
+	buffer  []byte
+	rowData chan []byte
+	done    chan bool
+
+	closed bool
+
+	mu struct {
+		sync.Mutex
+		err error
+		driver.Result
+	}
+}
+
+const ciBufferSize = 64 * 1024
+
+// flush buffer before the buffer is filled up and needs reallocation
+const ciBufferFlushSize = 63 * 1024
+
+func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
+	if !cn.isInTransaction() {
+		return nil, errCopyNotSupportedOutsideTxn
+	}
+
+	ci := &copyin{
+		cn:      cn,
+		buffer:  make([]byte, 0, ciBufferSize),
+		rowData: make(chan []byte),
+		done:    make(chan bool, 1),
+	}
+	// add CopyData identifier + 4 bytes for message length
+	ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
+
+	b := cn.writeBuf('Q')
+	b.string(q)
+	cn.send(b)
+
+awaitCopyInResponse:
+	for {
+		t, r := cn.recv1()
+		switch t {
+		case 'G':
+			if r.byte() != 0 {
+				err = errBinaryCopyNotSupported
+				break awaitCopyInResponse
+			}
+			go ci.resploop()
+			return ci, nil
+		case 'H':
+			err = errCopyToNotSupported
+			break awaitCopyInResponse
+		case 'E':
+			err = parseError(r)
+		case 'Z':
+			if err == nil {
+				ci.setBad(driver.ErrBadConn)
+				errorf("unexpected ReadyForQuery in response to COPY")
+			}
+			cn.processReadyForQuery(r)
+			return nil, err
+		default:
+			ci.setBad(driver.ErrBadConn)
+			errorf("unknown response for copy query: %q", t)
+		}
+	}
+
+	// something went wrong, abort COPY before we return
+	b = cn.writeBuf('f')
+	b.string(err.Error())
+	cn.send(b)
+
+	for {
+		t, r := cn.recv1()
+		switch t {
+		case 'c', 'C', 'E':
+		case 'Z':
+			// correctly aborted, we're done
+			cn.processReadyForQuery(r)
+			return nil, err
+		default:
+			ci.setBad(driver.ErrBadConn)
+			errorf("unknown response for CopyFail: %q", t)
+		}
+	}
+}
+
+func (ci *copyin) flush(buf []byte) {
+	// set message length (without message identifier)
+	binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
+
+	_, err := ci.cn.c.Write(buf)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (ci *copyin) resploop() {
+	for {
+		var r readBuf
+		t, err := ci.cn.recvMessage(&r)
+		if err != nil {
+			ci.setBad(driver.ErrBadConn)
+			ci.setError(err)
+			ci.done <- true
+			return
+		}
+		switch t {
+		case 'C':
+			// complete
+			res, _ := ci.cn.parseComplete(r.string())
+			ci.setResult(res)
+		case 'N':
+			if n := ci.cn.noticeHandler; n != nil {
+				n(parseError(&r))
+			}
+		case 'Z':
+			ci.cn.processReadyForQuery(&r)
+			ci.done <- true
+			return
+		case 'E':
+			err := parseError(&r)
+			ci.setError(err)
+		default:
+			ci.setBad(driver.ErrBadConn)
+			ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
+			ci.done <- true
+			return
+		}
+	}
+}
+
+func (ci *copyin) setBad(err error) {
+	ci.cn.err.set(err)
+}
+
+func (ci *copyin) getBad() error {
+	return ci.cn.err.get()
+}
+
+func (ci *copyin) err() error {
+	ci.mu.Lock()
+	err := ci.mu.err
+	ci.mu.Unlock()
+	return err
+}
+
+// setError() sets ci.err if one has not been set already. Caller must not be
+// holding ci.mu.
+func (ci *copyin) setError(err error) { + ci.mu.Lock() + if ci.mu.err == nil { + ci.mu.err = err + } + ci.mu.Unlock() +} + +func (ci *copyin) setResult(result driver.Result) { + ci.mu.Lock() + ci.mu.Result = result + ci.mu.Unlock() +} + +func (ci *copyin) getResult() driver.Result { + ci.mu.Lock() + result := ci.mu.Result + ci.mu.Unlock() + if result == nil { + return driver.RowsAffected(0) + } + return result +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if err := ci.getBad(); err != nil { + return nil, err + } + defer ci.cn.errRecover(&err) + + if err := ci.err(); err != nil { + return nil, err + } + + if len(v) == 0 { + if err := ci.Close(); err != nil { + return driver.RowsAffected(0), err + } + + return ci.getResult(), nil + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +// CopyData inserts a raw string into the COPY stream. The insert is +// asynchronous and CopyData can return errors from previous CopyData calls to +// the same COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if finish := ci.cn.watchCancel(ctx); finish != nil { + defer finish() + } + + if err := ci.getBad(); err != nil { + return nil, err + } + defer ci.cn.errRecover(&err) + + if err := ci.err(); err != nil { + return nil, err + } + + ci.buffer = append(ci.buffer, []byte(line)...) + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if err := ci.getBad(); err != nil { + return err + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. + err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + ci.cn.inCopy = false + + if err := ci.err(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 000000000..b57184801 --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,268 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. 
+ +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching with the same rules as Postgres. It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. 
If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + + +Queries + + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format. + +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + +Errors + + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. 
+ +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. + +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +https://godoc.org/github.com/lib/pq/example/listen. + + +Kerberos Support + + +If you need support for Kerberos authentication, add the following to your main +package: + + import "github.com/lib/pq/auth/kerberos" + + func init() { + pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) + } + +This package is in a separate module so that users who don't need Kerberos +don't have to download unnecessary dependencies. + +When imported, additional connection string parameters are supported: + + * krbsrvname - GSS (Kerberos) service name when constructing the + SPN (default is `postgres`). This will be combined with the host + to form the full SPN: `krbsrvname/host`. + * krbspn - GSS (Kerberos) SPN. This takes priority over + `krbsrvname` if present. 
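+
+For example, a connection string selecting a custom service name (the host,
+user and dbname values here are placeholders):
+
+	connStr := "host=db.example.com user=app dbname=appdb krbsrvname=postgres"
+	db, err := sql.Open("postgres", connStr)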
+*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 000000000..bffe6096a --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,632 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + switch f { + case formatBinary: + return binaryDecode(parameterStatus, s, typ) + case formatText: + return textDecode(parameterStatus, s, typ) + default: + panic("not reached") + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + case oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) + case oid.T_bytea: + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + // We always use 64 bit parsing, regardless of whether the input text is for + // a float4 or float8, because clients expect float64s for all float datatypes + // and returning a 32-bit parsed float64 produces lossy results. 
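+		// For example, the float4 text "0.1" parsed at 32-bit precision and
+		// widened to float64 would surface as 0.10000000149011612, while the
+		// direct 64-bit parse below returns 0.1.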
+ f, err := strconv.ParseFloat(string(s), 64) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // Check for a minute and second offset in the timezone. + if typ == oid.T_timestamptz || typ == oid.T_timetz { + for i := 3; i <= 6; i += 3 { + if str[len(str)-i] == ':' { + f += ":00" + continue + } + break + } + } + + // Special case for 24:00 time. + // Unfortunately, golang does not parse 24:00 as a proper time. + // In this case, we want to try "round to the next day", to differentiate. + // As such, we find if the 24:00 time matches at the beginning; if so, + // we default it back to 00:00 but add a day later. + var is2400Time bool + switch typ { + case oid.T_timetz, oid.T_time: + if matches := time2400Regex.FindStringSubmatch(str); matches != nil { + // Concatenate timezone information at the back. 
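+			// For example, "24:00:00+00" becomes "00:00:00+00" here and is
+			// advanced by 24h after parsing, i.e. midnight of the next day.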
+ str = "00:00:00" + str[len(matches[1]):] + is2400Time = true + } + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + if is2400Time { + t = t.Add(24 * time.Hour) + } + return t +} + +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) + if err != nil { + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. +func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. 
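+//
+// For example (illustrative bounds; any pair with negative before positive
+// works):
+//
+//	pq.EnableInfinityTs(
+//		time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC),
+//		time.Date(2300, 1, 1, 0, 0, 0, 0, time.UTC),
+//	)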
+func EnableInfinityTs(negative time.Time, positive time.Time) {
+	if infinityTsEnabled {
+		panic(infinityTsEnabledAlready)
+	}
+	if !negative.Before(positive) {
+		panic(infinityTsNegativeMustBeSmaller)
+	}
+	infinityTsEnabled = true
+	infinityTsNegative = negative
+	infinityTsPositive = positive
+}
+
+/*
+ * Testing might want to toggle infinityTsEnabled
+ */
+func disableInfinityTs() {
+	infinityTsEnabled = false
+}
+
+// This is a time function specific to the Postgres default DateStyle
+// setting ("ISO, MDY"), the only one we currently support. This
+// accounts for the discrepancies between the parsing available with
+// time.Parse and the Postgres date formatting quirks.
+func parseTs(currentLocation *time.Location, str string) interface{} {
+	switch str {
+	case "-infinity":
+		if infinityTsEnabled {
+			return infinityTsNegative
+		}
+		return []byte(str)
+	case "infinity":
+		if infinityTsEnabled {
+			return infinityTsPositive
+		}
+		return []byte(str)
+	}
+	t, err := ParseTimestamp(currentLocation, str)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// ParseTimestamp parses Postgres' text format. It returns a time.Time in
+// currentLocation iff that time's offset agrees with the offset sent from the
+// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
+// fixed offset provided by the Postgres server.
+func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
+	p := timestampParser{}
+
+	monSep := strings.IndexRune(str, '-')
+	// this is Gregorian year, not ISO Year
+	// In Gregorian system, the year 1 BC is followed by AD 1
+	year := p.mustAtoi(str, 0, monSep)
+	daySep := monSep + 3
+	month := p.mustAtoi(str, monSep+1, daySep)
+	p.expect(str, '-', daySep)
+	timeSep := daySep + 3
+	day := p.mustAtoi(str, daySep+1, timeSep)
+
+	minLen := monSep + len("01-01") + 1
+
+	isBC := strings.HasSuffix(str, " BC")
+	if isBC {
+		minLen += 3
+	}
+
+	var hour, minute, second int
+	if len(str) > minLen {
+		p.expect(str, ' ', timeSep)
+		minSep := timeSep + 3
+		p.expect(str, ':', minSep)
+		hour = p.mustAtoi(str, timeSep+1, minSep)
+		secSep := minSep + 3
+		p.expect(str, ':', secSep)
+		minute = p.mustAtoi(str, minSep+1, secSep)
+		secEnd := secSep + 3
+		second = p.mustAtoi(str, secSep+1, secEnd)
+	}
+	remainderIdx := monSep + len("01-01 00:00:00") + 1
+	// Three optional (but ordered) sections follow: the
+	// fractional seconds, the time zone offset, and the BC
+	// designation. We set them up here and adjust the other
+	// offsets if the preceding sections exist.
+
+	nanoSec := 0
+	tzOff := 0
+
+	if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+Z ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { + // time zone separator is always '-' or '+' or 'Z' (UTC is +00) + var tzSign int + switch c := str[tzStart]; c { + case '-': + tzSign = -1 + case '+': + tzSign = +1 + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) + remainderIdx += 3 + var tzMin, tzSec int + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } else if tzStart < len(str) && str[tzStart] == 'Z' { + // time zone Z separator indicates UTC is +00 + remainderIdx += 1 + } + + var isoYear int + + if isBC { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t, p.err +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) []byte { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. +func FormatTimestamp(t time.Time) []byte { + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) + + _, offset := t.Zone() + offset %= 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. 
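+//
+// For example, hex input `\xdeadbeef` decodes to the four bytes de ad be ef,
+// while escape-format input `ab\\cd` decodes to the five bytes `ab\cd`.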
+func parseBytea(s []byte) (result []byte, err error) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + return nil, err + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + return nil, fmt.Errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseUint(string(s[1:4]), 8, 8) + if err != nil { + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result, nil +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go new file mode 100644 index 000000000..f67c5a5fa --- /dev/null +++ b/vendor/github.com/lib/pq/error.go @@ -0,0 +1,523 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. 
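+//
+// For example, per the errorCodeNames table below:
+//
+//	ErrorCode("23505").Name() // "unique_violation"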
+func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": 
"nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": "zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": 
"srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": "windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": 
"fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": "fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// SQLState returns the SQLState of the error. +func (err *Error) SQLState() string { + return string(err.Code) +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err *Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +// TODO(ainar-g) Rename to errorf after removing panics. 
+func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (cn *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + cn.err.set(driver.ErrBadConn) + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + cn.err.set(driver.ErrBadConn) + *err = v + case *safeRetryError: + cn.err.set(driver.ErrBadConn) + *err = driver.ErrBadConn + case error: + if v == io.EOF || v.Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + cn.err.set(driver.ErrBadConn) + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. + if *err == driver.ErrBadConn { + cn.err.set(driver.ErrBadConn) + } +} diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go new file mode 100644 index 000000000..408ec01f9 --- /dev/null +++ b/vendor/github.com/lib/pq/krb.go @@ -0,0 +1,27 @@ +package pq + +// NewGSSFunc creates a GSS authentication provider, for use with +// RegisterGSSProvider. +type NewGSSFunc func() (GSS, error) + +var newGss NewGSSFunc + +// RegisterGSSProvider registers a GSS authentication provider. For example, if +// you need to use Kerberos to authenticate with your server, add this to your +// main package: +// +// import "github.com/lib/pq/auth/kerberos" +// +// func init() { +// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) +// } +func RegisterGSSProvider(newGssArg NewGSSFunc) { + newGss = newGssArg +} + +// GSS provides GSSAPI authentication (e.g., Kerberos). +type GSS interface { + GetInitToken(host string, service string) ([]byte, error) + GetInitTokenFromSpn(spn string) ([]byte, error) + Continue(inToken []byte) (done bool, outToken []byte, err error) +} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go new file mode 100644 index 000000000..70ad122a7 --- /dev/null +++ b/vendor/github.com/lib/pq/notice.go @@ -0,0 +1,72 @@ +//go:build go1.10 +// +build go1.10 + +package pq + +import ( + "context" + "database/sql/driver" +) + +// NoticeHandler returns the notice handler on the given connection, if any. A +// runtime panic occurs if c is not a pq connection. This is rarely used +// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. +func NoticeHandler(c driver.Conn) func(*Error) { + return c.(*conn).noticeHandler +} + +// SetNoticeHandler sets the given notice handler on the given connection. A +// runtime panic occurs if c is not a pq connection. A nil handler may be used +// to unset it. This is rarely used directly, use ConnectorNoticeHandler and +// ConnectorWithNoticeHandler instead. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNoticeHandler(c driver.Conn, handler func(*Error)) { + c.(*conn).noticeHandler = handler +} + +// NoticeHandlerConnector wraps a regular connector and sets a notice handler +// on it. 
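+//
+// A minimal usage sketch via ConnectorWithNoticeHandler and database/sql
+// (the connection string is a placeholder):
+//
+//	base, err := pq.NewConnector("dbname=example sslmode=disable")
+//	if err != nil {
++//		// handle err
+//	}
+//	db := sql.OpenDB(pq.ConnectorWithNoticeHandler(base, func(notice *pq.Error) {
+//		log.Println("notice:", notice.Message)
+//	}))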
+type NoticeHandlerConnector struct { + driver.Connector + noticeHandler func(*Error) +} + +// Connect calls the underlying connector's connect method and then sets the +// notice handler. +func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNoticeHandler(c, n.noticeHandler) + } + return c, err +} + +// ConnectorNoticeHandler returns the currently set notice handler, if any. If +// the given connector is not a result of ConnectorWithNoticeHandler, nil is +// returned. +func ConnectorNoticeHandler(c driver.Connector) func(*Error) { + if c, ok := c.(*NoticeHandlerConnector); ok { + return c.noticeHandler + } + return nil +} + +// ConnectorWithNoticeHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notice +// handler. A nil notice handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { + if c, ok := c.(*NoticeHandlerConnector); ok { + c.noticeHandler = handler + return c + } + return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 000000000..5c421fdb8 --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,858 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +// SetNotificationHandler sets the given notification handler on the given +// connection. A runtime panic occurs if c is not a pq connection. A nil handler +// may be used to unset it. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { + c.(*conn).notificationHandler = handler +} + +// NotificationHandlerConnector wraps a regular connector and sets a notification handler +// on it. +type NotificationHandlerConnector struct { + driver.Connector + notificationHandler func(*Notification) +} + +// Connect calls the underlying connector's connect method and then sets the +// notification handler. 
+func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNotificationHandler(c, n.notificationHandler) + } + return c, err +} + +// ConnectorNotificationHandler returns the currently set notification handler, if any. If +// the given connector is not a result of ConnectorWithNotificationHandler, nil is +// returned. +func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { + if c, ok := c.(*NotificationHandlerConnector); ok { + return c.notificationHandler + } + return nil +} + +// ConnectorWithNotificationHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notification +// handler. A nil notification handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { + if c, ok := c.(*NotificationHandlerConnector); ok { + c.notificationHandler = handler + return c + } + return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// NewListenerConn creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: c, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. 
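+//
+// Per the switch below, the only legal cycle of states is:
+//
+//	connStateIdle -> connStateExpectResponse -> connStateExpectReadyForQuery -> connStateIdle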
+func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. + if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'S': + // ignore + case 'N': + if n := l.cn.noticeHandler; n != nil { + n(parseError(r)) + } + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. 
+	close(l.replyChan)
+
+	// let the listener know we're done
+	close(l.notificationChan)
+
+	// this ListenerConn is done
+}
+
+// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) Listen(channel string) (bool, error) {
+	return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
+}
+
+// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) Unlisten(channel string) (bool, error) {
+	return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
+}
+
+// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) UnlistenAll() (bool, error) {
+	return l.ExecSimpleQuery("UNLISTEN *")
+}
+
+// Ping the remote server to make sure it's alive. Non-nil error means the
+// connection has failed and should be abandoned.
+func (l *ListenerConn) Ping() error {
+	sent, err := l.ExecSimpleQuery("")
+	if !sent {
+		return err
+	}
+	if err != nil {
+		// shouldn't happen
+		panic(err)
+	}
+	return nil
+}
+
+// Attempt to send a query on the connection. Returns an error if sending the
+// query failed, and the caller should initiate closure of this connection.
+// The caller must be holding senderLock (see acquireSenderLock and
+// releaseSenderLock).
+func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
+	defer errRecoverNoErrBadConn(&err)
+
+	// must set connection state before sending the query
+	if !l.setState(connStateExpectResponse) {
+		panic("two queries running at the same time")
+	}
+
+	// Can't use l.cn.writeBuf here because it uses the scratch buffer which
+	// might get overwritten by listenerConnLoop.
+	b := &writeBuf{
+		buf: []byte("Q\x00\x00\x00\x00"),
+		pos: 1,
+	}
+	b.string(q)
+	l.cn.send(b)
+
+	return nil
+}
+
+// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
+// parameters) on the connection. The possible return values are:
+//  1) "executed" is true; the query was executed to completion on the
+//     database server. If the query failed, err will be set to the error
+//     returned by the database, otherwise err will be nil.
+//  2) If "executed" is false, the query could not be executed on the remote
+//     server. err will be non-nil.
+//
+// After a call to ExecSimpleQuery has returned an executed=false value, the
+// connection has either been closed or will be closed shortly thereafter, and
+// all subsequently executed queries will return an error.
+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
+	if err = l.acquireSenderLock(); err != nil {
+		return false, err
+	}
+	defer l.releaseSenderLock()
+
+	err = l.sendSimpleQuery(q)
+	if err != nil {
+		// We can't know what state the protocol is in, so we need to abandon
+		// this connection.
+		l.connectionLock.Lock()
+		// Set the error pointer if it hasn't been set already; see
+		// listenerConnMain.
+		if l.err == nil {
+			l.err = err
+		}
+		l.connectionLock.Unlock()
+		l.cn.c.Close()
+		return false, err
+	}
+
+	// now we just wait for a reply...
+	for {
+		m, ok := <-l.replyChan
+		if !ok {
+			// We lost the connection to the server, don't bother waiting for
+			// a response. err should have been set already.
+			l.connectionLock.Lock()
+			err := l.err
+			l.connectionLock.Unlock()
+			return false, err
+		}
+		switch m.typ {
+		case 'Z':
+			// sanity check
+			if m.err != nil {
+				panic("m.err != nil")
+			}
+			// done; err might or might not be set
+			return true, err
+
+		case 'E':
+			// sanity check
+			if m.err == nil {
+				panic("m.err == nil")
+			}
+			// server responded with an error; ReadyForQuery to follow
+			err = m.err
+
+		default:
+			return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
+		}
+	}
+}
+
+// Close closes the connection.
+func (l *ListenerConn) Close() error {
+	l.connectionLock.Lock()
+	if l.err != nil {
+		l.connectionLock.Unlock()
+		return errListenerConnClosed
+	}
+	l.err = errListenerConnClosed
+	l.connectionLock.Unlock()
+	// We can't send anything on the connection without holding senderLock.
+	// Simply close the net.Conn to wake up everyone operating on it.
+	return l.cn.c.Close()
+}
+
+// Err returns the reason the connection was closed. It is not safe to call
+// this function until l.Notify has been closed.
+func (l *ListenerConn) Err() error {
+	return l.err
+}
+
+var errListenerClosed = errors.New("pq: Listener has been closed")
+
+// ErrChannelAlreadyOpen is returned from Listen when a channel is already
+// open.
+var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
+
+// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
+var ErrChannelNotOpen = errors.New("pq: channel is not open")
+
+// ListenerEventType is an enumeration of listener event types.
+type ListenerEventType int
+
+const (
+	// ListenerEventConnected is emitted only when the database connection
+	// has first been established. The err argument of the callback
+	// will always be nil.
+	ListenerEventConnected ListenerEventType = iota
+
+	// ListenerEventDisconnected is emitted after a database connection has
+	// been lost, either because of an error or because Close has been
+	// called. The err argument will be set to the reason the database
+	// connection was lost.
+	ListenerEventDisconnected
+
+	// ListenerEventReconnected is emitted after a database connection has
+	// been re-established after connection loss. The err argument of the
+	// callback will always be nil. After this event has been emitted, a
+	// nil pq.Notification is sent on the Listener.Notify channel.
+	ListenerEventReconnected
+
+	// ListenerEventConnectionAttemptFailed is emitted after a connection
+	// to the database was attempted, but failed. The err argument will be
+	// set to an error describing why the connection attempt did not
+	// succeed.
+	ListenerEventConnectionAttemptFailed
+)
+
+// EventCallbackType is the event callback type. See also ListenerEventType
+// constants' documentation.
+type EventCallbackType func(event ListenerEventType, err error)
+
+// Listener provides an interface for listening to notifications from a
+// PostgreSQL database. For general usage information, see section
+// "Notifications".
+//
+// Listener can safely be used from concurrently running goroutines.
+type Listener struct {
+	// Channel for receiving notifications from the database. In some cases a
+	// nil value will be sent. See section "Notifications" above.
+	Notify chan *Notification
+
+	name                 string
+	minReconnectInterval time.Duration
+	maxReconnectInterval time.Duration
+	dialer               Dialer
+	eventCallback        EventCallbackType
+
+	lock                 sync.Mutex
+	isClosed             bool
+	reconnectCond        *sync.Cond
+	cn                   *ListenerConn
+	connNotificationChan <-chan *Notification
+	channels             map[string]struct{}
+}
+
+// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
+//
+// name should be set to a connection string to be used to establish the
+// database connection (see section "Connection String Parameters" above).
+//
+// minReconnectInterval controls the duration to wait before trying to
+// re-establish the database connection after connection loss. After each
+// consecutive failure this interval is doubled, until maxReconnectInterval is
+// reached. Successfully completing the connection establishment procedure
+// resets the interval back to minReconnectInterval.
+//
+// The last parameter eventCallback can be set to a function which will be
+// called by the Listener when the state of the underlying database connection
+// changes. This callback will be called by the goroutine which dispatches the
+// notifications over the Notify channel, so you should try to avoid doing
+// potentially time-consuming operations from the callback.
+func NewListener(name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+	return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
+}
+
+// NewDialListener is like NewListener but it takes a Dialer.
+func NewDialListener(d Dialer,
+	name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+
+	l := &Listener{
+		name:                 name,
+		minReconnectInterval: minReconnectInterval,
+		maxReconnectInterval: maxReconnectInterval,
+		dialer:               d,
+		eventCallback:        eventCallback,
+
+		channels: make(map[string]struct{}),
+
+		Notify: make(chan *Notification, 32),
+	}
+	l.reconnectCond = sync.NewCond(&l.lock)
+
+	go l.listenerMain()
+
+	return l
+}
+
+// NotificationChannel returns the notification channel for this listener.
+// This is the same channel as Notify, and will not be recreated during the
+// lifetime of the Listener.
+func (l *Listener) NotificationChannel() <-chan *Notification {
+	return l.Notify
+}
+
+// Listen starts listening for notifications on a channel. Calls to this
+// function will block until an acknowledgement has been received from the
+// server. Note that Listener automatically re-establishes the connection
+// after connection loss, so this function may block indefinitely if the
+// connection cannot be re-established.
+//
+// Listen will only fail in three conditions:
+//  1) The channel is already open. The returned error will be
+//     ErrChannelAlreadyOpen.
+//  2) The query was executed on the remote server, but PostgreSQL returned an
+//     error message in response to the query. The returned error will be a
+//     pq.Error containing the information the server supplied.
+//  3) Close is called on the Listener before the request could be completed.
+//
+// The channel name is case-sensitive.
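+//
+// A minimal usage sketch (the connection string and channel name are
+// placeholders):
+//
+//	listener := pq.NewListener("dbname=example sslmode=disable", time.Second, time.Minute, nil)
+//	if err := listener.Listen("events"); err != nil {
+//		// handle err
+//	}
+//	for n := range listener.Notify {
+//		// n is nil after a reconnect; see ListenerEventReconnected
+//	}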
+func (l *Listener) Listen(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// The server allows you to issue a LISTEN on a channel which is already
+	// open, but it seems useful to be able to detect this case to spot
+	// mistakes in application logic. If the application genuinely doesn't
+	// care, it can check the exported error and ignore it.
+	_, exists := l.channels[channel]
+	if exists {
+		return ErrChannelAlreadyOpen
+	}
+
+	if l.cn != nil {
+		// If gotResponse is true but error is set, the query was executed on
+		// the remote server, but resulted in an error. This should be
+		// relatively rare, so it's fine if we just pass the error to our
+		// caller. However, if gotResponse is false, we could not complete the
+		// query on the remote server and our underlying connection is about
+		// to go away, so we only add relname to l.channels, and wait for
+		// resync() to take care of the rest.
+		gotResponse, err := l.cn.Listen(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	l.channels[channel] = struct{}{}
+	for l.cn == nil {
+		l.reconnectCond.Wait()
+		// we let go of the mutex for a while
+		if l.isClosed {
+			return errListenerClosed
+		}
+	}
+
+	return nil
+}
+
+// Unlisten removes a channel from the Listener's channel list. Returns
+// ErrChannelNotOpen if the Listener is not listening on the specified channel.
+// Returns immediately with no error if there is no connection. Note that you
+// might still get notifications for this channel even after Unlisten has
+// returned.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Unlisten(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// Similarly to LISTEN, this is not an error in Postgres, but it seems
+	// useful to distinguish from the normal conditions.
+	_, exists := l.channels[channel]
+	if !exists {
+		return ErrChannelNotOpen
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.Unlisten(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	delete(l.channels, channel)
+	return nil
+}
+
+// UnlistenAll removes all channels from the Listener's channel list. Returns
+// immediately with no error if there is no connection. Note that you might
+// still get notifications for any of the deleted channels even after
+// UnlistenAll has returned.
+func (l *Listener) UnlistenAll() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.UnlistenAll()
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	l.channels = make(map[string]struct{})
+	return nil
+}
+
+// Ping the remote server to make sure it's alive. Non-nil return value means
+// that there is no active connection.
+func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func(notificationChan <-chan *Notification) { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). + gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }(notificationChan) + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + // Unblock calls to Listen() + l.reconnectCond.Broadcast() + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. 
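+//
+// In outline: connect (retrying with an exponential backoff that doubles from
+// minReconnectInterval up to maxReconnectInterval), emit Connected or
+// Reconnected, forward notifications to l.Notify until the connection is
+// lost, emit Disconnected, then start over.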
+func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(time.Until(nextReconnect)) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go new file mode 100644 index 000000000..caaede248 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. +package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 000000000..ecc84c2c8 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,343 @@ +// Code generated by gen.go. DO NOT EDIT. + +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_index_am_handler Oid = 325 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time 
Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 +) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: 
"_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: "ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 000000000..c6aa5b9a3 --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. 
+ Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. +func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName return the database system type name. +func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type ok +// should return false. +func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. +func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go new file mode 100644 index 000000000..477216b60 --- /dev/null +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -0,0 +1,264 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
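(Editorial sketch, not part of the vendored file.) The fieldDesc accessors above surface through database/sql's ColumnType API; a minimal illustration, assuming an already-open *sql.DB named db that uses this driver:

	rows, err := db.Query("SELECT 'x'::varchar(10), 1.23::numeric(6,2)")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	cols, _ := rows.ColumnTypes()
	for _, c := range cols {
		fmt.Println(c.DatabaseTypeName()) // fieldDesc.Name() via oid.TypeName
		fmt.Println(c.ScanType())         // fieldDesc.Type()
		fmt.Println(c.Length())           // varchar(10) reports 10 = atttypmod - headerSize
		fmt.Println(c.DecimalSize())      // numeric(6,2) reports precision 6, scale 2
	}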
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). +// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. +// +// For SCRAM-SHA-256, for example, use: +// +// client := scram.NewClient(sha256.New, user, pass) +// +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. +func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. +func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically out of crypto/rand on the first step. +func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. +// Step returns false if there are no errors and more data is +// still expected. 
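// (Editorial sketch, not part of the vendored file.) Since Step reports true
// only once the exchange has finished or failed, a conversation can be driven
// by looping while it returns false; roundTrip below is an illustrative
// placeholder for the SASL transport:
//
//	client := scram.NewClient(sha256.New, user, pass)
//	var in []byte
//	for !client.Step(in) {
//		reply, err := roundTrip(client.Out()) // send Out(), read server reply
//		if err != nil {
//			return err
//		}
//		in = reply
//	}
//	return client.Err() // nil on successful authentication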
+func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + return c.step > 2 || c.err != nil +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 16 + buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + escaper.WriteString(&c.authMsg, c.user) + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != nil { + return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + c.saltPassword(salt, iterCount) + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") + c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + 
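	// (Editorial note.) This implements the proof derivation of RFC 5802,
	// section 3:
	//	ClientKey       = HMAC(SaltedPassword, "Client Key")
	//	StoredKey       = H(ClientKey)
	//	ClientSignature = HMAC(StoredKey, AuthMessage)
	//	ClientProof     = ClientKey XOR ClientSignature
	// and returns the proof base64-encoded for the client-final message.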
mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 000000000..36b61ba45 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,204 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" + "strings" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + // Set Server Name Indication (SNI), if enabled by connection parameters. + // By default SNI is on, any value which is not starting with "1" disables + // SNI -- that is the same check vanilla libpq uses. + if sslsni := o["sslsni"]; sslsni == "" || strings.HasPrefix(sslsni, "1") { + // RFC 6066 asks to not set SNI if the host is a literal IP address (IPv4 + // or IPv6). This check is coded already crypto.tls.hostnameInSNI, so + // just always set ServerName here and let crypto/tls do the filtering. + tlsConf.ServerName = o["host"] + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + + // Accept renegotiation requests initiated by the backend. 
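	// (Editorial sketch, not part of the vendored file.) The sslmode and
	// certificate settings consumed above come in through the connection
	// string; for example (host and paths are placeholders):
	//
	//	db, err := sql.Open("postgres",
	//		"host=db.internal dbname=app user=bob "+
	//			"sslmode=verify-full sslrootcert=/etc/ssl/root.crt "+
	//			"sslcert=/home/bob/.postgresql/postgresql.crt "+
	//			"sslkey=/home/bob/.postgresql/postgresql.key")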
+ // + // Renegotiation was deprecated then removed from PostgreSQL 9.5, but + // the default configuration of older versions has it enabled. Redshift + // also initiates renegotiations and cannot be reconfigured. + tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + sslinline := o["sslinline"] + if sslinline == "true" { + cert, err := tls.X509KeyPair([]byte(o["sslcert"]), []byte(o["sslkey"])) + if err != nil { + return err + } + tlsConf.Certificates = []tls.Certificate{cert} + return nil + } + + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + sslinline := o["sslinline"] + + var cert []byte + if sslinline == "true" { + cert = []byte(sslrootcert) + } else { + var err error + cert, err = ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. 
the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. +func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 000000000..d587f102e --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,93 @@ +//go:build !windows +// +build !windows + +package pq + +import ( + "errors" + "os" + "syscall" +) + +const ( + rootUserID = uint32(0) + + // The maximum permissions that a private key file owned by a regular user + // is allowed to have. This translates to u=rw. + maxUserOwnedKeyPermissions os.FileMode = 0600 + + // The maximum permissions that a private key file owned by root is allowed + // to have. This translates to u=rw,g=r. + maxRootOwnedKeyPermissions os.FileMode = 0640 +) + +var ( + errSSLKeyHasUnacceptableUserPermissions = errors.New("permissions for files not owned by root should be u=rw (0600) or less") + errSSLKeyHasUnacceptableRootPermissions = errors.New("permissions for root owned files should be u=rw,g=r (0640) or less") +) + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + + err = hasCorrectPermissions(info) + + // return ErrSSLKeyHasWorldPermissions for backwards compatability with + // existing code. + if err == errSSLKeyHasUnacceptableUserPermissions || err == errSSLKeyHasUnacceptableRootPermissions { + err = ErrSSLKeyHasWorldPermissions + } + return err +} + +// hasCorrectPermissions checks the file info (and the unix-specific stat_t +// output) to verify that the permissions on the file are correct. +// +// If the file is owned by the same user the process is running as, +// the file should only have 0600 (u=rw). If the file is owned by root, +// and the group matches the group that the process is running in, the +// permissions cannot be more than 0640 (u=rw,g=r). The file should +// never have world permissions. +// +// Returns an error when the permission check fails. +func hasCorrectPermissions(info os.FileInfo) error { + // if file's permission matches 0600, allow access. + userPermissionMask := (os.FileMode(0777) ^ maxUserOwnedKeyPermissions) + + // regardless of if we're running as root or not, 0600 is acceptable, + // so we return if we match the regular user permission mask. + if info.Mode().Perm()&userPermissionMask == 0 { + return nil + } + + // We need to pull the Unix file information to get the file's owner. + // If we can't access it, there's some sort of operating system level error + // and we should fail rather than attempting to use faulty information. 
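	// (Editorial note.) Worked example: userPermissionMask = 0777 ^ 0600 = 0177,
	// so a key file with mode 0644 is rejected (0644 & 0177 = 0044 != 0) while
	// a 0600 key passes (0600 & 0177 == 0).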
+ sysInfo := info.Sys() + if sysInfo == nil { + return ErrSSLKeyUnknownOwnership + } + + unixStat, ok := sysInfo.(*syscall.Stat_t) + if !ok { + return ErrSSLKeyUnknownOwnership + } + + // if the file is owned by root, we allow 0640 (u=rw,g=r) to match what + // Postgres does. + if unixStat.Uid == rootUserID { + rootPermissionMask := (os.FileMode(0777) ^ maxRootOwnedKeyPermissions) + if info.Mode().Perm()&rootPermissionMask != 0 { + return errSSLKeyHasUnacceptableRootPermissions + } + return nil + } + + return errSSLKeyHasUnacceptableUserPermissions +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 000000000..73663c8f1 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,10 @@ +//go:build windows +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 000000000..aec6e95be --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. +// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"='"+escaper.Replace(v)+"'") + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/lib/pq/user_other.go b/vendor/github.com/lib/pq/user_other.go new file mode 100644 index 000000000..3dae8f557 --- /dev/null +++ b/vendor/github.com/lib/pq/user_other.go @@ -0,0 +1,10 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. 
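(Editorial sketch, not part of the vendored file.) ParseURL's conversion in practice; per the accrue/sort logic above, the pairs come back single-quoted and sorted alphabetically by key:

	dsn, err := pq.ParseURL("postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dsn)
	// dbname='mydb' host='1.2.3.4' password='secret' port='5432' sslmode='verify-full' user='bob'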
+ +//go:build js || android || hurd || zos +// +build js android hurd zos + +package pq + +func userCurrent() (string, error) { + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 000000000..5f2d439bc --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,25 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +//go:build aix || darwin || dragonfly || freebsd || (linux && !android) || nacl || netbsd || openbsd || plan9 || solaris || rumprun || illumos +// +build aix darwin dragonfly freebsd linux,!android nacl netbsd openbsd plan9 solaris rumprun illumos + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 000000000..2b691267b --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 000000000..9a1b9e074 --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. +func decodeUUIDBinary(src []byte) ([]byte, error) { + if len(src) != 16 { + return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) + } + + dst := make([]byte, 36) + dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' + hex.Encode(dst[0:], src[0:4]) + hex.Encode(dst[9:], src[4:6]) + hex.Encode(dst[14:], src[6:8]) + hex.Encode(dst[19:], src[8:10]) + hex.Encode(dst[24:], src[10:16]) + + return dst, nil +} diff --git a/vendor/github.com/liggitt/tabwriter/.travis.yml b/vendor/github.com/liggitt/tabwriter/.travis.yml new file mode 100644 index 000000000..2768dc072 --- /dev/null +++ b/vendor/github.com/liggitt/tabwriter/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - "1.8" + - "1.9" + - "1.10" + - "1.11" + - "1.12" + - master + +script: go test -v ./... diff --git a/vendor/github.com/liggitt/tabwriter/LICENSE b/vendor/github.com/liggitt/tabwriter/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/liggitt/tabwriter/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/liggitt/tabwriter/README.md b/vendor/github.com/liggitt/tabwriter/README.md new file mode 100644 index 000000000..e75d35672 --- /dev/null +++ b/vendor/github.com/liggitt/tabwriter/README.md @@ -0,0 +1,7 @@ +This repo is a drop-in replacement for the golang [text/tabwriter](https://golang.org/pkg/text/tabwriter/) package. + +It is based on that package at [cf2c2ea8](https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a/src/text/tabwriter) and inherits its license. + +The following additional features are supported: +* `RememberWidths` flag allows remembering maximum widths seen per column even after Flush() is called. +* `RememberedWidths() []int` and `SetRememberedWidths([]int) *Writer` allows obtaining and transferring remembered column width between writers. diff --git a/vendor/github.com/liggitt/tabwriter/tabwriter.go b/vendor/github.com/liggitt/tabwriter/tabwriter.go new file mode 100644 index 000000000..fd3431fb0 --- /dev/null +++ b/vendor/github.com/liggitt/tabwriter/tabwriter.go @@ -0,0 +1,637 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tabwriter implements a write filter (tabwriter.Writer) that +// translates tabbed columns in input into properly aligned text. +// +// It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter), +// based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a +// with support for additional features. +// +// The package is using the Elastic Tabstops algorithm described at +// http://nickgravgaard.com/elastictabstops/index.html. +package tabwriter + +import ( + "io" + "unicode/utf8" +) + +// ---------------------------------------------------------------------------- +// Filter implementation + +// A cell represents a segment of text terminated by tabs or line breaks. 
+// The text itself is stored in a separate buffer; cell only describes the +// segment's size in bytes, its width in runes, and whether it's an htab +// ('\t') terminated cell. +// +type cell struct { + size int // cell size in bytes + width int // cell width in runes + htab bool // true if the cell is terminated by an htab ('\t') +} + +// A Writer is a filter that inserts padding around tab-delimited +// columns in its input to align them in the output. +// +// The Writer treats incoming bytes as UTF-8-encoded text consisting +// of cells terminated by horizontal ('\t') or vertical ('\v') tabs, +// and newline ('\n') or formfeed ('\f') characters; both newline and +// formfeed act as line breaks. +// +// Tab-terminated cells in contiguous lines constitute a column. The +// Writer inserts padding as needed to make all cells in a column have +// the same width, effectively aligning the columns. It assumes that +// all characters have the same width, except for tabs for which a +// tabwidth must be specified. Column cells must be tab-terminated, not +// tab-separated: non-tab terminated trailing text at the end of a line +// forms a cell but that cell is not part of an aligned column. +// For instance, in this example (where | stands for a horizontal tab): +// +// aaaa|bbb|d +// aa |b |dd +// a | +// aa |cccc|eee +// +// the b and c are in distinct columns (the b column is not contiguous +// all the way). The d and e are not in a column at all (there's no +// terminating tab, nor would the column be contiguous). +// +// The Writer assumes that all Unicode code points have the same width; +// this may not be true in some fonts or if the string contains combining +// characters. +// +// If DiscardEmptyColumns is set, empty columns that are terminated +// entirely by vertical (or "soft") tabs are discarded. Columns +// terminated by horizontal (or "hard") tabs are not affected by +// this flag. +// +// If a Writer is configured to filter HTML, HTML tags and entities +// are passed through. The widths of tags and entities are +// assumed to be zero (tags) and one (entities) for formatting purposes. +// +// A segment of text may be escaped by bracketing it with Escape +// characters. The tabwriter passes escaped text segments through +// unchanged. In particular, it does not interpret any tabs or line +// breaks within the segment. If the StripEscape flag is set, the +// Escape characters are stripped from the output; otherwise they +// are passed through as well. For the purpose of formatting, the +// width of the escaped text is always computed excluding the Escape +// characters. +// +// The formfeed character acts like a newline but it also terminates +// all columns in the current line (effectively calling Flush). Tab- +// terminated cells in the next line start new columns. Unless found +// inside an HTML tag or inside an escaped text segment, formfeed +// characters appear as newlines in the output. +// +// The Writer must buffer input internally, because proper spacing +// of one line may depend on the cells in future lines. Clients must +// call Flush when done calling Write. 
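// (Editorial sketch, not part of the vendored file.) Typical use is identical
// to text/tabwriter, since this package is a drop-in replacement:
//
//	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
//	fmt.Fprintln(w, "NAME\tREADY\tSTATUS")
//	fmt.Fprintln(w, "etcd-main-0\t1/1\tRunning")
//	w.Flush() // required: column widths can depend on later lines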
+// +type Writer struct { + // configuration + output io.Writer + minwidth int + tabwidth int + padding int + padbytes [8]byte + flags uint + + // current state + buf []byte // collected text excluding tabs or line breaks + pos int // buffer position up to which cell.width of incomplete cell has been computed + cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections + endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0) + lines [][]cell // list of lines; each line is a list of cells + widths []int // list of column widths in runes - re-used during formatting + + maxwidths []int // list of max column widths in runes +} + +// addLine adds a new line. +// flushed is a hint indicating whether the underlying writer was just flushed. +// If so, the previous line is not likely to be a good indicator of the new line's cells. +func (b *Writer) addLine(flushed bool) { + // Grow slice instead of appending, + // as that gives us an opportunity + // to re-use an existing []cell. + if n := len(b.lines) + 1; n <= cap(b.lines) { + b.lines = b.lines[:n] + b.lines[n-1] = b.lines[n-1][:0] + } else { + b.lines = append(b.lines, nil) + } + + if !flushed { + // The previous line is probably a good indicator + // of how many cells the current line will have. + // If the current line's capacity is smaller than that, + // abandon it and make a new one. + if n := len(b.lines); n >= 2 { + if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) { + b.lines[n-1] = make([]cell, 0, prev) + } + } + } +} + +// Reset the current state. +func (b *Writer) reset() { + b.buf = b.buf[:0] + b.pos = 0 + b.cell = cell{} + b.endChar = 0 + b.lines = b.lines[0:0] + b.widths = b.widths[0:0] + b.addLine(true) +} + +// Internal representation (current state): +// +// - all text written is appended to buf; tabs and line breaks are stripped away +// - at any given time there is a (possibly empty) incomplete cell at the end +// (the cell starts after a tab or line break) +// - cell.size is the number of bytes belonging to the cell so far +// - cell.width is text width in runes of that cell from the start of the cell to +// position pos; html tags and entities are excluded from this width if html +// filtering is enabled +// - the sizes and widths of processed text are kept in the lines list +// which contains a list of cells for each line +// - the widths list is a temporary list with current widths used during +// formatting; it is kept in Writer because it's re-used +// +// |<---------- size ---------->| +// | | +// |<- width ->|<- ignored ->| | +// | | | | +// [---processed---tab------------......] +// ^ ^ ^ +// | | | +// buf start of incomplete cell pos + +// Formatting can be controlled with these flags. +const ( + // Ignore html tags and treat entities (starting with '&' + // and ending in ';') as single characters (width = 1). + FilterHTML uint = 1 << iota + + // Strip Escape characters bracketing escaped text segments + // instead of passing them through unchanged with the text. + StripEscape + + // Force right-alignment of cell content. + // Default is left-alignment. + AlignRight + + // Handle empty columns as if they were not present in + // the input in the first place. + DiscardEmptyColumns + + // Always use tabs for indentation columns (i.e., padding of + // leading empty cells on the left) independent of padchar. + TabIndent + + // Print a vertical bar ('|') between columns (after formatting). 
+ // Discarded columns appear as zero-width columns ("||"). + Debug + + // Remember maximum widths seen per column even after Flush() is called. + RememberWidths +) + +// A Writer must be initialized with a call to Init. The first parameter (output) +// specifies the filter output. The remaining parameters control the formatting: +// +// minwidth minimal cell width including any padding +// tabwidth width of tab characters (equivalent number of spaces) +// padding padding added to a cell before computing its width +// padchar ASCII char used for padding +// if padchar == '\t', the Writer will assume that the +// width of a '\t' in the formatted output is tabwidth, +// and cells are left-aligned independent of align_left +// (for correct-looking results, tabwidth must correspond +// to the tab width in the viewer displaying the result) +// flags formatting control +// +func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { + if minwidth < 0 || tabwidth < 0 || padding < 0 { + panic("negative minwidth, tabwidth, or padding") + } + b.output = output + b.minwidth = minwidth + b.tabwidth = tabwidth + b.padding = padding + for i := range b.padbytes { + b.padbytes[i] = padchar + } + if padchar == '\t' { + // tab padding enforces left-alignment + flags &^= AlignRight + } + b.flags = flags + + b.reset() + + return b +} + +// debugging support (keep code around) +func (b *Writer) dump() { + pos := 0 + for i, line := range b.lines { + print("(", i, ") ") + for _, c := range line { + print("[", string(b.buf[pos:pos+c.size]), "]") + pos += c.size + } + print("\n") + } + print("\n") +} + +// local error wrapper so we can distinguish errors we want to return +// as errors from genuine panics (which we don't want to return as errors) +type osError struct { + err error +} + +func (b *Writer) write0(buf []byte) { + n, err := b.output.Write(buf) + if n != len(buf) && err == nil { + err = io.ErrShortWrite + } + if err != nil { + panic(osError{err}) + } +} + +func (b *Writer) writeN(src []byte, n int) { + for n > len(src) { + b.write0(src) + n -= len(src) + } + b.write0(src[0:n]) +} + +var ( + newline = []byte{'\n'} + tabs = []byte("\t\t\t\t\t\t\t\t") +) + +func (b *Writer) writePadding(textw, cellw int, useTabs bool) { + if b.padbytes[0] == '\t' || useTabs { + // padding is done with tabs + if b.tabwidth == 0 { + return // tabs have no width - can't do any padding + } + // make cellw the smallest multiple of b.tabwidth + cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth + n := cellw - textw // amount of padding + if n < 0 { + panic("internal error") + } + b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth) + return + } + + // padding is done with non-tab characters + b.writeN(b.padbytes[0:], cellw-textw) +} + +var vbar = []byte{'|'} + +func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) { + pos = pos0 + for i := line0; i < line1; i++ { + line := b.lines[i] + + // if TabIndent is set, use tabs to pad leading empty cells + useTabs := b.flags&TabIndent != 0 + + for j, c := range line { + if j > 0 && b.flags&Debug != 0 { + // indicate column break + b.write0(vbar) + } + + if c.size == 0 { + // empty cell + if j < len(b.widths) { + b.writePadding(c.width, b.widths[j], useTabs) + } + } else { + // non-empty cell + useTabs = false + if b.flags&AlignRight == 0 { // align left + b.write0(b.buf[pos : pos+c.size]) + pos += c.size + if j < len(b.widths) { + b.writePadding(c.width, b.widths[j], false) + } + } else { // align right + if j < len(b.widths) { 
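	// (Editorial note.) For right alignment the padding is emitted before the
	// cell text, so the text's right edge lands on the column boundary.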
+ b.writePadding(c.width, b.widths[j], false) + } + b.write0(b.buf[pos : pos+c.size]) + pos += c.size + } + } + } + + if i+1 == len(b.lines) { + // last buffered line - we don't have a newline, so just write + // any outstanding buffered data + b.write0(b.buf[pos : pos+b.cell.size]) + pos += b.cell.size + } else { + // not the last line - write newline + b.write0(newline) + } + } + return +} + +// Format the text between line0 and line1 (excluding line1); pos +// is the buffer position corresponding to the beginning of line0. +// Returns the buffer position corresponding to the beginning of +// line1 and an error, if any. +// +func (b *Writer) format(pos0 int, line0, line1 int) (pos int) { + pos = pos0 + column := len(b.widths) + for this := line0; this < line1; this++ { + line := b.lines[this] + + if column >= len(line)-1 { + continue + } + // cell exists in this column => this line + // has more cells than the previous line + // (the last cell per line is ignored because cells are + // tab-terminated; the last cell per line describes the + // text before the newline/formfeed and does not belong + // to a column) + + // print unprinted lines until beginning of block + pos = b.writeLines(pos, line0, this) + line0 = this + + // column block begin + width := b.minwidth // minimal column width + discardable := true // true if all cells in this column are empty and "soft" + for ; this < line1; this++ { + line = b.lines[this] + if column >= len(line)-1 { + break + } + // cell exists in this column + c := line[column] + // update width + if w := c.width + b.padding; w > width { + width = w + } + // update discardable + if c.width > 0 || c.htab { + discardable = false + } + } + // column block end + + // discard empty columns if necessary + if discardable && b.flags&DiscardEmptyColumns != 0 { + width = 0 + } + + if b.flags&RememberWidths != 0 { + if len(b.maxwidths) < len(b.widths) { + b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...) + } + + switch { + case len(b.maxwidths) == len(b.widths): + b.maxwidths = append(b.maxwidths, width) + case b.maxwidths[len(b.widths)] > width: + width = b.maxwidths[len(b.widths)] + case b.maxwidths[len(b.widths)] < width: + b.maxwidths[len(b.widths)] = width + } + } + + // format and print all columns to the right of this column + // (we know the widths of this column and all columns to the left) + b.widths = append(b.widths, width) // push width + pos = b.format(pos, line0, this) + b.widths = b.widths[0 : len(b.widths)-1] // pop width + line0 = this + } + + // print unprinted lines until end + return b.writeLines(pos, line0, line1) +} + +// Append text to current cell. +func (b *Writer) append(text []byte) { + b.buf = append(b.buf, text...) + b.cell.size += len(text) +} + +// Update the cell width. +func (b *Writer) updateWidth() { + b.cell.width += utf8.RuneCount(b.buf[b.pos:]) + b.pos = len(b.buf) +} + +// To escape a text segment, bracket it with Escape characters. +// For instance, the tab in this string "Ignore this tab: \xff\t\xff" +// does not terminate a cell and constitutes a single character of +// width one for formatting purposes. +// +// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence. +// +const Escape = '\xff' + +// Start escaped mode. +func (b *Writer) startEscape(ch byte) { + switch ch { + case Escape: + b.endChar = Escape + case '<': + b.endChar = '>' + case '&': + b.endChar = ';' + } +} + +// Terminate escaped mode. 
If the escaped text was an HTML tag, its width +// is assumed to be zero for formatting purposes; if it was an HTML entity, +// its width is assumed to be one. In all other cases, the width is the +// unicode width of the text. +// +func (b *Writer) endEscape() { + switch b.endChar { + case Escape: + b.updateWidth() + if b.flags&StripEscape == 0 { + b.cell.width -= 2 // don't count the Escape chars + } + case '>': // tag of zero width + case ';': + b.cell.width++ // entity, count as one rune + } + b.pos = len(b.buf) + b.endChar = 0 +} + +// Terminate the current cell by adding it to the list of cells of the +// current line. Returns the number of cells in that line. +// +func (b *Writer) terminateCell(htab bool) int { + b.cell.htab = htab + line := &b.lines[len(b.lines)-1] + *line = append(*line, b.cell) + b.cell = cell{} + return len(*line) +} + +func handlePanic(err *error, op string) { + if e := recover(); e != nil { + if nerr, ok := e.(osError); ok { + *err = nerr.err + return + } + panic("tabwriter: panic during " + op) + } +} + +// RememberedWidths returns a copy of the remembered per-column maximum widths. +// Requires use of the RememberWidths flag, and is not threadsafe. +func (b *Writer) RememberedWidths() []int { + retval := make([]int, len(b.maxwidths)) + copy(retval, b.maxwidths) + return retval +} + +// SetRememberedWidths sets the remembered per-column maximum widths. +// Requires use of the RememberWidths flag, and is not threadsafe. +func (b *Writer) SetRememberedWidths(widths []int) *Writer { + b.maxwidths = make([]int, len(widths)) + copy(b.maxwidths, widths) + return b +} + +// Flush should be called after the last call to Write to ensure +// that any data buffered in the Writer is written to output. Any +// incomplete escape sequence at the end is considered +// complete for formatting purposes. +func (b *Writer) Flush() error { + return b.flush() +} + +func (b *Writer) flush() (err error) { + defer b.reset() // even in the presence of errors + defer handlePanic(&err, "Flush") + + // add current cell if not empty + if b.cell.size > 0 { + if b.endChar != 0 { + // inside escape - terminate it even if incomplete + b.endEscape() + } + b.terminateCell(false) + } + + // format contents of buffer + b.format(0, 0, len(b.lines)) + return nil +} + +var hbar = []byte("---\n") + +// Write writes buf to the writer b. +// The only errors returned are ones encountered +// while writing to the underlying output stream. +// +func (b *Writer) Write(buf []byte) (n int, err error) { + defer handlePanic(&err, "Write") + + // split text into cells + n = 0 + for i, ch := range buf { + if b.endChar == 0 { + // outside escape + switch ch { + case '\t', '\v', '\n', '\f': + // end of cell + b.append(buf[n:i]) + b.updateWidth() + n = i + 1 // ch consumed + ncells := b.terminateCell(ch == '\t') + if ch == '\n' || ch == '\f' { + // terminate line + b.addLine(ch == '\f') + if ch == '\f' || ncells == 1 { + // A '\f' always forces a flush. Otherwise, if the previous + // line has only one cell which does not have an impact on + // the formatting of the following lines (the last cell per + // line is ignored by format()), thus we can flush the + // Writer contents. 
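	// (Editorial sketch, not part of the vendored file.) The RememberedWidths
	// and SetRememberedWidths methods above let independent writers share one
	// column layout:
	//
	//	w1 := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', tabwriter.RememberWidths)
	//	fmt.Fprintln(w1, "NAME\tSTATUS")
	//	w1.Flush()
	//	w2 := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', tabwriter.RememberWidths)
	//	w2.SetRememberedWidths(w1.RememberedWidths()) // w2 keeps w1's columns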
+ if err = b.Flush(); err != nil { + return + } + if ch == '\f' && b.flags&Debug != 0 { + // indicate section break + b.write0(hbar) + } + } + } + + case Escape: + // start of escaped sequence + b.append(buf[n:i]) + b.updateWidth() + n = i + if b.flags&StripEscape != 0 { + n++ // strip Escape + } + b.startEscape(Escape) + + case '<', '&': + // possibly an html tag/entity + if b.flags&FilterHTML != 0 { + // begin of tag/entity + b.append(buf[n:i]) + b.updateWidth() + n = i + b.startEscape(ch) + } + } + + } else { + // inside escape + if ch == b.endChar { + // end of tag/entity + j := i + 1 + if ch == Escape && b.flags&StripEscape != 0 { + j = i // strip Escape + } + b.append(buf[n:j]) + n = i + 1 // ch consumed + b.endEscape() + } + } + } + + // append leftover text + b.append(buf[n:]) + n = len(buf) + return +} + +// NewWriter allocates and initializes a new tabwriter.Writer. +// The parameters are the same as for the Init function. +// +func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { + return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags) +} diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 000000000..ca0483711 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! 
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 000000000..416d1bbbf --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,38 @@ +//go:build appengine +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 000000000..766d94603 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +//go:build !windows && !appengine +// +build !windows,!appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 000000000..1846ad5ab --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1047 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. 
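// (Editorial sketch, not part of the vendored file.) Any ANSI-colored output
// can be routed through the returned writer so the escape sequences render on
// Windows consoles as well:
//
//	stdout := colorable.NewColorableStdout()
//	fmt.Fprintln(stdout, "\x1b[32mready\x1b[0m etcd-main-0")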
+func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 
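	// (Editorial note.) Entries 16-231 of this map form the xterm 6x6x6 color
	// cube, each channel taking values {0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff};
	// entries 232-255 are the grayscale ramp 0x08 + 0x0a*k.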
199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == 
'@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = 
dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. 
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset background color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle
+ } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 000000000..05d6f74bf --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
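The two mattn packages vendored above are commonly wired together by logging and test-output libraries: go-isatty decides whether a file descriptor is a terminal, and go-colorable supplies a writer that either emulates ANSI escape sequences via console API calls on legacy Windows consoles or strips them. A minimal illustrative sketch of that pairing (not part of the vendored sources; it assumes only the public APIs shown in these files):

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

func main() {
	// NewColorableStdout interprets ANSI escapes itself on legacy Windows
	// consoles and passes them through unchanged everywhere else.
	out := colorable.NewColorableStdout()

	// If stdout is neither a regular terminal nor a Cygwin/MSYS2 pty
	// (e.g. it is redirected to a file), strip escape sequences instead.
	if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) {
		out = colorable.NewNonColorable(os.Stdout)
	}

	fmt.Fprintln(out, "\x1b[32mgreen where supported, plain elsewhere\x1b[0m")
}
```

Redirecting the program's output to a file takes the NewNonColorable path, so captured logs stay free of raw escape bytes.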
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 000000000..38418353e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..d569c0c94 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,19 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine +// +build darwin freebsd openbsd netbsd dragonfly hurd +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 000000000..31503226f --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,16 @@ +//go:build appengine || js || nacl || wasm +// +build appengine js nacl wasm + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. 
+func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 000000000..bae7f9bb3 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,23 @@ +//go:build plan9 +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..0c3acf2dc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,21 @@ +//go:build solaris && !appengine +// +build solaris,!appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 000000000..67787657f --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..8e3c99171 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get the full name of a file +// from its handle. GetFileInformationByHandleEx is only available on Windows Vista and later, so this +// serves as a fallback for older systems such as Windows XP; it also works on Windows Vista through 10. +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe.
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml new file mode 100644 index 000000000..6a21813a3 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - go generate + - git diff --cached --exit-code + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md new file mode 100644 index 000000000..aa56ab96c --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/README.md @@ -0,0 +1,27 @@ +go-runewidth +============ + +[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) +[![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth) +[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) +[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) + +Provides functions to get fixed width of the character or string. 
+ +Usage +----- + +```go +runewidth.StringWidth("つのだ☆HIRO") == 12 +``` + + +Author +------ + +Yasuhiro Matsumoto + +License +------- + +under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/mattn/go-runewidth/go.test.sh b/vendor/github.com/mattn/go-runewidth/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go new file mode 100644 index 000000000..19f8e0449 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -0,0 +1,257 @@ +package runewidth + +import ( + "os" +) + +//go:generate go run script/generate.go + +var ( + // EastAsianWidth will be set true if the current locale is CJK + EastAsianWidth bool + + // ZeroWidthJoiner is flag to set to use UTR#51 ZWJ + ZeroWidthJoiner bool + + // DefaultCondition is a condition in current locale + DefaultCondition = &Condition{} +) + +func init() { + handleEnv() +} + +func handleEnv() { + env := os.Getenv("RUNEWIDTH_EASTASIAN") + if env == "" { + EastAsianWidth = IsEastAsian() + } else { + EastAsianWidth = env == "1" + } + // update DefaultCondition + DefaultCondition.EastAsianWidth = EastAsianWidth + DefaultCondition.ZeroWidthJoiner = ZeroWidthJoiner +} + +type interval struct { + first rune + last rune +} + +type table []interval + +func inTables(r rune, ts ...table) bool { + for _, t := range ts { + if inTable(r, t) { + return true + } + } + return false +} + +func inTable(r rune, t table) bool { + if r < t[0].first { + return false + } + + bot := 0 + top := len(t) - 1 + for top >= bot { + mid := (bot + top) >> 1 + + switch { + case t[mid].last < r: + bot = mid + 1 + case t[mid].first > r: + top = mid - 1 + default: + return true + } + } + + return false +} + +var private = table{ + {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, +} + +var nonprint = table{ + {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, + {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, + {0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, +} + +// Condition have flag EastAsianWidth whether the current locale is CJK or not. +type Condition struct { + EastAsianWidth bool + ZeroWidthJoiner bool +} + +// NewCondition return new instance of Condition which is current locale. +func NewCondition() *Condition { + return &Condition{ + EastAsianWidth: EastAsianWidth, + ZeroWidthJoiner: ZeroWidthJoiner, + } +} + +// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/ +func (c *Condition) RuneWidth(r rune) int { + switch { + case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned): + return 0 + case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth): + return 2 + default: + return 1 + } +} + +func (c *Condition) stringWidth(s string) (width int) { + for _, r := range []rune(s) { + width += c.RuneWidth(r) + } + return width +} + +func (c *Condition) stringWidthZeroJoiner(s string) (width int) { + r1, r2 := rune(0), rune(0) + for _, r := range []rune(s) { + if r == 0xFE0E || r == 0xFE0F { + continue + } + w := c.RuneWidth(r) + if r2 == 0x200D && inTables(r, emoji) && inTables(r1, emoji) { + if width < w { + width = w + } + } else { + width += w + } + r1, r2 = r2, r + } + return width +} + +// StringWidth return width as you can see +func (c *Condition) StringWidth(s string) (width int) { + if c.ZeroWidthJoiner { + return c.stringWidthZeroJoiner(s) + } + return c.stringWidth(s) +} + +// Truncate return string truncated with w cells +func (c *Condition) Truncate(s string, w int, tail string) string { + if c.StringWidth(s) <= w { + return s + } + r := []rune(s) + tw := c.StringWidth(tail) + w -= tw + width := 0 + i := 0 + for ; i < len(r); i++ { + cw := c.RuneWidth(r[i]) + if width+cw > w { + break + } + width += cw + } + return string(r[0:i]) + tail +} + +// Wrap return string wrapped with w cells +func (c *Condition) Wrap(s string, w int) string { + width := 0 + out := "" + for _, r := range []rune(s) { + cw := RuneWidth(r) + if r == '\n' { + out += string(r) + width = 0 + continue + } else if width+cw > w { + out += "\n" + width = 0 + out += string(r) + width += cw + continue + } + out += string(r) + width += cw + } + return out +} + +// FillLeft return string filled in left by spaces in w cells +func (c *Condition) FillLeft(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return string(b) + s + } + return s +} + +// FillRight return string filled in right by spaces in w cells +func (c *Condition) FillRight(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return s + string(b) + } + return s +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func RuneWidth(r rune) int { + return DefaultCondition.RuneWidth(r) +} + +// IsAmbiguousWidth returns whether is ambiguous width or not. +func IsAmbiguousWidth(r rune) bool { + return inTables(r, private, ambiguous) +} + +// IsNeutralWidth returns whether is neutral width or not.
+func IsNeutralWidth(r rune) bool { + return inTable(r, neutral) +} + +// StringWidth return width as you can see +func StringWidth(s string) (width int) { + return DefaultCondition.StringWidth(s) +} + +// Truncate return string truncated with w cells +func Truncate(s string, w int, tail string) string { + return DefaultCondition.Truncate(s, w, tail) +} + +// Wrap return string wrapped with w cells +func Wrap(s string, w int) string { + return DefaultCondition.Wrap(s, w) +} + +// FillLeft return string filled in left by spaces in w cells +func FillLeft(s string, w int) string { + return DefaultCondition.FillLeft(s, w) +} + +// FillRight return string filled in right by spaces in w cells +func FillRight(s string, w int) string { + return DefaultCondition.FillRight(s, w) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go new file mode 100644 index 000000000..7d99f6e52 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go @@ -0,0 +1,8 @@ +// +build appengine + +package runewidth + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go new file mode 100644 index 000000000..c5fdf40ba --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go @@ -0,0 +1,9 @@ +// +build js +// +build !appengine + +package runewidth + +func IsEastAsian() bool { + // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go new file mode 100644 index 000000000..480ad7485 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go @@ -0,0 +1,82 @@ +// +build !windows +// +build !js +// +build !appengine + +package runewidth + +import ( + "os" + "regexp" + "strings" +) + +var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) + +var mblenTable = map[string]int{ + "utf-8": 6, + "utf8": 6, + "jis": 8, + "eucjp": 3, + "euckr": 2, + "euccn": 2, + "sjis": 2, + "cp932": 2, + "cp51932": 2, + "cp936": 2, + "cp949": 2, + "cp950": 2, + "big5": 2, + "gbk": 2, + "gb2312": 2, +} + +func isEastAsian(locale string) bool { + charset := strings.ToLower(locale) + r := reLoc.FindStringSubmatch(locale) + if len(r) == 2 { + charset = strings.ToLower(r[1]) + } + + if strings.HasSuffix(charset, "@cjk_narrow") { + return false + } + + for pos, b := range []byte(charset) { + if b == '@' { + charset = charset[:pos] + break + } + } + max := 1 + if m, ok := mblenTable[charset]; ok { + max = m + } + if max > 1 && (charset[0] != 'u' || + strings.HasPrefix(locale, "ja") || + strings.HasPrefix(locale, "ko") || + strings.HasPrefix(locale, "zh")) { + return true + } + return false +} + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + locale := os.Getenv("LC_ALL") + if locale == "" { + locale = os.Getenv("LC_CTYPE") + } + if locale == "" { + locale = os.Getenv("LANG") + } + + // ignore C locale + if locale == "POSIX" || locale == "C" { + return false + } + if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.'
|| locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go new file mode 100644 index 000000000..b27d77d89 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -0,0 +1,437 @@ +// Code generated by script/generate.go. DO NOT EDIT. + +package runewidth + +var combining = table{ + {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3}, + {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01}, + {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0}, + {0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF}, + {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF}, + {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, + {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1}, + {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A}, + {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301}, + {0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, + {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, + {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, + {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, + {0x1E8D0, 0x1E8D6}, +} + +var doublewidth = table{ + {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, + {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, + {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, + {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, + {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, + {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, + {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, + {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, + {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, + {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, + {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, + {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, + {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, + {0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3}, + {0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF}, + {0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C}, + {0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, + {0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, + {0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, + {0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5}, + {0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152}, + {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004}, + {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, + {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320}, + {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, + {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, + {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, + {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, + {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, + {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, + {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7}, + {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978}, + {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74}, + {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8}, + {0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 
0x1FAD6}, + {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, +} + +var ambiguous = table{ + {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, + {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, + {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, + {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, + {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, + {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, + {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, + {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, + {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, + {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, + {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, + {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, + {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, + {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, + {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, + {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, + {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, + {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, + {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, + {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, + {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, + {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, + {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, + {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, + {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, + {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, + {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, + {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, + {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, + {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, + {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, + {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, + {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, + {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, + {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, + {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, + {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, + {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, + {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, + {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, + {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, + {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, + {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, + {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, + {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, + {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, + {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, + {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, + {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, + {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, + {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, + {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, + {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, + {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, + {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, + {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, + {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, + {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, + {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, + {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, +} +var notassigned = table{ + {0x27E6, 0x27ED}, {0x2985, 0x2986}, +} + +var neutral = table{ + {0x0000, 
0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9}, + {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, + {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, + {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, + {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, + {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, + {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, + {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, + {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, + {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, + {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, + {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, + {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, + {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250}, + {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6}, + {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, + {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, + {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F}, + {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390}, + {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400}, + {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F}, + {0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F}, + {0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4}, + {0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A}, + {0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D}, + {0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E}, + {0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7}, + {0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990}, + {0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, + {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, + {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, + {0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03}, + {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, + {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, + {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, + {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, + {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, + {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, + {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, + {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, + {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, + {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, + {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, + {0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, + {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, + {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, + {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, + {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, + {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, + {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, + {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, + {0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, + {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90}, + {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, + {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, + {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3}, + {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C}, + {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, + {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, + {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, 
{0x0DC0, 0x0DC6}, + {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, + {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, + {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, + {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3}, + {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4}, + {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, + {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, + {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, + {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7}, + {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248}, + {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, + {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, + {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, + {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, + {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, + {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5}, + {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8}, + {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736}, + {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, + {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9}, + {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819}, + {0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5}, + {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B}, + {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974}, + {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA}, + {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C}, + {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD}, + {0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C}, + {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49}, + {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7}, + {0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15}, + {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, + {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, + {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, + {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB}, + {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE}, + {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017}, + {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023}, + {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, + {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064}, + {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080}, + {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, + {0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0}, + {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108}, + {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120}, + {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152}, + {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, + {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7}, + {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6}, + {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206}, + {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210}, + {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C}, + {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226}, + {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B}, + {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251}, + {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269}, + {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285}, + {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4}, + {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319}, + {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF}, + {0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A}, + {0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F}, + {0x2590, 0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2}, + 
{0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, + {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, + {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, + {0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608}, + {0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B}, + {0x261D, 0x261D}, {0x261F, 0x263F}, {0x2641, 0x2641}, + {0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662}, + {0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E}, + {0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D}, + {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC}, + {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7}, + {0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727}, + {0x2729, 0x273C}, {0x273E, 0x274B}, {0x274D, 0x274D}, + {0x274F, 0x2752}, {0x2756, 0x2756}, {0x2758, 0x2775}, + {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE}, + {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A}, + {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73}, + {0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E}, + {0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, + {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, + {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, + {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, + {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, + {0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, + {0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF}, + {0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839}, + {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, + {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD}, + {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36}, + {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2}, + {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, + {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, + {0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9}, + {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF}, + {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36}, + {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, + {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F}, + {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD}, + {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B}, + {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, + {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA}, + {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E}, + {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD}, + {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB}, + {0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A}, + {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5}, + {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, + {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, + {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, + {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, + {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, + {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF}, + {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B}, + {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7}, + {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06}, + {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58}, + {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6}, + {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72}, + {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, + {0x10C00, 
0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, + {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E}, + {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1}, + {0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB}, + {0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F}, + {0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, + {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147}, + {0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4}, + {0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286}, + {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, + {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, + {0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, + {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, + {0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348}, + {0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357}, + {0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7}, + {0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD}, + {0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, + {0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A}, + {0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B}, + {0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909}, + {0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935}, + {0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959}, + {0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, + {0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8}, + {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45}, + {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7}, + {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09}, + {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D}, + {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65}, + {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91}, + {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8}, + {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399}, + {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, + {0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646}, + {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, + {0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, + {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, + {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A}, + {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F}, + {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, + {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5}, + {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245}, + {0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, + {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, + {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, + {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, + {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, + {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, + {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, + {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, + {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, + {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9}, + {0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6}, + {0x1E900, 0x1E94B}, {0x1E950, 
0x1E959}, {0x1E95E, 0x1E95F}, + {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03}, + {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, + {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, + {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, + {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B}, + {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54}, + {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, + {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, + {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72}, + {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, + {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, + {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1}, + {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093}, + {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE}, + {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F}, + {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF}, + {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, + {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, + {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, + {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, + {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, + {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, + {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4}, + {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, + {0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, + {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, + {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B}, + {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D}, + {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9}, + {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, +} + +var emoji = table{ + {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122}, + {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA}, + {0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388}, + {0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, + {0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, + {0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605}, + {0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705}, + {0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716}, + {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728}, + {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747}, + {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755}, + {0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797}, + {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, + {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030}, + {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299}, + {0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F}, + {0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, + {0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F}, + {0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, + {0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D}, + {0x1F546, 0x1F64F}, {0x1F680, 0x1F6FF}, {0x1F774, 0x1F77F}, + {0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, + {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF}, + {0x1FC00, 0x1FFFD}, +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 000000000..d6a61777d 
--- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,28 @@ +// +build windows +// +build !appengine + +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian returns true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml deleted file mode 100644 index d7b9589ab..000000000 --- a/vendor/github.com/mitchellh/copystructure/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.7 - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md index bcb8c8d2c..f0fbd2e5c 100644 --- a/vendor/github.com/mitchellh/copystructure/README.md +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -1,21 +1,21 @@ -# copystructure - -copystructure is a Go library for deep copying values in Go. - -This allows you to copy Go values that may contain reference values -such as maps, slices, or pointers, and copy their data as well instead -of just their references. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/copystructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). - -The `Copy` function has examples associated with it there. +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go index 140435255..8089e6670 100644 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -8,7 +8,30 @@ import ( "github.com/mitchellh/reflectwalk" ) +const tagKey = "copy" + // Copy returns a deep copy of v. +// +// Copy is unable to copy unexported fields in a struct (lowercase field names). +// Unexported fields can't be reflected by the Go runtime and therefore +// copystructure can't perform any data copies. +// +// For structs, copy behavior can be controlled with struct tags. For example: +// +// struct { +// Name string +// Data *bytes.Buffer `copy:"shallow"` +// } +// +// The available tag values are: +// +// * "ignore" - The field will be ignored, effectively resulting in it being +// assigned the zero value in the copy. +// +// * "shallow" - The field will be shallow copied. This means that reference +// values such as pointers, maps, slices, etc. will be directly assigned +// versus deep copied.
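The `copy` tag values documented above behave as in the following minimal sketch (the `Doc` struct and its values are invented for illustration; `copystructure.Copy` and the `ignore`/`shallow` tags are the ones defined in this diff):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mitchellh/copystructure"
)

type Doc struct {
	Name  string
	Cache *bytes.Buffer `copy:"ignore"`  // left as the zero value in the copy
	Data  *bytes.Buffer `copy:"shallow"` // pointer is assigned directly, not deep copied
}

func main() {
	orig := &Doc{
		Name:  "example",
		Cache: bytes.NewBufferString("cached"),
		Data:  bytes.NewBufferString("payload"),
	}

	dup, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}

	c := dup.(*Doc)
	fmt.Println(c.Name)              // "example" (deep copied)
	fmt.Println(c.Cache == nil)      // true: "ignore" zeroes the field
	fmt.Println(c.Data == orig.Data) // true: "shallow" reuses the same pointer
}
```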
+// func Copy(v interface{}) (interface{}, error) { return Config{}.Copy(v) } @@ -28,6 +51,19 @@ type CopierFunc func(interface{}) (interface{}, error) // this map as well as to Copy in a mutex. var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) +// ShallowCopiers is a map of pointer types that behave specially +// when they are copied. If a type is found in this map while deep +// copying, the pointer value will be shallow copied and not walked +// into. +// +// The key should be the type, obtained using: reflect.TypeOf(value +// with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{}) + // Must is a helper that wraps a call to a function returning // (interface{}, error) and panics if the error is non-nil. It is intended // for use in variable initializations and should only be used when a copy @@ -50,6 +86,11 @@ type Config struct { // Copiers is a map of types associated with a CopierFunc. Use the global // Copiers map if this is nil. Copiers map[reflect.Type]CopierFunc + + // ShallowCopiers is a map of pointer types that are + // shallow copied no matter where they are encountered. Use the + // global ShallowCopiers if this is nil. + ShallowCopiers map[reflect.Type]struct{} } func (c Config) Copy(v interface{}) (interface{}, error) { @@ -65,6 +106,12 @@ func (c Config) Copy(v interface{}) (interface{}, error) { if c.Copiers == nil { c.Copiers = Copiers } + w.copiers = c.Copiers + + if c.ShallowCopiers == nil { + c.ShallowCopiers = ShallowCopiers + } + w.shallowCopiers = c.ShallowCopiers err := reflectwalk.Walk(v, w) if err != nil { @@ -93,10 +140,12 @@ func ifaceKey(pointers, depth int) uint64 { type walker struct { Result interface{} - depth int - ignoreDepth int - vals []reflect.Value - cs []reflect.Value + copiers map[reflect.Type]CopierFunc + shallowCopiers map[reflect.Type]struct{} + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value // This stores the number of pointers we've walked over, indexed by depth. ps []int @@ -263,6 +312,20 @@ func (w *walker) PointerExit(v bool) error { return nil } +func (w *walker) Pointer(v reflect.Value) error { + if _, ok := w.shallowCopiers[v.Type()]; ok { + // Shallow copy this value. Use the same logic as primitive, then + // return skip. + if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + func (w *walker) Interface(v reflect.Value) error { if !v.IsValid() { return nil @@ -356,7 +419,7 @@ func (w *walker) Struct(s reflect.Value) error { w.lock(s) var v reflect.Value - if c, ok := Copiers[s.Type()]; ok { + if c, ok := w.copiers[s.Type()]; ok { // We have a Copier for this struct, so we use that copier to // get the copy, and we ignore anything deeper than this. w.ignoreDepth = w.depth @@ -396,9 +459,29 @@ func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { return reflectwalk.SkipEntry } + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry.
+ if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { + sf.Set(v) + } + } + + return reflectwalk.SkipEntry + + case "ignore": + // Do nothing + return reflectwalk.SkipEntry + } + // Push the field onto the stack, we'll handle it when we exit // the struct field in Exit... w.valPush(reflect.ValueOf(f)) + return nil } diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md new file mode 100644 index 000000000..60ae31170 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage, ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: lines break only where there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 000000000..f7bedda38 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,83 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +const nbsp = 0xA0 + +// WrapString wraps the given string within lim width in characters.
+// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + var wordBufLen, spaceBufLen uint + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+spaceBufLen > lim { + current = 0 + } else { + current += spaceBufLen + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + spaceBufLen = 0 + } else { + current += spaceBufLen + wordBufLen + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + spaceBufLen = 0 + wordBuf.WriteTo(buf) + wordBuf.Reset() + wordBufLen = 0 + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) && char != nbsp { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += spaceBufLen + wordBufLen + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + spaceBufLen = 0 + wordBuf.WriteTo(buf) + wordBuf.Reset() + wordBufLen = 0 + } + + spaceBuf.WriteRune(char) + spaceBufLen++ + } else { + wordBuf.WriteRune(char) + wordBufLen++ + + if current+wordBufLen+spaceBufLen > lim && wordBufLen < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + spaceBufLen = 0 + } + } + } + + if wordBuf.Len() == 0 { + if current+spaceBufLen <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go index d7ab7b6d7..7fee7b050 100644 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -69,6 +69,13 @@ type PointerWalker interface { PointerExit(bool) error } +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + // SkipEntry can be returned from walk functions to skip walking // the value of this field. This is only valid in the following functions: // @@ -130,6 +137,17 @@ func walk(v reflect.Value, w interface{}) (err error) { } if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + pointer = true v = reflect.Indirect(pointerV) } @@ -230,7 +248,8 @@ func walkMap(v reflect.Value, w interface{}) error { ew.Enter(MapValue) } - if err := walk(kv, w); err != nil { + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { return err } diff --git a/vendor/github.com/moby/locker/LICENSE b/vendor/github.com/moby/locker/LICENSE new file mode 100644 index 000000000..2e0ec1dcf --- /dev/null +++ b/vendor/github.com/moby/locker/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/locker/README.md b/vendor/github.com/moby/locker/README.md new file mode 100644 index 000000000..a0852f0f8 --- /dev/null +++ b/vendor/github.com/moby/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex; however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/moby/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return i.data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + i.mu.Lock() + i.data[name] = data + i.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state); this ensures that any +other function dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since the name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/moby/locker/locker.go b/vendor/github.com/moby/locker/locker.go new file mode 100644 index 000000000..0b22ddfab --- /dev/null +++ b/vendor/github.com/moby/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex; however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name.
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/moby/term/.gitignore b/vendor/github.com/moby/term/.gitignore new file mode 100644 index 000000000..b0747ff01 --- /dev/null +++ b/vendor/github.com/moby/term/.gitignore @@ -0,0 +1,8 @@ +# if you want to ignore files created by your editor/tools, consider using a +# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files +.* +!.github +!.gitignore +profile.out +# support running go modules in vendor mode for local development +vendor/ diff --git a/vendor/github.com/moby/term/LICENSE b/vendor/github.com/moby/term/LICENSE new file mode 100644 index 000000000..6d8d58fb6 --- /dev/null +++ b/vendor/github.com/moby/term/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/term/README.md b/vendor/github.com/moby/term/README.md new file mode 100644 index 000000000..0ce92cc33 --- /dev/null +++ b/vendor/github.com/moby/term/README.md @@ -0,0 +1,36 @@ +# term - utilities for dealing with terminals + +![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) + +term provides structures and helper functions to work with terminal (state, sizes). + +#### Using term + +```go +package main + +import ( + "log" + "os" + + "github.com/moby/term" +) + +func main() { + fd := os.Stdin.Fd() + if term.IsTerminal(fd) { + ws, err := term.GetWinsize(fd) + if err != nil { + log.Fatalf("term.GetWinsize: %s", err) + } + log.Printf("%d:%d\n", ws.Height, ws.Width) + } +} +``` + +## Contributing + +Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. + +## Copyright and license +Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. 
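In addition to the winsize example in the README above, the state helpers declared in `term.go` below combine into the usual raw-mode pattern. A minimal sketch (the program structure is invented; `IsTerminal`, `SetRawTerminal`, and `RestoreTerminal` are the APIs added in this diff):

```go
package main

import (
	"log"
	"os"

	"github.com/moby/term"
)

func main() {
	fd := os.Stdin.Fd()
	if !term.IsTerminal(fd) {
		log.Fatal("stdin is not a terminal")
	}

	// Put the terminal into raw mode, keeping the previous state so it
	// can be restored when the program exits.
	prev, err := term.SetRawTerminal(fd)
	if err != nil {
		log.Fatalf("term.SetRawTerminal: %s", err)
	}
	defer func() {
		if err := term.RestoreTerminal(fd, prev); err != nil {
			log.Printf("term.RestoreTerminal: %s", err)
		}
	}()

	// ... read raw keypresses from os.Stdin here ...
}
```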
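The escape-detection proxy added in `proxy.go` below pairs with `ToBytes` from `ascii.go`. A sketch of the intended wiring (the read loop is invented; `ToBytes`, `NewEscapeProxy`, and `EscapeError` are defined in this diff, and `ctrl-p,ctrl-q` is the detach sequence conventionally used by Docker):

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/moby/term"
)

func main() {
	// Translate the human-readable detach sequence into raw bytes.
	keys, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		panic(err)
	}

	// Wrap stdin; Read returns an EscapeError once the sequence is typed.
	stdin := term.NewEscapeProxy(os.Stdin, keys)

	buf := make([]byte, 1024)
	for {
		n, err := stdin.Read(buf)
		if _, ok := err.(term.EscapeError); ok {
			fmt.Println("detach requested")
			return
		}
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
		_ = buf[:n] // forward the keystrokes to the attached session here
	}
}
```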
diff --git a/vendor/github.com/moby/term/ascii.go b/vendor/github.com/moby/term/ascii.go new file mode 100644 index 000000000..55873c055 --- /dev/null +++ b/vendor/github.com/moby/term/ascii.go @@ -0,0 +1,66 @@ +package term + +import ( + "fmt" + "strings" +) + +// ASCII lists the possible supported ASCII key sequences +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a sequence of keys to the corresponding ASCII codes. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, key[0]) + } + } + return codes, nil +} diff --git a/vendor/github.com/moby/term/doc.go b/vendor/github.com/moby/term/doc.go new file mode 100644 index 000000000..c9bc03244 --- /dev/null +++ b/vendor/github.com/moby/term/doc.go @@ -0,0 +1,3 @@ +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term diff --git a/vendor/github.com/moby/term/proxy.go b/vendor/github.com/moby/term/proxy.go new file mode 100644 index 000000000..c47756b89 --- /dev/null +++ b/vendor/github.com/moby/term/proxy.go @@ -0,0 +1,88 @@ +package term + +import ( + "io" +) + +// EscapeError is a special error returned by a TTY proxy reader's Read() +// method when its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader + buf []byte +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. +func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (n int, err error) { + if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) { + return 0, EscapeError{} + } + + if len(r.buf) > 0 { + n = copy(buf, r.buf) + r.buf = r.buf[n:] + } + + nr, err := r.r.Read(buf[n:]) + n += nr + if len(r.escapeKeys) == 0 { + return n, err + } + + for i := 0; i < n; i++ { + if buf[i] == r.escapeKeys[r.escapeKeyPos] { + r.escapeKeyPos++ + + // Check if the full escape sequence is matched. + if r.escapeKeyPos == len(r.escapeKeys) { + n = i + 1 - r.escapeKeyPos + if n < 0 { + n = 0 + } + return n, EscapeError{} + } + continue + } + + // If we need to prepend a partial escape sequence from the previous + // read, make sure the new buffer size doesn't exceed len(buf). + // Otherwise, preserve any extra data in a buffer for the next read.
+ if i < r.escapeKeyPos { + preserve := make([]byte, 0, r.escapeKeyPos+n) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf[:n]...) + n = copy(buf, preserve) + i += r.escapeKeyPos + r.buf = append(r.buf, preserve[n:]...) + } + r.escapeKeyPos = 0 + } + + // If we're in the middle of reading an escape sequence, make sure we don't + // let the caller read it. If later on we find that this is not the escape + // sequence, we'll prepend it back to buf. + n -= r.escapeKeyPos + if n < 0 { + n = 0 + } + return n, err +} diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go new file mode 100644 index 000000000..f9d8988ef --- /dev/null +++ b/vendor/github.com/moby/term/term.go @@ -0,0 +1,85 @@ +package term + +import "io" + +// State holds the platform-specific state / console mode for the terminal. +type State terminalState + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + + // Only used on Unix + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +// +// On Windows, it attempts to turn on VT handling on all std handles if +// supported, or falls back to terminal emulation. On Unix, this returns +// the standard [os.Stdin], [os.Stdout] and [os.Stderr]. +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return stdStreams() +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (fd uintptr, isTerminal bool) { + return getFdInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + return getWinsize(fd) +} + +// SetWinsize tries to set the specified window size for the specified file +// descriptor. It is only implemented on Unix, and returns an error on Windows. +func SetWinsize(fd uintptr, ws *Winsize) error { + return setWinsize(fd, ws) +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return isTerminal(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return restoreTerminal(fd, state) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + return saveState(fd) +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + return disableEcho(fd, state) +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this is the equivalent of +// [MakeRaw], and puts both the input and output into raw mode. On Windows, it +// only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (previousState *State, err error) { + return setRawTerminal(fd) +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. 
+func SetRawTerminalOutput(fd uintptr) (previousState *State, err error) { + return setRawTerminalOutput(fd) +} + +// MakeRaw puts the terminal (Windows Console) connected to the +// given file descriptor into raw mode and returns the previous state of +// the terminal so that it can be restored. +func MakeRaw(fd uintptr) (previousState *State, err error) { + return makeRaw(fd) +} diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go new file mode 100644 index 000000000..2ec7706a1 --- /dev/null +++ b/vendor/github.com/moby/term/term_unix.go @@ -0,0 +1,98 @@ +//go:build !windows +// +build !windows + +package term + +import ( + "errors" + "io" + "os" + + "golang.org/x/sys/unix" +) + +// ErrInvalidState is returned if the state of the terminal is invalid. +// +// Deprecated: ErrInvalidState is no longer used. +var ErrInvalidState = errors.New("Invalid terminal state") + +// terminalState holds the platform-specific state / console mode for the terminal. +type terminalState struct { + termios unix.Termios +} + +func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +func getFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = isTerminal(inFd) + } + return inFd, isTerminalIn +} + +func getWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +func setWinsize(fd uintptr, ws *Winsize) error { + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &unix.Winsize{ + Row: ws.Height, + Col: ws.Width, + Xpixel: ws.x, + Ypixel: ws.y, + }) +} + +func isTerminal(fd uintptr) bool { + _, err := tcget(fd) + return err == nil +} + +func restoreTerminal(fd uintptr, state *State) error { + if state == nil { + return errors.New("invalid terminal state") + } + return tcset(fd, &state.termios) +} + +func saveState(fd uintptr) (*State, error) { + termios, err := tcget(fd) + if err != nil { + return nil, err + } + return &State{termios: *termios}, nil +} + +func disableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + return tcset(fd, &newState) +} + +func setRawTerminal(fd uintptr) (*State, error) { + return makeRaw(fd) +} + +func setRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func tcget(fd uintptr) (*unix.Termios, error) { + p, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + return p, nil +} + +func tcset(fd uintptr, p *unix.Termios) error { + return unix.IoctlSetTermios(int(fd), setTermios, p) +} diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go new file mode 100644 index 000000000..81ccff042 --- /dev/null +++ b/vendor/github.com/moby/term/term_windows.go @@ -0,0 +1,176 @@ +package term + +import ( + "fmt" + "io" + "os" + "os/signal" + + windowsconsole "github.com/moby/term/windows" + "golang.org/x/sys/windows" +) + +// terminalState holds the platform-specific state / console mode for the terminal. 
+type terminalState struct { + mode uint32 +} + +// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console +var vtInputSupported bool + +func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var ( + emulateStdin, emulateStdout, emulateStderr bool + + mode uint32 + ) + + fd := windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { + // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + _ = windows.SetConsoleMode(fd, mode) + } + + fd = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStdout = true + } else { + _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + fd = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStderr = true + } else { + _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + if emulateStdin { + h := uint32(windows.STD_INPUT_HANDLE) + stdIn = windowsconsole.NewAnsiReader(int(h)) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + h := uint32(windows.STD_OUTPUT_HANDLE) + stdOut = windowsconsole.NewAnsiWriter(int(h)) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + h := uint32(windows.STD_ERROR_HANDLE) + stdErr = windowsconsole.NewAnsiWriter(int(h)) + } else { + stdErr = os.Stderr + } + + return stdIn, stdOut, stdErr +} + +func getFdInfo(in interface{}) (uintptr, bool) { + return windowsconsole.GetHandleInfo(in) +} + +func getWinsize(fd uintptr) (*Winsize, error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +func setWinsize(fd uintptr, ws *Winsize) error { + return fmt.Errorf("not implemented on Windows") +} + +func isTerminal(fd uintptr) bool { + var mode uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &mode) + return err == nil +} + +func restoreTerminal(fd uintptr, state *State) error { + return windows.SetConsoleMode(windows.Handle(fd), state.mode) +} + +func saveState(fd uintptr) (*State, error) { + var mode uint32 + + if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { + return nil, err + } + + return &State{mode: mode}, nil +} + +func disableEcho(fd uintptr, state *State) error { + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + mode := state.mode 
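+ // Clear only the echo bit; processed input and line input are kept enabled
+ // below so the console continues to handle Ctrl+C and line editing.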
+ mode &^= windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT + err := windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +func setRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, oldState) + return oldState, err +} + +func setRawTerminalOutput(fd uintptr) (*State, error) { + oldState, err := saveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this + // version of Windows. + _ = windows.SetConsoleMode(windows.Handle(fd), oldState.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) + return oldState, err +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + _ = RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go new file mode 100644 index 000000000..45f77e03c --- /dev/null +++ b/vendor/github.com/moby/term/termios_bsd.go @@ -0,0 +1,13 @@ +//go:build darwin || freebsd || openbsd || netbsd +// +build darwin freebsd openbsd netbsd + +package term + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go new file mode 100644 index 000000000..88b7b2156 --- /dev/null +++ b/vendor/github.com/moby/term/termios_nonbsd.go @@ -0,0 +1,13 @@ +//go:build !darwin && !freebsd && !netbsd && !openbsd && !windows +// +build !darwin,!freebsd,!netbsd,!openbsd,!windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) diff --git a/vendor/github.com/moby/term/termios_unix.go b/vendor/github.com/moby/term/termios_unix.go new file mode 100644 index 000000000..60c823783 --- /dev/null +++ b/vendor/github.com/moby/term/termios_unix.go @@ -0,0 +1,35 @@ +//go:build !windows +// +build !windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +// Termios is the Unix API for terminal I/O. +// +// Deprecated: use [unix.Termios]. 
+type Termios = unix.Termios + +func makeRaw(fd uintptr) (*State, error) { + termios, err := tcget(fd) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := tcset(fd, termios); err != nil { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/moby/term/termios_windows.go b/vendor/github.com/moby/term/termios_windows.go new file mode 100644 index 000000000..5be4e7601 --- /dev/null +++ b/vendor/github.com/moby/term/termios_windows.go @@ -0,0 +1,37 @@ +package term + +import "golang.org/x/sys/windows" + +func makeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + err = windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return nil, err + } + return state, nil +} diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go new file mode 100644 index 000000000..fb34c547a --- /dev/null +++ b/vendor/github.com/moby/term/windows/ansi_reader.go @@ -0,0 +1,252 @@ +//go:build windows +// +build windows + +package windowsconsole + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. 
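+// Translated bytes that do not fit into p are buffered internally and
+// returned by subsequent calls to Read.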
+func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. + recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. 
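+// Only key-down KEY_EVENT records are translated; all other input events are ignored.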
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
+ var buffer bytes.Buffer
+ for _, event := range events {
+ if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+ buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+ }
+ }
+
+ return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
+ if keyEvent.UnicodeChar == 0 {
+ return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+ }
+
+ _, alt, control := getControlKeys(keyEvent.ControlKeyState)
+ if control {
+ // TODO(azlinux): Implement following control sequences
+ // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell.
+ // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key.
+ // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s.
+ // <Ctrl>-S Suspends printing on the screen (does not stop the program).
+ // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key.
+ // <Ctrl>-E Quits current command and creates a core
+ }
+
+ // <Alt>+Key generates ESC N Key
+ if !control && alt {
+ return ansiterm.KEY_ESC_N + strings.ToLower(string(rune(keyEvent.UnicodeChar)))
+ }
+
+ return string(rune(keyEvent.UnicodeChar))
+}
+
+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
+ shift, alt, control := getControlKeys(controlState)
+ modifier := getControlKeysModifier(shift, alt, control)
+
+ if format, ok := arrowKeyMapPrefix[key]; ok {
+ return fmt.Sprintf(format, escapeSequence, modifier)
+ }
+
+ if format, ok := keyMapPrefix[key]; ok {
+ return fmt.Sprintf(format, modifier)
+ }
+
+ return ""
+}
+
+// getControlKeys extracts the shift, alt, and ctrl key states.
+func getControlKeys(controlState uint32) (shift, alt, control bool) {
+ shift = 0 != (controlState & winterm.SHIFT_PRESSED)
+ alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
+ control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
+ return shift, alt, control
+}
+
+// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
+func getControlKeysModifier(shift, alt, control bool) string {
+ if shift && alt && control {
+ return ansiterm.KEY_CONTROL_PARAM_8
+ }
+ if alt && control {
+ return ansiterm.KEY_CONTROL_PARAM_7
+ }
+ if shift && control {
+ return ansiterm.KEY_CONTROL_PARAM_6
+ }
+ if control {
+ return ansiterm.KEY_CONTROL_PARAM_5
+ }
+ if shift && alt {
+ return ansiterm.KEY_CONTROL_PARAM_4
+ }
+ if alt {
+ return ansiterm.KEY_CONTROL_PARAM_3
+ }
+ if shift {
+ return ansiterm.KEY_CONTROL_PARAM_2
+ }
+ return ""
+}
diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go
new file mode 100644
index 000000000..4243307fd
--- /dev/null
+++ b/vendor/github.com/moby/term/windows/ansi_writer.go
@@ -0,0 +1,57 @@
+//go:build windows
+// +build windows
+
+package windowsconsole
+
+import (
+ "io"
+ "os"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
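+// Writes are fed through an ANSI parser that replays the escape sequences as
+// Windows Console API calls.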
+type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + + return &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. +func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + return aw.parser.Parse(p) +} diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go new file mode 100644 index 000000000..21e57bd52 --- /dev/null +++ b/vendor/github.com/moby/term/windows/console.go @@ -0,0 +1,43 @@ +//go:build windows +// +build windows + +package windowsconsole + +import ( + "os" + + "golang.org/x/sys/windows" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = isConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +// +// Deprecated: use [windows.GetConsoleMode] or [golang.org/x/term.IsTerminal]. +func IsConsole(fd uintptr) bool { + return isConsole(fd) +} + +func isConsole(fd uintptr) bool { + var mode uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &mode) + return err == nil +} diff --git a/vendor/github.com/moby/term/windows/doc.go b/vendor/github.com/moby/term/windows/doc.go new file mode 100644 index 000000000..54265fffa --- /dev/null +++ b/vendor/github.com/moby/term/windows/doc.go @@ -0,0 +1,5 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windowsconsole diff --git a/vendor/github.com/monochromegane/go-gitignore/.travis.yml b/vendor/github.com/monochromegane/go-gitignore/.travis.yml new file mode 100644 index 000000000..b06a36a46 --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/.travis.yml @@ -0,0 +1,6 @@ +language: go +go: + - 1.14.x + - master +script: + - go test -v ./... 
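For reference, a minimal usage sketch of the moby/term API vendored above (illustrative only, not part of the patch; it assumes a Unix-like terminal attached to stdin and elides most error handling):

```go
package main

import (
	"fmt"
	"os"

	"github.com/moby/term"
)

func main() {
	// Resolve the fd and check that stdin is actually a terminal.
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		return
	}

	// Enter raw mode; keep the previous state so it can be restored.
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		panic(err)
	}
	defer func() { _ = term.RestoreTerminal(fd, state) }()

	// Query the current window size.
	if ws, err := term.GetWinsize(fd); err == nil {
		fmt.Printf("terminal is %dx%d\r\n", ws.Width, ws.Height)
	}
}
```

Note the `\r\n`: in raw mode OPOST is cleared, so the terminal no longer translates `\n` into a carriage-return/line-feed pair on output.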
diff --git a/vendor/github.com/monochromegane/go-gitignore/LICENSE b/vendor/github.com/monochromegane/go-gitignore/LICENSE
new file mode 100644
index 000000000..91b84e927
--- /dev/null
+++ b/vendor/github.com/monochromegane/go-gitignore/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) [2015] [go-gitignore]
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/monochromegane/go-gitignore/README.md b/vendor/github.com/monochromegane/go-gitignore/README.md
new file mode 100644
index 000000000..51a480747
--- /dev/null
+++ b/vendor/github.com/monochromegane/go-gitignore/README.md
@@ -0,0 +1,95 @@
+# go-gitignore [![Build Status](https://travis-ci.org/monochromegane/go-gitignore.svg)](https://travis-ci.org/monochromegane/go-gitignore)
+
+A fast gitignore matching library for Go.
+
+This library uses a simple tree index for matching, so it stays fast even when a gitignore file contains many patterns.
+
+## Usage
+
+```go
+gitignore, _ := gitignore.NewGitIgnore("/path/to/gitignore")
+
+path := "/path/to/file"
+isDir := false
+gitignore.Match(path, isDir)
+```
+
+### Specify base directory
+
+go-gitignore treats `path` as the base directory.
+If you want to specify another base (e.g. the current directory for a global gitignore), you can do so like the following.
+
+```go
+gitignore, _ := gitignore.NewGitIgnore("/home/you/.gitignore", ".")
+```
+
+### From io.Reader
+
+go-gitignore can also be initialized from an io.Reader.
+
+```go
+gitignore := gitignore.NewGitIgnoreFromReader(base, reader)
+```
+
+## Simple tree index
+
+go-gitignore parses a gitignore file and generates a simple tree index for matching, like the following.
+
+```
+.
+├── accept
+│   ├── absolute
+│   │   └── depth
+│   │       ├── initial
+│   │       └── other
+│   └── relative
+│       └── depth
+│           ├── initial
+│           └── other
+└── ignore
+    ├── absolute
+    │   └── depth
+    │       ├── initial
+    │       └── other
+    └── relative
+        └── depth
+            ├── initial
+            └── other
+```
+
+## Features
+
+- Supports absolute paths (/path/to/ignore)
+- Supports relative paths (path/to/ignore)
+- Supports accept patterns (!path/to/accept)
+- Supports directory patterns (path/to/directory/)
+- Supports glob patterns (path/to/\*.txt)
+
+*Note on glob patterns:*
+
+go-gitignore uses [filepath.Match](https://golang.org/pkg/path/filepath/#Match) for matching meta-character patterns, so it does not support recursive patterns (path/`**`/file).
+
+## Installation
+
+```sh
+$ go get github.com/monochromegane/go-gitignore
+```
+
+## Contribution
+
+1. Fork it
+2. Create a feature branch
+3. Commit your changes
+4. 
Rebase your local changes against the master branch +5. Run test suite with the `go test ./...` command and confirm that it passes +6. Run `gofmt -s` +7. Create new Pull Request + +## License + +[MIT](https://github.com/monochromegane/go-gitignore/blob/master/LICENSE) + +## Author + +[monochromegane](https://github.com/monochromegane) + diff --git a/vendor/github.com/monochromegane/go-gitignore/depth_holder.go b/vendor/github.com/monochromegane/go-gitignore/depth_holder.go new file mode 100644 index 000000000..9805b325d --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/depth_holder.go @@ -0,0 +1,79 @@ +package gitignore + +import "strings" + +const ( + asc = iota + desc +) + +type depthPatternHolder struct { + patterns depthPatterns + order int +} + +func newDepthPatternHolder(order int) depthPatternHolder { + return depthPatternHolder{ + patterns: depthPatterns{m: map[int]initialPatternHolder{}}, + order: order, + } +} + +func (h *depthPatternHolder) add(pattern string) { + count := strings.Count(strings.Trim(pattern, "/"), "/") + h.patterns.set(count+1, pattern) +} + +func (h depthPatternHolder) match(path string, isDir bool) bool { + if h.patterns.size() == 0 { + return false + } + + for depth := 1; ; depth++ { + var part string + var isLast, isDirCurrent bool + if h.order == asc { + part, isLast = cutN(path, depth) + if isLast { + isDirCurrent = isDir + } else { + isDirCurrent = false + } + } else { + part, isLast = cutLastN(path, depth) + isDirCurrent = isDir + } + if patterns, ok := h.patterns.get(depth); ok { + if patterns.match(part, isDirCurrent) { + return true + } + } + if isLast { + break + } + } + return false +} + +type depthPatterns struct { + m map[int]initialPatternHolder +} + +func (p *depthPatterns) set(depth int, pattern string) { + if ps, ok := p.m[depth]; ok { + ps.add(pattern) + } else { + holder := newInitialPatternHolder() + holder.add(pattern) + p.m[depth] = holder + } +} + +func (p depthPatterns) get(depth int) (initialPatternHolder, bool) { + patterns, ok := p.m[depth] + return patterns, ok +} + +func (p depthPatterns) size() int { + return len(p.m) +} diff --git a/vendor/github.com/monochromegane/go-gitignore/full_scan_patterns.go b/vendor/github.com/monochromegane/go-gitignore/full_scan_patterns.go new file mode 100644 index 000000000..8c04ef3a7 --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/full_scan_patterns.go @@ -0,0 +1,31 @@ +package gitignore + +import "strings" + +// Only benchmark use +type fullScanPatterns struct { + absolute patterns + relative patterns +} + +func newFullScanPatterns() *fullScanPatterns { + return &fullScanPatterns{ + absolute: patterns{}, + relative: patterns{}, + } +} + +func (ps *fullScanPatterns) add(pattern string) { + if strings.HasPrefix(pattern, "/") { + ps.absolute.add(newPattern(pattern)) + } else { + ps.relative.add(newPattern(pattern)) + } +} + +func (ps fullScanPatterns) match(path string, isDir bool) bool { + if ps.absolute.match(path, isDir) { + return true + } + return ps.relative.match(path, isDir) +} diff --git a/vendor/github.com/monochromegane/go-gitignore/gitignore.go b/vendor/github.com/monochromegane/go-gitignore/gitignore.go new file mode 100644 index 000000000..9c719a6ca --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/gitignore.go @@ -0,0 +1,80 @@ +package gitignore + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strings" +) + +type IgnoreMatcher interface { + Match(path string, isDir bool) bool +} + +type DummyIgnoreMatcher bool + +func (d 
DummyIgnoreMatcher) Match(path string, isDir bool) bool { + return bool(d) +} + +type gitIgnore struct { + ignorePatterns scanStrategy + acceptPatterns scanStrategy + path string +} + +func NewGitIgnore(gitignore string, base ...string) (IgnoreMatcher, error) { + var path string + if len(base) > 0 { + path = base[0] + } else { + path = filepath.Dir(gitignore) + } + + file, err := os.Open(gitignore) + if err != nil { + return nil, err + } + defer file.Close() + + return NewGitIgnoreFromReader(path, file), nil +} + +func NewGitIgnoreFromReader(path string, r io.Reader) IgnoreMatcher { + g := gitIgnore{ + ignorePatterns: newIndexScanPatterns(), + acceptPatterns: newIndexScanPatterns(), + path: path, + } + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := strings.Trim(scanner.Text(), " ") + if len(line) == 0 || strings.HasPrefix(line, "#") { + continue + } + if strings.HasPrefix(line, `\#`) { + line = strings.TrimPrefix(line, `\`) + } + + if strings.HasPrefix(line, "!") { + g.acceptPatterns.add(strings.TrimPrefix(line, "!")) + } else { + g.ignorePatterns.add(line) + } + } + return g +} + +func (g gitIgnore) Match(path string, isDir bool) bool { + relativePath, err := filepath.Rel(g.path, path) + if err != nil { + return false + } + relativePath = filepath.ToSlash(relativePath) + + if g.acceptPatterns.match(relativePath, isDir) { + return false + } + return g.ignorePatterns.match(relativePath, isDir) +} diff --git a/vendor/github.com/monochromegane/go-gitignore/index_scan_patterns.go b/vendor/github.com/monochromegane/go-gitignore/index_scan_patterns.go new file mode 100644 index 000000000..882280e95 --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/index_scan_patterns.go @@ -0,0 +1,35 @@ +package gitignore + +import "strings" + +type indexScanPatterns struct { + absolute depthPatternHolder + relative depthPatternHolder +} + +func newIndexScanPatterns() *indexScanPatterns { + return &indexScanPatterns{ + absolute: newDepthPatternHolder(asc), + relative: newDepthPatternHolder(desc), + } +} + +func (ps *indexScanPatterns) add(pattern string) { + if strings.HasPrefix(pattern, "/") { + ps.absolute.add(pattern) + } else { + ps.relative.add(pattern) + } +} + +func (ps indexScanPatterns) match(path string, isDir bool) bool { + if ps.absolute.match(path, isDir) { + return true + } + return ps.relative.match(path, isDir) +} + +type scanStrategy interface { + add(pattern string) + match(path string, isDir bool) bool +} diff --git a/vendor/github.com/monochromegane/go-gitignore/initial_holder.go b/vendor/github.com/monochromegane/go-gitignore/initial_holder.go new file mode 100644 index 000000000..86f0bfee2 --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/initial_holder.go @@ -0,0 +1,62 @@ +package gitignore + +import "strings" + +const initials = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ." 
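+
+// Patterns whose first byte (after stripping a leading "/") appears in
+// initials are bucketed by that byte; anything else, e.g. a pattern starting
+// with the "*" meta character, falls back to a linear scan of otherPatterns.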
+ +type initialPatternHolder struct { + patterns initialPatterns + otherPatterns *patterns +} + +func newInitialPatternHolder() initialPatternHolder { + return initialPatternHolder{ + patterns: initialPatterns{m: map[byte]*patterns{}}, + otherPatterns: &patterns{}, + } +} + +func (h *initialPatternHolder) add(pattern string) { + trimedPattern := strings.TrimPrefix(pattern, "/") + if strings.IndexAny(trimedPattern[0:1], initials) != -1 { + h.patterns.set(trimedPattern[0], newPatternForEqualizedPath(pattern)) + } else { + h.otherPatterns.add(newPatternForEqualizedPath(pattern)) + } +} + +func (h initialPatternHolder) match(path string, isDir bool) bool { + if h.patterns.size() == 0 && h.otherPatterns.size() == 0 { + return false + } + if patterns, ok := h.patterns.get(path[0]); ok { + if patterns.match(path, isDir) { + return true + } + } + return h.otherPatterns.match(path, isDir) +} + +type initialPatterns struct { + m map[byte]*patterns +} + +func (p *initialPatterns) set(initial byte, pattern pattern) { + if ps, ok := p.m[initial]; ok { + ps.add(pattern) + } else { + patterns := &patterns{} + patterns.add(pattern) + p.m[initial] = patterns + + } +} + +func (p initialPatterns) get(initial byte) (*patterns, bool) { + patterns, ok := p.m[initial] + return patterns, ok +} + +func (p initialPatterns) size() int { + return len(p.m) +} diff --git a/vendor/github.com/monochromegane/go-gitignore/match.go b/vendor/github.com/monochromegane/go-gitignore/match.go new file mode 100644 index 000000000..4140a9bdc --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/match.go @@ -0,0 +1,24 @@ +package gitignore + +import "path/filepath" + +type pathMatcher interface { + match(path string) bool +} + +type simpleMatcher struct { + path string +} + +func (m simpleMatcher) match(path string) bool { + return m.path == path +} + +type filepathMatcher struct { + path string +} + +func (m filepathMatcher) match(path string) bool { + match, _ := filepath.Match(m.path, path) + return match +} diff --git a/vendor/github.com/monochromegane/go-gitignore/pattern.go b/vendor/github.com/monochromegane/go-gitignore/pattern.go new file mode 100644 index 000000000..93adbf763 --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/pattern.go @@ -0,0 +1,69 @@ +package gitignore + +import ( + "path/filepath" + "strings" +) + +var Separator = string(filepath.Separator) + +type pattern struct { + hasRootPrefix bool + hasDirSuffix bool + pathDepth int + matcher pathMatcher + onlyEqualizedPath bool +} + +func newPattern(path string) pattern { + hasRootPrefix := path[0] == '/' + hasDirSuffix := path[len(path)-1] == '/' + + var pathDepth int + if !hasRootPrefix { + pathDepth = strings.Count(path, "/") + } + + var matcher pathMatcher + matchingPath := strings.Trim(path, "/") + if hasMeta(path) { + matcher = filepathMatcher{path: matchingPath} + } else { + matcher = simpleMatcher{path: matchingPath} + } + + return pattern{ + hasRootPrefix: hasRootPrefix, + hasDirSuffix: hasDirSuffix, + pathDepth: pathDepth, + matcher: matcher, + } +} + +func newPatternForEqualizedPath(path string) pattern { + pattern := newPattern(path) + pattern.onlyEqualizedPath = true + return pattern +} + +func (p pattern) match(path string, isDir bool) bool { + + if p.hasDirSuffix && !isDir { + return false + } + + var targetPath string + if p.hasRootPrefix || p.onlyEqualizedPath { + // absolute pattern or only equalized path mode + targetPath = path + } else { + // relative pattern + targetPath = p.equalizeDepth(path) + } + return 
p.matcher.match(targetPath) +} + +func (p pattern) equalizeDepth(path string) string { + equalizedPath, _ := cutLastN(path, p.pathDepth+1) + return equalizedPath +} diff --git a/vendor/github.com/monochromegane/go-gitignore/patterns.go b/vendor/github.com/monochromegane/go-gitignore/patterns.go new file mode 100644 index 000000000..6770fb465 --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/patterns.go @@ -0,0 +1,22 @@ +package gitignore + +type patterns struct { + patterns []pattern +} + +func (ps *patterns) add(pattern pattern) { + ps.patterns = append(ps.patterns, pattern) +} + +func (ps *patterns) size() int { + return len(ps.patterns) +} + +func (ps patterns) match(path string, isDir bool) bool { + for _, p := range ps.patterns { + if match := p.match(path, isDir); match { + return true + } + } + return false +} diff --git a/vendor/github.com/monochromegane/go-gitignore/util.go b/vendor/github.com/monochromegane/go-gitignore/util.go new file mode 100644 index 000000000..b5ab9bbfd --- /dev/null +++ b/vendor/github.com/monochromegane/go-gitignore/util.go @@ -0,0 +1,45 @@ +package gitignore + +import ( + "os" + "strings" +) + +func cutN(path string, n int) (string, bool) { + isLast := true + + var i, count int + for i < len(path)-1 { + if os.IsPathSeparator(path[i]) { + count++ + if count >= n { + isLast = false + break + } + } + i++ + } + return path[:i+1], isLast +} + +func cutLastN(path string, n int) (string, bool) { + isLast := true + i := len(path) - 1 + + var count int + for i >= 0 { + if os.IsPathSeparator(path[i]) { + count++ + if count >= n { + isLast = false + break + } + } + i-- + } + return path[i+1:], isLast +} + +func hasMeta(path string) bool { + return strings.IndexAny(path, "*?[") >= 0 +} diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE new file mode 100644 index 000000000..1c2640164 --- /dev/null +++ b/vendor/github.com/morikuni/aec/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Taihei Morikuni + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md new file mode 100644 index 000000000..3cbc4343e --- /dev/null +++ b/vendor/github.com/morikuni/aec/README.md @@ -0,0 +1,178 @@ +# aec + +[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) + +Go wrapper for ANSI escape code. 
+ +## Install + +```bash +go get github.com/morikuni/aec +``` + +## Features + +ANSI escape codes depend on terminal environment. +Some of these features may not work. +Check supported Font-Style/Font-Color features with [checkansi](./checkansi). + +[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. + +### Cursor + +- `Up(n)` +- `Down(n)` +- `Right(n)` +- `Left(n)` +- `NextLine(n)` +- `PreviousLine(n)` +- `Column(col)` +- `Position(row, col)` +- `Save` +- `Restore` +- `Hide` +- `Show` +- `Report` + +### Erase + +- `EraseDisplay(mode)` +- `EraseLine(mode)` + +### Scroll + +- `ScrollUp(n)` +- `ScrollDown(n)` + +### Font Style + +- `Bold` +- `Faint` +- `Italic` +- `Underline` +- `BlinkSlow` +- `BlinkRapid` +- `Inverse` +- `Conceal` +- `CrossOut` +- `Frame` +- `Encircle` +- `Overline` + +### Font Color + +Foreground color. + +- `DefaultF` +- `BlackF` +- `RedF` +- `GreenF` +- `YellowF` +- `BlueF` +- `MagentaF` +- `CyanF` +- `WhiteF` +- `LightBlackF` +- `LightRedF` +- `LightGreenF` +- `LightYellowF` +- `LightBlueF` +- `LightMagentaF` +- `LightCyanF` +- `LightWhiteF` +- `Color3BitF(color)` +- `Color8BitF(color)` +- `FullColorF(r, g, b)` + +Background color. + +- `DefaultB` +- `BlackB` +- `RedB` +- `GreenB` +- `YellowB` +- `BlueB` +- `MagentaB` +- `CyanB` +- `WhiteB` +- `LightBlackB` +- `LightRedB` +- `LightGreenB` +- `LightYellowB` +- `LightBlueB` +- `LightMagentaB` +- `LightCyanB` +- `LightWhiteB` +- `Color3BitB(color)` +- `Color8BitB(color)` +- `FullColorB(r, g, b)` + +### Color Converter + +24bit RGB color to ANSI color. + +- `NewRGB3Bit(r, g, b)` +- `NewRGB8Bit(r, g, b)` + +### Builder + +To mix these features. + +```go +custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI +custom.Apply("Hello World") +``` + +## Usage + +1. Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` +2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` + +`aec.Reset` should be added when using font style or font color features. + +## Example + +Simple progressbar. + +![sample](./sample.gif) + +```go +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/morikuni/aec" +) + +func main() { + const n = 20 + builder := aec.EmptyBuilder + + up2 := aec.Up(2) + col := aec.Column(n + 2) + bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) + label := builder.LightRedF().Underline().With(col).Right(1).ANSI + + // for up2 + fmt.Println() + fmt.Println() + + for i := 0; i <= n; i++ { + fmt.Print(up2) + fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) + fmt.Print("[") + fmt.Print(bar.Apply(strings.Repeat("=", i))) + fmt.Println(col.Apply("]")) + time.Sleep(100 * time.Millisecond) + } +} +``` + +## License + +[MIT](./LICENSE) + + diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go new file mode 100644 index 000000000..566be6eb1 --- /dev/null +++ b/vendor/github.com/morikuni/aec/aec.go @@ -0,0 +1,137 @@ +package aec + +import "fmt" + +// EraseMode is listed in a variable EraseModes. +type EraseMode uint + +var ( + // EraseModes is a list of EraseMode. + EraseModes struct { + // All erase all. + All EraseMode + + // Head erase to head. + Head EraseMode + + // Tail erase to tail. + Tail EraseMode + } + + // Save saves the cursor position. + Save ANSI + + // Restore restores the cursor position. + Restore ANSI + + // Hide hides the cursor. + Hide ANSI + + // Show shows the cursor. + Show ANSI + + // Report reports the cursor position. 
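+ // (Emits CSI 6n; the terminal replies on stdin with CSI <row>;<col> R.)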
+ Report ANSI +) + +// Up moves up the cursor. +func Up(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dA", n)) +} + +// Down moves down the cursor. +func Down(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dB", n)) +} + +// Right moves right the cursor. +func Right(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dC", n)) +} + +// Left moves left the cursor. +func Left(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dD", n)) +} + +// NextLine moves down the cursor to head of a line. +func NextLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dE", n)) +} + +// PreviousLine moves up the cursor to head of a line. +func PreviousLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dF", n)) +} + +// Column set the cursor position to a given column. +func Column(col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dG", col)) +} + +// Position set the cursor position to a given absolute position. +func Position(row, col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) +} + +// EraseDisplay erases display by given EraseMode. +func EraseDisplay(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dJ", m)) +} + +// EraseLine erases lines by given EraseMode. +func EraseLine(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dK", m)) +} + +// ScrollUp scrolls up the page. +func ScrollUp(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dS", n)) +} + +// ScrollDown scrolls down the page. +func ScrollDown(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dT", n)) +} + +func init() { + EraseModes = struct { + All EraseMode + Head EraseMode + Tail EraseMode + }{ + Tail: 0, + Head: 1, + All: 2, + } + + Save = newAnsi(esc + "s") + Restore = newAnsi(esc + "u") + Hide = newAnsi(esc + "?25l") + Show = newAnsi(esc + "?25h") + Report = newAnsi(esc + "6n") +} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go new file mode 100644 index 000000000..e60722e6e --- /dev/null +++ b/vendor/github.com/morikuni/aec/ansi.go @@ -0,0 +1,59 @@ +package aec + +import ( + "fmt" + "strings" +) + +const esc = "\x1b[" + +// Reset resets SGR effect. +const Reset string = "\x1b[0m" + +var empty = newAnsi("") + +// ANSI represents ANSI escape code. +type ANSI interface { + fmt.Stringer + + // With adapts given ANSIs. + With(...ANSI) ANSI + + // Apply wraps given string in ANSI. + Apply(string) string +} + +type ansiImpl string + +func newAnsi(s string) *ansiImpl { + r := ansiImpl(s) + return &r +} + +func (a *ansiImpl) With(ansi ...ANSI) ANSI { + return concat(append([]ANSI{a}, ansi...)) +} + +func (a *ansiImpl) Apply(s string) string { + return a.String() + s + Reset +} + +func (a *ansiImpl) String() string { + return string(*a) +} + +// Apply wraps given string in ANSIs. 
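+// When called with no ANSI codes, Apply returns s unchanged.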
+func Apply(s string, ansi ...ANSI) string { + if len(ansi) == 0 { + return s + } + return concat(ansi).Apply(s) +} + +func concat(ansi []ANSI) ANSI { + strs := make([]string, 0, len(ansi)) + for _, p := range ansi { + strs = append(strs, p.String()) + } + return newAnsi(strings.Join(strs, "")) +} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go new file mode 100644 index 000000000..13bd002d4 --- /dev/null +++ b/vendor/github.com/morikuni/aec/builder.go @@ -0,0 +1,388 @@ +package aec + +// Builder is a lightweight syntax to construct customized ANSI. +type Builder struct { + ANSI ANSI +} + +// EmptyBuilder is an initialized Builder. +var EmptyBuilder *Builder + +// NewBuilder creates a Builder from existing ANSI. +func NewBuilder(a ...ANSI) *Builder { + return &Builder{concat(a)} +} + +// With is a syntax for With. +func (builder *Builder) With(a ...ANSI) *Builder { + return NewBuilder(builder.ANSI.With(a...)) +} + +// Up is a syntax for Up. +func (builder *Builder) Up(n uint) *Builder { + return builder.With(Up(n)) +} + +// Down is a syntax for Down. +func (builder *Builder) Down(n uint) *Builder { + return builder.With(Down(n)) +} + +// Right is a syntax for Right. +func (builder *Builder) Right(n uint) *Builder { + return builder.With(Right(n)) +} + +// Left is a syntax for Left. +func (builder *Builder) Left(n uint) *Builder { + return builder.With(Left(n)) +} + +// NextLine is a syntax for NextLine. +func (builder *Builder) NextLine(n uint) *Builder { + return builder.With(NextLine(n)) +} + +// PreviousLine is a syntax for PreviousLine. +func (builder *Builder) PreviousLine(n uint) *Builder { + return builder.With(PreviousLine(n)) +} + +// Column is a syntax for Column. +func (builder *Builder) Column(col uint) *Builder { + return builder.With(Column(col)) +} + +// Position is a syntax for Position. +func (builder *Builder) Position(row, col uint) *Builder { + return builder.With(Position(row, col)) +} + +// EraseDisplay is a syntax for EraseDisplay. +func (builder *Builder) EraseDisplay(m EraseMode) *Builder { + return builder.With(EraseDisplay(m)) +} + +// EraseLine is a syntax for EraseLine. +func (builder *Builder) EraseLine(m EraseMode) *Builder { + return builder.With(EraseLine(m)) +} + +// ScrollUp is a syntax for ScrollUp. +func (builder *Builder) ScrollUp(n int) *Builder { + return builder.With(ScrollUp(n)) +} + +// ScrollDown is a syntax for ScrollDown. +func (builder *Builder) ScrollDown(n int) *Builder { + return builder.With(ScrollDown(n)) +} + +// Save is a syntax for Save. +func (builder *Builder) Save() *Builder { + return builder.With(Save) +} + +// Restore is a syntax for Restore. +func (builder *Builder) Restore() *Builder { + return builder.With(Restore) +} + +// Hide is a syntax for Hide. +func (builder *Builder) Hide() *Builder { + return builder.With(Hide) +} + +// Show is a syntax for Show. +func (builder *Builder) Show() *Builder { + return builder.With(Show) +} + +// Report is a syntax for Report. +func (builder *Builder) Report() *Builder { + return builder.With(Report) +} + +// Bold is a syntax for Bold. +func (builder *Builder) Bold() *Builder { + return builder.With(Bold) +} + +// Faint is a syntax for Faint. +func (builder *Builder) Faint() *Builder { + return builder.With(Faint) +} + +// Italic is a syntax for Italic. +func (builder *Builder) Italic() *Builder { + return builder.With(Italic) +} + +// Underline is a syntax for Underline. 
+func (builder *Builder) Underline() *Builder {
+ return builder.With(Underline)
+}
+
+// BlinkSlow is a syntax for BlinkSlow.
+func (builder *Builder) BlinkSlow() *Builder {
+ return builder.With(BlinkSlow)
+}
+
+// BlinkRapid is a syntax for BlinkRapid.
+func (builder *Builder) BlinkRapid() *Builder {
+ return builder.With(BlinkRapid)
+}
+
+// Inverse is a syntax for Inverse.
+func (builder *Builder) Inverse() *Builder {
+ return builder.With(Inverse)
+}
+
+// Conceal is a syntax for Conceal.
+func (builder *Builder) Conceal() *Builder {
+ return builder.With(Conceal)
+}
+
+// CrossOut is a syntax for CrossOut.
+func (builder *Builder) CrossOut() *Builder {
+ return builder.With(CrossOut)
+}
+
+// BlackF is a syntax for BlackF.
+func (builder *Builder) BlackF() *Builder {
+ return builder.With(BlackF)
+}
+
+// RedF is a syntax for RedF.
+func (builder *Builder) RedF() *Builder {
+ return builder.With(RedF)
+}
+
+// GreenF is a syntax for GreenF.
+func (builder *Builder) GreenF() *Builder {
+ return builder.With(GreenF)
+}
+
+// YellowF is a syntax for YellowF.
+func (builder *Builder) YellowF() *Builder {
+ return builder.With(YellowF)
+}
+
+// BlueF is a syntax for BlueF.
+func (builder *Builder) BlueF() *Builder {
+ return builder.With(BlueF)
+}
+
+// MagentaF is a syntax for MagentaF.
+func (builder *Builder) MagentaF() *Builder {
+ return builder.With(MagentaF)
+}
+
+// CyanF is a syntax for CyanF.
+func (builder *Builder) CyanF() *Builder {
+ return builder.With(CyanF)
+}
+
+// WhiteF is a syntax for WhiteF.
+func (builder *Builder) WhiteF() *Builder {
+ return builder.With(WhiteF)
+}
+
+// DefaultF is a syntax for DefaultF.
+func (builder *Builder) DefaultF() *Builder {
+ return builder.With(DefaultF)
+}
+
+// BlackB is a syntax for BlackB.
+func (builder *Builder) BlackB() *Builder {
+ return builder.With(BlackB)
+}
+
+// RedB is a syntax for RedB.
+func (builder *Builder) RedB() *Builder {
+ return builder.With(RedB)
+}
+
+// GreenB is a syntax for GreenB.
+func (builder *Builder) GreenB() *Builder {
+ return builder.With(GreenB)
+}
+
+// YellowB is a syntax for YellowB.
+func (builder *Builder) YellowB() *Builder {
+ return builder.With(YellowB)
+}
+
+// BlueB is a syntax for BlueB.
+func (builder *Builder) BlueB() *Builder {
+ return builder.With(BlueB)
+}
+
+// MagentaB is a syntax for MagentaB.
+func (builder *Builder) MagentaB() *Builder {
+ return builder.With(MagentaB)
+}
+
+// CyanB is a syntax for CyanB.
+func (builder *Builder) CyanB() *Builder {
+ return builder.With(CyanB)
+}
+
+// WhiteB is a syntax for WhiteB.
+func (builder *Builder) WhiteB() *Builder {
+ return builder.With(WhiteB)
+}
+
+// DefaultB is a syntax for DefaultB.
+func (builder *Builder) DefaultB() *Builder {
+ return builder.With(DefaultB)
+}
+
+// Frame is a syntax for Frame.
+func (builder *Builder) Frame() *Builder {
+ return builder.With(Frame)
+}
+
+// Encircle is a syntax for Encircle.
+func (builder *Builder) Encircle() *Builder {
+ return builder.With(Encircle)
+}
+
+// Overline is a syntax for Overline.
+func (builder *Builder) Overline() *Builder {
+ return builder.With(Overline)
+}
+
+// LightBlackF is a syntax for LightBlackF.
+func (builder *Builder) LightBlackF() *Builder {
+ return builder.With(LightBlackF)
+}
+
+// LightRedF is a syntax for LightRedF.
+func (builder *Builder) LightRedF() *Builder {
+ return builder.With(LightRedF)
+}
+
+// LightGreenF is a syntax for LightGreenF.
+func (builder *Builder) LightGreenF() *Builder { + return builder.With(LightGreenF) +} + +// LightYellowF is a syntax for LightYellowF. +func (builder *Builder) LightYellowF() *Builder { + return builder.With(LightYellowF) +} + +// LightBlueF is a syntax for LightBlueF. +func (builder *Builder) LightBlueF() *Builder { + return builder.With(LightBlueF) +} + +// LightMagentaF is a syntax for LightMagentaF. +func (builder *Builder) LightMagentaF() *Builder { + return builder.With(LightMagentaF) +} + +// LightCyanF is a syntax for LightCyanF. +func (builder *Builder) LightCyanF() *Builder { + return builder.With(LightCyanF) +} + +// LightWhiteF is a syntax for LightWhiteF. +func (builder *Builder) LightWhiteF() *Builder { + return builder.With(LightWhiteF) +} + +// LightBlackB is a syntax for LightBlackB. +func (builder *Builder) LightBlackB() *Builder { + return builder.With(LightBlackB) +} + +// LightRedB is a syntax for LightRedB. +func (builder *Builder) LightRedB() *Builder { + return builder.With(LightRedB) +} + +// LightGreenB is a syntax for LightGreenB. +func (builder *Builder) LightGreenB() *Builder { + return builder.With(LightGreenB) +} + +// LightYellowB is a syntax for LightYellowB. +func (builder *Builder) LightYellowB() *Builder { + return builder.With(LightYellowB) +} + +// LightBlueB is a syntax for LightBlueB. +func (builder *Builder) LightBlueB() *Builder { + return builder.With(LightBlueB) +} + +// LightMagentaB is a syntax for LightMagentaB. +func (builder *Builder) LightMagentaB() *Builder { + return builder.With(LightMagentaB) +} + +// LightCyanB is a syntax for LightCyanB. +func (builder *Builder) LightCyanB() *Builder { + return builder.With(LightCyanB) +} + +// LightWhiteB is a syntax for LightWhiteB. +func (builder *Builder) LightWhiteB() *Builder { + return builder.With(LightWhiteB) +} + +// Color3BitF is a syntax for Color3BitF. +func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { + return builder.With(Color3BitF(c)) +} + +// Color3BitB is a syntax for Color3BitB. +func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { + return builder.With(Color3BitB(c)) +} + +// Color8BitF is a syntax for Color8BitF. +func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { + return builder.With(Color8BitF(c)) +} + +// Color8BitB is a syntax for Color8BitB. +func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { + return builder.With(Color8BitB(c)) +} + +// FullColorF is a syntax for FullColorF. +func (builder *Builder) FullColorF(r, g, b uint8) *Builder { + return builder.With(FullColorF(r, g, b)) +} + +// FullColorB is a syntax for FullColorB. +func (builder *Builder) FullColorB(r, g, b uint8) *Builder { + return builder.With(FullColorB(r, g, b)) +} + +// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. +func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { + return builder.Color3BitF(NewRGB3Bit(r, g, b)) +} + +// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. +func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { + return builder.Color3BitB(NewRGB3Bit(r, g, b)) +} + +// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. +func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { + return builder.Color8BitF(NewRGB8Bit(r, g, b)) +} + +// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. 
+func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder {
+ return builder.Color8BitB(NewRGB8Bit(r, g, b))
+}
+
+func init() {
+ EmptyBuilder = &Builder{empty}
+}
diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif
new file mode 100644
index 0000000000000000000000000000000000000000..c6c613bb70645efa4e184726060ea1ff981eeceb
GIT binary patch
literal 12548
[12548 bytes of binary GIF data omitted]
zdMQd#n9+E}6+hMmkK*{jXO!Gz2PQl+!}^qDSr}xs{8GT`il_LMZel;GH0zX9Rdi&W Y(?6j)4M_L*quqaULH)mVw5$IA0FB)@mjD0& literal 0 HcmV?d00001 diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go new file mode 100644 index 000000000..0ba3464e6 --- /dev/null +++ b/vendor/github.com/morikuni/aec/sgr.go @@ -0,0 +1,202 @@ +package aec + +import ( + "fmt" +) + +// RGB3Bit is a 3bit RGB color. +type RGB3Bit uint8 + +// RGB8Bit is a 8bit RGB color. +type RGB8Bit uint8 + +func newSGR(n uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", n)) +} + +// NewRGB3Bit create a RGB3Bit from given RGB. +func NewRGB3Bit(r, g, b uint8) RGB3Bit { + return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4)) +} + +// NewRGB8Bit create a RGB8Bit from given RGB. +func NewRGB8Bit(r, g, b uint8) RGB8Bit { + return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43) +} + +// Color3BitF set the foreground color of text. +func Color3BitF(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+30)) +} + +// Color3BitB set the background color of text. +func Color3BitB(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+40)) +} + +// Color8BitF set the foreground color of text. +func Color8BitF(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c)) +} + +// Color8BitB set the background color of text. +func Color8BitB(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c)) +} + +// FullColorF set the foreground color of text. +func FullColorF(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b)) +} + +// FullColorB set the foreground color of text. +func FullColorB(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b)) +} + +// Style +var ( + // Bold set the text style to bold or increased intensity. + Bold ANSI + + // Faint set the text style to faint. + Faint ANSI + + // Italic set the text style to italic. + Italic ANSI + + // Underline set the text style to underline. + Underline ANSI + + // BlinkSlow set the text style to slow blink. + BlinkSlow ANSI + + // BlinkRapid set the text style to rapid blink. + BlinkRapid ANSI + + // Inverse swap the foreground color and background color. + Inverse ANSI + + // Conceal set the text style to conceal. + Conceal ANSI + + // CrossOut set the text style to crossed out. + CrossOut ANSI + + // Frame set the text style to framed. + Frame ANSI + + // Encircle set the text style to encircled. + Encircle ANSI + + // Overline set the text style to overlined. + Overline ANSI +) + +// Foreground color of text. +var ( + // DefaultF is the default color of foreground. + DefaultF ANSI + + // Normal color + BlackF ANSI + RedF ANSI + GreenF ANSI + YellowF ANSI + BlueF ANSI + MagentaF ANSI + CyanF ANSI + WhiteF ANSI + + // Light color + LightBlackF ANSI + LightRedF ANSI + LightGreenF ANSI + LightYellowF ANSI + LightBlueF ANSI + LightMagentaF ANSI + LightCyanF ANSI + LightWhiteF ANSI +) + +// Background color of text. +var ( + // DefaultB is the default color of background. 
+ DefaultB ANSI + + // Normal color + BlackB ANSI + RedB ANSI + GreenB ANSI + YellowB ANSI + BlueB ANSI + MagentaB ANSI + CyanB ANSI + WhiteB ANSI + + // Light color + LightBlackB ANSI + LightRedB ANSI + LightGreenB ANSI + LightYellowB ANSI + LightBlueB ANSI + LightMagentaB ANSI + LightCyanB ANSI + LightWhiteB ANSI +) + +func init() { + Bold = newSGR(1) + Faint = newSGR(2) + Italic = newSGR(3) + Underline = newSGR(4) + BlinkSlow = newSGR(5) + BlinkRapid = newSGR(6) + Inverse = newSGR(7) + Conceal = newSGR(8) + CrossOut = newSGR(9) + + BlackF = newSGR(30) + RedF = newSGR(31) + GreenF = newSGR(32) + YellowF = newSGR(33) + BlueF = newSGR(34) + MagentaF = newSGR(35) + CyanF = newSGR(36) + WhiteF = newSGR(37) + + DefaultF = newSGR(39) + + BlackB = newSGR(40) + RedB = newSGR(41) + GreenB = newSGR(42) + YellowB = newSGR(43) + BlueB = newSGR(44) + MagentaB = newSGR(45) + CyanB = newSGR(46) + WhiteB = newSGR(47) + + DefaultB = newSGR(49) + + Frame = newSGR(51) + Encircle = newSGR(52) + Overline = newSGR(53) + + LightBlackF = newSGR(90) + LightRedF = newSGR(91) + LightGreenF = newSGR(92) + LightYellowF = newSGR(93) + LightBlueF = newSGR(94) + LightMagentaF = newSGR(95) + LightCyanF = newSGR(96) + LightWhiteF = newSGR(97) + + LightBlackB = newSGR(100) + LightRedB = newSGR(101) + LightGreenB = newSGR(102) + LightYellowB = newSGR(103) + LightBlueB = newSGR(104) + LightMagentaB = newSGR(105) + LightCyanB = newSGR(106) + LightWhiteB = newSGR(107) +} diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap new file mode 100644 index 000000000..eaf8b2f9e --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.mailmap @@ -0,0 +1,4 @@ +Aaron Lehmann +Derek McGowan +Stephen J Day +Haibing Zhou diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml new file mode 100644 index 000000000..b6165f83c --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml @@ -0,0 +1,28 @@ +version: 2 + +requirements: + signed_off_by: + required: true + +always_pending: + title_regex: '^WIP' + explanation: 'Work in progress...' + +group_defaults: + required: 2 + approve_by_comment: + enabled: true + approve_regex: '^LGTM' + reject_regex: '^Rejected' + reset_on_push: + enabled: true + author_approval: + ignored: true + conditions: + branches: + - master + +groups: + go-digest: + teams: + - go-digest-maintainers diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml new file mode 100644 index 000000000..5775f885c --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.12.x + - 1.13.x + - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md new file mode 100644 index 000000000..e4d962ac1 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md @@ -0,0 +1,72 @@ +# Contributing to Docker open source projects + +Want to hack on this project? Awesome! Here are instructions to get you started. + +This project is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. 
+ +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE new file mode 100644 index 000000000..3ac8ab648 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2019, 2020 OCI Contributors + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. 
Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. 
Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. 
If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. 
THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. 
Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS new file mode 100644 index 000000000..843b1b206 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS @@ -0,0 +1,5 @@ +Derek McGowan (@dmcgowan) +Stephen Day (@stevvooe) +Vincent Batts (@vbatts) +Akihiro Suda (@AkihiroSuda) +Sebastiaan van Stijn (@thaJeztah) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md new file mode 100644 index 000000000..a11287207 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -0,0 +1,96 @@ +# go-digest + +[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) + +Common digest package used across the container ecosystem. + +Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. + +# What is a digest? + +A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function). + +The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems: + +```go +id := digest.FromBytes([]byte("my content")) +``` + +In the example above, the id can be used to uniquely identify the byte slice "my content". +This allows two disparate applications to agree on a verifiable identifier without having to trust one another. 
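+
+For illustration, a digest can serve directly as the key of a naive in-memory content-addressable store (a hypothetical sketch, not an API provided by this package):
+
+```go
+// store maps content digests to the content itself. Anyone holding the
+// same bytes derives the same key, so a lookup doubles as an integrity check.
+store := map[digest.Digest][]byte{}
+content := []byte("my content")
+store[digest.FromBytes(content)] = content
+
+if data, ok := store[digest.FromBytes([]byte("my content"))]; ok {
+	fmt.Printf("found %d bytes\n", len(data))
+}
+```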
+
+An identifying digest can be verified, as follows:
+
+```go
+if id != digest.FromBytes([]byte("my content")) {
+	return errors.New("the content has changed!")
+}
+```
+
+A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense:
+
+```go
+rd := getContent()
+verifier := id.Verifier()
+io.Copy(verifier, rd)
+
+if !verifier.Verified() {
+	return errors.New("the content has changed!")
+}
+```
+
+Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system.
+
+# Usage
+
+While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package.
+
+1. Make sure to import the hash implementations into your application or the package will panic.
+   You should have something like the following in the main (or other entrypoint) of your application:
+
+   ```go
+   import (
+   	_ "crypto/sha256"
+   	_ "crypto/sha512"
+   )
+   ```
+   This may seem inconvenient but it allows you to replace the hash
+   implementations with others, such as https://github.com/stevvooe/resumable.
+
+2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input (see the sketch at the end of this README).
+   While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
+
+3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.
+
+# Stability
+
+The Go API, at this stage, is considered stable, unless otherwise noted.
+
+As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
+
+# Contributing
+
+This package is considered fairly complete.
+It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
+New additions will be met with skepticism.
+If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.
+
+## Code of Conduct
+
+Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].
+
+## Security
+
+If you find an issue, please follow the [security][security] protocol to report it.
+
+# Copyright and license
+
+Copyright © 2019, 2020 OCI Contributors
+Copyright © 2016 Docker, Inc.
+All rights reserved, except as follows.
+Code is released under the [Apache 2.0 license](LICENSE).
+This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
+You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+
+[security]: https://github.com/opencontainers/org/blob/master/security
+[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md
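+
+As a closing illustration of point 2 under Usage, untrusted input can be funneled through `digest.Parse` before it is used anywhere else (a hypothetical helper sketch, not an API of this package):
+
+```go
+// parseUserInput validates an untrusted digest string. digest.Parse
+// rejects anything that is not a well-formed "algorithm:encoded" digest.
+func parseUserInput(s string) (digest.Digest, error) {
+	dgst, err := digest.Parse(s)
+	if err != nil {
+		return "", err
+	}
+	return dgst, nil
+}
+```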
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
new file mode 100644
index 000000000..490951dc3
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/algorithm.go
@@ -0,0 +1,193 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"crypto"
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
+	SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
+	SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+
+	// anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
+	// Note that uppercase A-F is disallowed.
+	anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
+		SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
+		SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
+		SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, Digester and Hash will return nil.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
+}
+
+func (a Algorithm) String() string {
+	return string(a)
+}
+
+// Size returns the number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+	h, ok := algorithms[a]
+	if !ok {
+		return 0
+	}
+	return h.Size()
+}
+
+// Set is implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+	if value == "" {
+		*a = Canonical
+	} else {
+		// just do a type conversion, support is queried with Available.
+		*a = Algorithm(value)
+	}
+
+	if !a.Available() {
+		return ErrDigestUnsupported
+	}
+
+	return nil
+}
+
+// Digester returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling Digester.
+func (a Algorithm) Digester() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
+	}
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		// Empty algorithm string is invalid
+		if a == "" {
+			panic("empty digest algorithm, validate before calling Algorithm.Hash()")
+		}
+
+		// NOTE(stevvooe): A missing hash is usually a programming error that
+		// must be resolved at compile time. We don't import in the digest
+		// package to allow users to choose their hash implementation (such as
+		// when using stevvooe/resumable or a hardware accelerated package).
+		//
+		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
+		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+	}
+
+	return algorithms[a].New()
+}
+
+// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
+// the encoded portion of the digest.
+func (a Algorithm) Encode(d []byte) string {
+	// TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
+	// add support for back registration, we can modify this accordingly.
+	return fmt.Sprintf("%x", d)
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+	digester := a.Digester()
+
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+	digester := a.Digester()
+
+	if _, err := digester.Hash().Write(p); err != nil {
+		// Writes to a Hash should never fail. None of the existing
+		// hash implementations in the stdlib or hashes vendored
+		// here can return errors from Write. Having a panic in this
+		// condition instead of having FromBytes return an error value
+		// avoids unnecessary error handling paths in all callers.
+		panic("write to hash function returned error: " + err.Error())
+	}
+
+	return digester.Digest()
+}
+
+// FromString digests the string input and returns a Digest.
+func (a Algorithm) FromString(s string) Digest {
+	return a.FromBytes([]byte(s))
+}
+
+// Validate validates the encoded portion of a digest string.
+func (a Algorithm) Validate(encoded string) error {
+	r, ok := anchoredEncodedRegexps[a]
+	if !ok {
+		return ErrDigestUnsupported
+	}
+	// Digests must always be hex-encoded, ensuring that their hex portion will
+	// always be size*2
+	if a.Size()*2 != len(encoded) {
+		return ErrDigestInvalidLength
+	}
+	if r.MatchString(encoded) {
+		return nil
+	}
+	return ErrDigestInvalidFormat
+}
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
new file mode 100644
index 000000000..518b5e715
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/digest.go
@@ -0,0 +1,157 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// Digest allows simple protection of hex-formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and provide quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows one to abstract the digest behind this type and work only in
+// those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+	return NewDigestFromEncoded(alg, alg.Encode(p))
+}
+
+// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
+func NewDigestFromHex(alg, hex string) Digest {
+	return NewDigestFromEncoded(Algorithm(alg), hex)
+}
+
+// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
+func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
+	return Digest(fmt.Sprintf("%s:%s", alg, encoded))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+	// ErrDigestInvalidFormat is returned when the digest format is invalid.
+	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+	// ErrDigestInvalidLength is returned when the digest has an invalid length.
+	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+	// ErrDigestUnsupported is returned when the digest algorithm is unsupported.
+	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// Parse parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func Parse(s string) (Digest, error) {
+	d := Digest(s)
+	return d, d.Validate()
+}
+
+// FromReader consumes the content of rd until io.EOF, returning the canonical
+// digest.
+func FromReader(rd io.Reader) (Digest, error) {
+	return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+	return Canonical.FromBytes(p)
+}
+
+// FromString digests the input and returns a Digest.
+func FromString(s string) Digest {
+	return Canonical.FromString(s)
+}
+
+// Validate checks that the contents of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+	s := string(d)
+	i := strings.Index(s, ":")
+	if i <= 0 || i+1 == len(s) {
+		return ErrDigestInvalidFormat
+	}
+	algorithm, encoded := Algorithm(s[:i]), s[i+1:]
+	if !algorithm.Available() {
+		if !DigestRegexpAnchored.MatchString(s) {
+			return ErrDigestInvalidFormat
+		}
+		return ErrDigestUnsupported
+	}
+	return algorithm.Validate(encoded)
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Verifier returns a writer object that can be used to verify a stream of +// content against the digest. If the digest is invalid, the method will panic. +func (d Digest) Verifier() Verifier { + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + } +} + +// Encoded returns the encoded portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Encoded() string { + return string(d[d.sepIndex()+1:]) +} + +// Hex is deprecated. Please use Digest.Encoded. +func (d Digest) Hex() string { + return d.Encoded() +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic(fmt.Sprintf("no ':' separator in digest %q", d)) + } + + return i +} diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go new file mode 100644 index 000000000..ede907757 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digester.go @@ -0,0 +1,40 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import "hash" + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go new file mode 100644 index 000000000..83d3a936c --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/doc.go @@ -0,0 +1,62 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package digest provides a generalized type to opaquely represent message +// digests and their operations within the registry. The Digest type is +// designed to serve as a flexible identifier in a content-addressable system. 
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+//	<algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// The "algorithm" portion defines both the hashing algorithm used to calculate
+// the digest and the encoding of the resulting digest, which defaults to "hex"
+// if not otherwise specified. Currently, all supported algorithms have their
+// digests encoded in hex strings.
+//
+// In the example above, the string "sha256" is the algorithm and the hex bytes
+// are the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
+//
+package digest
diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go
new file mode 100644
index 000000000..afef506f4
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/verifiers.go
@@ -0,0 +1,46 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"hash"
+	"io"
+)
+
+// Verifier presents a general verification interface to be used with message
+// digests and other byte stream verifications. Users instantiate a Verifier
+// from one of the various methods, write the data under test to it, then check
+// the result with the Verified method.
+type Verifier interface {
+	io.Writer
+
+	// Verified will return true if the content written to Verifier matches
+	// the digest.
+ Verified() bool +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 000000000..9fdc20fdb --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 000000000..581cf7cdf --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,62 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. 
+	AnnotationTitle = "org.opencontainers.image.title"
+
+	// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
+	AnnotationDescription = "org.opencontainers.image.description"
+
+	// AnnotationBaseImageDigest is the annotation key for the digest of the image's base image.
+	AnnotationBaseImageDigest = "org.opencontainers.image.base.digest"
+
+	// AnnotationBaseImageName is the annotation key for the image reference of the image's base image.
+	AnnotationBaseImageName = "org.opencontainers.image.base.name"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
new file mode 100644
index 000000000..36b0aeb8f
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
@@ -0,0 +1,111 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"time"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
+type ImageConfig struct {
+	// User defines the username or UID which the process in the container should run as.
+	User string `json:"User,omitempty"`
+
+	// ExposedPorts is a set of ports to expose from a container running this image.
+	ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+
+	// Env is a list of environment variables to be used in a container.
+	Env []string `json:"Env,omitempty"`
+
+	// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
+	Entrypoint []string `json:"Entrypoint,omitempty"`
+
+	// Cmd defines the default arguments to the entrypoint of the container.
+	Cmd []string `json:"Cmd,omitempty"`
+
+	// Volumes is a set of directories describing where the process is likely to write data specific to a container instance.
+	Volumes map[string]struct{} `json:"Volumes,omitempty"`
+
+	// WorkingDir sets the current working directory of the entrypoint process in the container.
+	WorkingDir string `json:"WorkingDir,omitempty"`
+
+	// Labels contains arbitrary metadata for the container.
+	Labels map[string]string `json:"Labels,omitempty"`
+
+	// StopSignal contains the system call signal that will be sent to the container to exit.
+	StopSignal string `json:"StopSignal,omitempty"`
+
+	// ArgsEscaped
+	//
+	// Deprecated: This field is present only for legacy compatibility with
+	// Docker and should not be used by new image builders. It is used by Docker
+	// for Windows images to indicate that the `Entrypoint` or `Cmd` or both
+	// contain only a single-element array that is pre-escaped and combined
+	// into a single string `CommandLine`. If `true`, the value in `Entrypoint` or
+	// `Cmd` should be used as-is to avoid double escaping.
+	// https://github.com/opencontainers/image-spec/pull/892
+	ArgsEscaped bool `json:"ArgsEscaped,omitempty"`
+}
+
+// RootFS describes the layer content addresses of an image.
+type RootFS struct {
+	// Type is the type of the rootfs.
+	Type string `json:"type"`
+
+	// DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
+	DiffIDs []digest.Digest `json:"diff_ids"`
+}
+
+// History describes the history of a layer.
+type History struct {
+	// Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
+	Created *time.Time `json:"created,omitempty"`
+
+	// CreatedBy is the command which created the layer.
+	CreatedBy string `json:"created_by,omitempty"`
+
+	// Author is the author of the build point.
+	Author string `json:"author,omitempty"`
+
+	// Comment is a custom message set when creating the layer.
+	Comment string `json:"comment,omitempty"`
+
+	// EmptyLayer is used to mark if the history item created a filesystem diff.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Image is the JSON structure which describes some basic information about the image.
+// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
+type Image struct {
+	// Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6.
+	Created *time.Time `json:"created,omitempty"`
+
+	// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
+	Author string `json:"author,omitempty"`
+
+	// Platform describes the platform which the image in the manifest runs on.
+	Platform
+
+	// Config defines the execution parameters which should be used as a base when running a container using the image.
+	Config ImageConfig `json:"config,omitempty"`
+
+	// RootFS references the layer content addresses used by the image.
+	RootFS RootFS `json:"rootfs"`
+
+	// History describes the history of each layer.
+	History []History `json:"history,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
new file mode 100644
index 000000000..1881b1181
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
@@ -0,0 +1,80 @@
+// Copyright 2016-2022 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import digest "github.com/opencontainers/go-digest"
+
+// Descriptor describes the disposition of targeted content.
+// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype
+// when marshalled to JSON.
+type Descriptor struct {
+	// MediaType is the media type of the object this schema refers to.
+	MediaType string `json:"mediaType"`
+
+	// Digest is the digest of the targeted content.
+	Digest digest.Digest `json:"digest"`
+
+	// Size specifies the size in bytes of the blob.
+ Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` + + // ArtifactType is the IANA media type of this artifact. + ArtifactType string `json:"artifactType,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64le`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. + Variant string `json:"variant,omitempty"` +} + +// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. +var DescriptorEmptyJSON = Descriptor{ + MediaType: MediaTypeEmptyJSON, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 000000000..e2bed9d4e --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. 
+ ArtifactType string `json:"artifactType,omitempty"` + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 000000000..c5503cb30 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // ImageLayoutFile is the file name containing ImageLayout in an OCI Image Layout + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" + // ImageIndexFile is the file name of the entry point for references and descriptors in an OCI Image Layout + ImageIndexFile = "index.json" + // ImageBlobsDir is the directory name containing content addressable blobs in an OCI Image Layout + ImageBlobsDir = "blobs" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 000000000..26fec52a6 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,41 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. 
+ ArtifactType string `json:"artifactType,omitempty"` + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 000000000..892ba3de9 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + + // MediaTypeImageLayer is the media type used for layers referenced by the manifest. + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" + + // MediaTypeImageLayerGzip is the media type used for gzipped layers + // referenced by the manifest. + MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by + // the manifest but with distribution restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 + MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + // MediaTypeImageLayerNonDistributableGzip is the media type for + // gzipped layers referenced by the manifest but with distribution + // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. 
+	// https://github.com/opencontainers/image-spec/pull/965
+	MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
+
+	// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
+	// compressed layers referenced by the manifest but with distribution
+	// restrictions.
+	//
+	// Deprecated: Non-distributable layers are deprecated, and not recommended
+	// for future use. Implementations SHOULD NOT produce new non-distributable
+	// layers.
+	// https://github.com/opencontainers/image-spec/pull/965
+	MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd"
+
+	// MediaTypeImageConfig specifies the media type for the image configuration.
+	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
+
+	// MediaTypeEmptyJSON specifies the media type for an unused blob containing the value `{}`.
+	MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
new file mode 100644
index 000000000..11e09b584
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes.
+	VersionMajor = 1
+	// VersionMinor is for functionality added in a backwards-compatible manner.
+	VersionMinor = 1
+	// VersionPatch is for backwards-compatible bug fixes.
+	VersionPatch = 0
+
+	// VersionDev indicates the development branch. For releases this is the empty string.
+	VersionDev = "-rc.5"
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
new file mode 100644
index 000000000..58a1510f3
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
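Taken together, the types vendored in this package (Descriptor, Manifest, the media-type constants, and the Versioned envelope that follows) are enough to assemble a well-formed image manifest. A hedged sketch with stand-in blob contents and the usual import aliases assumed; `describe` is a local helper for this example, not part of the spec:

```go
package main

import (
	_ "crypto/sha256" // needed so go-digest can compute sha256 digests
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// describe builds a Descriptor for an arbitrary blob; media type, content
// digest, and size are the three required fields.
func describe(mediaType string, blob []byte) v1.Descriptor {
	return v1.Descriptor{
		MediaType: mediaType,
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
	}
}

func main() {
	cfg := []byte(`{"architecture":"amd64","os":"linux"}`)
	layer := []byte("stand-in for a tar+gzip layer blob")

	m := v1.Manifest{
		Versioned: specs.Versioned{SchemaVersion: 2},
		MediaType: v1.MediaTypeImageManifest,
		Config:    describe(v1.MediaTypeImageConfig, cfg),
		Layers:    []v1.Descriptor{describe(v1.MediaTypeImageLayerGzip, layer)},
		Annotations: map[string]string{
			v1.AnnotationTitle: "example",
		},
	}

	out, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```

Decoding just the Versioned envelope below, as its comment suggests, lets a consumer check SchemaVersion before committing to a full parse of the manifest.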
+type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE new file mode 100644 index 000000000..41ce7f16e --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2012 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md new file mode 100644 index 000000000..3474739ed --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/README.md @@ -0,0 +1,141 @@ +# What is diskv? + +Diskv (disk-vee) is a simple, persistent key-value store written in the Go +language. It starts with an incredibly simple API for storing arbitrary data on +a filesystem by key, and builds several layers of performance-enhancing +abstraction on top. The end result is a conceptually simple, but highly +performant, disk-backed storage system. + +[![Build Status][1]][2] + +[1]: https://drone.io/github.com/peterbourgon/diskv/status.png +[2]: https://drone.io/github.com/peterbourgon/diskv/latest + + +# Installing + +Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. +Then, + +```bash +$ go get github.com/peterbourgon/diskv +``` + +[3]: http://golang.org +[4]: http://golang.org/doc/install/source +[5]: http://golang.org/doc/install + + +# Usage + +```go +package main + +import ( + "fmt" + "github.com/peterbourgon/diskv" +) + +func main() { + // Simplest transform function: put all the data files into the base dir. + flatTransform := func(s string) []string { return []string{} } + + // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. + d := diskv.New(diskv.Options{ + BasePath: "my-data-dir", + Transform: flatTransform, + CacheSizeMax: 1024 * 1024, + }) + + // Write three bytes to the key "alpha". + key := "alpha" + d.Write(key, []byte{'1', '2', '3'}) + + // Read the value back out of the store. + value, _ := d.Read(key) + fmt.Printf("%v\n", value) + + // Erase the key+value from the store (and the disk). + d.Erase(key) +} +``` + +More complex examples can be found in the "examples" subdirectory. + + +# Theory + +## Basic idea + +At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). +The data is written to a single file on disk, with the same name as the key. 
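Before the transform mechanism is described next, a concrete sketch may help. The sharding transform here is hypothetical (any fixed scheme works, subject to the subset-key caveat explained shortly), while `diskv.New`, `Options`, `Write`, and `Has` are the real API from this vendored package:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	// Hypothetical transform: shard on the first four characters, so key
	// "abcdef" is written to my-data-dir/ab/cd/abcdef. Assumes keys are
	// at least four characters long.
	blockTransform := func(s string) []string {
		return []string{s[0:2], s[2:4]}
	}

	d := diskv.New(diskv.Options{
		BasePath:     "my-data-dir",
		Transform:    blockTransform,
		CacheSizeMax: 1024 * 1024, // 1MB in-memory read cache
	})

	if err := d.Write("abcdef", []byte("value")); err != nil {
		panic(err)
	}
	fmt.Println(d.Has("abcdef")) // true; file lives at my-data-dir/ab/cd/abcdef
}
```

The same Options struct also accepts a Compression implementation (for example `diskv.NewGzipCompression()`), and the WriteStream and ReadStream methods, covered under Streaming later in this README, avoid buffering whole values in memory.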
+The key determines where that file will be stored, via a user-provided
+`TransformFunc`, which takes a key and returns a slice (`[]string`)
+corresponding to a path list where the key file will be stored. The simplest
+TransformFunc,
+
+```go
+func SimpleTransform(key string) []string {
+    return []string{}
+}
+```
+
+will place all keys in the same base directory. The design is inspired by
+[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
+behavior is available in the content-addressable-storage example.
+
+[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
+
+**Note** that your TransformFunc should ensure that one valid key doesn't
+transform to a subset of another valid key. That is, it shouldn't be possible
+to construct valid keys that resolve to directory names. As a concrete example,
+if your TransformFunc splits on every 3 characters, then
+
+```go
+d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
+d.Write("abc", val)    // Error: attempted write to <base>/abc/abc, but it's a directory
+```
+
+This will be addressed in an upcoming version of diskv.
+
+Probably the most important design principle behind diskv is that your data is
+always flatly available on the disk. diskv will never do anything that would
+prevent you from accessing, copying, backing up, or otherwise interacting with
+your data via common UNIX commandline tools.
+
+## Adding a cache
+
+An in-memory caching layer is provided by combining the BasicStore
+functionality with a simple map structure, and keeping it up-to-date as
+appropriate. Since the map structure in Go is not threadsafe, it's combined
+with a RWMutex to provide safe concurrent access.
+
+## Adding order
+
+diskv is a key-value store and therefore inherently unordered. An ordering
+system can be injected into the store by passing something which satisfies the
+diskv.Index interface. (A default implementation, using Google's
+[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
+user-provided Less function) index of the keys, which can be queried.
+
+[7]: https://github.com/google/btree
+
+## Adding compression
+
+Something which implements the diskv.Compression interface may be passed
+during store creation, so that all Writes and Reads are filtered through
+a compression/decompression pipeline. Several default implementations,
+using stdlib compression algorithms, are provided. Note that data is cached
+compressed; the cost of decompression is borne with each Read.
+
+## Streaming
+
+diskv also provides ReadStream and WriteStream methods, to allow very large
+data to be handled efficiently.
+
+
+# Future plans
+
+ * Needs plenty of robust testing: huge datasets, etc...
+ * More thorough benchmarking
+ * Your suggestions for use-cases I haven't thought of
diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go
new file mode 100644
index 000000000..5192b0273
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/compression.go
@@ -0,0 +1,64 @@
+package diskv
+
+import (
+	"compress/flate"
+	"compress/gzip"
+	"compress/zlib"
+	"io"
+)
+
+// Compression is an interface that Diskv uses to implement compression of
+// data. Writer takes a destination io.Writer and returns a WriteCloser that
+// compresses all data written through it. Reader takes a source io.Reader and
+// returns a ReadCloser that decompresses all data read through it. You may
+// define these methods on your own type, or use one of the NewCompression
+// helpers.
+type Compression interface {
+	Writer(dst io.Writer) (io.WriteCloser, error)
+	Reader(src io.Reader) (io.ReadCloser, error)
+}
+
+// NewGzipCompression returns a Gzip-based Compression.
+func NewGzipCompression() Compression {
+	return NewGzipCompressionLevel(flate.DefaultCompression)
+}
+
+// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
+func NewGzipCompressionLevel(level int) Compression {
+	return &genericCompression{
+		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
+		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
+	}
+}
+
+// NewZlibCompression returns a Zlib-based Compression.
+func NewZlibCompression() Compression {
+	return NewZlibCompressionLevel(flate.DefaultCompression)
+}
+
+// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
+func NewZlibCompressionLevel(level int) Compression {
+	return NewZlibCompressionLevelDict(level, nil)
+}
+
+// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
+// level, based on the given dictionary.
+func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
+	return &genericCompression{
+		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
+		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
+	}
+}
+
+type genericCompression struct {
+	wf func(w io.Writer) (io.WriteCloser, error)
+	rf func(r io.Reader) (io.ReadCloser, error)
+}
+
+func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
+	return g.wf(dst)
+}
+
+func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
+	return g.rf(src)
+}
diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go
new file mode 100644
index 000000000..524dc0a6e
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/diskv.go
@@ -0,0 +1,624 @@
+// Diskv (disk-vee) is a simple, persistent, key-value store.
+// It stores all data flatly on the filesystem.
+
+package diskv
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+const (
+	defaultBasePath             = "diskv"
+	defaultFilePerm os.FileMode = 0666
+	defaultPathPerm os.FileMode = 0777
+)
+
+var (
+	defaultTransform   = func(s string) []string { return []string{} }
+	errCanceled        = errors.New("canceled")
+	errEmptyKey        = errors.New("empty key")
+	errBadKey          = errors.New("bad key")
+	errImportDirectory = errors.New("can't import a directory")
+)
+
+// TransformFunction transforms a key into a slice of strings, with each
+// element in the slice representing a directory in the file path where the
+// key's entry will eventually be stored.
+//
+// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
+// the final location of the data file will be <basedir>/ab/cde/f/abcdef
+type TransformFunction func(s string) []string
+
+// Options define a set of properties that dictate Diskv behavior.
+// All values are optional.
+type Options struct {
+	BasePath     string
+	Transform    TransformFunction
+	CacheSizeMax uint64 // bytes
+	PathPerm     os.FileMode
+	FilePerm     os.FileMode
+	// If TempDir is set, it will enable filesystem atomic writes by
+	// writing temporary files to that location before being moved
+	// to BasePath.
+	// Note that TempDir MUST be on the same device/partition as
+	// BasePath.
+	TempDir string
+
+	Index     Index
+	IndexLess LessFunction
+
+	Compression Compression
+}
+
+// Diskv is the central type of the package. You shouldn't construct Diskv
+// structures directly; instead, use the New constructor.
+type Diskv struct {
+	Options
+	mu        sync.RWMutex
+	cache     map[string][]byte
+	cacheSize uint64
+}
+
+// New returns an initialized Diskv structure, ready to use.
+// If the path identified by BasePath already contains data,
+// it will be accessible, but not yet cached.
+func New(o Options) *Diskv {
+	if o.BasePath == "" {
+		o.BasePath = defaultBasePath
+	}
+	if o.Transform == nil {
+		o.Transform = defaultTransform
+	}
+	if o.PathPerm == 0 {
+		o.PathPerm = defaultPathPerm
+	}
+	if o.FilePerm == 0 {
+		o.FilePerm = defaultFilePerm
+	}
+
+	d := &Diskv{
+		Options:   o,
+		cache:     map[string][]byte{},
+		cacheSize: 0,
+	}
+
+	if d.Index != nil && d.IndexLess != nil {
+		d.Index.Initialize(d.IndexLess, d.Keys(nil))
+	}
+
+	return d
+}
+
+// Write synchronously writes the key-value pair to disk, making it immediately
+// available for reads. Write relies on the filesystem to perform an eventual
+// sync to physical media. If you need stronger guarantees, see WriteStream.
+func (d *Diskv) Write(key string, val []byte) error {
+	return d.WriteStream(key, bytes.NewBuffer(val), false)
+}
+
+// WriteStream writes the data represented by the io.Reader to the disk, under
+// the provided key. If sync is true, WriteStream performs an explicit sync on
+// the file as soon as it's written.
+//
+// bytes.Buffer provides io.Reader semantics for basic data types.
+func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
+	if len(key) <= 0 {
+		return errEmptyKey
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.writeStreamWithLock(key, r, sync)
+}
+
+// createKeyFileWithLock either creates the key file directly, or
+// creates a temporary file in TempDir if it is set.
+func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
+	if d.TempDir != "" {
+		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
+			return nil, fmt.Errorf("temp mkdir: %s", err)
+		}
+		f, err := ioutil.TempFile(d.TempDir, "")
+		if err != nil {
+			return nil, fmt.Errorf("temp file: %s", err)
+		}
+
+		if err := f.Chmod(d.FilePerm); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return nil, fmt.Errorf("chmod: %s", err)
+		}
+		return f, nil
+	}
+
+	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
+	f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
+	if err != nil {
+		return nil, fmt.Errorf("open file: %s", err)
+	}
+	return f, nil
+}
+
+// writeStreamWithLock does no input validation checking.
+func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
+	if err := d.ensurePathWithLock(key); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	f, err := d.createKeyFileWithLock(key)
+	if err != nil {
+		return fmt.Errorf("create key file: %s", err)
+	}
+
+	wc := io.WriteCloser(&nopWriteCloser{f})
+	if d.Compression != nil {
+		wc, err = d.Compression.Writer(f)
+		if err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("compression writer: %s", err)
+		}
+	}
+
+	if _, err := io.Copy(wc, r); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("i/o copy: %s", err)
+	}
+
+	if err := wc.Close(); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("compression close: %s", err)
+	}
+
+	if sync {
+		if err := f.Sync(); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("file sync: %s", err)
+		}
+	}
+
+	if err := f.Close(); err != nil {
+		return fmt.Errorf("file close: %s", err)
+	}
+
+	if f.Name() != d.completeFilename(key) {
+		if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("rename: %s", err)
+		}
+	}
+
+	if d.Index != nil {
+		d.Index.Insert(key)
+	}
+
+	d.bustCacheWithLock(key) // cache only on read
+
+	return nil
+}
+
+// Import imports the source file into diskv under the destination key. If the
+// destination key already exists, it's overwritten. If move is true, the
+// source file is removed after a successful import.
+func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
+	if dstKey == "" {
+		return errEmptyKey
+	}
+
+	if fi, err := os.Stat(srcFilename); err != nil {
+		return err
+	} else if fi.IsDir() {
+		return errImportDirectory
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if err := d.ensurePathWithLock(dstKey); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	if move {
+		if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
+			d.bustCacheWithLock(dstKey)
+			return nil
+		} else if err != syscall.EXDEV {
+			// Rename failed for a reason other than the source and
+			// destination being on different devices; report it. An EXDEV
+			// failure falls through to the copy below.
+			return err
+		}
+	}
+
+	f, err := os.Open(srcFilename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	err = d.writeStreamWithLock(dstKey, f, false)
+	if err == nil && move {
+		err = os.Remove(srcFilename)
+	}
+	return err
+}
+
+// Read reads the key and returns the value.
+// If the key is available in the cache, Read won't touch the disk.
+// If the key is not in the cache, Read will have the side-effect of
+// lazily caching the value.
+func (d *Diskv) Read(key string) ([]byte, error) {
+	rc, err := d.ReadStream(key, false)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer rc.Close()
+	return ioutil.ReadAll(rc)
+}
+
+// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
+// If the value is cached from a previous read, and direct is false,
+// ReadStream will use the cached value. Otherwise, it will return a handle to
+// the file on disk, and cache the data on read.
+//
+// If direct is true, ReadStream will lazily delete any cached value for the
+// key, and return a direct handle to the file on disk.
+// +// If compression is enabled, ReadStream taps into the io.Reader stream prior +// to decompression, and caches the compressed data. +func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + if val, ok := d.cache[key]; ok { + if !direct { + buf := bytes.NewBuffer(val) + if d.Compression != nil { + return d.Compression.Reader(buf) + } + return ioutil.NopCloser(buf), nil + } + + go func() { + d.mu.Lock() + defer d.mu.Unlock() + d.uncacheWithLock(key, uint64(len(val))) + }() + } + + return d.readWithRLock(key) +} + +// read ignores the cache, and returns an io.ReadCloser representing the +// decompressed data for the given key, streamed from the disk. Clients should +// acquire a read lock on the Diskv and check the cache themselves before +// calling read. +func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) { + filename := d.completeFilename(key) + + fi, err := os.Stat(filename) + if err != nil { + return nil, err + } + if fi.IsDir() { + return nil, os.ErrNotExist + } + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + + var r io.Reader + if d.CacheSizeMax > 0 { + r = newSiphon(f, d, key) + } else { + r = &closingReader{f} + } + + var rc = io.ReadCloser(ioutil.NopCloser(r)) + if d.Compression != nil { + rc, err = d.Compression.Reader(r) + if err != nil { + return nil, err + } + } + + return rc, nil +} + +// closingReader provides a Reader that automatically closes the +// embedded ReadCloser when it reaches EOF +type closingReader struct { + rc io.ReadCloser +} + +func (cr closingReader) Read(p []byte) (int, error) { + n, err := cr.rc.Read(p) + if err == io.EOF { + if closeErr := cr.rc.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + } + return n, err +} + +// siphon is like a TeeReader: it copies all data read through it to an +// internal buffer, and moves that buffer to the cache at EOF. +type siphon struct { + f *os.File + d *Diskv + key string + buf *bytes.Buffer +} + +// newSiphon constructs a siphoning reader that represents the passed file. +// When a successful series of reads ends in an EOF, the siphon will write +// the buffered data to Diskv's cache under the given key. +func newSiphon(f *os.File, d *Diskv, key string) io.Reader { + return &siphon{ + f: f, + d: d, + key: key, + buf: &bytes.Buffer{}, + } +} + +// Read implements the io.Reader interface for siphon. +func (s *siphon) Read(p []byte) (int, error) { + n, err := s.f.Read(p) + + if err == nil { + return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed + } + + if err == io.EOF { + s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail + if closeErr := s.f.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + return n, err + } + + return n, err +} + +// Erase synchronously erases the given key from the disk and the cache. +func (d *Diskv) Erase(key string) error { + d.mu.Lock() + defer d.mu.Unlock() + + d.bustCacheWithLock(key) + + // erase from index + if d.Index != nil { + d.Index.Delete(key) + } + + // erase from disk + filename := d.completeFilename(key) + if s, err := os.Stat(filename); err == nil { + if s.IsDir() { + return errBadKey + } + if err = os.Remove(filename); err != nil { + return err + } + } else { + // Return err as-is so caller can do os.IsNotExist(err). 
+ return err + } + + // clean up and return + d.pruneDirsWithLock(key) + return nil +} + +// EraseAll will delete all of the data from the store, both in the cache and on +// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- +// diskv-related data. Care should be taken to always specify a diskv base +// directory that is exclusively for diskv data. +func (d *Diskv) EraseAll() error { + d.mu.Lock() + defer d.mu.Unlock() + d.cache = make(map[string][]byte) + d.cacheSize = 0 + if d.TempDir != "" { + os.RemoveAll(d.TempDir) // errors ignored + } + return os.RemoveAll(d.BasePath) +} + +// Has returns true if the given key exists. +func (d *Diskv) Has(key string) bool { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.cache[key]; ok { + return true + } + + filename := d.completeFilename(key) + s, err := os.Stat(filename) + if err != nil { + return false + } + if s.IsDir() { + return false + } + + return true +} + +// Keys returns a channel that will yield every key accessible by the store, +// in undefined order. If a cancel channel is provided, closing it will +// terminate and close the keys channel. +func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { + return d.KeysPrefix("", cancel) +} + +// KeysPrefix returns a channel that will yield every key accessible by the +// store with the given prefix, in undefined order. If a cancel channel is +// provided, closing it will terminate and close the keys channel. If the +// provided prefix is the empty string, all keys will be yielded. +func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { + var prepath string + if prefix == "" { + prepath = d.BasePath + } else { + prepath = d.pathFor(prefix) + } + c := make(chan string) + go func() { + filepath.Walk(prepath, walker(c, prefix, cancel)) + close(c) + }() + return c +} + +// walker returns a function which satisfies the filepath.WalkFunc interface. +// It sends every non-directory file entry down the channel c. +func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) { + return nil // "pass" + } + + select { + case c <- info.Name(): + case <-cancel: + return errCanceled + } + + return nil + } +} + +// pathFor returns the absolute path for location on the filesystem where the +// data for the given key will be stored. +func (d *Diskv) pathFor(key string) string { + return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...)) +} + +// ensurePathWithLock is a helper function that generates all necessary +// directories on the filesystem for the given key. +func (d *Diskv) ensurePathWithLock(key string) error { + return os.MkdirAll(d.pathFor(key), d.PathPerm) +} + +// completeFilename returns the absolute path to the file for the given key. +func (d *Diskv) completeFilename(key string) string { + return filepath.Join(d.pathFor(key), key) +} + +// cacheWithLock attempts to cache the given key-value pair in the store's +// cache. It can fail if the value is larger than the cache's maximum size. 
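+// To make room, it may first evict other cached entries in arbitrary order.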
+func (d *Diskv) cacheWithLock(key string, val []byte) error { + valueSize := uint64(len(val)) + if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { + return fmt.Errorf("%s; not caching", err) + } + + // be very strict about memory guarantees + if (d.cacheSize + valueSize) > d.CacheSizeMax { + panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) + } + + d.cache[key] = val + d.cacheSize += valueSize + return nil +} + +// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. +func (d *Diskv) cacheWithoutLock(key string, val []byte) error { + d.mu.Lock() + defer d.mu.Unlock() + return d.cacheWithLock(key, val) +} + +func (d *Diskv) bustCacheWithLock(key string) { + if val, ok := d.cache[key]; ok { + d.uncacheWithLock(key, uint64(len(val))) + } +} + +func (d *Diskv) uncacheWithLock(key string, sz uint64) { + d.cacheSize -= sz + delete(d.cache, key) +} + +// pruneDirsWithLock deletes empty directories in the path walk leading to the +// key k. Typically this function is called after an Erase is made. +func (d *Diskv) pruneDirsWithLock(key string) error { + pathlist := d.Transform(key) + for i := range pathlist { + dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) + + // thanks to Steven Blenkinsop for this snippet + switch fi, err := os.Stat(dir); true { + case err != nil: + return err + case !fi.IsDir(): + panic(fmt.Sprintf("corrupt dirstate at %s", dir)) + } + + nlinks, err := filepath.Glob(filepath.Join(dir, "*")) + if err != nil { + return err + } else if len(nlinks) > 0 { + return nil // has subdirs -- do not prune + } + if err = os.Remove(dir); err != nil { + return err + } + } + + return nil +} + +// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order +// until the cache has at least valueSize bytes available. +func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { + if valueSize > d.CacheSizeMax { + return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) + } + + safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } + + for key, val := range d.cache { + if safe() { + break + } + + d.uncacheWithLock(key, uint64(len(val))) + } + + if !safe() { + panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) + } + + return nil +} + +// nopWriteCloser wraps an io.Writer and provides a no-op Close method to +// satisfy the io.WriteCloser interface. +type nopWriteCloser struct { + io.Writer +} + +func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } +func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go new file mode 100644 index 000000000..96fee5152 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/index.go @@ -0,0 +1,115 @@ +package diskv + +import ( + "sync" + + "github.com/google/btree" +) + +// Index is a generic interface for things that can +// provide an ordered list of keys. +type Index interface { + Initialize(less LessFunction, keys <-chan string) + Insert(key string) + Delete(key string) + Keys(from string, n int) []string +} + +// LessFunction is used to initialize an Index of keys in a specific order. +type LessFunction func(string, string) bool + +// btreeString is a custom data type that satisfies the BTree Less interface, +// making the strings it wraps sortable by the BTree package. 
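+// Each btreeString carries the index's LessFunction so that all items in a
+// tree are compared with the same ordering.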
+type btreeString struct {
+	s string
+	l LessFunction
+}
+
+// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
+func (s btreeString) Less(i btree.Item) bool {
+	return s.l(s.s, i.(btreeString).s)
+}
+
+// BTreeIndex is an implementation of the Index interface using google/btree.
+type BTreeIndex struct {
+	sync.RWMutex
+	LessFunction
+	*btree.BTree
+}
+
+// Initialize populates the BTree with data from the keys channel,
+// according to the passed less function. It's destructive to the BTreeIndex.
+func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
+	i.Lock()
+	defer i.Unlock()
+	i.LessFunction = less
+	i.BTree = rebuild(less, keys)
+}
+
+// Insert inserts the given key (only) into the BTree.
+func (i *BTreeIndex) Insert(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
+}
+
+// Delete removes the given key (only) from the BTree.
+func (i *BTreeIndex) Delete(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
+}
+
+// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
+// Keys will return the first n keys. If the passed 'from' key is non-empty, the
+// first key in the returned slice will be the key that immediately follows the
+// passed key, in key order.
+func (i *BTreeIndex) Keys(from string, n int) []string {
+	i.RLock()
+	defer i.RUnlock()
+
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+
+	if i.BTree.Len() <= 0 {
+		return []string{}
+	}
+
+	btreeFrom := btreeString{s: from, l: i.LessFunction}
+	skipFirst := true
+	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
+		// no such key, so fabricate an always-smallest item
+		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
+		skipFirst = false
+	}
+
+	keys := []string{}
+	iterator := func(i btree.Item) bool {
+		keys = append(keys, i.(btreeString).s)
+		return len(keys) < n
+	}
+	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
+
+	if skipFirst && len(keys) > 0 {
+		keys = keys[1:]
+	}
+
+	return keys
+}
+
+// rebuild does the work of regenerating the index
+// with the given keys.
+func rebuild(less LessFunction, keys <-chan string) *btree.BTree { + tree := btree.New(2) + for key := range keys { + tree.ReplaceOrInsert(btreeString{s: key, l: less}) + } + return tree +} diff --git a/vendor/github.com/rubenv/sql-migrate/.dockerignore b/vendor/github.com/rubenv/sql-migrate/.dockerignore new file mode 100644 index 000000000..42bb921b4 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.dockerignore @@ -0,0 +1,6 @@ +* +!go.mod +!go.sum +!*.go +!sql-migrate/*.go +!sqlparse/*.go diff --git a/vendor/github.com/rubenv/sql-migrate/.gitignore b/vendor/github.com/rubenv/sql-migrate/.gitignore new file mode 100644 index 000000000..9720db0cf --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.gitignore @@ -0,0 +1,9 @@ +.*.swp +*.test +.idea +/vendor/ + +/sql-migrate/test.db +/test.db +.vscode/ +bin/ diff --git a/vendor/github.com/rubenv/sql-migrate/.golangci.yaml b/vendor/github.com/rubenv/sql-migrate/.golangci.yaml new file mode 100644 index 000000000..40d1720e7 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.golangci.yaml @@ -0,0 +1,98 @@ +linters-settings: + gocritic: + disabled-checks: + - ifElseChain + goimports: + local-prefixes: github.com/rubenv/sql-migrate + govet: + enable-all: true + disable: + - fieldalignment + depguard: + list-type: blacklist + include-go-root: true + include-go-std-lib: true + exhaustive: + default-signifies-exhaustive: true + nolintlint: + allow-unused: false + allow-leading-space: false + allow-no-explanation: + - depguard + require-explanation: true + require-specific: true + revive: + enable-all-rules: false + rules: + - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: duplicated-imports + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: identical-branches + - name: imports-blacklist + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: string-format + - name: string-of-int + - name: struct-tag + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unexported-return + - name: superfluous-else + - name: unreachable-code + - name: var-declaration + - name: waitgroup-by-value + - name: unused-receiver + - name: unnecessary-stmt + - name: unused-parameter +run: + tests: true + timeout: 1m +linters: + disable-all: true + enable: + - asciicheck + - depguard + - errcheck + - exhaustive + - gocritic + - gofmt + - gofumpt + - goimports + - govet + - ineffassign + - nolintlint + - revive + - staticcheck + - typecheck + - unused + - whitespace + - errorlint + - gosimple + - unparam +issues: + exclude: + - 'declaration of "err" shadows declaration at' # Allow shadowing of `err` because it's so common + - 'error-strings: error strings should not be capitalized or end with punctuation or a newline' + max-same-issues: 10000 + max-issues-per-linter: 10000 diff --git a/vendor/github.com/rubenv/sql-migrate/Dockerfile b/vendor/github.com/rubenv/sql-migrate/Dockerfile new file mode 100644 index 000000000..238ac714a --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/Dockerfile @@ -0,0 +1,25 @@ +ARG GO_VERSION=1.20.6 +ARG 
ALPINE_VERSION=3.12 + +### Vendor +FROM golang:${GO_VERSION} as vendor +COPY . /project +WORKDIR /project +RUN go mod tidy && go mod vendor + +### Build binary +FROM golang:${GO_VERSION} as build-binary +COPY . /project +COPY --from=vendor /project/vendor /project/vendor +WORKDIR /project +RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 GO111MODULE=on go build \ + -v \ + -mod vendor \ + -o /project/bin/sql-migrate \ + /project/sql-migrate + +### Image +FROM alpine:${ALPINE_VERSION} as image +COPY --from=build-binary /project/bin/sql-migrate /usr/local/bin/sql-migrate +RUN chmod +x /usr/local/bin/sql-migrate +ENTRYPOINT ["sql-migrate"] diff --git a/vendor/github.com/rubenv/sql-migrate/LICENSE b/vendor/github.com/rubenv/sql-migrate/LICENSE new file mode 100644 index 000000000..95e7c9abe --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (C) 2014-2021 by Ruben Vermeersch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/rubenv/sql-migrate/Makefile b/vendor/github.com/rubenv/sql-migrate/Makefile new file mode 100644 index 000000000..e17ae0ad4 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/Makefile @@ -0,0 +1,11 @@ +.PHONY: test lint build + +test: + go test ./... + +lint: + golangci-lint run --fix --config .golangci.yaml + +build: + mkdir -p bin + go build -o ./bin/sql-migrate ./sql-migrate diff --git a/vendor/github.com/rubenv/sql-migrate/README.md b/vendor/github.com/rubenv/sql-migrate/README.md new file mode 100644 index 000000000..dfb73cbf5 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/README.md @@ -0,0 +1,416 @@ +# sql-migrate + +> SQL Schema migration tool for [Go](https://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose). 
+
+[![Test](https://github.com/rubenv/sql-migrate/actions/workflows/test.yml/badge.svg)](https://github.com/rubenv/sql-migrate/actions/workflows/test.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/rubenv/sql-migrate.svg)](https://pkg.go.dev/github.com/rubenv/sql-migrate)
+
+## Features
+
+- Usable as a CLI tool or as a library
+- Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp))
+- Can embed migrations into your application
+- Migrations are defined with SQL for full flexibility
+- Atomic migrations
+- Up/down migrations to allow rollback
+- Supports multiple database types in one project
+- Works great with other libraries such as [sqlx](https://jmoiron.github.io/sqlx/)
+- Supported on go1.13+
+
+## Installation
+
+To install the library and command line program, use the following:
+
+```bash
+go get -v github.com/rubenv/sql-migrate/...
+```
+
+For Go 1.18 and later, use:
+
+```bash
+go install github.com/rubenv/sql-migrate/...@latest
+```
+
+## Usage
+
+### As a standalone tool
+
+```
+$ sql-migrate --help
+usage: sql-migrate [--version] [--help] <command> [<args>]
+
+Available commands are:
+    down      Undo a database migration
+    new       Create a new migration
+    redo      Reapply the last migration
+    status    Show migration status
+    up        Migrates the database to the most recent version available
+```
+
+Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
+
+```yml
+development:
+  dialect: sqlite3
+  datasource: test.db
+  dir: migrations/sqlite3
+
+production:
+  dialect: postgres
+  datasource: dbname=myapp sslmode=disable
+  dir: migrations/postgres
+  table: migrations
+```
+
+(See more examples for different setups [here](test-integration/dbconfig.yml).)
+
+Environment variables can also be expanded in the `datasource` field (via an embedded `os.ExpandEnv` call), which is useful if you don't want to store credentials in the file:
+
+```yml
+production:
+  dialect: postgres
+  datasource: host=prodhost dbname=proddb user=${DB_USER} password=${DB_PASSWORD} sslmode=require
+  dir: migrations
+  table: migrations
+```
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the `-env` flag (defaults to `development`).
+
+Use the `--help` flag in combination with any of the commands to get an overview of its usage:
+
+```
+$ sql-migrate up --help
+Usage: sql-migrate up [options] ...
+
+  Migrates the database to the most recent version available.
+
+Options:
+
+  -config=dbconfig.yml   Configuration file to use.
+  -env="development"     Environment.
+  -limit=0               Limit the number of migrations (0 = unlimited).
+  -version               Run migrate up to a specific version, e.g. the version number of migration 1_initial.sql is 1.
+  -dryrun                Don't apply migrations, just print them.
+```
+
+The `new` command creates a new empty migration template using the following pattern: `<current time>-<name>.sql`.
+
+The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both with the `-limit` and `-version` parameters. Note that `-version` takes priority over `-limit` if both are given.
+
+The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
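+
+As a quick sketch of a typical development loop (the migration name and the
+generated filename below are illustrative, not output from a real run):
+
+```
+$ sql-migrate new add_people    # creates e.g. 20140913081906-add_people.sql
+$ sql-migrate up                # apply it
+$ sql-migrate redo              # unapply and reapply it while iterating
+$ sql-migrate down -limit=1     # undo it again when needed
+```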
+
+Use the `status` command to see the state of the applied migrations:
+
+```bash
+$ sql-migrate status
++---------------+-----------------------------------------+
+|   MIGRATION   |                 APPLIED                 |
++---------------+-----------------------------------------+
+| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
+| 2_record.sql  | no                                      |
++---------------+-----------------------------------------+
+```
+
+#### Running Test Integrations
+
+You can see how to run the different test setups by executing the `.sh` files in [test-integration](test-integration/):
+
+```bash
+# Run the mysql-env.sh example (you need to be in the project root directory)
+
+./test-integration/mysql-env.sh
+```
+
+### MySQL Caveat
+
+If you are using MySQL, you must append `?parseTime=true` to the `datasource` configuration. For example:
+
+```yml
+production:
+  dialect: mysql
+  datasource: root@/dbname?parseTime=true
+  dir: migrations/mysql
+  table: migrations
+```
+
+See [here](https://github.com/go-sql-driver/mysql#parsetime) for more information.
+
+### Oracle (oci8)
+
+The Oracle driver is [oci8](https://github.com/mattn/go-oci8). It is not pure Go code and relies on the Oracle [Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html); more detailed information is in the [oci8 repo](https://github.com/mattn/go-oci8).
+
+#### Install with Oracle support
+
+To install the library and command line program, use the following:
+
+```bash
+go get -tags oracle -v github.com/rubenv/sql-migrate/...
+```
+
+```yml
+development:
+  dialect: oci8
+  datasource: user/password@localhost:1521/sid
+  dir: migrations/oracle
+  table: migrations
+```
+
+### Oracle (godror)
+
+The Oracle driver is [godror](https://github.com/godror/godror). It is not pure Go code and relies on the Oracle [Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html); more detailed information is in the [godror repository](https://github.com/godror/godror).
+
+#### Install with Oracle support
+
+To install the library and command line program, use the following:
+
+1. Install sql-migrate
+
+```bash
+go get -tags godror -v github.com/rubenv/sql-migrate/...
+```
+
+2. Download the Oracle [Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html) (the example below is for macOS; use the same page for other systems)
+
+```bash
+wget https://download.oracle.com/otn_software/mac/instantclient/193000/instantclient-basic-macos.x64-19.3.0.0.0dbru.zip
+```
+
+3.
Configure environment variables `LD_LIBRARY_PATH` + +``` +export LD_LIBRARY_PATH=your_oracle_office_path/instantclient_19_3 +``` + +```yml +development: + dialect: godror + datasource: user/password@localhost:1521/sid + dir: migrations/oracle + table: migrations +``` + +### As a library + +Import sql-migrate into your application: + +```go +import "github.com/rubenv/sql-migrate" +``` + +Set up a source of migrations, this can be from memory, from a set of files, from bindata (more on that later), or from any library that implements [`http.FileSystem`](https://godoc.org/net/http#FileSystem): + +```go +// Hardcoded strings in memory: +migrations := &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + &migrate.Migration{ + Id: "123", + Up: []string{"CREATE TABLE people (id int)"}, + Down: []string{"DROP TABLE people"}, + }, + }, +} + +// OR: Read migrations from a folder: +migrations := &migrate.FileMigrationSource{ + Dir: "db/migrations", +} + +// OR: Use migrations from a packr box +migrations := &migrate.PackrMigrationSource{ + Box: packr.New("migrations", "./migrations"), +} + +// OR: Use pkger which implements `http.FileSystem` +migrationSource := &migrate.HttpFileSystemMigrationSource{ + FileSystem: pkger.Dir("/db/migrations"), +} + +// OR: Use migrations from bindata: +migrations := &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "migrations", +} + +// OR: Read migrations from a `http.FileSystem` +migrationSource := &migrate.HttpFileSystemMigrationSource{ + FileSystem: httpFS, +} +``` + +Then use the `Exec` function to upgrade your database: + +```go +db, err := sql.Open("sqlite3", filename) +if err != nil { + // Handle errors! +} + +n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up) +if err != nil { + // Handle errors! +} +fmt.Printf("Applied %d migrations!\n", n) +``` + +Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails. + +Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation. + +## Writing migrations + +Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations. + +```sql +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied +CREATE TABLE people (id int); + + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back +DROP TABLE people; +``` + +You can put multiple statements in each block, as long as you end them with a semicolon (`;`). + +You can alternatively set up a separator string that matches an entire line by setting `sqlparse.LineSeparator`. This +can be used to imitate, for example, MS SQL Query Analyzer functionality where commands can be separated by a line with +contents of `GO`. If `sqlparse.LineSeparator` is matched, it will not be included in the resulting migration scripts. 
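+
+For instance, a minimal sketch (assuming your program imports the `sqlparse` subpackage directly) that treats a line consisting solely of `GO` as a separator:
+
+```go
+import "github.com/rubenv/sql-migrate/sqlparse"
+
+func init() {
+	// Any line whose entire contents are "GO" now separates statements.
+	sqlparse.LineSeparator = "GO"
+}
+```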
+ +If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries: + +```sql +-- +migrate Up +CREATE TABLE people (id int); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION do_something() +returns void AS $$ +DECLARE + create_query text; +BEGIN + -- Do something here +END; +$$ +language plpgsql; +-- +migrate StatementEnd + +-- +migrate Down +DROP FUNCTION do_something(); +DROP TABLE people; +``` + +The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename. + +Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the `notransaction` option: + +```sql +-- +migrate Up notransaction +CREATE UNIQUE INDEX CONCURRENTLY people_unique_id_idx ON people (id); + +-- +migrate Down +DROP INDEX people_unique_id_idx; +``` + +## Embedding migrations with [packr](https://github.com/gobuffalo/packr) + +If you like your Go applications self-contained (that is: a single binary): use [packr](https://github.com/gobuffalo/packr) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Import the packr package into your application: + +```go +import "github.com/gobuffalo/packr/v2" +``` + +Use the `PackrMigrationSource` in your application to find the migrations: + +```go +migrations := &migrate.PackrMigrationSource{ + Box: packr.New("migrations", "./migrations"), +} +``` + +If you already have a box and would like to use a subdirectory: + +```go +migrations := &migrate.PackrMigrationSource{ + Box: myBox, + Dir: "./migrations", +} +``` + +## Embedding migrations with [bindata](https://github.com/shuLhan/go-bindata) + +As an alternative, but slightly less maintained, you can use [bindata](https://github.com/shuLhan/go-bindata) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Then use bindata to generate a `.go` file with the migrations embedded: + +```bash +go-bindata -pkg myapp -o bindata.go db/migrations/ +``` + +The resulting `bindata.go` file will contain your migrations. Remember to regenerate your `bindata.go` file whenever you add/modify a migration (`go generate` will help here, once it arrives). + +Use the `AssetMigrationSource` in your application to find the migrations: + +```go +migrations := &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "db/migrations", +} +``` + +Both `Asset` and `AssetDir` are functions provided by bindata. + +Then proceed as usual. + +## Embedding migrations with libraries that implement `http.FileSystem` + +You can also embed migrations with any library that implements `http.FileSystem`, like [`vfsgen`](https://github.com/shurcooL/vfsgen), [`parcello`](https://github.com/phogolabs/parcello), or [`go-resources`](https://github.com/omeid/go-resources). + +```go +migrationSource := &migrate.HttpFileSystemMigrationSource{ + FileSystem: httpFS, +} +``` + +## Extending + +Adding a new migration source means implementing `MigrationSource`. 
+
+```go
+type MigrationSource interface {
+	FindMigrations() ([]*Migration, error)
+}
+```
+
+The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
+
+## Usage with [sqlx](https://jmoiron.github.io/sqlx/)
+
+This library is compatible with sqlx. When calling migrate, just dereference the DB from your `*sqlx.DB`:
+
+```
+n, err := migrate.Exec(db.DB, "sqlite3", migrations, migrate.Up)
+                // ^^^ <-- Here db is a *sqlx.DB, the db.DB field is the plain sql.DB
+if err != nil {
+    // Handle errors!
+}
+```
+
+## Questions or Feedback?
+
+You can use GitHub Issues for feedback or questions.
+
+## License
+
+This library is distributed under the [MIT](LICENSE) license.
diff --git a/vendor/github.com/rubenv/sql-migrate/doc.go b/vendor/github.com/rubenv/sql-migrate/doc.go
new file mode 100644
index 000000000..8ff186d0f
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/doc.go
@@ -0,0 +1,238 @@
+/*
+SQL Schema migration tool for Go.
+
+Key features:
+
+  - Usable as a CLI tool or as a library
+  - Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
+  - Can embed migrations into your application
+  - Migrations are defined with SQL for full flexibility
+  - Atomic migrations
+  - Up/down migrations to allow rollback
+  - Supports multiple database types in one project
+
+# Installation
+
+To install the library and command line program, use the following:
+
+	go get -v github.com/rubenv/sql-migrate/...
+
+# Command-line tool
+
+The main command is called sql-migrate.
+
+	$ sql-migrate --help
+	usage: sql-migrate [--version] [--help] <command> [<args>]
+
+	Available commands are:
+	    down      Undo a database migration
+	    new       Create a new migration
+	    redo      Reapply the last migration
+	    status    Show migration status
+	    up        Migrates the database to the most recent version available
+
+Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
+
+	development:
+	    dialect: sqlite3
+	    datasource: test.db
+	    dir: migrations/sqlite3
+
+	production:
+	    dialect: postgres
+	    datasource: dbname=myapp sslmode=disable
+	    dir: migrations/postgres
+	    table: migrations
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the -env flag (defaults to development).
+
+Use the --help flag in combination with any of the commands to get an overview of its usage:
+
+	$ sql-migrate up --help
+	Usage: sql-migrate up [options] ...
+
+	  Migrates the database to the most recent version available.
+
+	Options:
+
+	  -config=dbconfig.yml   Configuration file to use.
+	  -env="development"     Environment.
+	  -limit=0               Limit the number of migrations (0 = unlimited).
+	  -dryrun                Don't apply migrations, just print them.
+
+The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter.
+
+The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
+ +Use the status command to see the state of the applied migrations: + + $ sql-migrate status + +---------------+-----------------------------------------+ + | MIGRATION | APPLIED | + +---------------+-----------------------------------------+ + | 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC | + | 2_record.sql | no | + +---------------+-----------------------------------------+ + +# MySQL Caveat + +If you are using MySQL, you must append ?parseTime=true to the datasource configuration. For example: + + production: + dialect: mysql + datasource: root@/dbname?parseTime=true + dir: migrations/mysql + table: migrations + +See https://github.com/go-sql-driver/mysql#parsetime for more information. + +# Library + +Import sql-migrate into your application: + + import "github.com/rubenv/sql-migrate" + +Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later): + + // Hardcoded strings in memory: + migrations := &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + &migrate.Migration{ + Id: "123", + Up: []string{"CREATE TABLE people (id int)"}, + Down: []string{"DROP TABLE people"}, + }, + }, + } + + // OR: Read migrations from a folder: + migrations := &migrate.FileMigrationSource{ + Dir: "db/migrations", + } + + // OR: Use migrations from bindata: + migrations := &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "migrations", + } + +Then use the Exec function to upgrade your database: + + db, err := sql.Open("sqlite3", filename) + if err != nil { + // Handle errors! + } + + n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up) + if err != nil { + // Handle errors! + } + fmt.Printf("Applied %d migrations!\n", n) + +Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails. + +The full set of capabilities can be found in the API docs below. + +# Writing migrations + +Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations. + + -- +migrate Up + -- SQL in section 'Up' is executed when this migration is applied + CREATE TABLE people (id int); + + + -- +migrate Down + -- SQL section 'Down' is executed when this migration is rolled back + DROP TABLE people; + +You can put multiple statements in each block, as long as you end them with a semicolon (;). + +If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries: + + -- +migrate Up + CREATE TABLE people (id int); + + -- +migrate StatementBegin + CREATE OR REPLACE FUNCTION do_something() + returns void AS $$ + DECLARE + create_query text; + BEGIN + -- Do something here + END; + $$ + language plpgsql; + -- +migrate StatementEnd + + -- +migrate Down + DROP FUNCTION do_something(); + DROP TABLE people; + +The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename. + +Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. 
In order to execute such a command in a migration, the migration can be run using the notransaction option: + + -- +migrate Up notransaction + CREATE UNIQUE INDEX people_unique_id_idx CONCURRENTLY ON people (id); + + -- +migrate Down + DROP INDEX people_unique_id_idx; + +# Embedding migrations with packr + +If you like your Go applications self-contained (that is: a single binary): use packr (https://github.com/gobuffalo/packr) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Use the PackrMigrationSource in your application to find the migrations: + + migrations := &migrate.PackrMigrationSource{ + Box: packr.NewBox("./migrations"), + } + +If you already have a box and would like to use a subdirectory: + + migrations := &migrate.PackrMigrationSource{ + Box: myBox, + Dir: "./migrations", + } + +# Embedding migrations with bindata + +As an alternative, but slightly less maintained, you can use bindata (https://github.com/shuLhan/go-bindata) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Then use bindata to generate a .go file with the migrations embedded: + + go-bindata -pkg myapp -o bindata.go db/migrations/ + +The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives). + +Use the AssetMigrationSource in your application to find the migrations: + + migrations := &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "db/migrations", + } + +Both Asset and AssetDir are functions provided by bindata. + +Then proceed as usual. + +# Extending + +Adding a new migration source means implementing MigrationSource. + + type MigrationSource interface { + FindMigrations() ([]*Migration, error) + } + +The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field. +*/ +package migrate diff --git a/vendor/github.com/rubenv/sql-migrate/migrate.go b/vendor/github.com/rubenv/sql-migrate/migrate.go new file mode 100644 index 000000000..0974eb612 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/migrate.go @@ -0,0 +1,877 @@ +package migrate + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "io" + "net/http" + "os" + "path" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/go-gorp/gorp/v3" + + "github.com/rubenv/sql-migrate/sqlparse" +) + +type MigrationDirection int + +const ( + Up MigrationDirection = iota + Down +) + +// MigrationSet provides database parameters for a migration execution +type MigrationSet struct { + // TableName name of the table used to store migration info. + TableName string + // SchemaName schema that the migration table be referenced. + SchemaName string + // IgnoreUnknown skips the check to see if there is a migration + // ran in the database that is not in MigrationSource. + // + // This should be used sparingly as it is removing a safety check. 
+ IgnoreUnknown bool + // DisableCreateTable disable the creation of the migration table + DisableCreateTable bool +} + +var migSet = MigrationSet{} + +// NewMigrationSet returns a parametrized Migration object +func (ms MigrationSet) getTableName() string { + if ms.TableName == "" { + return "gorp_migrations" + } + return ms.TableName +} + +var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`) + +// PlanError happens where no migration plan could be created between the sets +// of already applied migrations and the currently found. For example, when the database +// contains a migration which is not among the migrations list found for an operation. +type PlanError struct { + Migration *Migration + ErrorMessage string +} + +func newPlanError(migration *Migration, errorMessage string) error { + return &PlanError{ + Migration: migration, + ErrorMessage: errorMessage, + } +} + +func (p *PlanError) Error() string { + return fmt.Sprintf("Unable to create migration plan because of %s: %s", + p.Migration.Id, p.ErrorMessage) +} + +// TxError is returned when any error is encountered during a database +// transaction. It contains the relevant *Migration and notes it's Id in the +// Error function output. +type TxError struct { + Migration *Migration + Err error +} + +func newTxError(migration *PlannedMigration, err error) error { + return &TxError{ + Migration: migration.Migration, + Err: err, + } +} + +func (e *TxError) Error() string { + return e.Err.Error() + " handling " + e.Migration.Id +} + +// Set the name of the table used to store migration info. +// +// Should be called before any other call such as (Exec, ExecMax, ...). +func SetTable(name string) { + if name != "" { + migSet.TableName = name + } +} + +// SetSchema sets the name of a schema that the migration table be referenced. +func SetSchema(name string) { + if name != "" { + migSet.SchemaName = name + } +} + +// SetDisableCreateTable sets the boolean to disable the creation of the migration table +func SetDisableCreateTable(disable bool) { + migSet.DisableCreateTable = disable +} + +// SetIgnoreUnknown sets the flag that skips database check to see if there is a +// migration in the database that is not in migration source. +// +// This should be used sparingly as it is removing a safety check. 
+func SetIgnoreUnknown(v bool) { + migSet.IgnoreUnknown = v +} + +type Migration struct { + Id string + Up []string + Down []string + + DisableTransactionUp bool + DisableTransactionDown bool +} + +func (m Migration) Less(other *Migration) bool { + switch { + case m.isNumeric() && other.isNumeric() && m.VersionInt() != other.VersionInt(): + return m.VersionInt() < other.VersionInt() + case m.isNumeric() && !other.isNumeric(): + return true + case !m.isNumeric() && other.isNumeric(): + return false + default: + return m.Id < other.Id + } +} + +func (m Migration) isNumeric() bool { + return len(m.NumberPrefixMatches()) > 0 +} + +func (m Migration) NumberPrefixMatches() []string { + return numberPrefixRegex.FindStringSubmatch(m.Id) +} + +func (m Migration) VersionInt() int64 { + v := m.NumberPrefixMatches()[1] + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err)) + } + return value +} + +type PlannedMigration struct { + *Migration + + DisableTransaction bool + Queries []string +} + +type byId []*Migration + +func (b byId) Len() int { return len(b) } +func (b byId) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) } + +type MigrationRecord struct { + Id string `db:"id"` + AppliedAt time.Time `db:"applied_at"` +} + +type OracleDialect struct { + gorp.OracleDialect +} + +func (OracleDialect) IfTableNotExists(command, _, _ string) string { + return command +} + +func (OracleDialect) IfSchemaNotExists(command, _ string) string { + return command +} + +func (OracleDialect) IfTableExists(command, _, _ string) string { + return command +} + +var MigrationDialects = map[string]gorp.Dialect{ + "sqlite3": gorp.SqliteDialect{}, + "postgres": gorp.PostgresDialect{}, + "mysql": gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"}, + "mssql": gorp.SqlServerDialect{}, + "oci8": OracleDialect{}, + "godror": OracleDialect{}, + "snowflake": gorp.SnowflakeDialect{}, +} + +type MigrationSource interface { + // Finds the migrations. + // + // The resulting slice of migrations should be sorted by Id. + FindMigrations() ([]*Migration, error) +} + +// A hardcoded set of migrations, in-memory. +type MemoryMigrationSource struct { + Migrations []*Migration +} + +var _ MigrationSource = (*MemoryMigrationSource)(nil) + +func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) { + // Make sure migrations are sorted. In order to make the MemoryMigrationSource safe for + // concurrent use we should not mutate it in place. So `FindMigrations` would sort a copy + // of the m.Migrations. + migrations := make([]*Migration, len(m.Migrations)) + copy(migrations, m.Migrations) + sort.Sort(byId(migrations)) + return migrations, nil +} + +// A set of migrations loaded from an http.FileServer + +type HttpFileSystemMigrationSource struct { + FileSystem http.FileSystem +} + +var _ MigrationSource = (*HttpFileSystemMigrationSource)(nil) + +func (f HttpFileSystemMigrationSource) FindMigrations() ([]*Migration, error) { + return findMigrations(f.FileSystem, "/") +} + +// A set of migrations loaded from a directory. 
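+// Only files directly inside Dir whose names end in ".sql" are considered.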
+type FileMigrationSource struct { + Dir string +} + +var _ MigrationSource = (*FileMigrationSource)(nil) + +func (f FileMigrationSource) FindMigrations() ([]*Migration, error) { + filesystem := http.Dir(f.Dir) + return findMigrations(filesystem, "/") +} + +func findMigrations(dir http.FileSystem, root string) ([]*Migration, error) { + migrations := make([]*Migration, 0) + + file, err := dir.Open(root) + if err != nil { + return nil, err + } + + files, err := file.Readdir(0) + if err != nil { + return nil, err + } + + for _, info := range files { + if strings.HasSuffix(info.Name(), ".sql") { + migration, err := migrationFromFile(dir, root, info) + if err != nil { + return nil, err + } + + migrations = append(migrations, migration) + } + } + + // Make sure migrations are sorted + sort.Sort(byId(migrations)) + + return migrations, nil +} + +func migrationFromFile(dir http.FileSystem, root string, info os.FileInfo) (*Migration, error) { + path := path.Join(root, info.Name()) + file, err := dir.Open(path) + if err != nil { + return nil, fmt.Errorf("Error while opening %s: %w", info.Name(), err) + } + defer func() { _ = file.Close() }() + + migration, err := ParseMigration(info.Name(), file) + if err != nil { + return nil, fmt.Errorf("Error while parsing %s: %w", info.Name(), err) + } + return migration, nil +} + +// Migrations from a bindata asset set. +type AssetMigrationSource struct { + // Asset should return content of file in path if exists + Asset func(path string) ([]byte, error) + + // AssetDir should return list of files in the path + AssetDir func(path string) ([]string, error) + + // Path in the bindata to use. + Dir string +} + +var _ MigrationSource = (*AssetMigrationSource)(nil) + +func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) { + migrations := make([]*Migration, 0) + + files, err := a.AssetDir(a.Dir) + if err != nil { + return nil, err + } + + for _, name := range files { + if strings.HasSuffix(name, ".sql") { + file, err := a.Asset(path.Join(a.Dir, name)) + if err != nil { + return nil, err + } + + migration, err := ParseMigration(name, bytes.NewReader(file)) + if err != nil { + return nil, err + } + + migrations = append(migrations, migration) + } + } + + // Make sure migrations are sorted + sort.Sort(byId(migrations)) + + return migrations, nil +} + +// Avoids pulling in the packr library for everyone, mimicks the bits of +// packr.Box that we need. +type PackrBox interface { + List() []string + Find(name string) ([]byte, error) +} + +// Migrations from a packr box. +type PackrMigrationSource struct { + Box PackrBox + + // Path in the box to use. + Dir string +} + +var _ MigrationSource = (*PackrMigrationSource)(nil) + +func (p PackrMigrationSource) FindMigrations() ([]*Migration, error) { + migrations := make([]*Migration, 0) + items := p.Box.List() + + prefix := "" + dir := path.Clean(p.Dir) + if dir != "." 
{ + prefix = fmt.Sprintf("%s/", dir) + } + + for _, item := range items { + if !strings.HasPrefix(item, prefix) { + continue + } + name := strings.TrimPrefix(item, prefix) + if strings.Contains(name, "/") { + continue + } + + if strings.HasSuffix(name, ".sql") { + file, err := p.Box.Find(item) + if err != nil { + return nil, err + } + + migration, err := ParseMigration(name, bytes.NewReader(file)) + if err != nil { + return nil, err + } + + migrations = append(migrations, migration) + } + } + + // Make sure migrations are sorted + sort.Sort(byId(migrations)) + + return migrations, nil +} + +// Migration parsing +func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) { + m := &Migration{ + Id: id, + } + + parsed, err := sqlparse.ParseMigration(r) + if err != nil { + return nil, fmt.Errorf("Error parsing migration (%s): %w", id, err) + } + + m.Up = parsed.UpStatements + m.Down = parsed.DownStatements + + m.DisableTransactionUp = parsed.DisableTransactionUp + m.DisableTransactionDown = parsed.DisableTransactionDown + + return m, nil +} + +type SqlExecutor interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Insert(list ...interface{}) error + Delete(list ...interface{}) (int64, error) +} + +// Execute a set of migrations +// +// Returns the number of applied migrations. +func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ExecMaxContext(context.Background(), db, dialect, m, dir, 0) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ms.ExecMaxContext(context.Background(), db, dialect, m, dir, 0) +} + +// Execute a set of migrations with an input context. +// +// Returns the number of applied migrations. +func ExecContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ExecMaxContext(ctx, db, dialect, m, dir, 0) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) ExecContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ms.ExecMaxContext(ctx, db, dialect, m, dir, 0) +} + +// Execute a set of migrations +// +// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec). +// +// Returns the number of applied migrations. +func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + return migSet.ExecMax(db, dialect, m, dir, max) +} + +// Execute a set of migrations with an input context. +// +// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec). +// +// Returns the number of applied migrations. +func ExecMaxContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + return migSet.ExecMaxContext(ctx, db, dialect, m, dir, max) +} + +// Execute a set of migrations +// +// Will apply at the target `version` of migration. Cannot be a negative value. +// +// Returns the number of applied migrations. +func ExecVersion(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) (int, error) { + return ExecVersionContext(context.Background(), db, dialect, m, dir, version) +} + +// Execute a set of migrations with an input context. +// +// Will apply at the target `version` of migration. Cannot be a negative value. +// +// Returns the number of applied migrations. 
+func ExecVersionContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) (int, error) { + if version < 0 { + return 0, fmt.Errorf("target version %d should not be negative", version) + } + return migSet.ExecVersionContext(ctx, db, dialect, m, dir, version) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + return ms.ExecMaxContext(context.Background(), db, dialect, m, dir, max) +} + +// Returns the number of applied migrations, but applies with an input context. +func (ms MigrationSet) ExecMaxContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + migrations, dbMap, err := ms.PlanMigration(db, dialect, m, dir, max) + if err != nil { + return 0, err + } + return ms.applyMigrations(ctx, dir, migrations, dbMap) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) ExecVersion(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) (int, error) { + return ms.ExecVersionContext(context.Background(), db, dialect, m, dir, version) +} + +func (ms MigrationSet) ExecVersionContext(ctx context.Context, db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) (int, error) { + migrations, dbMap, err := ms.PlanMigrationToVersion(db, dialect, m, dir, version) + if err != nil { + return 0, err + } + return ms.applyMigrations(ctx, dir, migrations, dbMap) +} + +// Applies the planned migrations and returns the number of applied migrations. +func (MigrationSet) applyMigrations(ctx context.Context, dir MigrationDirection, migrations []*PlannedMigration, dbMap *gorp.DbMap) (int, error) { + applied := 0 + for _, migration := range migrations { + var executor SqlExecutor + var err error + + if migration.DisableTransaction { + executor = dbMap.WithContext(ctx) + } else { + e, err := dbMap.Begin() + if err != nil { + return applied, newTxError(migration, err) + } + executor = e.WithContext(ctx) + } + + for _, stmt := range migration.Queries { + // remove the semicolon from stmt, fix ORA-00922 issue in database oracle + stmt = strings.TrimSuffix(stmt, "\n") + stmt = strings.TrimSuffix(stmt, " ") + stmt = strings.TrimSuffix(stmt, ";") + if _, err := executor.Exec(stmt); err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + } + + switch dir { + case Up: + err = executor.Insert(&MigrationRecord{ + Id: migration.Id, + AppliedAt: time.Now(), + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + case Down: + _, err := executor.Delete(&MigrationRecord{ + Id: migration.Id, + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + default: + panic("Not possible") + } + + if trans, ok := executor.(*gorp.Transaction); ok { + if err := trans.Commit(); err != nil { + return applied, newTxError(migration, err) + } + } + + applied++ + } + + return applied, nil +} + +// Plan a migration. 
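+// It returns the migrations that would be applied (honoring max), without
+// actually running them.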
+func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { + return migSet.PlanMigration(db, dialect, m, dir, max) +} + +// Plan a migration to version. +func PlanMigrationToVersion(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) ([]*PlannedMigration, *gorp.DbMap, error) { + return migSet.PlanMigrationToVersion(db, dialect, m, dir, version) +} + +// Plan a migration. +func (ms MigrationSet) PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { + return ms.planMigrationCommon(db, dialect, m, dir, max, -1) +} + +// Plan a migration to version. +func (ms MigrationSet) PlanMigrationToVersion(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, version int64) ([]*PlannedMigration, *gorp.DbMap, error) { + return ms.planMigrationCommon(db, dialect, m, dir, 0, version) +} + +// A common method to plan a migration. +func (ms MigrationSet) planMigrationCommon(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int, version int64) ([]*PlannedMigration, *gorp.DbMap, error) { + dbMap, err := ms.getMigrationDbMap(db, dialect) + if err != nil { + return nil, nil, err + } + + migrations, err := m.FindMigrations() + if err != nil { + return nil, nil, err + } + + var migrationRecords []MigrationRecord + _, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", dbMap.Dialect.QuotedTableForQuery(ms.SchemaName, ms.getTableName()))) + if err != nil { + return nil, nil, err + } + + // Sort migrations that have been run by Id. + var existingMigrations []*Migration + for _, migrationRecord := range migrationRecords { + existingMigrations = append(existingMigrations, &Migration{ + Id: migrationRecord.Id, + }) + } + sort.Sort(byId(existingMigrations)) + + // Make sure all migrations in the database are among the found migrations which + // are to be applied. + if !ms.IgnoreUnknown { + migrationsSearch := make(map[string]struct{}) + for _, migration := range migrations { + migrationsSearch[migration.Id] = struct{}{} + } + for _, existingMigration := range existingMigrations { + if _, ok := migrationsSearch[existingMigration.Id]; !ok { + return nil, nil, newPlanError(existingMigration, "unknown migration in database") + } + } + } + + // Get last migration that was run + record := &Migration{} + if len(existingMigrations) > 0 { + record = existingMigrations[len(existingMigrations)-1] + } + + result := make([]*PlannedMigration, 0) + + // Add missing migrations up to the last run migration. + // This can happen for example when merges happened. + if len(existingMigrations) > 0 { + result = append(result, ToCatchup(migrations, existingMigrations, record)...) 
+ } + + // Figure out which migrations to apply + toApply := ToApply(migrations, record.Id, dir) + toApplyCount := len(toApply) + + if version >= 0 { + targetIndex := 0 + for targetIndex < len(toApply) { + tempVersion := toApply[targetIndex].VersionInt() + if dir == Up && tempVersion > version || dir == Down && tempVersion < version { + return nil, nil, newPlanError(&Migration{}, fmt.Errorf("unknown migration with version id %d in database", version).Error()) + } + if tempVersion == version { + toApplyCount = targetIndex + 1 + break + } + targetIndex++ + } + if targetIndex == len(toApply) { + return nil, nil, newPlanError(&Migration{}, fmt.Errorf("unknown migration with version id %d in database", version).Error()) + } + } else if max > 0 && max < toApplyCount { + toApplyCount = max + } + for _, v := range toApply[0:toApplyCount] { + if dir == Up { + result = append(result, &PlannedMigration{ + Migration: v, + Queries: v.Up, + DisableTransaction: v.DisableTransactionUp, + }) + } else if dir == Down { + result = append(result, &PlannedMigration{ + Migration: v, + Queries: v.Down, + DisableTransaction: v.DisableTransactionDown, + }) + } + } + + return result, dbMap, nil +} + +// Skip a set of migrations +// +// Will skip at most `max` migrations. Pass 0 for no limit. +// +// Returns the number of skipped migrations. +func SkipMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max) + if err != nil { + return 0, err + } + + // Skip migrations + applied := 0 + for _, migration := range migrations { + var executor SqlExecutor + + if migration.DisableTransaction { + executor = dbMap + } else { + executor, err = dbMap.Begin() + if err != nil { + return applied, newTxError(migration, err) + } + } + + err = executor.Insert(&MigrationRecord{ + Id: migration.Id, + AppliedAt: time.Now(), + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + + if trans, ok := executor.(*gorp.Transaction); ok { + if err := trans.Commit(); err != nil { + return applied, newTxError(migration, err) + } + } + + applied++ + } + + return applied, nil +} + +// Filter a slice of migrations into ones that should be applied. 
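+//
+// A worked example (sketch): with migrations [1_init, 2_users, 3_index] and
+// current = "2_users", Up returns [3_index]; Down returns the migrations at
+// or before current in reverse order, [2_users, 1_init].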
+func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration { + index := -1 + if current != "" { + for index < len(migrations)-1 { + index++ + if migrations[index].Id == current { + break + } + } + } + + if direction == Up { + return migrations[index+1:] + } else if direction == Down { + if index == -1 { + return []*Migration{} + } + + // Add in reverse order + toApply := make([]*Migration, index+1) + for i := 0; i < index+1; i++ { + toApply[index-i] = migrations[i] + } + return toApply + } + + panic("Not possible") +} + +func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration { + missing := make([]*PlannedMigration, 0) + for _, migration := range migrations { + found := false + for _, existing := range existingMigrations { + if existing.Id == migration.Id { + found = true + break + } + } + if !found && migration.Less(lastRun) { + missing = append(missing, &PlannedMigration{ + Migration: migration, + Queries: migration.Up, + DisableTransaction: migration.DisableTransactionUp, + }) + } + } + return missing +} + +func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { + return migSet.GetMigrationRecords(db, dialect) +} + +func (ms MigrationSet) GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { + dbMap, err := ms.getMigrationDbMap(db, dialect) + if err != nil { + return nil, err + } + + var records []*MigrationRecord + query := fmt.Sprintf("SELECT * FROM %s ORDER BY %s ASC", dbMap.Dialect.QuotedTableForQuery(ms.SchemaName, ms.getTableName()), dbMap.Dialect.QuoteField("id")) + _, err = dbMap.Select(&records, query) + if err != nil { + return nil, err + } + + return records, nil +} + +func (ms MigrationSet) getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) { + d, ok := MigrationDialects[dialect] + if !ok { + return nil, fmt.Errorf("Unknown dialect: %s", dialect) + } + + // When using the mysql driver, make sure that the parseTime option is + // configured, otherwise it won't map time columns to time.Time. See + // https://github.com/rubenv/sql-migrate/issues/2 + if dialect == "mysql" { + var out *time.Time + err := db.QueryRow("SELECT NOW()").Scan(&out) + if err != nil { + if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" || + err.Error() == "sql: Scan error on column index 0: unsupported Scan, storing driver.Value type []uint8 into type *time.Time" || + err.Error() == "sql: Scan error on column index 0, name \"NOW()\": unsupported Scan, storing driver.Value type []uint8 into type *time.Time" { + return nil, errors.New(`Cannot parse dates. + +Make sure that the parseTime option is supplied to your database connection. +Check https://github.com/go-sql-driver/mysql#parsetime for more info.`) + } + return nil, err + } + } + + // Create migration database map + dbMap := &gorp.DbMap{Db: db, Dialect: d} + table := dbMap.AddTableWithNameAndSchema(MigrationRecord{}, ms.SchemaName, ms.getTableName()).SetKeys(false, "Id") + + if dialect == "oci8" || dialect == "godror" { + table.ColMap("Id").SetMaxSize(4000) + } + + if ms.DisableCreateTable { + return dbMap, nil + } + + err := dbMap.CreateTablesIfNotExists() + if err != nil { + // Oracle database does not support `if not exists`, so use `ORA-00955:` error code + // to check if the table exists. 
+ if (dialect == "oci8" || dialect == "godror") && strings.Contains(err.Error(), "ORA-00955:") { + return dbMap, nil + } + return nil, err + } + + return dbMap, nil +} + +// TODO: Run migration + record insert in transaction. diff --git a/vendor/github.com/rubenv/sql-migrate/migrate_go116.go b/vendor/github.com/rubenv/sql-migrate/migrate_go116.go new file mode 100644 index 000000000..a1c94a3d0 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/migrate_go116.go @@ -0,0 +1,23 @@ +//go:build go1.16 +// +build go1.16 + +package migrate + +import ( + "embed" + "net/http" +) + +// A set of migrations loaded from an go1.16 embed.FS + +type EmbedFileSystemMigrationSource struct { + FileSystem embed.FS + + Root string +} + +var _ MigrationSource = (*EmbedFileSystemMigrationSource)(nil) + +func (f EmbedFileSystemMigrationSource) FindMigrations() ([]*Migration, error) { + return findMigrations(http.FS(f.FileSystem), f.Root) +} diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE b/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE new file mode 100644 index 000000000..9c1252513 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (C) 2014-2017 by Ruben Vermeersch +Copyright (C) 2012-2014 by Liam Staskawicz + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md new file mode 100644 index 000000000..fa5341abd --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md @@ -0,0 +1,7 @@ +# SQL migration parser + +Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser. + +## License + +This library is distributed under the [MIT](LICENSE) license. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go new file mode 100644 index 000000000..f04460e6e --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go @@ -0,0 +1,226 @@ +package sqlparse + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +const ( + sqlCmdPrefix = "-- +migrate " + optionNoTransaction = "notransaction" +) + +type ParsedMigration struct { + UpStatements []string + DownStatements []string + + DisableTransactionUp bool + DisableTransactionDown bool +} + +// LineSeparator can be used to split migrations by an exact line match. This line +// will be removed from the output. If left blank, it is not considered. 
It is defaulted +// to blank so you will have to set it manually. +// Use case: in MSSQL, it is convenient to separate commands by GO statements like in +// SQL Query Analyzer. +var LineSeparator = "" + +func errNoTerminator() error { + if len(LineSeparator) == 0 { + return fmt.Errorf(`ERROR: The last statement must be ended by a semicolon or '-- +migrate StatementEnd' marker. + See https://github.com/rubenv/sql-migrate for details.`) + } + + return fmt.Errorf(`ERROR: The last statement must be ended by a semicolon, a line whose contents are %q, or '-- +migrate StatementEnd' marker. + See https://github.com/rubenv/sql-migrate for details.`, LineSeparator) +} + +// Checks the line to see if the line has a statement-ending semicolon +// or if the line contains a double-dash comment. +func endsWithSemicolon(line string) bool { + prev := "" + scanner := bufio.NewScanner(strings.NewReader(line)) + scanner.Split(bufio.ScanWords) + + for scanner.Scan() { + word := scanner.Text() + if strings.HasPrefix(word, "--") { + break + } + prev = word + } + + return strings.HasSuffix(prev, ";") +} + +type migrationDirection int + +const ( + directionNone migrationDirection = iota + directionUp + directionDown +) + +type migrateCommand struct { + Command string + Options []string +} + +func (c *migrateCommand) HasOption(opt string) bool { + for _, specifiedOption := range c.Options { + if specifiedOption == opt { + return true + } + } + + return false +} + +func parseCommand(line string) (*migrateCommand, error) { + cmd := &migrateCommand{} + + if !strings.HasPrefix(line, sqlCmdPrefix) { + return nil, fmt.Errorf("ERROR: not a sql-migrate command") + } + + fields := strings.Fields(line[len(sqlCmdPrefix):]) + if len(fields) == 0 { + return nil, fmt.Errorf(`ERROR: incomplete migration command`) + } + + cmd.Command = fields[0] + + cmd.Options = fields[1:] + + return cmd, nil +} + +// Split the given sql script into individual statements. +// +// The base case is to simply split on semicolons, as these +// naturally terminate a statement. +// +// However, more complex cases like pl/pgsql can have semicolons +// within a statement. For these cases, we provide the explicit annotations +// 'StatementBegin' and 'StatementEnd' to allow the script to +// tell us to ignore semicolons. 
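+//
+// A sketch of an input file using these annotations (hypothetical table and
+// trigger function names):
+//
+//	-- +migrate Up
+//	CREATE TABLE people (id int);
+//
+//	-- +migrate StatementBegin
+//	CREATE FUNCTION emp_stamp() RETURNS trigger AS $emp_stamp$
+//	BEGIN
+//		RETURN NEW;
+//	END;
+//	$emp_stamp$ LANGUAGE plpgsql;
+//	-- +migrate StatementEnd
+//
+//	-- +migrate Down
+//	DROP TABLE people;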
+func ParseMigration(r io.ReadSeeker) (*ParsedMigration, error) { + p := &ParsedMigration{} + + _, err := r.Seek(0, 0) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + scanner := bufio.NewScanner(r) + scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) + + statementEnded := false + ignoreSemicolons := false + currentDirection := directionNone + + for scanner.Scan() { + line := scanner.Text() + // ignore comment except beginning with '-- +' + if strings.HasPrefix(line, "-- ") && !strings.HasPrefix(line, "-- +") { + continue + } + + // handle any migrate-specific commands + if strings.HasPrefix(line, sqlCmdPrefix) { + cmd, err := parseCommand(line) + if err != nil { + return nil, err + } + + switch cmd.Command { + case "Up": + if len(strings.TrimSpace(buf.String())) > 0 { + return nil, errNoTerminator() + } + currentDirection = directionUp + if cmd.HasOption(optionNoTransaction) { + p.DisableTransactionUp = true + } + + case "Down": + if len(strings.TrimSpace(buf.String())) > 0 { + return nil, errNoTerminator() + } + currentDirection = directionDown + if cmd.HasOption(optionNoTransaction) { + p.DisableTransactionDown = true + } + + case "StatementBegin": + if currentDirection != directionNone { + ignoreSemicolons = true + } + + case "StatementEnd": + if currentDirection != directionNone { + statementEnded = ignoreSemicolons + ignoreSemicolons = false + } + } + } + + if currentDirection == directionNone { + continue + } + + isLineSeparator := !ignoreSemicolons && len(LineSeparator) > 0 && line == LineSeparator + + if !isLineSeparator && !strings.HasPrefix(line, "-- +") { + if _, err := buf.WriteString(line + "\n"); err != nil { + return nil, err + } + } + + // Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement + // Lines that end with semicolon that are in a statement block + // do not conclude statement. + if (!ignoreSemicolons && (endsWithSemicolon(line) || isLineSeparator)) || statementEnded { + statementEnded = false + switch currentDirection { + case directionUp: + p.UpStatements = append(p.UpStatements, buf.String()) + + case directionDown: + p.DownStatements = append(p.DownStatements, buf.String()) + + default: + panic("impossible state") + } + + buf.Reset() + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // diagnose likely migration script errors + if ignoreSemicolons { + return nil, fmt.Errorf("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'") + } + + if currentDirection == directionNone { + return nil, fmt.Errorf(`ERROR: no Up/Down annotations found, so no statements were executed. + See https://github.com/rubenv/sql-migrate for details.`) + } + + // allow comment without sql instruction. Example: + // -- +migrate Down + // -- nothing to downgrade! 
+ if len(strings.TrimSpace(buf.String())) > 0 && !strings.HasPrefix(buf.String(), "-- +") { + return nil, errNoTerminator() + } + + return p, nil +} diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore new file mode 100644 index 000000000..75623dccc --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml new file mode 100644 index 000000000..b0b525a5a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.10.x" + - "1.11.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v ./... diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt new file mode 100644 index 000000000..2885af360 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md new file mode 100644 index 000000000..d9c08a22f --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/README.md @@ -0,0 +1,335 @@ +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. 
+ +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: + + go get github.com/russross/blackfriday/v2 + +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday/v2" + +and `go get` without parameters. + +Legacy GOPATH mode is unsupported. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://pkg.go.dev/github.com/russross/blackfriday/v2. + +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://pkg.go.dev/github.com/russross/blackfriday. + + +Usage +----- + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "github.com/russross/blackfriday/v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. 
The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `AutoHeadingIDs` extension is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). 
Just + mark it like this: + + ```go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. + + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ```go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled newlines in the input + translate into line breaks in the output. This extension is off by default. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): + renders output as LaTeX. + +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + +* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. 
+ +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. + + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go new file mode 100644 index 000000000..dcd61e6e3 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/block.go @@ -0,0 +1,1612 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "html" + "regexp" + "strings" + "unicode" +) + +const ( + charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" + escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" +) + +var ( + reBackslashOrAmp = regexp.MustCompile("[\\&]") + reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *Markdown) block(data []byte) { + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed heading: + // + // # Heading 1 + // ## Heading 2 + // ... + // ###### Heading 6 + if p.isPrefixHeading(data) { + data = data[p.prefixHeading(data):] + continue + } + + // block of preformatted HTML: + // + //
+ // <div> + // ... + // </div>
+ if data[0] == '<' { + if i := p.html(data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.extensions&Titleblock != 0 { + if data[0] == '%' { + if i := p.titleBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.addBlock(HorizontalRule, nil) + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(data, ListTypeOrdered):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(data, ListTypeDefinition):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headings, too + data = data[p.paragraph(data):] + } + + p.nesting-- +} + +func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { + p.closeUnmatchedBlocks() + container := p.addChild(typ, 0) + container.content = content + return container +} + +func (p *Markdown) isPrefixHeading(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.extensions&SpaceHeadings != 0 { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + if level == len(data) || data[level] != ' ' { + return false + } + } + return true +} + +func (p *Markdown) prefixHeading(data []byte) int { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + 
} + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + block := p.addBlock(Heading, data[i:end]) + block.HeadingID = id + block.Level = level + } + return skip +} + +func (p *Markdown) isUnderlinedHeading(data []byte) int { + // test of level 1 heading + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 1 + } + return 0 + } + + // test of level 2 heading + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 2 + } + return 0 + } + + return 0 +} + +func (p *Markdown) titleBlock(data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := p.addBlock(Heading, data) + block.Level = 1 + block.IsTitleblock = true + + return consumed +} + +func (p *Markdown) html(data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + + return i +} + +func finalizeHTMLBlock(block *Node) { + block.Literal = block.content + block.content = nil +} + +// HTML comment, lax form +func (p *Markdown) htmlComment(data []byte, doRender bool) int { + i := p.inlineHTMLComment(data) + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + block := p.addBlock(HTMLBlock, data[:end]) + finalizeHTMLBlock(block) + } + return size + } + return 0 +} + +// HR, which is the only self-closing block tag considered +func (p *Markdown) htmlHr(data []byte, doRender bool) int { + if len(data) < 4 { + return 0 + } + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + i := 3 + for i < len(data) && data[i] != '>' && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + return size + } + } + return 0 +} + +func (p *Markdown) htmlFindTag(data []byte) (string, bool) { + i := 0 + for i < len(data) && isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *Markdown) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + if tag == "hr" { + return 2 + } + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.extensions&LaxHTMLBlocks != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*Markdown) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + if i < len(data) && data[i] == '\n' { + i++ + } + return i +} + +func (*Markdown) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for i < len(data) && data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If info is not nil, it gets set to the syntax specified in the fence line. +func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
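+ // Parse the optional info string (e.g. the language tag after the fence):
+ // either a brace-delimited {...} block or the rest of the fence line,
+ // trimmed of surrounding whitespace.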
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + i++ + i = skipChar(data, i, ' ') + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + if i == len(data) { + return i, marker + } + if i > len(data) || data[i] != '\n' { + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { + var info string + beg, marker := isFenceLine(data, &info, "") + if beg == 0 || beg >= len(data) { + return 0 + } + fenceLength := beg - 1 + + var work bytes.Buffer + work.Write([]byte(info)) + work.WriteByte('\n') + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := isFenceLine(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
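+ // (if so, the fence is unterminated: report zero bytes consumed so the
+ // text falls through to the other block handlers)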
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = true + block.FenceLength = fenceLength + finalizeCodeBlock(block) + } + + return beg +} + +func unescapeChar(str []byte) []byte { + if str[0] == '\\' { + return []byte{str[1]} + } + return []byte(html.UnescapeString(string(str))) +} + +func unescapeString(str []byte) []byte { + if reBackslashOrAmp.Match(str) { + return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) + } + return str +} + +func finalizeCodeBlock(block *Node) { + if block.IsFenced { + newlinePos := bytes.IndexByte(block.content, '\n') + firstLine := block.content[:newlinePos] + rest := block.content[newlinePos+1:] + block.Info = unescapeString(bytes.Trim(firstLine, "\n")) + block.Literal = rest + } else { + block.Literal = block.content + } + block.content = nil +} + +func (p *Markdown) table(data []byte) int { + table := p.addBlock(Table, nil) + i, columns := p.tableHeader(data) + if i == 0 { + p.tip = table.Parent + table.Unlink() + return 0 + } + + p.addBlock(TableBody, nil) + + for i < len(data) { + pipes, rowStart := 0, i + for ; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + if i < len(data) && data[i] == '\n' { + i++ + } + p.tableRow(data[rowStart:i], columns, false) + } + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { + i := 0 + colCount := 1 + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + j := i + if j < len(data) && data[j] == '\n' { + j++ + } + header := data[:j] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]CellAlignFlags, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for i < len(data) && data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TableAlignmentLeft + dashes++ + } + for i < len(data) && data[i] == '-' { + i++ + dashes++ + } + if i < len(data) && data[i] == ':' { + i++ + columns[col] |= TableAlignmentRight + dashes++ + } + for i < len(data) && data[i] == ' ' { + i++ + } + if i == len(data) { + return + } + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for i < len(data) && data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && i < len(data) && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.addBlock(TableHead, nil) + p.tableRow(header, columns, true) + size = i + if size < len(data) && data[size] == '\n' { + size++ + } + return +} + +func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { + p.addBlock(TableRow, nil) + i, col := 0, 0 + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for i < len(data) && data[i] == ' ' { + i++ + } + + cellStart := i + + for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { + cellEnd-- + } + + cell := p.addBlock(TableCell, data[cellStart:cellEnd]) + cell.IsHeader = header + cell.Align = columns[col] + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + cell := p.addBlock(TableCell, nil) + cell.IsHeader = header + cell.Align = columns[col] + } + + // silently ignore rows with too many cells +} + +// returns blockquote prefix length +func (p *Markdown) quotePrefix(data []byte) int { + i := 0 + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + if i < len(data) && data[i] == '>' { + if i+1 < len(data) && data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *Markdown) quote(data []byte) int { + block := p.addBlock(BlockQuote, nil) + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + if end < len(data) && data[end] == '\n' { + end++ + } + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + p.block(raw.Bytes()) + p.finalize(block) + return end +} + +// returns prefix length for block code +func (p *Markdown) codePrefix(data []byte) int { + if len(data) >= 1 && data[0] == '\t' { + return 1 + } + if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *Markdown) code(data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for i < len(data) && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '\n' { + i++ + } + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = false + finalizeCodeBlock(block) + + return i +} + +// returns unordered list item prefix +func (p *Markdown) uliPrefix(data []byte) int { + i := 0 + // start with up to 3 spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data)-1 { + return 0 + } + // need one of {'*', '+', '-'} followed by a space or a tab + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + (data[i+1] != ' ' && data[i+1] != '\t') { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *Markdown) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for i < len(data) && data[i] >= '0' && data[i] <= '9' { + i++ + } + if start == i || i >= len(data)-1 { + return 0 + } + + // we need >= 1 digits followed by a dot and a space or a tab + if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *Markdown) dliPrefix(data []byte) int { + if len(data) < 2 { + return 0 + } + i := 0 + // need a ':' followed by a space or a tab + if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + for i < len(data) && data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *Markdown) list(data []byte, flags ListType) int { + i := 0 + flags |= ListItemBeginningOfList + block := p.addBlock(List, nil) + block.ListFlags = flags + block.Tight = true + + for i < len(data) { + skip := p.listItem(data[i:], &flags) + if flags&ListItemContainsBlock != 0 { + block.ListData.Tight = false + } + i += skip + if skip == 0 || flags&ListItemEndOfList != 0 { + break + } + flags &= ^ListItemBeginningOfList + } + + above := block.Parent + finalizeList(block) + p.tip = above + return i +} + +// Returns true if the list item is not the same type as its parent list +func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { + if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { + return true + } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { + return true + } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { + return true + } + return false +} + +// Returns true if block ends with a blank line, descending if needed +// into lists and sublists. +func endsWithBlankLine(block *Node) bool { + // TODO: figure this out. Always false now. + for block != nil { + //if block.lastLineBlank { + //return true + //} + t := block.Type + if t == List || t == Item { + block = block.LastChild + } else { + break + } + } + return false +} + +func finalizeList(block *Node) { + block.open = false + item := block.FirstChild + for item != nil { + // check for non-final list item ending with blank line: + if endsWithBlankLine(item) && item.Next != nil { + block.ListData.Tight = false + break + } + // recurse into children of list item, to see if there are spaces + // between any of them: + subItem := item.FirstChild + for subItem != nil { + if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { + block.ListData.Tight = false + break + } + subItem = subItem.Next + } + item = item.Next + } +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
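+// Returns the number of input bytes consumed; 0 means data does not start
+// with a list item (unless we are inside a definition list, where a plain
+// line is treated as a definition term).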
+func (p *Markdown) listItem(data []byte, flags *ListType) int { + // keep track of the indentation of the first line + itemIndent := 0 + if data[0] == '\t' { + itemIndent += 4 + } else { + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + } + + var bulletChar byte = '*' + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } else { + bulletChar = data[i-2] + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^ListTypeTerm + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&ListTypeDefinition != 0 { + *flags |= ListTypeTerm + } else { + return 0 + } + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + indentIndex := 0 + if data[line] == '\t' { + indentIndex++ + indent += 4 + } else { + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + indentIndex++ + } + } + + chunk := data[line+indentIndex : i] + + if p.extensions&FencedCode != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indentIndex : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + // to be a nested list, it must be indented more + // if not, it is either a different kind of list + // or the next item in the same list + if indent <= itemIndent { + if p.listTypeChanged(chunk, flags) { + *flags |= ListItemEndOfList + } else if containsBlankLine { + *flags |= ListItemContainsBlock + } + + break gatherlines + } + + if containsBlankLine { + *flags |= ListItemContainsBlock + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix heading? + case p.isPrefixHeading(chunk): + // if the heading is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= ListItemEndOfList + break gatherlines + } + *flags |= ListItemContainsBlock + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&ListTypeDefinition != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
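+ // (peek past the blank run: if neither the following line nor the first
+ // non-blank line after it starts with ':', the definition list is over)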
+ next := i + for next < len(data) && data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= ListItemEndOfList + } + } else { + *flags |= ListItemEndOfList + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= ListItemContainsBlock + } + + // if this line was preceded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indentIndex : i]) + + line = i + } + + rawBytes := raw.Bytes() + + block := p.addBlock(Item, nil) + block.ListFlags = *flags + block.Tight = false + block.BulletChar = bulletChar + block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + + // render the contents of the list item + if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(rawBytes[:sublist]) + p.block(rawBytes[sublist:]) + } else { + p.block(rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + child := p.addChild(Paragraph, 0) + child.content = rawBytes[:sublist] + p.block(rawBytes[sublist:]) + } else { + child := p.addChild(Paragraph, 0) + child.content = rawBytes + } + } + return line +} + +// render a single paragraph that has already been parsed out +func (p *Markdown) renderParagraph(data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + end := len(data) + // trim trailing newline + if data[len(data)-1] == '\n' { + end-- + } + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + p.addBlock(Paragraph, data[beg:end]) +} + +func (p *Markdown) paragraph(data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + tabSize := TabSizeDefault + if p.extensions&TabSizeEight != 0 { + tabSize = TabSizeDouble + } + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a reference or a footnote? If so, end a paragraph + // preceding it and report that we have consumed up to the end of that + // reference: + if refEnd := isReference(p, current, tabSize); refEnd > 0 { + p.renderParagraph(data[:i]) + return i + refEnd + } + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.extensions&DefinitionLists != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(data[prev:], ListTypeDefinition) + } + } + + p.renderParagraph(data[:i]) + return i + n + } + + // an underline under some text marks a heading, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeading(current); level > 0 { + // render the paragraph + p.renderParagraph(data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + id := "" + if p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + block := p.addBlock(Heading, data[prev:eol]) + block.Level = level + block.HeadingID = id + + // find the end of the underline + for i < len(data) && data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.extensions&LaxHTMLBlocks != 0 { + if data[i] == '<' && p.html(current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a prefixed heading or a horizontal rule after this, paragraph is over + if p.isPrefixHeading(current) || p.isHRule(current) { + p.renderParagraph(data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.extensions&FencedCode != 0 { + if p.fencedCodeBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(current) != 0 { + ret := p.list(data[prev:], ListTypeDefinition) + return ret + } + } + + // if there's a list after this, paragraph is over + if p.extensions&NoEmptyLineBeforeBlock != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + nl := bytes.IndexByte(data[i:], '\n') + if nl >= 0 { + i += nl + 1 + } else { + i += len(data[i:]) + } + } + + p.renderParagraph(data[:i]) + return i +} + +func skipChar(data []byte, start int, char byte) int { + i := start + for i < len(data) && data[i] == char { + i++ + } + return i +} + +func skipUntilChar(text []byte, start int, char byte) int { + i := start + for i < len(text) && text[i] != char { + i++ + } + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go new file mode 100644 index 000000000..57ff152a0 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/doc.go @@ -0,0 +1,46 @@ +// Package blackfriday is a markdown processor. 
+//
+// It translates plain text with simple formatting rules into an AST, which can
+// then be further processed to HTML (provided by Blackfriday itself) or other
+// formats (provided by the community).
+//
+// The simplest way to invoke Blackfriday is to call the Run function. It will
+// take a text input and produce a text output in HTML (or other format).
+//
+// A slightly more sophisticated way to use Blackfriday is to create a Markdown
+// processor and to call Parse, which returns a syntax tree for the input
+// document. You can leverage Blackfriday's parsing for content extraction from
+// markdown documents. You can assign a custom renderer and set various options
+// to the Markdown processor.
+//
+// If you're interested in calling Blackfriday from command line, see
+// https://github.com/russross/blackfriday-tool.
+//
+// Sanitized Anchor Names
+//
+// Blackfriday includes an algorithm for creating sanitized anchor names
+// corresponding to a given input text. This algorithm is used to create
+// anchors for headings when the AutoHeadingIDs extension is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character,
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
+//       github.com/shurcooL/sanitized_anchor_name.
+//       Otherwise, users of sanitized_anchor_name will get anchor names
+//       that are incompatible with those generated by blackfriday.
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go new file mode 100644 index 000000000..a2c3edb69 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/entities.go @@ -0,0 +1,2236 @@ +package blackfriday + +// Extracted from https://html.spec.whatwg.org/multipage/entities.json +var entities = map[string]bool{ + "Æ": true, + "Æ": true, + "&": true, + "&": true, + "Á": true, + "Á": true, + "Ă": true, + "Â": true, + "Â": true, + "А": true, + "𝔄": true, + "À": true, + "À": true, + "Α": true, + "Ā": true, + "⩓": true, + "Ą": true, + "𝔸": true, + "⁡": true, + "Å": true, + "Å": true, + "𝒜": true, + "≔": true, + "Ã": true, + "Ã": true, + "Ä": true, + "Ä": true, + "∖": true, + "⫧": true, + "⌆": true, + "Б": true, + "∵": true, + "ℬ": true, + "Β": true, + "𝔅": true, + "𝔹": true, + "˘": true, + "ℬ": true, + "≎": true, + "Ч": true, + "©": true, + "©": true, + "Ć": true, + "⋒": true, + "ⅅ": true, + "ℭ": true, + "Č": true, + "Ç": true, + "Ç": true, + "Ĉ": true, + "∰": true, + "Ċ": true, + "¸": true, + "·": true, + "ℭ": true, + "Χ": true, + "⊙": true, + "⊖": true, + "⊕": true, + "⊗": true, + "∲": true, + "”": true, + "’": true, + "∷": true, + "⩴": true, + "≡": true, + "∯": true, + "∮": true, + "ℂ": true, + "∐": true, + "∳": true, + "⨯": true, + "𝒞": true, + "⋓": true, + "≍": true, + "ⅅ": true, + "⤑": true, + "Ђ": true, + "Ѕ": true, + "Џ": true, + "‡": true, + "↡": true, + "⫤": true, + "Ď": true, + "Д": true, + "∇": true, + "Δ": true, + "𝔇": true, + "´": true, + "˙": true, + "˝": true, + "`": true, + "˜": true, + "⋄": true, + "ⅆ": true, + "𝔻": true, + "¨": true, + "⃜": true, + "≐": true, + "∯": true, + "¨": true, + "⇓": true, + "⇐": true, + "⇔": true, + "⫤": true, + "⟸": true, + "⟺": true, + "⟹": true, + "⇒": true, + "⊨": true, + "⇑": true, + "⇕": true, + "∥": true, + "↓": true, + "⤓": true, + "⇵": true, + "̑": true, + "⥐": true, + "⥞": true, + "↽": true, + "⥖": true, + "⥟": true, + "⇁": true, + "⥗": true, + "⊤": true, + "↧": true, + "⇓": true, + "𝒟": true, + "Đ": true, + "Ŋ": true, + "Ð": true, + "Ð": true, + "É": true, + "É": true, + "Ě": true, + "Ê": true, + "Ê": true, + "Э": true, + "Ė": true, + "𝔈": true, + "È": true, + "È": true, + "∈": true, + "Ē": true, + "◻": true, + "▫": true, + "Ę": true, + "𝔼": true, + "Ε": true, + "⩵": true, + "≂": true, + "⇌": true, + "ℰ": true, + "⩳": true, + "Η": true, + "Ë": true, + "Ë": true, + "∃": true, + "ⅇ": true, + "Ф": true, + "𝔉": true, + "◼": true, + "▪": true, + "𝔽": true, + "∀": true, + "ℱ": true, + "ℱ": true, + "Ѓ": true, + ">": true, + ">": true, + "Γ": true, + "Ϝ": true, + "Ğ": true, + "Ģ": true, + "Ĝ": true, + "Г": true, + "Ġ": true, + "𝔊": true, + "⋙": true, + "𝔾": true, + "≥": true, + "⋛": true, + "≧": true, + "⪢": true, + "≷": true, + "⩾": true, + "≳": true, + "𝒢": true, + "≫": true, + "Ъ": true, + "ˇ": true, + "^": true, + "Ĥ": true, + "ℌ": true, + "ℋ": true, + "ℍ": true, + "─": true, + "ℋ": true, + "Ħ": true, + "≎": true, + "≏": true, + "Е": true, + "IJ": true, + "Ё": true, + "Í": true, + "Í": true, + "Î": true, + "Î": true, + "И": true, + "İ": true, + "ℑ": true, + "Ì": true, + "Ì": true, + "ℑ": true, + "Ī": true, + "ⅈ": true, + "⇒": true, + "∬": true, + "∫": true, + "⋂": true, + "⁣": true, + "⁢": true, + "Į": true, + "𝕀": true, + "Ι": true, + "ℐ": true, + "Ĩ": true, + "І": true, + "Ï": true, + "Ï": true, + "Ĵ": true, + "Й": true, + "𝔍": true, + "𝕁": true, + "𝒥": true, + "Ј": true, + "Є": true, + "Х": true, + "Ќ": true, + "Κ": true, + "Ķ": true, + "К": true, + "𝔎": true, + 
"𝕂": true, + "𝒦": true, + "Љ": true, + "<": true, + "<": true, + "Ĺ": true, + "Λ": true, + "⟪": true, + "ℒ": true, + "↞": true, + "Ľ": true, + "Ļ": true, + "Л": true, + "⟨": true, + "←": true, + "⇤": true, + "⇆": true, + "⌈": true, + "⟦": true, + "⥡": true, + "⇃": true, + "⥙": true, + "⌊": true, + "↔": true, + "⥎": true, + "⊣": true, + "↤": true, + "⥚": true, + "⊲": true, + "⧏": true, + "⊴": true, + "⥑": true, + "⥠": true, + "↿": true, + "⥘": true, + "↼": true, + "⥒": true, + "⇐": true, + "⇔": true, + "⋚": true, + "≦": true, + "≶": true, + "⪡": true, + "⩽": true, + "≲": true, + "𝔏": true, + "⋘": true, + "⇚": true, + "Ŀ": true, + "⟵": true, + "⟷": true, + "⟶": true, + "⟸": true, + "⟺": true, + "⟹": true, + "𝕃": true, + "↙": true, + "↘": true, + "ℒ": true, + "↰": true, + "Ł": true, + "≪": true, + "⤅": true, + "М": true, + " ": true, + "ℳ": true, + "𝔐": true, + "∓": true, + "𝕄": true, + "ℳ": true, + "Μ": true, + "Њ": true, + "Ń": true, + "Ň": true, + "Ņ": true, + "Н": true, + "​": true, + "​": true, + "​": true, + "​": true, + "≫": true, + "≪": true, + " ": true, + "𝔑": true, + "⁠": true, + " ": true, + "ℕ": true, + "⫬": true, + "≢": true, + "≭": true, + "∦": true, + "∉": true, + "≠": true, + "≂̸": true, + "∄": true, + "≯": true, + "≱": true, + "≧̸": true, + "≫̸": true, + "≹": true, + "⩾̸": true, + "≵": true, + "≎̸": true, + "≏̸": true, + "⋪": true, + "⧏̸": true, + "⋬": true, + "≮": true, + "≰": true, + "≸": true, + "≪̸": true, + "⩽̸": true, + "≴": true, + "⪢̸": true, + "⪡̸": true, + "⊀": true, + "⪯̸": true, + "⋠": true, + "∌": true, + "⋫": true, + "⧐̸": true, + "⋭": true, + "⊏̸": true, + "⋢": true, + "⊐̸": true, + "⋣": true, + "⊂⃒": true, + "⊈": true, + "⊁": true, + "⪰̸": true, + "⋡": true, + "≿̸": true, + "⊃⃒": true, + "⊉": true, + "≁": true, + "≄": true, + "≇": true, + "≉": true, + "∤": true, + "𝒩": true, + "Ñ": true, + "Ñ": true, + "Ν": true, + "Œ": true, + "Ó": true, + "Ó": true, + "Ô": true, + "Ô": true, + "О": true, + "Ő": true, + "𝔒": true, + "Ò": true, + "Ò": true, + "Ō": true, + "Ω": true, + "Ο": true, + "𝕆": true, + "“": true, + "‘": true, + "⩔": true, + "𝒪": true, + "Ø": true, + "Ø": true, + "Õ": true, + "Õ": true, + "⨷": true, + "Ö": true, + "Ö": true, + "‾": true, + "⏞": true, + "⎴": true, + "⏜": true, + "∂": true, + "П": true, + "𝔓": true, + "Φ": true, + "Π": true, + "±": true, + "ℌ": true, + "ℙ": true, + "⪻": true, + "≺": true, + "⪯": true, + "≼": true, + "≾": true, + "″": true, + "∏": true, + "∷": true, + "∝": true, + "𝒫": true, + "Ψ": true, + """: true, + """: true, + "𝔔": true, + "ℚ": true, + "𝒬": true, + "⤐": true, + "®": true, + "®": true, + "Ŕ": true, + "⟫": true, + "↠": true, + "⤖": true, + "Ř": true, + "Ŗ": true, + "Р": true, + "ℜ": true, + "∋": true, + "⇋": true, + "⥯": true, + "ℜ": true, + "Ρ": true, + "⟩": true, + "→": true, + "⇥": true, + "⇄": true, + "⌉": true, + "⟧": true, + "⥝": true, + "⇂": true, + "⥕": true, + "⌋": true, + "⊢": true, + "↦": true, + "⥛": true, + "⊳": true, + "⧐": true, + "⊵": true, + "⥏": true, + "⥜": true, + "↾": true, + "⥔": true, + "⇀": true, + "⥓": true, + "⇒": true, + "ℝ": true, + "⥰": true, + "⇛": true, + "ℛ": true, + "↱": true, + "⧴": true, + "Щ": true, + "Ш": true, + "Ь": true, + "Ś": true, + "⪼": true, + "Š": true, + "Ş": true, + "Ŝ": true, + "С": true, + "𝔖": true, + "↓": true, + "←": true, + "→": true, + "↑": true, + "Σ": true, + "∘": true, + "𝕊": true, + "√": true, + "□": true, + "⊓": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊔": true, + "𝒮": true, + "⋆": true, + "⋐": true, + "⋐": true, + "⊆": true, + "≻": true, + 
"⪰": true, + "≽": true, + "≿": true, + "∋": true, + "∑": true, + "⋑": true, + "⊃": true, + "⊇": true, + "⋑": true, + "Þ": true, + "Þ": true, + "™": true, + "Ћ": true, + "Ц": true, + " ": true, + "Τ": true, + "Ť": true, + "Ţ": true, + "Т": true, + "𝔗": true, + "∴": true, + "Θ": true, + "  ": true, + " ": true, + "∼": true, + "≃": true, + "≅": true, + "≈": true, + "𝕋": true, + "⃛": true, + "𝒯": true, + "Ŧ": true, + "Ú": true, + "Ú": true, + "↟": true, + "⥉": true, + "Ў": true, + "Ŭ": true, + "Û": true, + "Û": true, + "У": true, + "Ű": true, + "𝔘": true, + "Ù": true, + "Ù": true, + "Ū": true, + "_": true, + "⏟": true, + "⎵": true, + "⏝": true, + "⋃": true, + "⊎": true, + "Ų": true, + "𝕌": true, + "↑": true, + "⤒": true, + "⇅": true, + "↕": true, + "⥮": true, + "⊥": true, + "↥": true, + "⇑": true, + "⇕": true, + "↖": true, + "↗": true, + "ϒ": true, + "Υ": true, + "Ů": true, + "𝒰": true, + "Ũ": true, + "Ü": true, + "Ü": true, + "⊫": true, + "⫫": true, + "В": true, + "⊩": true, + "⫦": true, + "⋁": true, + "‖": true, + "‖": true, + "∣": true, + "|": true, + "❘": true, + "≀": true, + " ": true, + "𝔙": true, + "𝕍": true, + "𝒱": true, + "⊪": true, + "Ŵ": true, + "⋀": true, + "𝔚": true, + "𝕎": true, + "𝒲": true, + "𝔛": true, + "Ξ": true, + "𝕏": true, + "𝒳": true, + "Я": true, + "Ї": true, + "Ю": true, + "Ý": true, + "Ý": true, + "Ŷ": true, + "Ы": true, + "𝔜": true, + "𝕐": true, + "𝒴": true, + "Ÿ": true, + "Ж": true, + "Ź": true, + "Ž": true, + "З": true, + "Ż": true, + "​": true, + "Ζ": true, + "ℨ": true, + "ℤ": true, + "𝒵": true, + "á": true, + "á": true, + "ă": true, + "∾": true, + "∾̳": true, + "∿": true, + "â": true, + "â": true, + "´": true, + "´": true, + "а": true, + "æ": true, + "æ": true, + "⁡": true, + "𝔞": true, + "à": true, + "à": true, + "ℵ": true, + "ℵ": true, + "α": true, + "ā": true, + "⨿": true, + "&": true, + "&": true, + "∧": true, + "⩕": true, + "⩜": true, + "⩘": true, + "⩚": true, + "∠": true, + "⦤": true, + "∠": true, + "∡": true, + "⦨": true, + "⦩": true, + "⦪": true, + "⦫": true, + "⦬": true, + "⦭": true, + "⦮": true, + "⦯": true, + "∟": true, + "⊾": true, + "⦝": true, + "∢": true, + "Å": true, + "⍼": true, + "ą": true, + "𝕒": true, + "≈": true, + "⩰": true, + "⩯": true, + "≊": true, + "≋": true, + "'": true, + "≈": true, + "≊": true, + "å": true, + "å": true, + "𝒶": true, + "*": true, + "≈": true, + "≍": true, + "ã": true, + "ã": true, + "ä": true, + "ä": true, + "∳": true, + "⨑": true, + "⫭": true, + "≌": true, + "϶": true, + "‵": true, + "∽": true, + "⋍": true, + "⊽": true, + "⌅": true, + "⌅": true, + "⎵": true, + "⎶": true, + "≌": true, + "б": true, + "„": true, + "∵": true, + "∵": true, + "⦰": true, + "϶": true, + "ℬ": true, + "β": true, + "ℶ": true, + "≬": true, + "𝔟": true, + "⋂": true, + "◯": true, + "⋃": true, + "⨀": true, + "⨁": true, + "⨂": true, + "⨆": true, + "★": true, + "▽": true, + "△": true, + "⨄": true, + "⋁": true, + "⋀": true, + "⤍": true, + "⧫": true, + "▪": true, + "▴": true, + "▾": true, + "◂": true, + "▸": true, + "␣": true, + "▒": true, + "░": true, + "▓": true, + "█": true, + "=⃥": true, + "≡⃥": true, + "⌐": true, + "𝕓": true, + "⊥": true, + "⊥": true, + "⋈": true, + "╗": true, + "╔": true, + "╖": true, + "╓": true, + "═": true, + "╦": true, + "╩": true, + "╤": true, + "╧": true, + "╝": true, + "╚": true, + "╜": true, + "╙": true, + "║": true, + "╬": true, + "╣": true, + "╠": true, + "╫": true, + "╢": true, + "╟": true, + "⧉": true, + "╕": true, + "╒": true, + "┐": true, + "┌": true, + "─": true, + "╥": true, + "╨": true, + "┬": true, + "┴": true, + 
"⊟": true, + "⊞": true, + "⊠": true, + "╛": true, + "╘": true, + "┘": true, + "└": true, + "│": true, + "╪": true, + "╡": true, + "╞": true, + "┼": true, + "┤": true, + "├": true, + "‵": true, + "˘": true, + "¦": true, + "¦": true, + "𝒷": true, + "⁏": true, + "∽": true, + "⋍": true, + "\": true, + "⧅": true, + "⟈": true, + "•": true, + "•": true, + "≎": true, + "⪮": true, + "≏": true, + "≏": true, + "ć": true, + "∩": true, + "⩄": true, + "⩉": true, + "⩋": true, + "⩇": true, + "⩀": true, + "∩︀": true, + "⁁": true, + "ˇ": true, + "⩍": true, + "č": true, + "ç": true, + "ç": true, + "ĉ": true, + "⩌": true, + "⩐": true, + "ċ": true, + "¸": true, + "¸": true, + "⦲": true, + "¢": true, + "¢": true, + "·": true, + "𝔠": true, + "ч": true, + "✓": true, + "✓": true, + "χ": true, + "○": true, + "⧃": true, + "ˆ": true, + "≗": true, + "↺": true, + "↻": true, + "®": true, + "Ⓢ": true, + "⊛": true, + "⊚": true, + "⊝": true, + "≗": true, + "⨐": true, + "⫯": true, + "⧂": true, + "♣": true, + "♣": true, + ":": true, + "≔": true, + "≔": true, + ",": true, + "@": true, + "∁": true, + "∘": true, + "∁": true, + "ℂ": true, + "≅": true, + "⩭": true, + "∮": true, + "𝕔": true, + "∐": true, + "©": true, + "©": true, + "℗": true, + "↵": true, + "✗": true, + "𝒸": true, + "⫏": true, + "⫑": true, + "⫐": true, + "⫒": true, + "⋯": true, + "⤸": true, + "⤵": true, + "⋞": true, + "⋟": true, + "↶": true, + "⤽": true, + "∪": true, + "⩈": true, + "⩆": true, + "⩊": true, + "⊍": true, + "⩅": true, + "∪︀": true, + "↷": true, + "⤼": true, + "⋞": true, + "⋟": true, + "⋎": true, + "⋏": true, + "¤": true, + "¤": true, + "↶": true, + "↷": true, + "⋎": true, + "⋏": true, + "∲": true, + "∱": true, + "⌭": true, + "⇓": true, + "⥥": true, + "†": true, + "ℸ": true, + "↓": true, + "‐": true, + "⊣": true, + "⤏": true, + "˝": true, + "ď": true, + "д": true, + "ⅆ": true, + "‡": true, + "⇊": true, + "⩷": true, + "°": true, + "°": true, + "δ": true, + "⦱": true, + "⥿": true, + "𝔡": true, + "⇃": true, + "⇂": true, + "⋄": true, + "⋄": true, + "♦": true, + "♦": true, + "¨": true, + "ϝ": true, + "⋲": true, + "÷": true, + "÷": true, + "÷": true, + "⋇": true, + "⋇": true, + "ђ": true, + "⌞": true, + "⌍": true, + "$": true, + "𝕕": true, + "˙": true, + "≐": true, + "≑": true, + "∸": true, + "∔": true, + "⊡": true, + "⌆": true, + "↓": true, + "⇊": true, + "⇃": true, + "⇂": true, + "⤐": true, + "⌟": true, + "⌌": true, + "𝒹": true, + "ѕ": true, + "⧶": true, + "đ": true, + "⋱": true, + "▿": true, + "▾": true, + "⇵": true, + "⥯": true, + "⦦": true, + "џ": true, + "⟿": true, + "⩷": true, + "≑": true, + "é": true, + "é": true, + "⩮": true, + "ě": true, + "≖": true, + "ê": true, + "ê": true, + "≕": true, + "э": true, + "ė": true, + "ⅇ": true, + "≒": true, + "𝔢": true, + "⪚": true, + "è": true, + "è": true, + "⪖": true, + "⪘": true, + "⪙": true, + "⏧": true, + "ℓ": true, + "⪕": true, + "⪗": true, + "ē": true, + "∅": true, + "∅": true, + "∅": true, + " ": true, + " ": true, + " ": true, + "ŋ": true, + " ": true, + "ę": true, + "𝕖": true, + "⋕": true, + "⧣": true, + "⩱": true, + "ε": true, + "ε": true, + "ϵ": true, + "≖": true, + "≕": true, + "≂": true, + "⪖": true, + "⪕": true, + "=": true, + "≟": true, + "≡": true, + "⩸": true, + "⧥": true, + "≓": true, + "⥱": true, + "ℯ": true, + "≐": true, + "≂": true, + "η": true, + "ð": true, + "ð": true, + "ë": true, + "ë": true, + "€": true, + "!": true, + "∃": true, + "ℰ": true, + "ⅇ": true, + "≒": true, + "ф": true, + "♀": true, + "ffi": true, + "ff": true, + "ffl": true, + "𝔣": true, + "fi": true, + "fj": true, + "♭": 
true, + "fl": true, + "▱": true, + "ƒ": true, + "𝕗": true, + "∀": true, + "⋔": true, + "⫙": true, + "⨍": true, + "½": true, + "½": true, + "⅓": true, + "¼": true, + "¼": true, + "⅕": true, + "⅙": true, + "⅛": true, + "⅔": true, + "⅖": true, + "¾": true, + "¾": true, + "⅗": true, + "⅜": true, + "⅘": true, + "⅚": true, + "⅝": true, + "⅞": true, + "⁄": true, + "⌢": true, + "𝒻": true, + "≧": true, + "⪌": true, + "ǵ": true, + "γ": true, + "ϝ": true, + "⪆": true, + "ğ": true, + "ĝ": true, + "г": true, + "ġ": true, + "≥": true, + "⋛": true, + "≥": true, + "≧": true, + "⩾": true, + "⩾": true, + "⪩": true, + "⪀": true, + "⪂": true, + "⪄": true, + "⋛︀": true, + "⪔": true, + "𝔤": true, + "≫": true, + "⋙": true, + "ℷ": true, + "ѓ": true, + "≷": true, + "⪒": true, + "⪥": true, + "⪤": true, + "≩": true, + "⪊": true, + "⪊": true, + "⪈": true, + "⪈": true, + "≩": true, + "⋧": true, + "𝕘": true, + "`": true, + "ℊ": true, + "≳": true, + "⪎": true, + "⪐": true, + ">": true, + ">": true, + "⪧": true, + "⩺": true, + "⋗": true, + "⦕": true, + "⩼": true, + "⪆": true, + "⥸": true, + "⋗": true, + "⋛": true, + "⪌": true, + "≷": true, + "≳": true, + "≩︀": true, + "≩︀": true, + "⇔": true, + " ": true, + "½": true, + "ℋ": true, + "ъ": true, + "↔": true, + "⥈": true, + "↭": true, + "ℏ": true, + "ĥ": true, + "♥": true, + "♥": true, + "…": true, + "⊹": true, + "𝔥": true, + "⤥": true, + "⤦": true, + "⇿": true, + "∻": true, + "↩": true, + "↪": true, + "𝕙": true, + "―": true, + "𝒽": true, + "ℏ": true, + "ħ": true, + "⁃": true, + "‐": true, + "í": true, + "í": true, + "⁣": true, + "î": true, + "î": true, + "и": true, + "е": true, + "¡": true, + "¡": true, + "⇔": true, + "𝔦": true, + "ì": true, + "ì": true, + "ⅈ": true, + "⨌": true, + "∭": true, + "⧜": true, + "℩": true, + "ij": true, + "ī": true, + "ℑ": true, + "ℐ": true, + "ℑ": true, + "ı": true, + "⊷": true, + "Ƶ": true, + "∈": true, + "℅": true, + "∞": true, + "⧝": true, + "ı": true, + "∫": true, + "⊺": true, + "ℤ": true, + "⊺": true, + "⨗": true, + "⨼": true, + "ё": true, + "į": true, + "𝕚": true, + "ι": true, + "⨼": true, + "¿": true, + "¿": true, + "𝒾": true, + "∈": true, + "⋹": true, + "⋵": true, + "⋴": true, + "⋳": true, + "∈": true, + "⁢": true, + "ĩ": true, + "і": true, + "ï": true, + "ï": true, + "ĵ": true, + "й": true, + "𝔧": true, + "ȷ": true, + "𝕛": true, + "𝒿": true, + "ј": true, + "є": true, + "κ": true, + "ϰ": true, + "ķ": true, + "к": true, + "𝔨": true, + "ĸ": true, + "х": true, + "ќ": true, + "𝕜": true, + "𝓀": true, + "⇚": true, + "⇐": true, + "⤛": true, + "⤎": true, + "≦": true, + "⪋": true, + "⥢": true, + "ĺ": true, + "⦴": true, + "ℒ": true, + "λ": true, + "⟨": true, + "⦑": true, + "⟨": true, + "⪅": true, + "«": true, + "«": true, + "←": true, + "⇤": true, + "⤟": true, + "⤝": true, + "↩": true, + "↫": true, + "⤹": true, + "⥳": true, + "↢": true, + "⪫": true, + "⤙": true, + "⪭": true, + "⪭︀": true, + "⤌": true, + "❲": true, + "{": true, + "[": true, + "⦋": true, + "⦏": true, + "⦍": true, + "ľ": true, + "ļ": true, + "⌈": true, + "{": true, + "л": true, + "⤶": true, + "“": true, + "„": true, + "⥧": true, + "⥋": true, + "↲": true, + "≤": true, + "←": true, + "↢": true, + "↽": true, + "↼": true, + "⇇": true, + "↔": true, + "⇆": true, + "⇋": true, + "↭": true, + "⋋": true, + "⋚": true, + "≤": true, + "≦": true, + "⩽": true, + "⩽": true, + "⪨": true, + "⩿": true, + "⪁": true, + "⪃": true, + "⋚︀": true, + "⪓": true, + "⪅": true, + "⋖": true, + "⋚": true, + "⪋": true, + "≶": true, + "≲": true, + "⥼": true, + "⌊": true, + "𝔩": true, + "≶": true, + "⪑": true, + 
"↽": true, + "↼": true, + "⥪": true, + "▄": true, + "љ": true, + "≪": true, + "⇇": true, + "⌞": true, + "⥫": true, + "◺": true, + "ŀ": true, + "⎰": true, + "⎰": true, + "≨": true, + "⪉": true, + "⪉": true, + "⪇": true, + "⪇": true, + "≨": true, + "⋦": true, + "⟬": true, + "⇽": true, + "⟦": true, + "⟵": true, + "⟷": true, + "⟼": true, + "⟶": true, + "↫": true, + "↬": true, + "⦅": true, + "𝕝": true, + "⨭": true, + "⨴": true, + "∗": true, + "_": true, + "◊": true, + "◊": true, + "⧫": true, + "(": true, + "⦓": true, + "⇆": true, + "⌟": true, + "⇋": true, + "⥭": true, + "‎": true, + "⊿": true, + "‹": true, + "𝓁": true, + "↰": true, + "≲": true, + "⪍": true, + "⪏": true, + "[": true, + "‘": true, + "‚": true, + "ł": true, + "<": true, + "<": true, + "⪦": true, + "⩹": true, + "⋖": true, + "⋋": true, + "⋉": true, + "⥶": true, + "⩻": true, + "⦖": true, + "◃": true, + "⊴": true, + "◂": true, + "⥊": true, + "⥦": true, + "≨︀": true, + "≨︀": true, + "∺": true, + "¯": true, + "¯": true, + "♂": true, + "✠": true, + "✠": true, + "↦": true, + "↦": true, + "↧": true, + "↤": true, + "↥": true, + "▮": true, + "⨩": true, + "м": true, + "—": true, + "∡": true, + "𝔪": true, + "℧": true, + "µ": true, + "µ": true, + "∣": true, + "*": true, + "⫰": true, + "·": true, + "·": true, + "−": true, + "⊟": true, + "∸": true, + "⨪": true, + "⫛": true, + "…": true, + "∓": true, + "⊧": true, + "𝕞": true, + "∓": true, + "𝓂": true, + "∾": true, + "μ": true, + "⊸": true, + "⊸": true, + "⋙̸": true, + "≫⃒": true, + "≫̸": true, + "⇍": true, + "⇎": true, + "⋘̸": true, + "≪⃒": true, + "≪̸": true, + "⇏": true, + "⊯": true, + "⊮": true, + "∇": true, + "ń": true, + "∠⃒": true, + "≉": true, + "⩰̸": true, + "≋̸": true, + "ʼn": true, + "≉": true, + "♮": true, + "♮": true, + "ℕ": true, + " ": true, + " ": true, + "≎̸": true, + "≏̸": true, + "⩃": true, + "ň": true, + "ņ": true, + "≇": true, + "⩭̸": true, + "⩂": true, + "н": true, + "–": true, + "≠": true, + "⇗": true, + "⤤": true, + "↗": true, + "↗": true, + "≐̸": true, + "≢": true, + "⤨": true, + "≂̸": true, + "∄": true, + "∄": true, + "𝔫": true, + "≧̸": true, + "≱": true, + "≱": true, + "≧̸": true, + "⩾̸": true, + "⩾̸": true, + "≵": true, + "≯": true, + "≯": true, + "⇎": true, + "↮": true, + "⫲": true, + "∋": true, + "⋼": true, + "⋺": true, + "∋": true, + "њ": true, + "⇍": true, + "≦̸": true, + "↚": true, + "‥": true, + "≰": true, + "↚": true, + "↮": true, + "≰": true, + "≦̸": true, + "⩽̸": true, + "⩽̸": true, + "≮": true, + "≴": true, + "≮": true, + "⋪": true, + "⋬": true, + "∤": true, + "𝕟": true, + "¬": true, + "¬": true, + "∉": true, + "⋹̸": true, + "⋵̸": true, + "∉": true, + "⋷": true, + "⋶": true, + "∌": true, + "∌": true, + "⋾": true, + "⋽": true, + "∦": true, + "∦": true, + "⫽⃥": true, + "∂̸": true, + "⨔": true, + "⊀": true, + "⋠": true, + "⪯̸": true, + "⊀": true, + "⪯̸": true, + "⇏": true, + "↛": true, + "⤳̸": true, + "↝̸": true, + "↛": true, + "⋫": true, + "⋭": true, + "⊁": true, + "⋡": true, + "⪰̸": true, + "𝓃": true, + "∤": true, + "∦": true, + "≁": true, + "≄": true, + "≄": true, + "∤": true, + "∦": true, + "⋢": true, + "⋣": true, + "⊄": true, + "⫅̸": true, + "⊈": true, + "⊂⃒": true, + "⊈": true, + "⫅̸": true, + "⊁": true, + "⪰̸": true, + "⊅": true, + "⫆̸": true, + "⊉": true, + "⊃⃒": true, + "⊉": true, + "⫆̸": true, + "≹": true, + "ñ": true, + "ñ": true, + "≸": true, + "⋪": true, + "⋬": true, + "⋫": true, + "⋭": true, + "ν": true, + "#": true, + "№": true, + " ": true, + "⊭": true, + "⤄": true, + "≍⃒": true, + "⊬": true, + "≥⃒": true, + ">⃒": true, + "⧞": true, + "⤂": true, + 
"≤⃒": true, + "<⃒": true, + "⊴⃒": true, + "⤃": true, + "⊵⃒": true, + "∼⃒": true, + "⇖": true, + "⤣": true, + "↖": true, + "↖": true, + "⤧": true, + "Ⓢ": true, + "ó": true, + "ó": true, + "⊛": true, + "⊚": true, + "ô": true, + "ô": true, + "о": true, + "⊝": true, + "ő": true, + "⨸": true, + "⊙": true, + "⦼": true, + "œ": true, + "⦿": true, + "𝔬": true, + "˛": true, + "ò": true, + "ò": true, + "⧁": true, + "⦵": true, + "Ω": true, + "∮": true, + "↺": true, + "⦾": true, + "⦻": true, + "‾": true, + "⧀": true, + "ō": true, + "ω": true, + "ο": true, + "⦶": true, + "⊖": true, + "𝕠": true, + "⦷": true, + "⦹": true, + "⊕": true, + "∨": true, + "↻": true, + "⩝": true, + "ℴ": true, + "ℴ": true, + "ª": true, + "ª": true, + "º": true, + "º": true, + "⊶": true, + "⩖": true, + "⩗": true, + "⩛": true, + "ℴ": true, + "ø": true, + "ø": true, + "⊘": true, + "õ": true, + "õ": true, + "⊗": true, + "⨶": true, + "ö": true, + "ö": true, + "⌽": true, + "∥": true, + "¶": true, + "¶": true, + "∥": true, + "⫳": true, + "⫽": true, + "∂": true, + "п": true, + "%": true, + ".": true, + "‰": true, + "⊥": true, + "‱": true, + "𝔭": true, + "φ": true, + "ϕ": true, + "ℳ": true, + "☎": true, + "π": true, + "⋔": true, + "ϖ": true, + "ℏ": true, + "ℎ": true, + "ℏ": true, + "+": true, + "⨣": true, + "⊞": true, + "⨢": true, + "∔": true, + "⨥": true, + "⩲": true, + "±": true, + "±": true, + "⨦": true, + "⨧": true, + "±": true, + "⨕": true, + "𝕡": true, + "£": true, + "£": true, + "≺": true, + "⪳": true, + "⪷": true, + "≼": true, + "⪯": true, + "≺": true, + "⪷": true, + "≼": true, + "⪯": true, + "⪹": true, + "⪵": true, + "⋨": true, + "≾": true, + "′": true, + "ℙ": true, + "⪵": true, + "⪹": true, + "⋨": true, + "∏": true, + "⌮": true, + "⌒": true, + "⌓": true, + "∝": true, + "∝": true, + "≾": true, + "⊰": true, + "𝓅": true, + "ψ": true, + " ": true, + "𝔮": true, + "⨌": true, + "𝕢": true, + "⁗": true, + "𝓆": true, + "ℍ": true, + "⨖": true, + "?": true, + "≟": true, + """: true, + """: true, + "⇛": true, + "⇒": true, + "⤜": true, + "⤏": true, + "⥤": true, + "∽̱": true, + "ŕ": true, + "√": true, + "⦳": true, + "⟩": true, + "⦒": true, + "⦥": true, + "⟩": true, + "»": true, + "»": true, + "→": true, + "⥵": true, + "⇥": true, + "⤠": true, + "⤳": true, + "⤞": true, + "↪": true, + "↬": true, + "⥅": true, + "⥴": true, + "↣": true, + "↝": true, + "⤚": true, + "∶": true, + "ℚ": true, + "⤍": true, + "❳": true, + "}": true, + "]": true, + "⦌": true, + "⦎": true, + "⦐": true, + "ř": true, + "ŗ": true, + "⌉": true, + "}": true, + "р": true, + "⤷": true, + "⥩": true, + "”": true, + "”": true, + "↳": true, + "ℜ": true, + "ℛ": true, + "ℜ": true, + "ℝ": true, + "▭": true, + "®": true, + "®": true, + "⥽": true, + "⌋": true, + "𝔯": true, + "⇁": true, + "⇀": true, + "⥬": true, + "ρ": true, + "ϱ": true, + "→": true, + "↣": true, + "⇁": true, + "⇀": true, + "⇄": true, + "⇌": true, + "⇉": true, + "↝": true, + "⋌": true, + "˚": true, + "≓": true, + "⇄": true, + "⇌": true, + "‏": true, + "⎱": true, + "⎱": true, + "⫮": true, + "⟭": true, + "⇾": true, + "⟧": true, + "⦆": true, + "𝕣": true, + "⨮": true, + "⨵": true, + ")": true, + "⦔": true, + "⨒": true, + "⇉": true, + "›": true, + "𝓇": true, + "↱": true, + "]": true, + "’": true, + "’": true, + "⋌": true, + "⋊": true, + "▹": true, + "⊵": true, + "▸": true, + "⧎": true, + "⥨": true, + "℞": true, + "ś": true, + "‚": true, + "≻": true, + "⪴": true, + "⪸": true, + "š": true, + "≽": true, + "⪰": true, + "ş": true, + "ŝ": true, + "⪶": true, + "⪺": true, + "⋩": true, + "⨓": true, + "≿": true, + "с": true, + "⋅": true, + 
"⊡": true, + "⩦": true, + "⇘": true, + "⤥": true, + "↘": true, + "↘": true, + "§": true, + "§": true, + ";": true, + "⤩": true, + "∖": true, + "∖": true, + "✶": true, + "𝔰": true, + "⌢": true, + "♯": true, + "щ": true, + "ш": true, + "∣": true, + "∥": true, + "­": true, + "­": true, + "σ": true, + "ς": true, + "ς": true, + "∼": true, + "⩪": true, + "≃": true, + "≃": true, + "⪞": true, + "⪠": true, + "⪝": true, + "⪟": true, + "≆": true, + "⨤": true, + "⥲": true, + "←": true, + "∖": true, + "⨳": true, + "⧤": true, + "∣": true, + "⌣": true, + "⪪": true, + "⪬": true, + "⪬︀": true, + "ь": true, + "/": true, + "⧄": true, + "⌿": true, + "𝕤": true, + "♠": true, + "♠": true, + "∥": true, + "⊓": true, + "⊓︀": true, + "⊔": true, + "⊔︀": true, + "⊏": true, + "⊑": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊐": true, + "⊒": true, + "□": true, + "□": true, + "▪": true, + "▪": true, + "→": true, + "𝓈": true, + "∖": true, + "⌣": true, + "⋆": true, + "☆": true, + "★": true, + "ϵ": true, + "ϕ": true, + "¯": true, + "⊂": true, + "⫅": true, + "⪽": true, + "⊆": true, + "⫃": true, + "⫁": true, + "⫋": true, + "⊊": true, + "⪿": true, + "⥹": true, + "⊂": true, + "⊆": true, + "⫅": true, + "⊊": true, + "⫋": true, + "⫇": true, + "⫕": true, + "⫓": true, + "≻": true, + "⪸": true, + "≽": true, + "⪰": true, + "⪺": true, + "⪶": true, + "⋩": true, + "≿": true, + "∑": true, + "♪": true, + "¹": true, + "¹": true, + "²": true, + "²": true, + "³": true, + "³": true, + "⊃": true, + "⫆": true, + "⪾": true, + "⫘": true, + "⊇": true, + "⫄": true, + "⟉": true, + "⫗": true, + "⥻": true, + "⫂": true, + "⫌": true, + "⊋": true, + "⫀": true, + "⊃": true, + "⊇": true, + "⫆": true, + "⊋": true, + "⫌": true, + "⫈": true, + "⫔": true, + "⫖": true, + "⇙": true, + "⤦": true, + "↙": true, + "↙": true, + "⤪": true, + "ß": true, + "ß": true, + "⌖": true, + "τ": true, + "⎴": true, + "ť": true, + "ţ": true, + "т": true, + "⃛": true, + "⌕": true, + "𝔱": true, + "∴": true, + "∴": true, + "θ": true, + "ϑ": true, + "ϑ": true, + "≈": true, + "∼": true, + " ": true, + "≈": true, + "∼": true, + "þ": true, + "þ": true, + "˜": true, + "×": true, + "×": true, + "⊠": true, + "⨱": true, + "⨰": true, + "∭": true, + "⤨": true, + "⊤": true, + "⌶": true, + "⫱": true, + "𝕥": true, + "⫚": true, + "⤩": true, + "‴": true, + "™": true, + "▵": true, + "▿": true, + "◃": true, + "⊴": true, + "≜": true, + "▹": true, + "⊵": true, + "◬": true, + "≜": true, + "⨺": true, + "⨹": true, + "⧍": true, + "⨻": true, + "⏢": true, + "𝓉": true, + "ц": true, + "ћ": true, + "ŧ": true, + "≬": true, + "↞": true, + "↠": true, + "⇑": true, + "⥣": true, + "ú": true, + "ú": true, + "↑": true, + "ў": true, + "ŭ": true, + "û": true, + "û": true, + "у": true, + "⇅": true, + "ű": true, + "⥮": true, + "⥾": true, + "𝔲": true, + "ù": true, + "ù": true, + "↿": true, + "↾": true, + "▀": true, + "⌜": true, + "⌜": true, + "⌏": true, + "◸": true, + "ū": true, + "¨": true, + "¨": true, + "ų": true, + "𝕦": true, + "↑": true, + "↕": true, + "↿": true, + "↾": true, + "⊎": true, + "υ": true, + "ϒ": true, + "υ": true, + "⇈": true, + "⌝": true, + "⌝": true, + "⌎": true, + "ů": true, + "◹": true, + "𝓊": true, + "⋰": true, + "ũ": true, + "▵": true, + "▴": true, + "⇈": true, + "ü": true, + "ü": true, + "⦧": true, + "⇕": true, + "⫨": true, + "⫩": true, + "⊨": true, + "⦜": true, + "ϵ": true, + "ϰ": true, + "∅": true, + "ϕ": true, + "ϖ": true, + "∝": true, + "↕": true, + "ϱ": true, + "ς": true, + "⊊︀": true, + "⫋︀": true, + "⊋︀": true, + "⫌︀": true, + "ϑ": true, + "⊲": true, + "⊳": true, + "в": true, 
+	"⊢": true,
+	"∨": true,
+	"⊻": true,
+	"≚": true,
+	"⋮": true,
+	"|": true,
+	"|": true,
+	"𝔳": true,
+	"⊲": true,
+	"⊂⃒": true,
+	"⊃⃒": true,
+	"𝕧": true,
+	"∝": true,
+	"⊳": true,
+	"𝓋": true,
+	"⫋︀": true,
+	"⊊︀": true,
+	"⫌︀": true,
+	"⊋︀": true,
+	"⦚": true,
+	"ŵ": true,
+	"⩟": true,
+	"∧": true,
+	"≙": true,
+	"℘": true,
+	"𝔴": true,
+	"𝕨": true,
+	"℘": true,
+	"≀": true,
+	"≀": true,
+	"𝓌": true,
+	"⋂": true,
+	"◯": true,
+	"⋃": true,
+	"▽": true,
+	"𝔵": true,
+	"⟺": true,
+	"⟷": true,
+	"ξ": true,
+	"⟸": true,
+	"⟵": true,
+	"⟼": true,
+	"⋻": true,
+	"⨀": true,
+	"𝕩": true,
+	"⨁": true,
+	"⨂": true,
+	"⟹": true,
+	"⟶": true,
+	"𝓍": true,
+	"⨆": true,
+	"⨄": true,
+	"△": true,
+	"⋁": true,
+	"⋀": true,
+	"ý": true,
+	"ý": true,
+	"я": true,
+	"ŷ": true,
+	"ы": true,
+	"¥": true,
+	"¥": true,
+	"𝔶": true,
+	"ї": true,
+	"𝕪": true,
+	"𝓎": true,
+	"ю": true,
+	"ÿ": true,
+	"ÿ": true,
+	"ź": true,
+	"ž": true,
+	"з": true,
+	"ż": true,
+	"ℨ": true,
+	"ζ": true,
+	"𝔷": true,
+	"ж": true,
+	"⇝": true,
+	"𝕫": true,
+	"𝓏": true,
+	"‍": true,
+	"‌": true,
+}
diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go
new file mode 100644
index 000000000..6ab60102c
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/esc.go
@@ -0,0 +1,70 @@
+package blackfriday
+
+import (
+	"html"
+	"io"
+)
+
+var htmlEscaper = [256][]byte{
+	'&': []byte("&amp;"),
+	'<': []byte("&lt;"),
+	'>': []byte("&gt;"),
+	'"': []byte("&quot;"),
+}
+
+func escapeHTML(w io.Writer, s []byte) {
+	escapeEntities(w, s, false)
+}
+
+func escapeAllHTML(w io.Writer, s []byte) {
+	escapeEntities(w, s, true)
+}
+
+func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) {
+	var start, end int
+	for end < len(s) {
+		escSeq := htmlEscaper[s[end]]
+		if escSeq != nil {
+			isEntity, entityEnd := nodeIsEntity(s, end)
+			if isEntity && !escapeValidEntities {
+				w.Write(s[start : entityEnd+1])
+				start = entityEnd + 1
+			} else {
+				w.Write(s[start:end])
+				w.Write(escSeq)
+				start = end + 1
+			}
+		}
+		end++
+	}
+	if start < len(s) && end <= len(s) {
+		w.Write(s[start:end])
+	}
+}
+
+func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) {
+	isEntity = false
+	endEntityPos = end + 1
+
+	if s[end] == '&' {
+		for endEntityPos < len(s) {
+			if s[endEntityPos] == ';' {
+				if entities[string(s[end:endEntityPos+1])] {
+					isEntity = true
+					break
+				}
+			}
+			if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' {
+				break
+			}
+			endEntityPos++
+		}
+	}
+
+	return isEntity, endEntityPos
+}
+
+func escLink(w io.Writer, text []byte) {
+	unesc := html.UnescapeString(string(text))
+	escapeHTML(w, []byte(unesc))
+}
diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go
new file mode 100644
index 000000000..cb4f26e30
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/html.go
@@ -0,0 +1,952 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// HTMLFlags control optional behavior of HTML renderer.
+type HTMLFlags int
+
+// HTML renderer configuration options.
+const (
+	HTMLFlagsNone           HTMLFlags = 0
+	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks
+	SkipImages                                    // Skip embedded images
+	SkipLinks                                     // Skip all links
+	Safelink                                      // Only link to trusted protocols
+	NofollowLinks                                 // Only link with rel="nofollow"
+	NoreferrerLinks                               // Only link with rel="noreferrer"
+	NoopenerLinks                                 // Only link with rel="noopener"
+	HrefTargetBlank                               // Add a blank target
+	CompletePage                                  // Generate a complete HTML page
+	UseXHTML                                      // Generate XHTML output instead of HTML
+	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source
+	Smartypants                                   // Enable smart punctuation substitutions
+	SmartypantsFractions                          // Enable smart fractions (with Smartypants)
+	SmartypantsDashes                             // Enable smart dashes (with Smartypants)
+	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants)
+	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering
+	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants)
+	TOC                                           // Generate a table of contents
+)
+
+var (
+	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
+)
+
+const (
+	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
+		processingInstruction + "|" + declaration + "|" + cdata + ")"
+	closeTag              = "</" + tagName + "\\s*[>]"
+	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
+	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
+	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
+	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
+	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
+	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
+	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
+	doubleQuotedValue     = "\"[^\"]*\""
+	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
+	processingInstruction = "[<][?].*?[?][>]"
+	singleQuotedValue     = "'[^']*'"
+	tagName               = "[A-Za-z][A-Za-z0-9-]*"
+	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
+)
+
+// HTMLRendererParameters is a collection of supplementary parameters tweaking
+// the behavior of various parts of HTML renderer.
+type HTMLRendererParameters struct {
+	// Prepend this text to each relative URL.
+	AbsolutePrefix string
+	// Add this text to each footnote anchor, to ensure uniqueness.
+	FootnoteAnchorPrefix string
+	// Show this text inside the <a> tag for a footnote return link, if the
+	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
+	// <sup>[return]</sup> is used.
+	FootnoteReturnLinkContents string
+	// If set, add this text to the front of each Heading ID, to ensure
+	// uniqueness.
+	HeadingIDPrefix string
+	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
+	HeadingIDSuffix string
+	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
+	// Negative offset is also valid.
+	// Resulting levels are clipped between 1 and 6.
+	HeadingLevelOffset int
+
+	Title string // Document title (used if CompletePage is set)
+	CSS   string // Optional CSS file URL (used if CompletePage is set)
+	Icon  string // Optional icon file URL (used if CompletePage is set)
+
+	Flags HTMLFlags // Flags allow customizing this renderer's behavior
+}
+
+// HTMLRenderer is a type that implements the Renderer interface for HTML output.
+//
+// Do not create this directly, instead use the NewHTMLRenderer function.
+type HTMLRenderer struct {
+	HTMLRendererParameters
+
+	closeTag string // how to end singleton tags: either " />" or ">"
+
+	// Track heading IDs to prevent ID collision in a single generation.
+	headingIDs map[string]int
+
+	lastOutputLen int
+	disableTags   int
+
+	sr *SPRenderer
+}
+
+const (
+	xhtmlClose = " />"
+	htmlClose  = ">"
+)
+
+// NewHTMLRenderer creates and configures an HTMLRenderer object, which
+// satisfies the Renderer interface.
+func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
+	// configure the rendering engine
+	closeTag := htmlClose
+	if params.Flags&UseXHTML != 0 {
+		closeTag = xhtmlClose
+	}
+
+	if params.FootnoteReturnLinkContents == "" {
+		// U+FE0E is VARIATION SELECTOR-15.
+		// It suppresses automatic emoji presentation of the preceding
+		// U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS.
+		params.FootnoteReturnLinkContents = "↩\ufe0e"
+	}
+
+	return &HTMLRenderer{
+		HTMLRendererParameters: params,
+
+		closeTag:   closeTag,
+		headingIDs: make(map[string]int),
+
+		sr: NewSmartypantsRenderer(params.Flags),
+	}
+}
+
+func isHTMLTag(tag []byte, tagname string) bool {
+	found, _ := findHTMLTagPos(tag, tagname)
+	return found
+}
+
+// Look for a character, but ignore it when it's in any kind of quotes, it
+// might be JavaScript
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
+	inSingleQuote := false
+	inDoubleQuote := false
+	inGraveQuote := false
+	i := start
+	for i < len(html) {
+		switch {
+		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
+			return i
+		case html[i] == '\'':
+			inSingleQuote = !inSingleQuote
+		case html[i] == '"':
+			inDoubleQuote = !inDoubleQuote
+		case html[i] == '`':
+			inGraveQuote = !inGraveQuote
+		}
+		i++
+	}
+	return start
+}
+
+func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
+	i := 0
+	if i < len(tag) && tag[0] != '<' {
+		return false, -1
+	}
+	i++
+	i = skipSpace(tag, i)
+
+	if i < len(tag) && tag[i] == '/' {
+		i++
+	}
+
+	i = skipSpace(tag, i)
+	j := 0
+	for ; i < len(tag); i, j = i+1, j+1 {
+		if j >= len(tagname) {
+			break
+		}
+
+		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
+			return false, -1
+		}
+	}
+
+	if i == len(tag) {
+		return false, -1
+	}
+
+	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
+	if rightAngle >= i {
+		return true, rightAngle
+	}
+
+	return false, -1
+}
+
+func skipSpace(tag []byte, i int) int {
+	for i < len(tag) && isspace(tag[i]) {
+		i++
+	}
+	return i
+}
+
+func isRelativeLink(link []byte) (yes bool) {
+	// a tag begin with '#'
+	if link[0] == '#' {
+		return true
+	}
+
+	// link begin with '/' but not '//', the second maybe a protocol relative link
+	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
+		return true
+	}
+
+	// only the root '/'
+	if len(link) == 1 && link[0] == '/' {
+		return true
+	}
+
+	// current directory : begin with "./"
+	if bytes.HasPrefix(link, []byte("./")) {
+		return true
+	}
+
+	// parent directory : begin with "../"
+	if bytes.HasPrefix(link, []byte("../")) {
+		return true
+	}
+
+	return false
+}
+
+func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
+	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
+		tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
+			r.headingIDs[id] = count + 1
+			id = tmp
+		} else {
+			id = id + "-1"
+		}
+	}
+
+	if _, found := r.headingIDs[id]; !found {
+		r.headingIDs[id] = 0
+	}
+
+	return id
+}
+
+func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
+	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+		newDest := r.AbsolutePrefix
+		if link[0] != '/' {
+			newDest += "/"
+		}
+		newDest += string(link)
+		return []byte(newDest)
+	}
+	return link
+}
+
+func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
+	if isRelativeLink(link) {
+		return attrs
+	}
+	val := []string{}
+	if flags&NofollowLinks != 0 {
+		val = append(val, "nofollow")
+	}
+	if flags&NoreferrerLinks != 0 {
+		val = append(val, "noreferrer")
+	}
+	if flags&NoopenerLinks != 0 {
+		val = append(val, "noopener")
+	}
+	if flags&HrefTargetBlank != 0 {
+		attrs = append(attrs, "target=\"_blank\"")
+	}
+	if len(val) == 0 {
+		return attrs
+	}
+	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
+	return append(attrs, attr)
+}
+
+func isMailto(link []byte) bool {
+	return bytes.HasPrefix(link, []byte("mailto:"))
+}
+
+func needSkipLink(flags HTMLFlags, dest []byte) bool {
+	if flags&SkipLinks != 0 {
+		return true
+	}
+	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
+}
+
+func isSmartypantable(node *Node) bool {
+	pt := node.Parent.Type
+	return pt != Link && pt != CodeBlock && pt != Code
+}
+
+func appendLanguageAttr(attrs []string, info []byte) []string {
+	if len(info) == 0 {
+		return attrs
+	}
+	endOfLang := bytes.IndexAny(info, "\t ")
+	if endOfLang < 0 {
+		endOfLang = len(info)
+	}
+	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
+}
+
+func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
+	w.Write(name)
+	if len(attrs) > 0 {
+		w.Write(spaceBytes)
+		w.Write([]byte(strings.Join(attrs, " ")))
+	}
+	w.Write(gtBytes)
+	r.lastOutputLen = 1
+}
+
+func footnoteRef(prefix string, node *Node) []byte {
+	urlFrag := prefix + string(slugify(node.Destination))
+	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
+	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
+}
+
+func footnoteItem(prefix string, slug []byte) []byte {
+	return []byte(fmt.Sprintf(`<li id="%sfn:%s">`, prefix, slug))
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
+	const format = ` <a class="footnote-return" href="#%sfnref:%s">%s</a>`
+	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
+}
+
+func itemOpenCR(node *Node) bool {
+	if node.Prev == nil {
+		return false
+	}
+	ld := node.Parent.ListData
+	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
+}
+
+func skipParagraphTags(node *Node) bool {
+	grandparent := node.Parent.Parent
+	if grandparent == nil || grandparent.Type != List {
+		return false
+	}
+	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
+	return grandparent.Type == List && tightOrTerm
+}
+
+func cellAlignment(align CellAlignFlags) string {
+	switch align {
+	case TableAlignmentLeft:
+		return "left"
+	case TableAlignmentRight:
+		return "right"
+	case TableAlignmentCenter:
+		return "center"
+	default:
+		return ""
+	}
+}
+
+func (r *HTMLRenderer) out(w io.Writer, text []byte) {
+	if r.disableTags > 0 {
+		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
+	} else {
+		w.Write(text)
+	}
+	r.lastOutputLen = len(text)
+}
+
+func (r *HTMLRenderer) cr(w io.Writer) {
+	if r.lastOutputLen > 0 {
+		r.out(w, nlBytes)
+	}
+}
+
+var (
+	nlBytes    = []byte{'\n'}
+	gtBytes    = []byte{'>'}
+	spaceBytes = []byte{' '}
+)
+
+var (
+	brTag              = []byte("<br>")
+	brXHTMLTag         = []byte("<br />")
+	emTag              = []byte("<em>")
+	emCloseTag         = []byte("</em>")
+	strongTag          = []byte("<strong>")
+	strongCloseTag     = []byte("</strong>")
+	delTag             = []byte("<del>")
+	delCloseTag        = []byte("</del>")
+	ttTag              = []byte("<tt>")
+	ttCloseTag         = []byte("</tt>")
+	aTag               = []byte("<a")
+	aCloseTag          = []byte("</a>")
+	preTag             = []byte("<pre>")
+	preCloseTag        = []byte("</pre>")
+	codeTag            = []byte("<code>")
+	codeCloseTag       = []byte("</code>")
+	pTag               = []byte("<p>")
+	pCloseTag          = []byte("</p>")
+	blockquoteTag      = []byte("<blockquote>")
+	blockquoteCloseTag = []byte("</blockquote>")
+	hrTag              = []byte("<hr>")
+	hrXHTMLTag         = []byte("<hr />")
+	ulTag              = []byte("<ul>")
+	ulCloseTag         = []byte("</ul>")
+	olTag              = []byte("<ol>")
+	olCloseTag         = []byte("</ol>")
+	dlTag              = []byte("<dl>")
+	dlCloseTag         = []byte("</dl>")
+	liTag              = []byte("<li>")
+	liCloseTag         = []byte("</li>")
+	ddTag              = []byte("<dd>")
+	ddCloseTag         = []byte("</dd>")
+	dtTag              = []byte("<dt>")
+	dtCloseTag         = []byte("</dt>")
+	tableTag           = []byte("<table>")
+	tableCloseTag      = []byte("</table>")
+	tdTag              = []byte("<td")
+	tdCloseTag         = []byte("</td>")
+	thTag              = []byte("<th")
+	thCloseTag         = []byte("</th>")
+	theadTag           = []byte("<thead>")
+	theadCloseTag      = []byte("</thead>")
+	tbodyTag           = []byte("<tbody>")
+	tbodyCloseTag      = []byte("</tbody>")
+	trTag              = []byte("<tr>")
+	trCloseTag         = []byte("</tr>")
+	h1Tag              = []byte("<h1")
+	h1CloseTag         = []byte("</h1>")
+	h2Tag              = []byte("<h2")
+	h2CloseTag         = []byte("</h2>")
+	h3Tag              = []byte("<h3")
+	h3CloseTag         = []byte("</h3>")
+	h4Tag              = []byte("<h4")
+	h4CloseTag         = []byte("</h4>")
+	h5Tag              = []byte("<h5")
+	h5CloseTag         = []byte("</h5>")
+	h6Tag              = []byte("<h6")
+	h6CloseTag         = []byte("</h6>")
+
+	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
+	footnotesCloseDivBytes = []byte("\n</div>\n")
+)
+
+func headingTagsFromLevel(level int) ([]byte, []byte) {
+	if level <= 1 {
+		return h1Tag, h1CloseTag
+	}
+	switch level {
+	case 2:
+		return h2Tag, h2CloseTag
+	case 3:
+		return h3Tag, h3CloseTag
+	case 4:
+		return h4Tag, h4CloseTag
+	case 5:
+		return h5Tag, h5CloseTag
+	}
+	return h6Tag, h6CloseTag
+}
+
+func (r *HTMLRenderer) outHRTag(w io.Writer) {
+	if r.Flags&UseXHTML == 0 {
+		r.out(w, hrTag)
+	} else {
+		r.out(w, hrXHTMLTag)
+	}
+}
+				//} else {
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := 
tdCloseTag
+		if node.IsHeader {
+			openTag = thTag
+			closeTag = thCloseTag
+		}
+		if entering {
+			align := cellAlignment(node.Align)
+			if align != "" {
+				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
+			}
+			if node.Prev == nil {
+				r.cr(w)
+			}
+			r.tag(w, openTag, attrs)
+		} else {
+			r.out(w, closeTag)
+			r.cr(w)
+		}
+	case TableHead:
+		if entering {
+			r.cr(w)
+			r.out(w, theadTag)
+		} else {
+			r.out(w, theadCloseTag)
+			r.cr(w)
+		}
+	case TableBody:
+		if entering {
+			r.cr(w)
+			r.out(w, tbodyTag)
+			// XXX: this is to adhere to a rather silly test. Should fix test.
+			if node.FirstChild == nil {
+				r.cr(w)
+			}
+		} else {
+			r.out(w, tbodyCloseTag)
+			r.cr(w)
+		}
+	case TableRow:
+		if entering {
+			r.cr(w)
+			r.out(w, trTag)
+		} else {
+			r.out(w, trCloseTag)
+			r.cr(w)
+		}
+	default:
+		panic("Unknown node type " + node.Type.String())
+	}
+	return GoToNext
+}
+
+// RenderHeader writes HTML document preamble and TOC if requested.
+func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
+	r.writeDocumentHeader(w)
+	if r.Flags&TOC != 0 {
+		r.writeTOC(w, ast)
+	}
+}
+
+// RenderFooter writes HTML document footer.
+func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
+	if r.Flags&CompletePage == 0 {
+		return
+	}
+	io.WriteString(w, "\n</body>\n</html>\n")
+}
+
+func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
+	if r.Flags&CompletePage == 0 {
+		return
+	}
+	ending := ""
+	if r.Flags&UseXHTML != 0 {
+		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
+		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
+		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
+		ending = " /"
+	} else {
+		io.WriteString(w, "<!DOCTYPE html>\n")
+		io.WriteString(w, "<html>\n")
+	}
+	io.WriteString(w, "<head>\n")
+	io.WriteString(w, "  <title>")
+	if r.Flags&Smartypants != 0 {
+		r.sr.Process(w, []byte(r.Title))
+	} else {
+		escapeHTML(w, []byte(r.Title))
+	}
+	io.WriteString(w, "</title>\n")
+	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
+	io.WriteString(w, Version)
+	io.WriteString(w, "\"")
+	io.WriteString(w, ending)
+	io.WriteString(w, ">\n")
+	io.WriteString(w, "  <meta charset=\"utf-8\"")
+	io.WriteString(w, ending)
+	io.WriteString(w, ">\n")
+	if r.CSS != "" {
+		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
+		escapeHTML(w, []byte(r.CSS))
+		io.WriteString(w, "\"")
+		io.WriteString(w, ending)
+		io.WriteString(w, ">\n")
+	}
+	if r.Icon != "" {
+		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
+		escapeHTML(w, []byte(r.Icon))
+		io.WriteString(w, "\"")
+		io.WriteString(w, ending)
+		io.WriteString(w, ">\n")
+	}
+	io.WriteString(w, "</head>\n")
+	io.WriteString(w, "<body>\n\n")
+}
+
+func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
+	buf := bytes.Buffer{}
+
+	inHeading := false
+	tocLevel := 0
+	headingCount := 0
+
+	ast.Walk(func(node *Node, entering bool) WalkStatus {
+		if node.Type == Heading && !node.HeadingData.IsTitleblock {
+			inHeading = entering
+			if entering {
+				node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
+				if node.Level == tocLevel {
+					buf.WriteString("</li>\n\n<li>")
+				} else if node.Level < tocLevel {
+					for node.Level < tocLevel {
+						tocLevel--
+						buf.WriteString("</li>\n</ul>")
+					}
+					buf.WriteString("\n\n<li>")
+				} else {
+					for node.Level > tocLevel {
+						tocLevel++
+						buf.WriteString("<ul>\n<li>")
+					}
+				}
+
+				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
+				headingCount++
+			} else {
+				buf.WriteString("</a>")
+			}
+			return GoToNext
+		}
+
+		if inHeading {
+			return r.RenderNode(&buf, node, entering)
+		}
+
+		return GoToNext
+	})
+
+	for ; tocLevel > 0; tocLevel-- {
+		buf.WriteString("</li>\n</ul>")
+	}
+
+	if buf.Len() > 0 {
+		io.WriteString(w, "<nav>\n")
+		w.Write(buf.Bytes())
+		io.WriteString(w, "\n\n</nav>\n")
+	}
+	r.lastOutputLen = buf.Len()
+}
diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go
new file mode 100644
index 000000000..d45bd9417
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/inline.go
@@ -0,0 +1,1228 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse inline elements.
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+)
+
+var (
+	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
+	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
+
+	// https://www.w3.org/TR/html5/syntax.html#character-references
+	// highest unicode code point in 17 planes (2^20): 1,114,112d =
+	// 7 dec digits or 6 hex digits
+	// named entity references can be 2-31 characters with stuff like &lt;
+	// at one end and &CounterClockwiseContourIntegral; at the other. There
+	// are also sometimes numbers at the end, although this isn't inherent
+	// in the specification; there are never numbers anywhere else in
+	// current character references, though; see &frac34; and &blk12;, etc.
+	// https://www.w3.org/TR/html5/syntax.html#named-character-references
+	//
+	// entity := "&" (named group | number ref) ";"
+	// named group := [a-zA-Z]{2,31}[0-9]{0,2}
+	// number ref := "#" (dec ref | hex ref)
+	// dec ref := [0-9]{1,7}
+	// hex ref := ("x" | "X") [0-9a-fA-F]{1,6}
+	htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`)
+)
+
+// Functions to parse text within a block
+// Each function returns the number of chars taken care of
+// data is the complete block being rendered
+// offset is the number of valid chars before the current cursor
+
+func (p *Markdown) inline(currBlock *Node, data []byte) {
+	// handlers might call us recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting || len(data) == 0 {
+		return
+	}
+	p.nesting++
+	beg, end := 0, 0
+	for end < len(data) {
+		handler := p.inlineCallback[data[end]]
+		if handler != nil {
+			if consumed, node := handler(p, data, end); consumed == 0 {
+				// No action from the callback.
+				end++
+			} else {
+				// Copy inactive chars into the output.
+				currBlock.AppendChild(text(data[beg:end]))
+				if node != nil {
+					currBlock.AppendChild(node)
+				}
+				// Skip past whatever the callback used.
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' 
|| data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
+ } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil + } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
+				}
+				node.AppendChild(text(stripMailto(link)))
+				return end, node
+			}
+		} else {
+			htmlTag := NewNode(HTMLSpan)
+			htmlTag.Literal = data[:end]
+			return end, htmlTag
+		}
+	}
+
+	return end, nil
+}
+
+// '\\' backslash escape
+var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
+
+func escape(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	if len(data) > 1 {
+		if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' {
+			return 2, NewNode(Hardbreak)
+		}
+		if bytes.IndexByte(escapeChars, data[1]) < 0 {
+			return 0, nil
+		}
+
+		return 2, text(data[1:2])
+	}
+
+	return 2, nil
+}
+
+func unescapeText(ob *bytes.Buffer, src []byte) {
+	i := 0
+	for i < len(src) {
+		org := i
+		for i < len(src) && src[i] != '\\' {
+			i++
+		}
+
+		if i > org {
+			ob.Write(src[org:i])
+		}
+
+		if i+1 >= len(src) {
+			break
+		}
+
+		ob.WriteByte(src[i+1])
+		i += 2
+	}
+}
+
+// '&' escaped when it doesn't belong to an entity
+// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
+func entity(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	end := 1
+
+	if end < len(data) && data[end] == '#' {
+		end++
+	}
+
+	for end < len(data) && isalnum(data[end]) {
+		end++
+	}
+
+	if end < len(data) && data[end] == ';' {
+		end++ // real entity
+	} else {
+		return 0, nil // lone '&'
+	}
+
+	ent := data[:end]
+	// undo & escaping or it will be converted to &amp; by another
+	// escaper in the renderer
+	if bytes.Equal(ent, []byte("&amp;")) {
+		ent = []byte{'&'}
+	}
+
+	return end, text(ent)
+}
+
+func linkEndsWithEntity(data []byte, linkEnd int) bool {
+	entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1)
+	return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
+}
+
+// hasPrefixCaseInsensitive is a custom implementation of
+// strings.HasPrefix(strings.ToLower(s), prefix)
+// we rolled our own because ToLower pulls in a huge machinery of lowercasing
+// anything from Unicode and that's very slow. Since this func will only be
+// used on ASCII protocol prefixes, we can take shortcuts.
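+// As an illustrative aside (not part of the upstream file): with this
+// shortcut,
+//
+//	hasPrefixCaseInsensitive([]byte("HTTPS://example.com"), []byte("https://"))
+//
+// reports true without allocating, because every supported prefix is ASCII.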
+func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. 
+ * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + // skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 000000000..58d2e4538 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
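+// For example (an illustrative sketch, not upstream text), a parser that
+// only understands tables and fenced code blocks could be constructed as:
+//
+//	p := New(WithExtensions(Tables | FencedCode))
+//	ast := p.Parse([]byte("| a | b |\n|---|---|\n| 1 | 2 |\n"))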
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). 
The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. + notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. 
+ Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. +type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. +func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. 
+// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. 
+// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. +// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//
+//	<p><a href="/url/" title="tooltip title">link</a></p>
    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? 
(space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
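+// For instance (an approximate, illustrative note, not upstream text), given
+//
+//	first line of the note
+//	    an indented continuation line
+//
+// contents would hold both lines, with the continuation shifted left by one
+// indent, and hasBlock would report that an indented block was present.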
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. +func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
+// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). +func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 000000000..04e6050ce --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. +const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip
<p>
    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. +type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. 
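+// A minimal sketch (not part of the upstream source) of building a tiny
+// tree by hand:
+//
+//	para := NewNode(Paragraph)
+//	txt := NewNode(Text)
+//	txt.Literal = []byte("hello")
+//	para.AppendChild(txt) // txt is detached from any previous parent first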
+func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. +func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. +// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. 
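+// A hedged usage sketch (editor's addition, not upstream godoc; doc is assumed
+// to be an already-parsed *Node): collect all text content of a document.
+//
+//	var buf bytes.Buffer
+//	doc.Walk(func(node *Node, entering bool) WalkStatus {
+//		if entering && node.Type == Text {
+//			buf.Write(node.Literal)
+//		}
+//		return GoToNext
+//	})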
+func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} +
+type nodeWalker struct { + current *Node + root *Node + entering bool +} +
+func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} +
+func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} +
+func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} +
+func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} +
+func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 000000000..3a220e942 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross <russ@russross.com>. +// Distributed under the Simplified BSD License. +// See README.md for details. +// +
+// +// +// SmartyPants rendering +// +// +
+package blackfriday +
+import ( + "bytes" + "io" +) +
+// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} +
+func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} +
+func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} +
+func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} +
+func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!"
] is probably a close + *isOpen = false + case /* isnormal(previousChar) && */ isspace(nextChar): + // [a" ] this is one of the easy cases + *isOpen = false + case previousChar == 0 && ispunct(nextChar): + // ["!] hmm... could be ["$1.95] or ["!...] + *isOpen = false + case isspace(previousChar) && ispunct(nextChar): + // [ "!] looks more like [ "$1.95] + *isOpen = true + case ispunct(previousChar) && ispunct(nextChar): + // [!"!] context is not any help here, so toggle + *isOpen = !*isOpen + case /* isnormal(previousChar) && */ ispunct(nextChar): + // [a"!] is probably a close + *isOpen = false + case previousChar == 0 /* && isnormal(nextChar) */ : + // ["a] is probably an open + *isOpen = true + case isspace(previousChar) /* && isnormal(nextChar) */ : + // [ "a] this is one of the easy cases + *isOpen = true + case ispunct(previousChar) /* && isnormal(nextChar) */ : + // [!"a] is probably an open + *isOpen = true + default: + // [a'b] maybe a contraction? + *isOpen = false + } +
+ // Note that with the limited lookahead, this non-breaking + // space will also be appended to single double quotes. + if addNBSP && !*isOpen { + out.WriteString("&nbsp;") + } + + out.WriteByte('&') + if *isOpen { + out.WriteByte('l') + } else { + out.WriteByte('r') + } + out.WriteByte(quote) + out.WriteString("quo;") + + if addNBSP && *isOpen { + out.WriteString("&nbsp;") + } + + return true +} +
+func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + t1 := tolower(text[1]) + + if t1 == '\'' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { + out.WriteString("&rsquo;") + return 0 + } + + if len(text) >= 3 { + t2 := tolower(text[2]) + + if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && + (len(text) < 4 || wordBoundary(text[3])) { + out.WriteString("&rsquo;") + return 0 + } + } + } + + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { + return 0 + } + + out.WriteByte(text[0]) + return 0 +} +
+func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 { + t1 := tolower(text[1]) + t2 := tolower(text[2]) + + if t1 == 'c' && t2 == ')' { + out.WriteString("&copy;") + return 2 + } + + if t1 == 'r' && t2 == ')' { + out.WriteString("&reg;") + return 2 + } + + if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { + out.WriteString("&trade;") + return 3 + } + } + + out.WriteByte(text[0]) + return 0 +} +
+func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + if text[1] == '-' { + out.WriteString("&mdash;") + return 1 + } + + if wordBoundary(previousChar) && wordBoundary(text[1]) { + out.WriteString("&ndash;") + return 0 + } + } + + out.WriteByte(text[0]) + return 0 +} +
+func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '-' && text[2] == '-' { + out.WriteString("&mdash;") + return 2 + } + if len(text) >= 2 && text[1] == '-' { + out.WriteString("&ndash;") + return 1 + } + + out.WriteByte(text[0]) + return 0 +} +
+func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { + if bytes.HasPrefix(text, []byte("&quot;")) { + nextChar := byte(0) + if len(text) >= 7 { + nextChar = text[6] + } + if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { + return 5 + } + } + + if bytes.HasPrefix(text, []byte("&#0;")) { + return 3 + } + + out.WriteByte('&') + return 0 +} +
+func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { + var quote byte = 'd' + if angledQuotes { + quote = 'a' + } + + return func(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) + } +} +
+func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '.' && text[2] == '.' { + out.WriteString("&hellip;") + return 2 + } + + if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { + out.WriteString("&hellip;") + return 4 + } + + out.WriteByte(text[0]) + return 0 +} +
+func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 && text[1] == '`' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + out.WriteByte(text[0]) + return 0 +} +
+func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b + // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) + // and avoid changing dates like 1/23/2005 into fractions. + numEnd := 0 + for len(text) > numEnd && isdigit(text[numEnd]) { + numEnd++ + } + if numEnd == 0 { + out.WriteByte(text[0]) + return 0 + } + denStart := numEnd + 1 + if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { + denStart = numEnd + 3 + } else if len(text) < numEnd+2 || text[numEnd] != '/' { + out.WriteByte(text[0]) + return 0 + } + denEnd := denStart + for len(text) > denEnd && isdigit(text[denEnd]) { + denEnd++ + } + if denEnd == denStart { + out.WriteByte(text[0]) + return 0 + } + if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { + out.WriteString("<sup>") + out.Write(text[:numEnd]) + out.WriteString("</sup>&frasl;<sub>") + out.Write(text[denStart:denEnd]) + out.WriteString("</sub>") + return denEnd - 1 + } + } + + out.WriteByte(text[0]) + return 0 +} +
+func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + if text[0] == '1' && text[1] == '/' && text[2] == '2' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { + out.WriteString("&frac12;") + return 2 + } + } + + if text[0] == '1' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { + out.WriteString("&frac14;") + return 2 + } + } + + if text[0] == '3' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { + out.WriteString("&frac34;") + return 2 + } + } + } + + out.WriteByte(text[0]) + return 0 +} +
+func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { + out.WriteString("&quot;") + } + + return 0 +} +
+func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') +} +
+func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') +} +
+func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { + i := 0 + + for i < len(text) && text[i] != '>' { + i++ + } + + out.Write(text[:i+1]) + return i +} +
+type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int +
+// NewSmartypantsRenderer constructs a Smartypants renderer object. +func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { + var ( + r SPRenderer + + smartAmpAngled = r.smartAmp(true, false) + smartAmpAngledNBSP = r.smartAmp(true, true) + smartAmpRegular = r.smartAmp(false, false) + smartAmpRegularNBSP = r.smartAmp(false, true) + + addNBSP = flags&SmartypantsQuotesNBSP != 0 + ) + + if flags&SmartypantsAngledQuotes == 0 { + r.callbacks['"'] = r.smartDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpRegular + } else { + r.callbacks['&'] = smartAmpRegularNBSP + } + } else { + r.callbacks['"'] = r.smartAngledDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpAngled + } else { + r.callbacks['&'] = smartAmpAngledNBSP + } + } + r.callbacks['\''] = r.smartSingleQuote + r.callbacks['('] = r.smartParens + if flags&SmartypantsDashes != 0 { + if flags&SmartypantsLatexDashes == 0 { + r.callbacks['-'] = r.smartDash + } else { + r.callbacks['-'] = r.smartDashLatex + } + } + r.callbacks['.'] = r.smartPeriod + if flags&SmartypantsFractions == 0 { + r.callbacks['1'] = r.smartNumber + r.callbacks['3'] = r.smartNumber + } else { + for ch := '1'; ch <= '9'; ch++ { + r.callbacks[ch] = r.smartNumberGeneric + } + } + r.callbacks['<'] = r.smartLeftAngle + r.callbacks['`'] = r.smartBacktick + return &r +} +
+// Process is the entry point of the Smartypants renderer. +func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore new file mode 100644 index 000000000..ff36b987f --- /dev/null +++ b/vendor/github.com/shopspring/decimal/.gitignore @@ -0,0 +1,9 @@ +.git +*.swp + +# IntelliJ +.idea/ +*.iml + +# VS code +*.code-workspace diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml new file mode 100644 index 000000000..6326d40f0 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/.travis.yml @@ -0,0 +1,19 @@ +language: go + +arch: + - amd64 + - ppc64le + +go: + - 1.7.x + - 1.14.x + - 1.15.x + - 1.16.x + - 1.17.x + - tip + +install: + - go build .
+ +script: + - go test -v diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md new file mode 100644 index 000000000..aea61154b --- /dev/null +++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md @@ -0,0 +1,49 @@ +## Decimal v1.3.1 + +#### ENHANCEMENTS +- Reduce memory allocation in case of initialization from big.Int [#252](https://github.com/shopspring/decimal/pull/252) + +#### BUGFIXES +- Fix binary marshalling of decimal zero value [#253](https://github.com/shopspring/decimal/pull/253) + +## Decimal v1.3.0 + +#### FEATURES +- Add NewFromFormattedString initializer [#184](https://github.com/shopspring/decimal/pull/184) +- Add NewNullDecimal initializer [#234](https://github.com/shopspring/decimal/pull/234) +- Add implementation of natural exponent function (Taylor, Hull-Abraham) [#229](https://github.com/shopspring/decimal/pull/229) +- Add RoundUp, RoundDown, RoundCeil, RoundFloor methods [#196](https://github.com/shopspring/decimal/pull/196) [#202](https://github.com/shopspring/decimal/pull/202) [#220](https://github.com/shopspring/decimal/pull/220) +- Add XML support for NullDecimal [#192](https://github.com/shopspring/decimal/pull/192) +- Add IsInteger method [#179](https://github.com/shopspring/decimal/pull/179) +- Add Copy helper method [#123](https://github.com/shopspring/decimal/pull/123) +- Add InexactFloat64 helper method [#205](https://github.com/shopspring/decimal/pull/205) +- Add CoefficientInt64 helper method [#244](https://github.com/shopspring/decimal/pull/244) + +#### ENHANCEMENTS +- Performance optimization of NewFromString init method [#198](https://github.com/shopspring/decimal/pull/198) +- Performance optimization of Abs and Round methods [#240](https://github.com/shopspring/decimal/pull/240) +- Additional tests (CI) for ppc64le architecture [#188](https://github.com/shopspring/decimal/pull/188) + +#### BUGFIXES +- Fix rounding in FormatFloat fallback path (roundShortest method, fix taken from Go main repository) [#161](https://github.com/shopspring/decimal/pull/161) +- Add slice range checks to UnmarshalBinary method [#232](https://github.com/shopspring/decimal/pull/232) + +## Decimal v1.2.0 + +#### BREAKING +- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172) + +#### FEATURES +- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72) +- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157) +- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171) + +#### ENHANCEMENTS +- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160) +- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156) +- Update documentation [#173](https://github.com/shopspring/decimal/pull/173) +- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174) + +#### BUGFIXES +- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159) +- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166) diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE new file mode 100644 index 000000000..ad2148aaf --- /dev/null +++ b/vendor/github.com/shopspring/decimal/LICENSE @@ -0,0 +1,45 @@ +The MIT License (MIT) + +Copyright (c) 2015 Spring, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +- Based on https://github.com/oguzbilgic/fpd, which has the following license: +""" +The MIT License (MIT) + +Copyright (c) 2013 Oguz Bilgic + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +""" diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md new file mode 100644 index 000000000..2e35df068 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/README.md @@ -0,0 +1,130 @@ +# decimal + +[![Build Status](https://app.travis-ci.com/shopspring/decimal.svg?branch=master)](https://app.travis-ci.com/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) + +Arbitrary-precision fixed-point decimal numbers in go. + +_Note:_ Decimal library can "only" represent numbers with a maximum of 2^31 digits after the decimal point. 
+ +## Features + + * The zero-value is 0, and is safe to use without initialization + * Addition, subtraction, multiplication with no loss of precision + * Division with specified precision + * Database/sql serialization/deserialization + * JSON and XML serialization/deserialization + +## Install + +Run `go get github.com/shopspring/decimal` + +## Requirements + +Decimal library requires Go version `>=1.7` + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/shopspring/decimal" +) + +func main() { + price, err := decimal.NewFromString("136.02") + if err != nil { + panic(err) + } + + quantity := decimal.NewFromInt(3) + + fee, _ := decimal.NewFromString(".035") + taxRate, _ := decimal.NewFromString(".08875") + + subtotal := price.Mul(quantity) + + preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1))) + + total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1))) + + fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06 + fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421 + fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375 + fmt.Println("Total:", total) // Total: 459.824961375 + fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875 +} +``` + +## Documentation + +http://godoc.org/github.com/shopspring/decimal + +## Production Usage + +* [Spring](https://shopspring.com/), since August 14, 2014. +* If you are using this in production, please let us know! + +## FAQ + +#### Why don't you just use float64? + +Because float64 (or any binary floating point type, actually) can't represent +numbers such as `0.1` exactly. + +Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that +it prints out `10`, but it actually prints `9.999999999999831`. Over time, +these small errors can really add up! + +#### Why don't you just use big.Rat? + +big.Rat is fine for representing rational numbers, but Decimal is better for +representing money. Why? Here's a (contrived) example: + +Let's say you use big.Rat, and you have two numbers, x and y, both +representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one +out, the string output has to stop somewhere (let's say it stops at 3 decimal +digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did +the other 0.001 go? + +Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE + +With Decimal, the strings being printed out represent the number exactly. So, +if you have `x = y = 1/3` (with precision 3), they will actually be equal to +0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is +unaccounted for! + +You still have to be careful. If you want to split a number `N` 3 ways, you +can't just send `N/3` to three different people. You have to pick one to send +`N - (2/3*N)` to. That person will receive the fraction of a penny remainder. + +But, it is much easier to be careful with Decimal than with big.Rat. + +#### Why isn't the API similar to big.Int's? + +big.Int's API is built to reduce the number of memory allocations for maximal +performance. This makes sense for its use-case, but the trade-off is that the +API is awkward and easy to misuse. + +For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A +developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This +modifies `a` and sets `z` as an alias for `a`, which they might not expect. It +also modifies any other aliases to `a`. 
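+
+A minimal editorial sketch of that aliasing pitfall (this example is an
+editor's addition, not part of the upstream README):
+
+```go
+a := big.NewInt(1)
+b := big.NewInt(2)
+z := a.Add(a, b) // a is mutated to 3; z aliases a
+fmt.Println(a, z) // 3 3
+```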
+ +Here's an example of the subtle bugs you can introduce with big.Int's API: +https://play.golang.org/p/x2R_78pa8r +
+In contrast, it's difficult to make such mistakes with decimal. Decimals +behave like other go numbers types: even though `a = b` will not deep copy +`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods +return new Decimals and do not modify the originals. The downside is that +this causes extra allocations, so Decimal is less performant. My assumption +is that if you're using Decimals, you probably care more about correctness +than performance. +
+## License +
+The MIT License (MIT) +
+This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License. diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go new file mode 100644 index 000000000..9958d6902 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/decimal-go.go @@ -0,0 +1,415 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +
+// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. +
+package decimal +
+type decimal struct { + d [800]byte // digits, big-endian representation + nd int // number of digits used + dp int // decimal point + neg bool // negative flag + trunc bool // discarded nonzero digits beyond d[:nd] +} +
+func (a *decimal) String() string { + n := 10 + a.nd + if a.dp > 0 { + n += a.dp + } + if a.dp < 0 { + n += -a.dp + } + + buf := make([]byte, n) + w := 0 + switch { + case a.nd == 0: + return "0" + + case a.dp <= 0: + // zeros fill space between decimal point and digits + buf[w] = '0' + w++ + buf[w] = '.' + w++ + w += digitZero(buf[w : w+-a.dp]) + w += copy(buf[w:], a.d[0:a.nd]) + + case a.dp < a.nd: + // decimal point in middle of digits + w += copy(buf[w:], a.d[0:a.dp]) + buf[w] = '.' + w++ + w += copy(buf[w:], a.d[a.dp:a.nd]) + + default: + // zeros fill space between digits and decimal point + w += copy(buf[w:], a.d[0:a.nd]) + w += digitZero(buf[w : w+a.dp-a.nd]) + } + return string(buf[0:w]) +} +
+func digitZero(dst []byte) int { + for i := range dst { + dst[i] = '0' + } + return len(dst) +} +
+// trim trailing zeros from number. +// (They are meaningless; the decimal point is tracked +// independent of the number of digits.) +func trim(a *decimal) { + for a.nd > 0 && a.d[a.nd-1] == '0' { + a.nd-- + } + if a.nd == 0 { + a.dp = 0 + } +} +
+// Assign v to a. +func (a *decimal) Assign(v uint64) { + var buf [24]byte + + // Write reversed decimal in buf. + n := 0 + for v > 0 { + v1 := v / 10 + v -= 10 * v1 + buf[n] = byte(v + '0') + n++ + v = v1 + } + + // Reverse again to produce forward decimal in a.d. + a.nd = 0 + for n--; n >= 0; n-- { + a.d[a.nd] = buf[n] + a.nd++ + } + a.dp = a.nd + trim(a) +} +
+// Maximum shift that we can do in one pass without overflow. +// A uint has 32 or 64 bits, and we have to be able to accommodate 9<<k. +const uintSize = 32 << (^uint(0) >> 63) +const maxShift = uintSize - 4 +
+// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
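+// Editorial worked example (an added note, not upstream doc): for a decimal
+// holding digits "1" with dp = 1 (the value 1), rightShift(a, 1) halves it,
+// leaving digits "5" with dp = 0 (the value 0.5).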
+func rightShift(a *decimal, k uint) { + r := 0 // read pointer + w := 0 // write pointer + + // Pick up enough leading digits to cover first shift. + var n uint + for ; n>>k == 0; r++ { + if r >= a.nd { + if n == 0 { + // a == 0; shouldn't get here, but handle anyway. + a.nd = 0 + return + } + for n>>k == 0 { + n = n * 10 + r++ + } + break + } + c := uint(a.d[r]) + n = n*10 + c - '0' + } + a.dp -= r - 1 + + var mask uint = (1 << k) - 1 + + // Pick up a digit, put down a digit. + for ; r < a.nd; r++ { + c := uint(a.d[r]) + dig := n >> k + n &= mask + a.d[w] = byte(dig + '0') + w++ + n = n*10 + c - '0' + } + + // Put down extra digits. + for n > 0 { + dig := n >> k + n &= mask + if w < len(a.d) { + a.d[w] = byte(dig + '0') + w++ + } else if dig > 0 { + a.trunc = true + } + n = n * 10 + } + + a.nd = w + trim(a) +} + +// Cheat sheet for left shift: table indexed by shift count giving +// number of new digits that will be introduced by that shift. +// +// For example, leftcheats[4] = {2, "625"}. That means that +// if we are shifting by 4 (multiplying by 16), it will add 2 digits +// when the string prefix is "625" through "999", and one fewer digit +// if the string prefix is "000" through "624". +// +// Credit for this trick goes to Ken. + +type leftCheat struct { + delta int // number of new digits + cutoff string // minus one digit if original < a. +} + +var leftcheats = []leftCheat{ + // Leading digits of 1/2^i = 5^i. + // 5^23 is not an exact 64-bit floating point number, + // so have to use bc for the math. + // Go up to 60 to be large enough for 32bit and 64bit platforms. + /* + seq 60 | sed 's/^/5^/' | bc | + awk 'BEGIN{ print "\t{ 0, \"\" }," } + { + log2 = log(2)/log(10) + printf("\t{ %d, \"%s\" },\t// * %d\n", + int(log2*NR+1), $0, 2**NR) + }' + */ + {0, ""}, + {1, "5"}, // * 2 + {1, "25"}, // * 4 + {1, "125"}, // * 8 + {2, "625"}, // * 16 + {2, "3125"}, // * 32 + {2, "15625"}, // * 64 + {3, "78125"}, // * 128 + {3, "390625"}, // * 256 + {3, "1953125"}, // * 512 + {4, "9765625"}, // * 1024 + {4, "48828125"}, // * 2048 + {4, "244140625"}, // * 4096 + {4, "1220703125"}, // * 8192 + {5, "6103515625"}, // * 16384 + {5, "30517578125"}, // * 32768 + {5, "152587890625"}, // * 65536 + {6, "762939453125"}, // * 131072 + {6, "3814697265625"}, // * 262144 + {6, "19073486328125"}, // * 524288 + {7, "95367431640625"}, // * 1048576 + {7, "476837158203125"}, // * 2097152 + {7, "2384185791015625"}, // * 4194304 + {7, "11920928955078125"}, // * 8388608 + {8, "59604644775390625"}, // * 16777216 + {8, "298023223876953125"}, // * 33554432 + {8, "1490116119384765625"}, // * 67108864 + {9, "7450580596923828125"}, // * 134217728 + {9, "37252902984619140625"}, // * 268435456 + {9, "186264514923095703125"}, // * 536870912 + {10, "931322574615478515625"}, // * 1073741824 + {10, "4656612873077392578125"}, // * 2147483648 + {10, "23283064365386962890625"}, // * 4294967296 + {10, "116415321826934814453125"}, // * 8589934592 + {11, "582076609134674072265625"}, // * 17179869184 + {11, "2910383045673370361328125"}, // * 34359738368 + {11, "14551915228366851806640625"}, // * 68719476736 + {12, "72759576141834259033203125"}, // * 137438953472 + {12, "363797880709171295166015625"}, // * 274877906944 + {12, "1818989403545856475830078125"}, // * 549755813888 + {13, "9094947017729282379150390625"}, // * 1099511627776 + {13, "45474735088646411895751953125"}, // * 2199023255552 + {13, "227373675443232059478759765625"}, // * 4398046511104 + {13, "1136868377216160297393798828125"}, // * 8796093022208 + {14, 
"5684341886080801486968994140625"}, // * 17592186044416 + {14, "28421709430404007434844970703125"}, // * 35184372088832 + {14, "142108547152020037174224853515625"}, // * 70368744177664 + {15, "710542735760100185871124267578125"}, // * 140737488355328 + {15, "3552713678800500929355621337890625"}, // * 281474976710656 + {15, "17763568394002504646778106689453125"}, // * 562949953421312 + {16, "88817841970012523233890533447265625"}, // * 1125899906842624 + {16, "444089209850062616169452667236328125"}, // * 2251799813685248 + {16, "2220446049250313080847263336181640625"}, // * 4503599627370496 + {16, "11102230246251565404236316680908203125"}, // * 9007199254740992 + {17, "55511151231257827021181583404541015625"}, // * 18014398509481984 + {17, "277555756156289135105907917022705078125"}, // * 36028797018963968 + {17, "1387778780781445675529539585113525390625"}, // * 72057594037927936 + {18, "6938893903907228377647697925567626953125"}, // * 144115188075855872 + {18, "34694469519536141888238489627838134765625"}, // * 288230376151711744 + {18, "173472347597680709441192448139190673828125"}, // * 576460752303423488 + {19, "867361737988403547205962240695953369140625"}, // * 1152921504606846976 +} + +// Is the leading prefix of b lexicographically less than s? +func prefixIsLessThan(b []byte, s string) bool { + for i := 0; i < len(s); i++ { + if i >= len(b) { + return true + } + if b[i] != s[i] { + return b[i] < s[i] + } + } + return false +} + +// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow. +func leftShift(a *decimal, k uint) { + delta := leftcheats[k].delta + if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { + delta-- + } + + r := a.nd // read index + w := a.nd + delta // write index + + // Pick up a digit, put down a digit. + var n uint + for r--; r >= 0; r-- { + n += (uint(a.d[r]) - '0') << k + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + // Put down extra digits. + for n > 0 { + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + a.nd += delta + if a.nd >= len(a.d) { + a.nd = len(a.d) + } + a.dp += delta + trim(a) +} + +// Binary shift left (k > 0) or right (k < 0). +func (a *decimal) Shift(k int) { + switch { + case a.nd == 0: + // nothing to do: a == 0 + case k > 0: + for k > maxShift { + leftShift(a, maxShift) + k -= maxShift + } + leftShift(a, uint(k)) + case k < 0: + for k < -maxShift { + rightShift(a, maxShift) + k += maxShift + } + rightShift(a, uint(-k)) + } +} + +// If we chop a at nd digits, should we round up? +func shouldRoundUp(a *decimal, nd int) bool { + if nd < 0 || nd >= a.nd { + return false + } + if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even + // if we truncated, a little higher than what's recorded - always round up + if a.trunc { + return true + } + return nd > 0 && (a.d[nd-1]-'0')%2 != 0 + } + // not halfway - digit tells all + return a.d[nd] >= '5' +} + +// Round a to nd digits (or fewer). +// If nd is zero, it means we're rounding +// just to the left of the digits, as in +// 0.09 -> 0.1. +func (a *decimal) Round(nd int) { + if nd < 0 || nd >= a.nd { + return + } + if shouldRoundUp(a, nd) { + a.RoundUp(nd) + } else { + a.RoundDown(nd) + } +} + +// Round a down to nd digits (or fewer). 
+func (a *decimal) RoundDown(nd int) { + if nd < 0 || nd >= a.nd { + return + } + a.nd = nd + trim(a) +} + +// Round a up to nd digits (or fewer). +func (a *decimal) RoundUp(nd int) { + if nd < 0 || nd >= a.nd { + return + } + + // round up + for i := nd - 1; i >= 0; i-- { + c := a.d[i] + if c < '9' { // can stop after this digit + a.d[i]++ + a.nd = i + 1 + return + } + } + + // Number is all 9s. + // Change to single 1 with adjusted decimal point. + a.d[0] = '1' + a.nd = 1 + a.dp++ +} + +// Extract integer part, rounded appropriately. +// No guarantees about overflow. +func (a *decimal) RoundedInteger() uint64 { + if a.dp > 20 { + return 0xFFFFFFFFFFFFFFFF + } + var i int + n := uint64(0) + for i = 0; i < a.dp && i < a.nd; i++ { + n = n*10 + uint64(a.d[i]-'0') + } + for ; i < a.dp; i++ { + n *= 10 + } + if shouldRoundUp(a, a.dp) { + n++ + } + return n +} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go new file mode 100644 index 000000000..84405ec1c --- /dev/null +++ b/vendor/github.com/shopspring/decimal/decimal.go @@ -0,0 +1,1904 @@ +// Package decimal implements an arbitrary precision fixed-point decimal. +// +// The zero-value of a Decimal is 0, as you would expect. +// +// The best way to create a new Decimal is to use decimal.NewFromString, ex: +// +// n, err := decimal.NewFromString("-123.4567") +// n.String() // output: "-123.4567" +// +// To use Decimal as part of a struct: +// +// type Struct struct { +// Number Decimal +// } +// +// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. +package decimal + +import ( + "database/sql/driver" + "encoding/binary" + "fmt" + "math" + "math/big" + "regexp" + "strconv" + "strings" +) + +// DivisionPrecision is the number of decimal places in the result when it +// doesn't divide exactly. +// +// Example: +// +// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d1.String() // output: "0.6666666666666667" +// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) +// d2.String() // output: "0.0000666666666667" +// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) +// d3.String() // output: "6666.6666666666666667" +// decimal.DivisionPrecision = 3 +// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d4.String() // output: "0.667" +// +var DivisionPrecision = 16 + +// MarshalJSONWithoutQuotes should be set to true if you want the decimal to +// be JSON marshaled as a number, instead of as a string. +// WARNING: this is dangerous for decimals with many digits, since many JSON +// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754 +// double-precision floating point numbers, which means you can potentially +// silently lose precision. +var MarshalJSONWithoutQuotes = false + +// ExpMaxIterations specifies the maximum number of iterations needed to calculate +// precise natural exponent value using ExpHullAbrham method. +var ExpMaxIterations = 1000 + +// Zero constant, to make computations faster. +// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead. +var Zero = New(0, 1) + +var zeroInt = big.NewInt(0) +var oneInt = big.NewInt(1) +var twoInt = big.NewInt(2) +var fourInt = big.NewInt(4) +var fiveInt = big.NewInt(5) +var tenInt = big.NewInt(10) +var twentyInt = big.NewInt(20) + +var factorials = []Decimal{New(1, 0)} + +// Decimal represents a fixed-point decimal. It is immutable. 
+// number = value * 10 ^ exp +type Decimal struct { + value *big.Int + + // NOTE(vadim): this must be an int32, because we cast it to float64 during + // calculations. If exp is 64 bit, we might lose precision. + // If we cared about being able to represent every possible decimal, we + // could make exp a *big.Int but it would hurt performance and numbers + // like that are unrealistic. + exp int32 +} + +// New returns a new fixed-point decimal, value * 10 ^ exp. +func New(value int64, exp int32) Decimal { + return Decimal{ + value: big.NewInt(value), + exp: exp, + } +} + +// NewFromInt converts a int64 to Decimal. +// +// Example: +// +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" +func NewFromInt(value int64) Decimal { + return Decimal{ + value: big.NewInt(value), + exp: 0, + } +} + +// NewFromInt32 converts a int32 to Decimal. +// +// Example: +// +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" +func NewFromInt32(value int32) Decimal { + return Decimal{ + value: big.NewInt(int64(value)), + exp: 0, + } +} + +// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp +func NewFromBigInt(value *big.Int, exp int32) Decimal { + return Decimal{ + value: new(big.Int).Set(value), + exp: exp, + } +} + +// NewFromString returns a new Decimal from a string representation. +// Trailing zeroes are not trimmed. +// +// Example: +// +// d, err := NewFromString("-123.45") +// d2, err := NewFromString(".0001") +// d3, err := NewFromString("1.47000") +// +func NewFromString(value string) (Decimal, error) { + originalInput := value + var intString string + var exp int64 + + // Check if number is using scientific notation + eIndex := strings.IndexAny(value, "Ee") + if eIndex != -1 { + expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value) + } + return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value) + } + value = value[:eIndex] + exp = expInt + } + + pIndex := -1 + vLen := len(value) + for i := 0; i < vLen; i++ { + if value[i] == '.' 
{ + if pIndex > -1 { + return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value) + } + pIndex = i + } + } + + if pIndex == -1 { + // There is no decimal point, we can just parse the original string as + // an int + intString = value + } else { + if pIndex+1 < vLen { + intString = value[:pIndex] + value[pIndex+1:] + } else { + intString = value[:pIndex] + } + expInt := -len(value[pIndex+1:]) + exp += int64(expInt) + } + + var dValue *big.Int + // strconv.ParseInt is faster than new(big.Int).SetString so this is just a shortcut for strings we know won't overflow + if len(intString) <= 18 { + parsed64, err := strconv.ParseInt(intString, 10, 64) + if err != nil { + return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) + } + dValue = big.NewInt(parsed64) + } else { + dValue = new(big.Int) + _, ok := dValue.SetString(intString, 10) + if !ok { + return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) + } + } + + if exp < math.MinInt32 || exp > math.MaxInt32 { + // NOTE(vadim): I doubt a string could realistically be this long + return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", originalInput) + } + + return Decimal{ + value: dValue, + exp: int32(exp), + }, nil +} + +// NewFromFormattedString returns a new Decimal from a formatted string representation. +// The second argument - replRegexp, is a regular expression that is used to find characters that should be +// removed from given decimal string representation. All matched characters will be replaced with an empty string. +// +// Example: +// +// r := regexp.MustCompile("[$,]") +// d1, err := NewFromFormattedString("$5,125.99", r) +// +// r2 := regexp.MustCompile("[_]") +// d2, err := NewFromFormattedString("1_000_000", r2) +// +// r3 := regexp.MustCompile("[USD\\s]") +// d3, err := NewFromFormattedString("5000 USD", r3) +// +func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, error) { + parsedValue := replRegexp.ReplaceAllString(value, "") + d, err := NewFromString(parsedValue) + if err != nil { + return Decimal{}, err + } + return d, nil +} + +// RequireFromString returns a new Decimal from a string representation +// or panics if NewFromString would have returned an error. +// +// Example: +// +// d := RequireFromString("-123.45") +// d2 := RequireFromString(".0001") +// +func RequireFromString(value string) Decimal { + dec, err := NewFromString(value) + if err != nil { + panic(err) + } + return dec +} + +// NewFromFloat converts a float64 to Decimal. +// +// The converted number will contain the number of significant digits that can be +// represented in a float with reliable roundtrip. +// This is typically 15 digits, but may be more in some cases. +// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. +// +// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. +// +// NOTE: this will panic on NaN, +/-inf +func NewFromFloat(value float64) Decimal { + if value == 0 { + return New(0, 0) + } + return newFromFloat(value, math.Float64bits(value), &float64info) +} + +// NewFromFloat32 converts a float32 to Decimal. +// +// The converted number will contain the number of significant digits that can be +// represented in a float with reliable roundtrip. +// This is typically 6-8 digits depending on the input. +// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. 
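+//
+// Editorial sketch (added example, not upstream godoc): the shortest-roundtrip
+// behaviour means
+//
+//	NewFromFloat32(0.1).String() // "0.1", not "0.100000001490116..."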
+// +// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. +// +// NOTE: this will panic on NaN, +/-inf +func NewFromFloat32(value float32) Decimal { + if value == 0 { + return New(0, 0) + } + // XOR is workaround for https://github.com/golang/go/issues/26285 + a := math.Float32bits(value) ^ 0x80808080 + return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info) +} +
+func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal { + if math.IsNaN(val) || math.IsInf(val, 0) { + panic(fmt.Sprintf("Cannot create a Decimal from %v", val)) + } + exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1) + mant := bits & (uint64(1)<<flt.mantbits - 1) + + switch exp { + case 0: + // denormalized + exp++ + + default: + // add implicit top bit + mant |= uint64(1) << flt.mantbits + } + exp += flt.bias + + var d decimal + d.Assign(mant) + d.Shift(exp - int(flt.mantbits)) + d.neg = bits>>(flt.expbits+flt.mantbits) != 0 + + roundShortest(&d, mant, exp, flt) + // If less than 19 digits, we can do calculation in an int64. + if d.nd < 19 { + tmp := int64(0) + m := int64(1) + for i := d.nd - 1; i >= 0; i-- { + tmp += m * int64(d.d[i]-'0') + m *= 10 + } + if d.neg { + tmp *= -1 + } + return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)} + } + dValue := new(big.Int) + dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10) + if ok { + return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)} + } + + return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd)) +} +
+// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary +// number of fractional digits. +// +// Example: +// +// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" +// +func NewFromFloatWithExponent(value float64, exp int32) Decimal { + if math.IsNaN(value) || math.IsInf(value, 0) { + panic(fmt.Sprintf("Cannot create a Decimal from %v", value)) + } + + bits := math.Float64bits(value) + mant := bits & (1<<52 - 1) + exp2 := int32((bits >> 52) & (1<<11 - 1)) + sign := bits >> 63 + + if exp2 == 0 { + // specials + if mant == 0 { + return Decimal{} + } + // subnormal + exp2++ + } else { + // normal + mant |= 1 << 52 + } + + exp2 -= 1023 + 52 + + // normalizing base-2 values + for mant&1 == 0 { + mant = mant >> 1 + exp2++ + } + + // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0 + if exp < 0 && exp < exp2 { + if exp2 < 0 { + exp = exp2 + } else { + exp = 0 + } + } + + // representing 10^M * 2^N as 5^M * 2^(M+N) + exp2 -= exp + + temp := big.NewInt(1) + dMant := big.NewInt(int64(mant)) + + // applying 5^M + if exp > 0 { + temp = temp.SetInt64(int64(exp)) + temp = temp.Exp(fiveInt, temp, nil) + } else if exp < 0 { + temp = temp.SetInt64(-int64(exp)) + temp = temp.Exp(fiveInt, temp, nil) + dMant = dMant.Mul(dMant, temp) + temp = temp.SetUint64(1) + } + + // applying 2^(M+N) + if exp2 > 0 { + dMant = dMant.Lsh(dMant, uint(exp2)) + } else if exp2 < 0 { + temp = temp.Lsh(temp, uint(-exp2)) + } + + // rounding and downscaling + if exp > 0 || exp2 < 0 { + halfDown := new(big.Int).Rsh(temp, 1) + dMant = dMant.Add(dMant, halfDown) + dMant = dMant.Quo(dMant, temp) + } + + if sign == 1 { + dMant = dMant.Neg(dMant) + } + + return Decimal{ + value: dMant, + exp: exp, + } +} +
+// Copy returns a copy of decimal with the same value and exponent, but a different pointer to value. +func (d Decimal) Copy() Decimal { + d.ensureInitialized() + return Decimal{ + value: &(*d.value), + exp: d.exp, + } +} +
+// rescale returns a rescaled version of the decimal. Returned +// decimal may be less precise if the given exponent is bigger +// than the initial exponent of the Decimal.
+// NOTE: this will truncate, NOT round +// +// Example: +// +// d := New(12345, -4) +// d2 := d.rescale(-1) +// d3 := d2.rescale(-4) +// println(d1) +// println(d2) +// println(d3) +// +// Output: +// +// 1.2345 +// 1.2 +// 1.2000 +// +func (d Decimal) rescale(exp int32) Decimal { + d.ensureInitialized() + + if d.exp == exp { + return Decimal{ + new(big.Int).Set(d.value), + d.exp, + } + } + + // NOTE(vadim): must convert exps to float64 before - to prevent overflow + diff := math.Abs(float64(exp) - float64(d.exp)) + value := new(big.Int).Set(d.value) + + expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil) + if exp > d.exp { + value = value.Quo(value, expScale) + } else if exp < d.exp { + value = value.Mul(value, expScale) + } + + return Decimal{ + value: value, + exp: exp, + } +} + +// Abs returns the absolute value of the decimal. +func (d Decimal) Abs() Decimal { + if !d.IsNegative() { + return d + } + d.ensureInitialized() + d2Value := new(big.Int).Abs(d.value) + return Decimal{ + value: d2Value, + exp: d.exp, + } +} + +// Add returns d + d2. +func (d Decimal) Add(d2 Decimal) Decimal { + rd, rd2 := RescalePair(d, d2) + + d3Value := new(big.Int).Add(rd.value, rd2.value) + return Decimal{ + value: d3Value, + exp: rd.exp, + } +} + +// Sub returns d - d2. +func (d Decimal) Sub(d2 Decimal) Decimal { + rd, rd2 := RescalePair(d, d2) + + d3Value := new(big.Int).Sub(rd.value, rd2.value) + return Decimal{ + value: d3Value, + exp: rd.exp, + } +} + +// Neg returns -d. +func (d Decimal) Neg() Decimal { + d.ensureInitialized() + val := new(big.Int).Neg(d.value) + return Decimal{ + value: val, + exp: d.exp, + } +} + +// Mul returns d * d2. +func (d Decimal) Mul(d2 Decimal) Decimal { + d.ensureInitialized() + d2.ensureInitialized() + + expInt64 := int64(d.exp) + int64(d2.exp) + if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 { + // NOTE(vadim): better to panic than give incorrect results, as + // Decimals are usually used for money + panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64)) + } + + d3Value := new(big.Int).Mul(d.value, d2.value) + return Decimal{ + value: d3Value, + exp: int32(expInt64), + } +} + +// Shift shifts the decimal in base 10. +// It shifts left when shift is positive and right if shift is negative. +// In simpler terms, the given value for shift is added to the exponent +// of the decimal. +func (d Decimal) Shift(shift int32) Decimal { + d.ensureInitialized() + return Decimal{ + value: new(big.Int).Set(d.value), + exp: d.exp + shift, + } +} + +// Div returns d / d2. If it doesn't divide exactly, the result will have +// DivisionPrecision digits after the decimal point. +func (d Decimal) Div(d2 Decimal) Decimal { + return d.DivRound(d2, int32(DivisionPrecision)) +} + +// QuoRem does divsion with remainder +// d.QuoRem(d2,precision) returns quotient q and remainder r such that +// d = d2 * q + r, q an integer multiple of 10^(-precision) +// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 +// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 +// Note that precision<0 is allowed as input. 
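+//
+// Editorial sketch (added, not upstream godoc):
+//
+//	d, _ := NewFromString("10.4")
+//	q, r := d.QuoRem(NewFromInt(3), 1) // q = 3.4, r = 0.2: 10.4 = 3*3.4 + 0.2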
+func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { + d.ensureInitialized() + d2.ensureInitialized() + if d2.value.Sign() == 0 { + panic("decimal division by 0") + } + scale := -precision + e := int64(d.exp - d2.exp - scale) + if e > math.MaxInt32 || e < math.MinInt32 { + panic("overflow in decimal QuoRem") + } + var aa, bb, expo big.Int + var scalerest int32 + // d = a 10^ea + // d2 = b 10^eb + if e < 0 { + aa = *d.value + expo.SetInt64(-e) + bb.Exp(tenInt, &expo, nil) + bb.Mul(d2.value, &bb) + scalerest = d.exp + // now aa = a + // bb = b 10^(scale + eb - ea) + } else { + expo.SetInt64(e) + aa.Exp(tenInt, &expo, nil) + aa.Mul(d.value, &aa) + bb = *d2.value + scalerest = scale + d2.exp + // now aa = a ^ (ea - eb - scale) + // bb = b + } + var q, r big.Int + q.QuoRem(&aa, &bb, &r) + dq := Decimal{value: &q, exp: scale} + dr := Decimal{value: &r, exp: scalerest} + return dq, dr +} + +// DivRound divides and rounds to a given precision +// i.e. to an integer multiple of 10^(-precision) +// for a positive quotient digit 5 is rounded up, away from 0 +// if the quotient is negative then digit 5 is rounded down, away from 0 +// Note that precision<0 is allowed as input. +func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { + // QuoRem already checks initialization + q, r := d.QuoRem(d2, precision) + // the actual rounding decision is based on comparing r*10^precision and d2/2 + // instead compare 2 r 10 ^precision and d2 + var rv2 big.Int + rv2.Abs(r.value) + rv2.Lsh(&rv2, 1) + // now rv2 = abs(r.value) * 2 + r2 := Decimal{value: &rv2, exp: r.exp + precision} + // r2 is now 2 * r * 10 ^ precision + var c = r2.Cmp(d2.Abs()) + + if c < 0 { + return q + } + + if d.value.Sign()*d2.value.Sign() < 0 { + return q.Sub(New(1, -precision)) + } + + return q.Add(New(1, -precision)) +} + +// Mod returns d % d2. +func (d Decimal) Mod(d2 Decimal) Decimal { + quo := d.Div(d2).Truncate(0) + return d.Sub(d2.Mul(quo)) +} + +// Pow returns d to the power d2 +func (d Decimal) Pow(d2 Decimal) Decimal { + var temp Decimal + if d2.IntPart() == 0 { + return NewFromFloat(1) + } + temp = d.Pow(d2.Div(NewFromFloat(2))) + if d2.IntPart()%2 == 0 { + return temp.Mul(temp) + } + if d2.IntPart() > 0 { + return temp.Mul(temp).Mul(d) + } + return temp.Mul(temp).Div(d) +} + +// ExpHullAbrham calculates the natural exponent of decimal (e to the power of d) using Hull-Abraham algorithm. +// OverallPrecision argument specifies the overall precision of the result (integer part + decimal part). +// +// ExpHullAbrham is faster than ExpTaylor for small precision values, but it is much slower for large precision values. +// +// Example: +// +// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000" +// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284" +// +func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) { + // Algorithm based on Variable precision exponential function. + // ACM Transactions on Mathematical Software by T. E. Hull & A. Abrham. + if d.IsZero() { + return Decimal{oneInt, 0}, nil + } + + currentPrecision := overallPrecision + + // Algorithm does not work if currentPrecision * 23 < |x|. + // Precision is automatically increased in such cases, so the value can be calculated precisely. + // If newly calculated precision is higher than ExpMaxIterations the currentPrecision will not be changed. 
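+	// (Editorial note: e.g. overallPrecision = 2 with |d| = 100 gives
+	// ncp = 100/23 ≈ 4.35, so currentPrecision is raised to ceil(4.35) = 5.)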
+ f := d.Abs().InexactFloat64() + if ncp := f / 23; ncp > float64(currentPrecision) && ncp < float64(ExpMaxIterations) { + currentPrecision = uint32(math.Ceil(ncp)) + } + + // fail if abs(d) beyond an over/underflow threshold + overflowThreshold := New(23*int64(currentPrecision), 0) + if d.Abs().Cmp(overflowThreshold) > 0 { + return Decimal{}, fmt.Errorf("over/underflow threshold, exp(x) cannot be calculated precisely") + } + + // Return 1 if abs(d) small enough; this also avoids later over/underflow + overflowThreshold2 := New(9, -int32(currentPrecision)-1) + if d.Abs().Cmp(overflowThreshold2) <= 0 { + return Decimal{oneInt, d.exp}, nil + } + + // t is the smallest integer >= 0 such that the corresponding abs(d/k) < 1 + t := d.exp + int32(d.NumDigits()) // Add d.NumDigits because the paper assumes that d.value [0.1, 1) + + if t < 0 { + t = 0 + } + + k := New(1, t) // reduction factor + r := Decimal{new(big.Int).Set(d.value), d.exp - t} // reduced argument + p := int32(currentPrecision) + t + 2 // precision for calculating the sum + + // Determine n, the number of therms for calculating sum + // use first Newton step (1.435p - 1.182) / log10(p/abs(r)) + // for solving appropriate equation, along with directed + // roundings and simple rational bound for log10(p/abs(r)) + rf := r.Abs().InexactFloat64() + pf := float64(p) + nf := math.Ceil((1.453*pf - 1.182) / math.Log10(pf/rf)) + if nf > float64(ExpMaxIterations) || math.IsNaN(nf) { + return Decimal{}, fmt.Errorf("exact value cannot be calculated in <=ExpMaxIterations iterations") + } + n := int64(nf) + + tmp := New(0, 0) + sum := New(1, 0) + one := New(1, 0) + for i := n - 1; i > 0; i-- { + tmp.value.SetInt64(i) + sum = sum.Mul(r.DivRound(tmp, p)) + sum = sum.Add(one) + } + + ki := k.IntPart() + res := New(1, 0) + for i := ki; i > 0; i-- { + res = res.Mul(sum) + } + + resNumDigits := int32(res.NumDigits()) + + var roundDigits int32 + if resNumDigits > abs(res.exp) { + roundDigits = int32(currentPrecision) - resNumDigits - res.exp + } else { + roundDigits = int32(currentPrecision) + } + + res = res.Round(roundDigits) + + return res, nil +} + +// ExpTaylor calculates the natural exponent of decimal (e to the power of d) using Taylor series expansion. +// Precision argument specifies how precise the result must be (number of digits after decimal point). +// Negative precision is allowed. +// +// ExpTaylor is much faster for large precision values than ExpHullAbrham. 
+//
+// Example:
+//
+//	d, _ := NewFromFloat(26.1).ExpTaylor(2)
+//	d.String() // output: "216314672147.06"
+//
+//	d, _ = NewFromFloat(26.1).ExpTaylor(20)
+//	d.String() // output: "216314672147.05767284062928674083"
+//
+//	d, _ = NewFromFloat(26.1).ExpTaylor(-10)
+//	d.String() // output: "220000000000"
+//
+func (d Decimal) ExpTaylor(precision int32) (Decimal, error) {
+	// Note(mwoss): Implementation can be optimized by using the big.Int API exclusively
+	if d.IsZero() {
+		return Decimal{oneInt, 0}.Round(precision), nil
+	}
+
+	var epsilon Decimal
+	var divPrecision int32
+	if precision < 0 {
+		epsilon = New(1, -1)
+		divPrecision = 8
+	} else {
+		epsilon = New(1, -precision-1)
+		divPrecision = precision + 1
+	}
+
+	decAbs := d.Abs()
+	pow := d.Abs()
+	factorial := New(1, 0)
+
+	result := New(1, 0)
+
+	for i := int64(1); ; {
+		step := pow.DivRound(factorial, divPrecision)
+		result = result.Add(step)
+
+		// Stop the Taylor series when the current step is smaller than epsilon
+		if step.Cmp(epsilon) < 0 {
+			break
+		}
+
+		pow = pow.Mul(decAbs)
+
+		i++
+
+		// Calculate the next factorial number or retrieve the cached value
+		if len(factorials) >= int(i) && !factorials[i-1].IsZero() {
+			factorial = factorials[i-1]
+		} else {
+			// To avoid race conditions, a zero value is first appended to the
+			// slice to reserve a spot for the newly calculated factorial; that
+			// zero value is then replaced with the calculated factorial via
+			// index assignment.
+			factorial = factorials[i-2].Mul(New(i, 0))
+			factorials = append(factorials, Zero)
+			factorials[i-1] = factorial
+		}
+	}
+
+	if d.Sign() < 0 {
+		result = New(1, 0).DivRound(result, precision+1)
+	}
+
+	result = result.Round(precision)
+	return result, nil
+}
+
+// NumDigits returns the number of digits of the decimal coefficient (d.value).
+// Note: The current implementation is extremely slow for large decimals and/or decimals with a large fractional part.
+func (d Decimal) NumDigits() int {
+	// Note(mwoss): It can be optimized, unnecessary cast of big.Int to string
+	if d.IsNegative() {
+		return len(d.value.String()) - 1
+	}
+	return len(d.value.String())
+}
+
+// IsInteger returns true when the decimal can be represented as an integer value, otherwise it returns false.
+func (d Decimal) IsInteger() bool {
+	// The most typical case: every decimal with an exponent greater than or equal to 0 can be represented as an integer
+	if d.exp >= 0 {
+		return true
+	}
+	// When the exponent is negative, we have to check every digit after the decimal place.
+	// If all of them are zeroes, we are sure that the given decimal can be represented as an integer.
+	var r big.Int
+	q := new(big.Int).Set(d.value)
+	for z := abs(d.exp); z > 0; z-- {
+		q.QuoRem(q, tenInt, &r)
+		if r.Cmp(zeroInt) != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// abs calculates the absolute value of any int32. Used for calculating the absolute value of a decimal's exponent.
+func abs(n int32) int32 {
+	if n < 0 {
+		return -n
+	}
+	return n
+}
+
+// Cmp compares the numbers represented by d and d2 and returns:
+//
+//	-1 if d <  d2
+//	 0 if d == d2
+//	+1 if d >  d2
+//
+func (d Decimal) Cmp(d2 Decimal) int {
+	d.ensureInitialized()
+	d2.ensureInitialized()
+
+	if d.exp == d2.exp {
+		return d.value.Cmp(d2.value)
+	}
+
+	rd, rd2 := RescalePair(d, d2)
+
+	return rd.value.Cmp(rd2.value)
+}
+
+// Equal returns whether the numbers represented by d and d2 are equal.
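+// Equality is decided on the numeric value, not the representation:
+// New(1, 0).Equal(New(10, -1)) is true, since 1 * 10^0 == 10 * 10^(-1).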
+func (d Decimal) Equal(d2 Decimal) bool {
+	return d.Cmp(d2) == 0
+}
+
+// Equals is deprecated; please use the Equal method instead.
+func (d Decimal) Equals(d2 Decimal) bool {
+	return d.Equal(d2)
+}
+
+// GreaterThan (GT) returns true when d is greater than d2.
+func (d Decimal) GreaterThan(d2 Decimal) bool {
+	return d.Cmp(d2) == 1
+}
+
+// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2.
+func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool {
+	cmp := d.Cmp(d2)
+	return cmp == 1 || cmp == 0
+}
+
+// LessThan (LT) returns true when d is less than d2.
+func (d Decimal) LessThan(d2 Decimal) bool {
+	return d.Cmp(d2) == -1
+}
+
+// LessThanOrEqual (LTE) returns true when d is less than or equal to d2.
+func (d Decimal) LessThanOrEqual(d2 Decimal) bool {
+	cmp := d.Cmp(d2)
+	return cmp == -1 || cmp == 0
+}
+
+// Sign returns:
+//
+//	-1 if d <  0
+//	 0 if d == 0
+//	+1 if d >  0
+//
+func (d Decimal) Sign() int {
+	if d.value == nil {
+		return 0
+	}
+	return d.value.Sign()
+}
+
+// IsPositive returns
+//
+//	true  if d > 0
+//	false if d == 0
+//	false if d < 0
+func (d Decimal) IsPositive() bool {
+	return d.Sign() == 1
+}
+
+// IsNegative returns
+//
+//	true  if d < 0
+//	false if d == 0
+//	false if d > 0
+func (d Decimal) IsNegative() bool {
+	return d.Sign() == -1
+}
+
+// IsZero returns
+//
+//	true  if d == 0
+//	false if d > 0
+//	false if d < 0
+func (d Decimal) IsZero() bool {
+	return d.Sign() == 0
+}
+
+// Exponent returns the exponent, or scale component of the decimal.
+func (d Decimal) Exponent() int32 {
+	return d.exp
+}
+
+// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent().
+func (d Decimal) Coefficient() *big.Int {
+	d.ensureInitialized()
+	// we copy the coefficient so that mutating the result does not mutate the Decimal.
+	return new(big.Int).Set(d.value)
+}
+
+// CoefficientInt64 returns the coefficient of the decimal as an int64. It is scaled by 10^Exponent().
+// If the coefficient cannot be represented in an int64, the result is undefined.
+func (d Decimal) CoefficientInt64() int64 {
+	d.ensureInitialized()
+	return d.value.Int64()
+}
+
+// IntPart returns the integer component of the decimal.
+func (d Decimal) IntPart() int64 {
+	scaledD := d.rescale(0)
+	return scaledD.value.Int64()
+}
+
+// BigInt returns the integer component of the decimal as a *big.Int.
+func (d Decimal) BigInt() *big.Int {
+	scaledD := d.rescale(0)
+	i := &big.Int{}
+	i.SetString(scaledD.String(), 10)
+	return i
+}
+
+// BigFloat returns the decimal as a *big.Float.
+// Be aware that converting a decimal to a BigFloat might cause a loss of precision.
+func (d Decimal) BigFloat() *big.Float {
+	f := &big.Float{}
+	f.SetString(d.String())
+	return f
+}
+
+// Rat returns a rational number representation of the decimal.
+func (d Decimal) Rat() *big.Rat {
+	d.ensureInitialized()
+	if d.exp <= 0 {
+		// NOTE(vadim): must negate after casting to prevent int32 overflow
+		denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil)
+		return new(big.Rat).SetFrac(d.value, denom)
+	}
+
+	mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil)
+	num := new(big.Int).Mul(d.value, mul)
+	return new(big.Rat).SetFrac(num, oneInt)
+}
+
+// Float64 returns the nearest float64 value for d and a bool indicating
+// whether f represents d exactly.
+// For more details, see the documentation for big.Rat.Float64.
+func (d Decimal) Float64() (f float64, exact bool) {
+	return d.Rat().Float64()
+}
+
+// InexactFloat64 returns the nearest float64 value for d.
+// It doesn't indicate if the returned value represents d exactly. +func (d Decimal) InexactFloat64() float64 { + f, _ := d.Float64() + return f +} + +// String returns the string representation of the decimal +// with the fixed point. +// +// Example: +// +// d := New(-12345, -3) +// println(d.String()) +// +// Output: +// +// -12.345 +// +func (d Decimal) String() string { + return d.string(true) +} + +// StringFixed returns a rounded fixed-point string with places digits after +// the decimal point. +// +// Example: +// +// NewFromFloat(0).StringFixed(2) // output: "0.00" +// NewFromFloat(0).StringFixed(0) // output: "0" +// NewFromFloat(5.45).StringFixed(0) // output: "5" +// NewFromFloat(5.45).StringFixed(1) // output: "5.5" +// NewFromFloat(5.45).StringFixed(2) // output: "5.45" +// NewFromFloat(5.45).StringFixed(3) // output: "5.450" +// NewFromFloat(545).StringFixed(-1) // output: "550" +// +func (d Decimal) StringFixed(places int32) string { + rounded := d.Round(places) + return rounded.string(false) +} + +// StringFixedBank returns a banker rounded fixed-point string with places digits +// after the decimal point. +// +// Example: +// +// NewFromFloat(0).StringFixedBank(2) // output: "0.00" +// NewFromFloat(0).StringFixedBank(0) // output: "0" +// NewFromFloat(5.45).StringFixedBank(0) // output: "5" +// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" +// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" +// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" +// NewFromFloat(545).StringFixedBank(-1) // output: "540" +// +func (d Decimal) StringFixedBank(places int32) string { + rounded := d.RoundBank(places) + return rounded.string(false) +} + +// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For +// more details see the documentation at function RoundCash. +func (d Decimal) StringFixedCash(interval uint8) string { + rounded := d.RoundCash(interval) + return rounded.string(false) +} + +// Round rounds the decimal to places decimal places. +// If places < 0, it will round the integer part to the nearest 10^(-places). +// +// Example: +// +// NewFromFloat(5.45).Round(1).String() // output: "5.5" +// NewFromFloat(545).Round(-1).String() // output: "550" +// +func (d Decimal) Round(places int32) Decimal { + if d.exp == -places { + return d + } + // truncate to places + 1 + ret := d.rescale(-places - 1) + + // add sign(d) * 0.5 + if ret.value.Sign() < 0 { + ret.value.Sub(ret.value, fiveInt) + } else { + ret.value.Add(ret.value, fiveInt) + } + + // floor for positive numbers, ceil for negative numbers + _, m := ret.value.DivMod(ret.value, tenInt, new(big.Int)) + ret.exp++ + if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 { + ret.value.Add(ret.value, oneInt) + } + + return ret +} + +// RoundCeil rounds the decimal towards +infinity. +// +// Example: +// +// NewFromFloat(545).RoundCeil(-2).String() // output: "600" +// NewFromFloat(500).RoundCeil(-2).String() // output: "500" +// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11" +// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.5" +// +func (d Decimal) RoundCeil(places int32) Decimal { + if d.exp >= -places { + return d + } + + rescaled := d.rescale(-places) + if d.Equal(rescaled) { + return d + } + + if d.value.Sign() > 0 { + rescaled.value.Add(rescaled.value, oneInt) + } + + return rescaled +} + +// RoundFloor rounds the decimal towards -infinity. 
+// +// Example: +// +// NewFromFloat(545).RoundFloor(-2).String() // output: "500" +// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500" +// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1" +// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.4" +// +func (d Decimal) RoundFloor(places int32) Decimal { + if d.exp >= -places { + return d + } + + rescaled := d.rescale(-places) + if d.Equal(rescaled) { + return d + } + + if d.value.Sign() < 0 { + rescaled.value.Sub(rescaled.value, oneInt) + } + + return rescaled +} + +// RoundUp rounds the decimal away from zero. +// +// Example: +// +// NewFromFloat(545).RoundUp(-2).String() // output: "600" +// NewFromFloat(500).RoundUp(-2).String() // output: "500" +// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11" +// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.4" +// +func (d Decimal) RoundUp(places int32) Decimal { + if d.exp >= -places { + return d + } + + rescaled := d.rescale(-places) + if d.Equal(rescaled) { + return d + } + + if d.value.Sign() > 0 { + rescaled.value.Add(rescaled.value, oneInt) + } else if d.value.Sign() < 0 { + rescaled.value.Sub(rescaled.value, oneInt) + } + + return rescaled +} + +// RoundDown rounds the decimal towards zero. +// +// Example: +// +// NewFromFloat(545).RoundDown(-2).String() // output: "500" +// NewFromFloat(-500).RoundDown(-2).String() // output: "-500" +// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1" +// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.5" +// +func (d Decimal) RoundDown(places int32) Decimal { + if d.exp >= -places { + return d + } + + rescaled := d.rescale(-places) + if d.Equal(rescaled) { + return d + } + return rescaled +} + +// RoundBank rounds the decimal to places decimal places. +// If the final digit to round is equidistant from the nearest two integers the +// rounded value is taken as the even number +// +// If places < 0, it will round the integer part to the nearest 10^(-places). +// +// Examples: +// +// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4" +// NewFromFloat(545).RoundBank(-1).String() // output: "540" +// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5" +// NewFromFloat(546).RoundBank(-1).String() // output: "550" +// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6" +// NewFromFloat(555).RoundBank(-1).String() // output: "560" +// +func (d Decimal) RoundBank(places int32) Decimal { + + round := d.Round(places) + remainder := d.Sub(round).Abs() + + half := New(5, -places-1) + if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 { + if round.value.Sign() < 0 { + round.value.Add(round.value, oneInt) + } else { + round.value.Sub(round.value, oneInt) + } + } + + return round +} + +// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific +// interval. The amount payable for a cash transaction is rounded to the nearest +// multiple of the minimum currency unit available. The following intervals are +// available: 5, 10, 25, 50 and 100; any other number throws a panic. 
+// 5: 5 cent rounding 3.43 => 3.45 +// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) +// 25: 25 cent rounding 3.41 => 3.50 +// 50: 50 cent rounding 3.75 => 4.00 +// 100: 100 cent rounding 3.50 => 4.00 +// For more details: https://en.wikipedia.org/wiki/Cash_rounding +func (d Decimal) RoundCash(interval uint8) Decimal { + var iVal *big.Int + switch interval { + case 5: + iVal = twentyInt + case 10: + iVal = tenInt + case 25: + iVal = fourInt + case 50: + iVal = twoInt + case 100: + iVal = oneInt + default: + panic(fmt.Sprintf("Decimal does not support this Cash rounding interval `%d`. Supported: 5, 10, 25, 50, 100", interval)) + } + dVal := Decimal{ + value: iVal, + } + + // TODO: optimize those calculations to reduce the high allocations (~29 allocs). + return d.Mul(dVal).Round(0).Div(dVal).Truncate(2) +} + +// Floor returns the nearest integer value less than or equal to d. +func (d Decimal) Floor() Decimal { + d.ensureInitialized() + + if d.exp >= 0 { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z := new(big.Int).Div(d.value, exp) + return Decimal{value: z, exp: 0} +} + +// Ceil returns the nearest integer value greater than or equal to d. +func (d Decimal) Ceil() Decimal { + d.ensureInitialized() + + if d.exp >= 0 { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z, m := new(big.Int).DivMod(d.value, exp, new(big.Int)) + if m.Cmp(zeroInt) != 0 { + z.Add(z, oneInt) + } + return Decimal{value: z, exp: 0} +} + +// Truncate truncates off digits from the number, without rounding. +// +// NOTE: precision is the last digit that will not be truncated (must be >= 0). +// +// Example: +// +// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" +// +func (d Decimal) Truncate(precision int32) Decimal { + d.ensureInitialized() + if precision >= 0 && -precision > d.exp { + return d.rescale(-precision) + } + return d +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error { + if string(decimalBytes) == "null" { + return nil + } + + str, err := unquoteIfQuoted(decimalBytes) + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", decimalBytes, err) + } + + decimal, err := NewFromString(str) + *d = decimal + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", str, err) + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Decimal) MarshalJSON() ([]byte, error) { + var str string + if MarshalJSONWithoutQuotes { + str = d.String() + } else { + str = "\"" + d.String() + "\"" + } + return []byte(str), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation +// is already used when encoding to text, this method stores that string as []byte +func (d *Decimal) UnmarshalBinary(data []byte) error { + // Verify we have at least 4 bytes for the exponent. The GOB encoded value + // may be empty. 
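+	// Wire format, mirroring MarshalBinary below: 4 big-endian bytes holding
+	// the int32 exponent, followed by the GOB encoding of the coefficient.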
+ if len(data) < 4 { + return fmt.Errorf("error decoding binary %v: expected at least 4 bytes, got %d", data, len(data)) + } + + // Extract the exponent + d.exp = int32(binary.BigEndian.Uint32(data[:4])) + + // Extract the value + d.value = new(big.Int) + if err := d.value.GobDecode(data[4:]); err != nil { + return fmt.Errorf("error decoding binary %v: %s", data, err) + } + + return nil +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d Decimal) MarshalBinary() (data []byte, err error) { + // Write the exponent first since it's a fixed size + v1 := make([]byte, 4) + binary.BigEndian.PutUint32(v1, uint32(d.exp)) + + // Add the value + var v2 []byte + if v2, err = d.value.GobEncode(); err != nil { + return + } + + // Return the byte array + data = append(v1, v2...) + return +} + +// Scan implements the sql.Scanner interface for database deserialization. +func (d *Decimal) Scan(value interface{}) error { + // first try to see if the data is stored in database as a Numeric datatype + switch v := value.(type) { + + case float32: + *d = NewFromFloat(float64(v)) + return nil + + case float64: + // numeric in sqlite3 sends us float64 + *d = NewFromFloat(v) + return nil + + case int64: + // at least in sqlite3 when the value is 0 in db, the data is sent + // to us as an int64 instead of a float64 ... + *d = New(v, 0) + return nil + + default: + // default is trying to interpret value stored as string + str, err := unquoteIfQuoted(v) + if err != nil { + return err + } + *d, err = NewFromString(str) + return err + } +} + +// Value implements the driver.Valuer interface for database serialization. +func (d Decimal) Value() (driver.Value, error) { + return d.String(), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for XML +// deserialization. +func (d *Decimal) UnmarshalText(text []byte) error { + str := string(text) + + dec, err := NewFromString(str) + *d = dec + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", str, err) + } + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface for XML +// serialization. +func (d Decimal) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +// GobEncode implements the gob.GobEncoder interface for gob serialization. +func (d Decimal) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface for gob serialization. +func (d *Decimal) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// StringScaled first scales the decimal then calls .String() on it. +// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. +func (d Decimal) StringScaled(exp int32) string { + return d.rescale(exp).String() +} + +func (d Decimal) string(trimTrailingZeros bool) string { + if d.exp >= 0 { + return d.rescale(0).value.String() + } + + abs := new(big.Int).Abs(d.value) + str := abs.String() + + var intPart, fractionalPart string + + // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN + // and you are on a 32-bit machine. Won't fix this super-edge case. 
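+	// Worked example: for d = New(-12345, -3), str is "12345" and dExpInt is
+	// -3, so intPart becomes "12", fractionalPart becomes "345", and the sign
+	// is re-attached at the end, yielding "-12.345".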
+ dExpInt := int(d.exp) + if len(str) > -dExpInt { + intPart = str[:len(str)+dExpInt] + fractionalPart = str[len(str)+dExpInt:] + } else { + intPart = "0" + + num0s := -dExpInt - len(str) + fractionalPart = strings.Repeat("0", num0s) + str + } + + if trimTrailingZeros { + i := len(fractionalPart) - 1 + for ; i >= 0; i-- { + if fractionalPart[i] != '0' { + break + } + } + fractionalPart = fractionalPart[:i+1] + } + + number := intPart + if len(fractionalPart) > 0 { + number += "." + fractionalPart + } + + if d.value.Sign() < 0 { + return "-" + number + } + + return number +} + +func (d *Decimal) ensureInitialized() { + if d.value == nil { + d.value = new(big.Int) + } +} + +// Min returns the smallest Decimal that was passed in the arguments. +// +// To call this function with an array, you must do: +// +// Min(arr[0], arr[1:]...) +// +// This makes it harder to accidentally call Min with 0 arguments. +func Min(first Decimal, rest ...Decimal) Decimal { + ans := first + for _, item := range rest { + if item.Cmp(ans) < 0 { + ans = item + } + } + return ans +} + +// Max returns the largest Decimal that was passed in the arguments. +// +// To call this function with an array, you must do: +// +// Max(arr[0], arr[1:]...) +// +// This makes it harder to accidentally call Max with 0 arguments. +func Max(first Decimal, rest ...Decimal) Decimal { + ans := first + for _, item := range rest { + if item.Cmp(ans) > 0 { + ans = item + } + } + return ans +} + +// Sum returns the combined total of the provided first and rest Decimals +func Sum(first Decimal, rest ...Decimal) Decimal { + total := first + for _, item := range rest { + total = total.Add(item) + } + + return total +} + +// Avg returns the average value of the provided first and rest Decimals +func Avg(first Decimal, rest ...Decimal) Decimal { + count := New(int64(len(rest)+1), 0) + sum := Sum(first, rest...) + return sum.Div(count) +} + +// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) +func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { + d1.ensureInitialized() + d2.ensureInitialized() + + if d1.exp == d2.exp { + return d1, d2 + } + + baseScale := min(d1.exp, d2.exp) + if baseScale != d1.exp { + return d1.rescale(baseScale), d2 + } + return d1, d2.rescale(baseScale) +} + +func min(x, y int32) int32 { + if x >= y { + return y + } + return x +} + +func unquoteIfQuoted(value interface{}) (string, error) { + var bytes []byte + + switch v := value.(type) { + case string: + bytes = []byte(v) + case []byte: + bytes = v + default: + return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", + value, value) + } + + // If the amount is quoted, strip the quotes + if len(bytes) > 2 && bytes[0] == '"' && bytes[len(bytes)-1] == '"' { + bytes = bytes[1 : len(bytes)-1] + } + return string(bytes), nil +} + +// NullDecimal represents a nullable decimal with compatibility for +// scanning null values from the database. +type NullDecimal struct { + Decimal Decimal + Valid bool +} + +func NewNullDecimal(d Decimal) NullDecimal { + return NullDecimal{ + Decimal: d, + Valid: true, + } +} + +// Scan implements the sql.Scanner interface for database deserialization. +func (d *NullDecimal) Scan(value interface{}) error { + if value == nil { + d.Valid = false + return nil + } + d.Valid = true + return d.Decimal.Scan(value) +} + +// Value implements the driver.Valuer interface for database serialization. 
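+// When Valid is false a nil driver.Value is returned, which database drivers
+// store as SQL NULL.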
+func (d NullDecimal) Value() (driver.Value, error) {
+	if !d.Valid {
+		return nil, nil
+	}
+	return d.Decimal.Value()
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error {
+	if string(decimalBytes) == "null" {
+		d.Valid = false
+		return nil
+	}
+	d.Valid = true
+	return d.Decimal.UnmarshalJSON(decimalBytes)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d NullDecimal) MarshalJSON() ([]byte, error) {
+	if !d.Valid {
+		return []byte("null"), nil
+	}
+	return d.Decimal.MarshalJSON()
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface for XML
+// deserialization.
+func (d *NullDecimal) UnmarshalText(text []byte) error {
+	str := string(text)
+
+	// check for empty XML or XML without body, e.g. <tag></tag>
+	if str == "" {
+		d.Valid = false
+		return nil
+	}
+	if err := d.Decimal.UnmarshalText(text); err != nil {
+		d.Valid = false
+		return err
+	}
+	d.Valid = true
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface for XML
+// serialization.
+func (d NullDecimal) MarshalText() (text []byte, err error) {
+	if !d.Valid {
+		return []byte{}, nil
+	}
+	return d.Decimal.MarshalText()
+}
+
+// Trig functions
+
+// Atan returns the arctangent, in radians, of x.
+func (d Decimal) Atan() Decimal {
+	if d.Equal(NewFromFloat(0.0)) {
+		return d
+	}
+	if d.GreaterThan(NewFromFloat(0.0)) {
+		return d.satan()
+	}
+	return d.Neg().satan().Neg()
+}
+
+func (d Decimal) xatan() Decimal {
+	P0 := NewFromFloat(-8.750608600031904122785e-01)
+	P1 := NewFromFloat(-1.615753718733365076637e+01)
+	P2 := NewFromFloat(-7.500855792314704667340e+01)
+	P3 := NewFromFloat(-1.228866684490136173410e+02)
+	P4 := NewFromFloat(-6.485021904942025371773e+01)
+	Q0 := NewFromFloat(2.485846490142306297962e+01)
+	Q1 := NewFromFloat(1.650270098316988542046e+02)
+	Q2 := NewFromFloat(4.328810604912902668951e+02)
+	Q3 := NewFromFloat(4.853903996359136964868e+02)
+	Q4 := NewFromFloat(1.945506571482613964425e+02)
+	z := d.Mul(d)
+	b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z)
+	b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4)
+	z = b1.Div(b2)
+	z = d.Mul(z).Add(d)
+	return z
+}
+
+// satan reduces its argument (known to be positive)
+// to the range [0, 0.66] and calls xatan.
+func (d Decimal) satan() Decimal {
+	Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits
+	Tan3pio8 := NewFromFloat(2.41421356237309504880)      // tan(3*pi/8)
+	pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459)
+
+	if d.LessThanOrEqual(NewFromFloat(0.66)) {
+		return d.xatan()
+	}
+	if d.GreaterThan(Tan3pio8) {
+		return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits)
+	}
+	return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits))
+}
+
+// sin coefficients
+var _sin = [...]Decimal{
+	NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd
+	NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d
+	NewFromFloat(2.75573136213857245213e-6),  // 0x3ec71de3567d48a1
+	NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03
+	NewFromFloat(8.33333333332211858878e-3),  // 0x3f8111111110f7d0
+	NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548
+}
+
+// Sin returns the sine of the radian argument x.
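+// The implementation mirrors the Go standard library's math.Sin: the argument
+// is reduced into an octant via a three-part decomposition of Pi/4, and a
+// polynomial approximation (the _sin/_cos coefficient tables above) is then
+// evaluated for that octant.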
+func (d Decimal) Sin() Decimal { + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + if d.Equal(NewFromFloat(0.0)) { + return d + } + // make argument positive but save the sign + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + sign = true + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + // reflect in x axis + if j > 3 { + sign = !sign + j -= 4 + } + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if j == 1 || j == 2 { + w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) + y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) + } else { + y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) + } + if sign { + y = y.Neg() + } + return y +} + +// cos coefficients +var _cos = [...]Decimal{ + NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b + NewFromFloat(2.08757008419747316778e-9), // 0x3e21ee9d7b4e3f05 + NewFromFloat(-2.75573141792967388112e-7), // 0xbe927e4f7eac4bc6 + NewFromFloat(2.48015872888517045348e-5), // 0x3efa01a019c844f5 + NewFromFloat(-1.38888888888730564116e-3), // 0xbf56c16c16c14f91 + NewFromFloat(4.16666666666665929218e-2), // 0x3fa555555555554b +} + +// Cos returns the cosine of the radian argument x. 
+func (d Decimal) Cos() Decimal { + + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + // make argument positive + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + // reflect in x axis + if j > 3 { + sign = !sign + j -= 4 + } + if j > 1 { + sign = !sign + } + + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if j == 1 || j == 2 { + y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) + } else { + w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) + y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) + } + if sign { + y = y.Neg() + } + return y +} + +var _tanP = [...]Decimal{ + NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38 + NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd + NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176 +} +var _tanQ = [...]Decimal{ + NewFromFloat(1.00000000000000000000e+0), + NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572 + NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96 + NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef + NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31 +} + +// Tan returns the tangent of the radian argument x. 
+func (d Decimal) Tan() Decimal { + + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + if d.Equal(NewFromFloat(0.0)) { + return d + } + + // make argument positive but save the sign + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + sign = true + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if zz.GreaterThan(NewFromFloat(1e-14)) { + w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2])) + x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4]) + y = z.Add(z.Mul(w.Div(x))) + } else { + y = z + } + if j&2 == 2 { + y = NewFromFloat(-1.0).Div(y) + } + if sign { + y = y.Neg() + } + return y +} diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go new file mode 100644 index 000000000..d4b0cd007 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/rounding.go @@ -0,0 +1,160 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. + +package decimal + +type floatInfo struct { + mantbits uint + expbits uint + bias int +} + +var float32info = floatInfo{23, 8, -127} +var float64info = floatInfo{52, 11, -1023} + +// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits +// that will let the original floating point value be precisely reconstructed. +func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { + // If mantissa is zero, the number is zero; stop now. + if mant == 0 { + d.nd = 0 + return + } + + // Compute upper and lower such that any decimal number + // between upper and lower (possibly inclusive) + // will round to the original floating point number. + + // We may see at once that the number is already shortest. + // + // Suppose d is not denormal, so that 2^exp <= d < 10^dp. + // The closest shorter number is at least 10^(dp-nd) away. + // The lower/upper bounds computed below are at distance + // at most 2^(exp-mantbits). + // + // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), + // or equivalently log2(10)*(dp-nd) > exp-mantbits. + // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). + minexp := flt.bias + 1 // minimum possible exponent + if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { + // The number is already shortest. + return + } + + // d = mant << (exp - mantbits) + // Next highest floating point number is mant+1 << exp-mantbits. + // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. 
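+	// (Illustrative: for the float64 nearest to 0.1, every decimal between
+	// the lower and upper bounds computed here rounds back to the same bits,
+	// which is what lets the short form "0.1" be chosen over the exact
+	// binary value 0.1000000000000000055511151231257827021181583404541015625.)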
+	upper := new(decimal)
+	upper.Assign(mant*2 + 1)
+	upper.Shift(exp - int(flt.mantbits) - 1)
+
+	// d = mant << (exp - mantbits)
+	// Next lowest floating point number is mant-1 << exp-mantbits,
+	// unless mant-1 drops the significant bit and exp is not the minimum exp,
+	// in which case the next lowest is mant*2-1 << exp-mantbits-1.
+	// Either way, call it mantlo << explo-mantbits.
+	// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
+	var mantlo uint64
+	var explo int
+	if mant > 1<<flt.mantbits || exp == minexp {
+		mantlo = mant - 1
+		explo = exp
+	} else {
+		mantlo = mant*2 - 1
+		explo = exp - 1
+	}
+	lower := new(decimal)
+	lower.Assign(mantlo*2 + 1)
+	lower.Shift(explo - int(flt.mantbits) - 1)
+
+	// The upper and lower bounds are possible outputs only if
+	// the original mantissa is even, so that IEEE round-to-even
+	// would round to the original mantissa and not the neighbors.
+	inclusive := mant%2 == 0
+
+	// As we walk the digits we want to know whether rounding up would fall
+	// within the upper bound. This is tracked by upperdelta:
+	//
+	// If upperdelta == 0, the digits of d and upper are the same so far.
+	//
+	// If upperdelta == 1, we saw a difference of 1 between d and upper on a
+	// previous digit and subsequently only 9s for d and 0s for upper.
+	// (Thus rounding up is possible.)
+	//
+	// If upperdelta == 2, then the difference is greater than 1 and rounding
+	// up is not possible.
+	var upperdelta uint8
+
+	// Now we can figure out the minimum number of digits required.
+	// Walk along until d has distinguished itself from upper and lower.
+	for ui := 0; ; ui++ {
+		// lower, d, and upper may have the decimal points at different
+		// places. In this case upper is the longest, so we iterate from
+		// ui==0 and start li and mi at (potentially) -1.
+		mi := ui - upper.dp + d.dp
+		if mi >= d.nd {
+			break
+		}
+		li := ui - upper.dp + lower.dp
+		l := byte('0') // lower digit
+		if li >= 0 && li < lower.nd {
+			l = lower.d[li]
+		}
+		m := byte('0') // middle digit
+		if mi >= 0 {
+			m = d.d[mi]
+		}
+		u := byte('0') // upper digit
+		if ui < upper.nd {
+			u = upper.d[ui]
+		}
+
+		// Okay to round down (truncate) if lower has a different digit
+		// or if lower is inclusive and is exactly the result of rounding
+		// down (i.e., and we have reached the final digit of lower).
+		okdown := l != m || inclusive && li+1 == lower.nd
+
+		switch {
+		case upperdelta == 0 && m+1 < u:
+			// Example:
+			// m = 12345xxx
+			// u = 12347xxx
+			upperdelta = 2
+		case upperdelta == 0 && m != u:
+			// Example:
+			// m = 12345xxx
+			// u = 12346xxx
+			upperdelta = 1
+		case upperdelta == 1 && (m != '9' || u != '0'):
+			// Example:
+			// m = 1234598x
+			// u = 1234600x
+			upperdelta = 2
+		}
+		// Okay to round up if upper has a different digit and either upper
+		// is inclusive or upper is bigger than the result of rounding up.
+		okup := upperdelta > 0 && (inclusive || upperdelta > 1 || ui+1 < upper.nd)
+
+		// If it's okay to do either, then round to the nearest one.
+		// If it's okay to do only one, do it.
+		switch {
+		case okdown && okup:
+			d.Round(mi + 1)
+			return
+		case okdown:
+			d.RoundDown(mi + 1)
+			return
+		case okup:
+			d.RoundUp(mi + 1)
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore
new file mode 100644
index 000000000..53053a8ac
--- /dev/null
+++ b/vendor/github.com/spf13/cast/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+*.bench
diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/vendor/github.com/spf13/cast/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia <spf@spf13.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 000000000..f01a5dbb6
--- /dev/null
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,40 @@
+GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2)
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+	go test ./...
+
+test-race: ## Run tests with race detector
+	go test -race ./...
+
+fmt: ## Run gofmt linter
+ifeq "$(GOVERSION)" "12"
+	@for d in `go list` ; do \
+		if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+			echo "^ improperly formatted go files" && echo && exit 1; \
+		fi \
+	done
+endif
+
+lint: ## Run golint linter
+	@for d in `go list` ; do \
+		if [ "`golint $$d | tee /dev/stderr`" ]; then \
+			echo "^ golint errors!" && echo && exit 1; \
+		fi \
+	done
+
+vet: ## Run go vet linter
+	@if [ "`go vet | tee /dev/stderr`" ]; then \
+		echo "^ go vet errors!" && echo && exit 1; \
+	fi
+
+test-cover-html: ## Generate test coverage report
+	go test -coverprofile=coverage.out -covermode=count
+	go tool cover -func=coverage.out
+
+help:
+	@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
new file mode 100644
index 000000000..120a57342
--- /dev/null
+++ b/vendor/github.com/spf13/cast/README.md
@@ -0,0 +1,75 @@
+cast
+====
+[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast)
+[![Build Status](https://github.com/spf13/cast/actions/workflows/go.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/go.yml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast)
+
+Easy and safe casting from one type to another in Go
+
+Don’t Panic! ... Cast
+
+## What is Cast?
+
+Cast is a library to convert between different go types in a consistent and easy way.
+
+Cast provides simple functions to easily convert a number to a string, an
+interface into a bool, etc. Cast does this intelligently when an obvious
+conversion is possible. It doesn’t make any attempts to guess what you meant,
+for example you can only convert a string to an int when it is a string
+representation of an int such as “8”. Cast was developed for use in
+[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+for meta data.
+
+## Why use Cast?
+
+When working with dynamic data in Go you often need to cast or convert the data
+from one type into another. Cast goes beyond just using type assertion (though
+it uses that when possible) to provide a very straightforward and convenient
+library.
+
+If you are working with interfaces to handle things like dynamic content
+you’ll need an easy way to convert an interface into a given type. This
+is the library for you.
+
+If you are taking in data from YAML, TOML or JSON or other formats which lack
+full types, then Cast is the library for you.
+
+## Usage
+
+Cast provides a handful of To_____ methods. These methods will always return
+the desired type. **If input is provided that will not convert to that type, the
+0 or nil value for that type will be returned**.
+
+Cast also provides identical methods To_____E. These return the same result as
+the To_____ methods, plus an additional error which tells you if it successfully
+converted. Using these methods you can tell the difference between when the
+input matched the zero value or when the conversion failed and the zero value
+was returned.
+
+The following examples are merely a sample of what is available. Please review
+the code for a complete set.
+
+### Example ‘ToString’:
+
+    cast.ToString("mayonegg")         // "mayonegg"
+    cast.ToString(8)                  // "8"
+    cast.ToString(8.31)               // "8.31"
+    cast.ToString([]byte("one time")) // "one time"
+    cast.ToString(nil)                // ""
+
+    var foo interface{} = "one more time"
+    cast.ToString(foo)                // "one more time"
+
+
+### Example ‘ToInt’:
+
+    cast.ToInt(8)      // 8
+    cast.ToInt(8.31)   // 8
+    cast.ToInt("8")    // 8
+    cast.ToInt(true)   // 1
+    cast.ToInt(false)  // 0
+
+    var eight interface{} = 8
+    cast.ToInt(eight)  // 8
+    cast.ToInt(nil)    // 0
+
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 000000000..0cfe9418d
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,176 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+	v, _ := ToBoolE(i)
+	return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+	v, _ := ToTimeE(i)
+	return v
+}
+
+// ToTimeInDefaultLocation casts an interface to a time.Time type,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
+func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time {
+	v, _ := ToTimeInDefaultLocationE(i, location)
+	return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+	v, _ := ToDurationE(i)
+	return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+	v, _ := ToFloat64E(i)
+	return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+	v, _ := ToFloat32E(i)
+	return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+	v, _ := ToInt64E(i)
+	return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+	v, _ := ToInt32E(i)
+	return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+	v, _ := ToInt16E(i)
+	return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+	v, _ := ToInt8E(i)
+	return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+	v, _ := ToIntE(i)
+	return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+	v, _ := ToUintE(i)
+	return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+	v, _ := ToUint64E(i)
+	return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+	v, _ := ToUint32E(i)
+	return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+	v, _ := ToUint16E(i)
+	return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+	v, _ := ToUint8E(i)
+	return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+	v, _ := ToStringE(i)
+	return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+	v, _ := ToStringMapStringE(i)
+	return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+	v, _ := ToStringMapStringSliceE(i)
+	return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+	v, _ := ToStringMapBoolE(i)
+	return v
+}
+
+// ToStringMapInt casts an interface to a map[string]int type.
+func ToStringMapInt(i interface{}) map[string]int {
+	v, _ := ToStringMapIntE(i)
+	return v
+}
+
+// ToStringMapInt64 casts an interface to a map[string]int64 type.
+func ToStringMapInt64(i interface{}) map[string]int64 {
+	v, _ := ToStringMapInt64E(i)
+	return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+	v, _ := ToStringMapE(i)
+	return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+	v, _ := ToSliceE(i)
+	return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+	v, _ := ToBoolSliceE(i)
+	return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+	v, _ := ToStringSliceE(i)
+	return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+	v, _ := ToIntSliceE(i)
+	return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+	v, _ := ToDurationSliceE(i)
+	return v
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 000000000..514d759bf
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1476 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"html/template"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+	return ToTimeInDefaultLocationE(i, time.UTC)
+}
+
+// ToTimeInDefaultLocationE casts an empty interface to time.Time,
+// interpreting inputs without a timezone to be in the given location,
+// or the local timezone if nil.
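+// For example, a bare timestamp string such as "2006-01-02 15:04:05" is
+// interpreted in the given location, while inputs that carry an explicit
+// zone or offset keep it.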
+func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { + i = indirect(i) + + switch v := i.(type) { + case time.Time: + return v, nil + case string: + return StringToDateInDefaultLocation(v, location) + case json.Number: + s, err1 := ToInt64E(v) + if err1 != nil { + return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) + } + return time.Unix(s, 0), nil + case int: + return time.Unix(int64(v), 0), nil + case int64: + return time.Unix(v, 0), nil + case int32: + return time.Unix(int64(v), 0), nil + case uint: + return time.Unix(int64(v), 0), nil + case uint64: + return time.Unix(int64(v), 0), nil + case uint32: + return time.Unix(int64(v), 0), nil + default: + return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) + } +} + +// ToDurationE casts an interface to a time.Duration type. +func ToDurationE(i interface{}) (d time.Duration, err error) { + i = indirect(i) + + switch s := i.(type) { + case time.Duration: + return s, nil + case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: + d = time.Duration(ToInt64(s)) + return + case float32, float64: + d = time.Duration(ToFloat64(s)) + return + case string: + if strings.ContainsAny(s, "nsuµmh") { + d, err = time.ParseDuration(s) + } else { + d, err = time.ParseDuration(s + "ns") + } + return + case json.Number: + var v float64 + v, err = s.Float64() + d = time.Duration(v) + return + default: + err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) + return + } +} + +// ToBoolE casts an interface to a bool type. +func ToBoolE(i interface{}) (bool, error) { + i = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + if i.(int) != 0 { + return true, nil + } + return false, nil + case string: + return strconv.ParseBool(i.(string)) + case json.Number: + v, err := ToInt64E(b) + if err == nil { + return v != 0, nil + } + return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) + default: + return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) + } +} + +// ToFloat64E casts an interface to a float64 type. +func ToFloat64E(i interface{}) (float64, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + return float64(intv), nil + } + + switch s := i.(type) { + case float64: + return s, nil + case float32: + return float64(s), nil + case int64: + return float64(s), nil + case int32: + return float64(s), nil + case int16: + return float64(s), nil + case int8: + return float64(s), nil + case uint: + return float64(s), nil + case uint64: + return float64(s), nil + case uint32: + return float64(s), nil + case uint16: + return float64(s), nil + case uint8: + return float64(s), nil + case string: + v, err := strconv.ParseFloat(s, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case json.Number: + v, err := s.Float64() + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + } +} + +// ToFloat32E casts an interface to a float32 type. 
+func ToFloat32E(i interface{}) (float32, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + return float32(intv), nil + } + + switch s := i.(type) { + case float64: + return float32(s), nil + case float32: + return s, nil + case int64: + return float32(s), nil + case int32: + return float32(s), nil + case int16: + return float32(s), nil + case int8: + return float32(s), nil + case uint: + return float32(s), nil + case uint64: + return float32(s), nil + case uint32: + return float32(s), nil + case uint16: + return float32(s), nil + case uint8: + return float32(s), nil + case string: + v, err := strconv.ParseFloat(s, 32) + if err == nil { + return float32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case json.Number: + v, err := s.Float64() + if err == nil { + return float32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + } +} + +// ToInt64E casts an interface to an int64 type. +func ToInt64E(i interface{}) (int64, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + return int64(intv), nil + } + + switch s := i.(type) { + case int64: + return s, nil + case int32: + return int64(s), nil + case int16: + return int64(s), nil + case int8: + return int64(s), nil + case uint: + return int64(s), nil + case uint64: + return int64(s), nil + case uint32: + return int64(s), nil + case uint16: + return int64(s), nil + case uint8: + return int64(s), nil + case float64: + return int64(s), nil + case float32: + return int64(s), nil + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + case json.Number: + return ToInt64E(string(s)) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + } +} + +// ToInt32E casts an interface to an int32 type. +func ToInt32E(i interface{}) (int32, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + return int32(intv), nil + } + + switch s := i.(type) { + case int64: + return int32(s), nil + case int32: + return s, nil + case int16: + return int32(s), nil + case int8: + return int32(s), nil + case uint: + return int32(s), nil + case uint64: + return int32(s), nil + case uint32: + return int32(s), nil + case uint16: + return int32(s), nil + case uint8: + return int32(s), nil + case float64: + return int32(s), nil + case float32: + return int32(s), nil + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + return int32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + case json.Number: + return ToInt32E(string(s)) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + } +} + +// ToInt16E casts an interface to an int16 type. 
+func ToInt16E(i interface{}) (int16, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + return int16(intv), nil + } + + switch s := i.(type) { + case int64: + return int16(s), nil + case int32: + return int16(s), nil + case int16: + return s, nil + case int8: + return int16(s), nil + case uint: + return int16(s), nil + case uint64: + return int16(s), nil + case uint32: + return int16(s), nil + case uint16: + return int16(s), nil + case uint8: + return int16(s), nil + case float64: + return int16(s), nil + case float32: + return int16(s), nil + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + return int16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + case json.Number: + return ToInt16E(string(s)) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + } +} + +// ToInt8E casts an interface to an int8 type. +func ToInt8E(i interface{}) (int8, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + return int8(intv), nil + } + + switch s := i.(type) { + case int64: + return int8(s), nil + case int32: + return int8(s), nil + case int16: + return int8(s), nil + case int8: + return s, nil + case uint: + return int8(s), nil + case uint64: + return int8(s), nil + case uint32: + return int8(s), nil + case uint16: + return int8(s), nil + case uint8: + return int8(s), nil + case float64: + return int8(s), nil + case float32: + return int8(s), nil + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + return int8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + case json.Number: + return ToInt8E(string(s)) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + } +} + +// ToIntE casts an interface to an int type. +func ToIntE(i interface{}) (int, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + return intv, nil + } + + switch s := i.(type) { + case int64: + return int(s), nil + case int32: + return int(s), nil + case int16: + return int(s), nil + case int8: + return int(s), nil + case uint: + return int(s), nil + case uint64: + return int(s), nil + case uint32: + return int(s), nil + case uint16: + return int(s), nil + case uint8: + return int(s), nil + case float64: + return int(s), nil + case float32: + return int(s), nil + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + return int(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + case json.Number: + return ToIntE(string(s)) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + } +} + +// ToUintE casts an interface to a uint type. 
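+// Negative numeric inputs are rejected with errNegativeNotAllowed rather
+// than being wrapped around.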
+func ToUintE(i interface{}) (uint, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + if intv < 0 { + return 0, errNegativeNotAllowed + } + return uint(intv), nil + } + + switch s := i.(type) { + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + if v < 0 { + return 0, errNegativeNotAllowed + } + return uint(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) + case json.Number: + return ToUintE(string(s)) + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case uint: + return s, nil + case uint64: + return uint(s), nil + case uint32: + return uint(s), nil + case uint16: + return uint(s), nil + case uint8: + return uint(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) + } +} + +// ToUint64E casts an interface to a uint64 type. +func ToUint64E(i interface{}) (uint64, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + if intv < 0 { + return 0, errNegativeNotAllowed + } + return uint64(intv), nil + } + + switch s := i.(type) { + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + if v < 0 { + return 0, errNegativeNotAllowed + } + return uint64(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) + case json.Number: + return ToUint64E(string(s)) + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case uint: + return uint64(s), nil + case uint64: + return s, nil + case uint32: + return uint64(s), nil + case uint16: + return uint64(s), nil + case uint8: + return uint64(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) + } +} + +// ToUint32E casts an interface to a uint32 type. 
+func ToUint32E(i interface{}) (uint32, error) {
+	i = indirect(i)
+
+	intv, ok := toInt(i)
+	if ok {
+		if intv < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(intv), nil
+	}
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+		if err == nil {
+			if v < 0 {
+				return 0, errNegativeNotAllowed
+			}
+			return uint32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+	case json.Number:
+		return ToUint32E(string(s))
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case uint:
+		return uint32(s), nil
+	case uint64:
+		return uint32(s), nil
+	case uint32:
+		return s, nil
+	case uint16:
+		return uint32(s), nil
+	case uint8:
+		return uint32(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+	}
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+	i = indirect(i)
+
+	intv, ok := toInt(i)
+	if ok {
+		if intv < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(intv), nil
+	}
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0)
+		if err == nil {
+			if v < 0 {
+				return 0, errNegativeNotAllowed
+			}
+			return uint16(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+	case json.Number:
+		return ToUint16E(string(s))
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case uint:
+		return uint16(s), nil
+	case uint64:
+		return uint16(s), nil
+	case uint32:
+		return uint16(s), nil
+	case uint16:
+		return s, nil
+	case uint8:
+		return uint16(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+	}
+}
+
+// ToUint8E casts an interface to a uint8 type.
+func ToUint8E(i interface{}) (uint8, error) { + i = indirect(i) + + intv, ok := toInt(i) + if ok { + if intv < 0 { + return 0, errNegativeNotAllowed + } + return uint8(intv), nil + } + + switch s := i.(type) { + case string: + v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + if err == nil { + if v < 0 { + return 0, errNegativeNotAllowed + } + return uint8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) + case json.Number: + return ToUint8E(string(s)) + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case uint: + return uint8(s), nil + case uint64: + return uint8(s), nil + case uint32: + return uint8(s), nil + case uint16: + return uint8(s), nil + case uint8: + return s, nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) + } +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirect returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil). +func indirect(a interface{}) interface{} { + if a == nil { + return nil + } + if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { + // Avoid creating a reflect.Value if it's not a pointer. + return a + } + v := reflect.ValueOf(a) + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirectToStringerOrError returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer +// or error, +func indirectToStringerOrError(a interface{}) interface{} { + if a == nil { + return nil + } + + var errorType = reflect.TypeOf((*error)(nil)).Elem() + var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + + v := reflect.ValueOf(a) + for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// ToStringE casts an interface to a string type. 
+func ToStringE(i interface{}) (string, error) { + i = indirectToStringerOrError(i) + + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int64: + return strconv.FormatInt(s, 10), nil + case int32: + return strconv.Itoa(int(s)), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case uint: + return strconv.FormatUint(uint64(s), 10), nil + case uint64: + return strconv.FormatUint(uint64(s), 10), nil + case uint32: + return strconv.FormatUint(uint64(s), 10), nil + case uint16: + return strconv.FormatUint(uint64(s), 10), nil + case uint8: + return strconv.FormatUint(uint64(s), 10), nil + case json.Number: + return s.String(), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) + } +} + +// ToStringMapStringE casts an interface to a map[string]string type. +func ToStringMapStringE(i interface{}) (map[string]string, error) { + var m = map[string]string{} + + switch v := i.(type) { + case map[string]string: + return v, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) + } +} + +// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { + var m = map[string][]string{} + + switch v := i.(type) { + case map[string][]string: + return v, nil + case map[string][]interface{}: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[string]string: + for k, val := range v { + m[ToString(k)] = []string{val} + } + case map[string]interface{}: + for k, val := range v { + switch vt := val.(type) { + case []interface{}: + m[ToString(k)] = ToStringSlice(vt) + case []string: + m[ToString(k)] = vt + default: + m[ToString(k)] = []string{ToString(val)} + } + } + return m, nil + case map[interface{}][]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}][]interface{}: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + key, err := ToStringE(k) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + value, err := ToStringSliceE(val) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + m[key] = value + } + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + return m, nil +} + +// ToStringMapBoolE casts an interface to a map[string]bool type. +func ToStringMapBoolE(i interface{}) (map[string]bool, error) { + var m = map[string]bool{} + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToBool(val) + } + return m, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToBool(val) + } + return m, nil + case map[string]bool: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) + } +} + +// ToStringMapE casts an interface to a map[string]interface{} type. +func ToStringMapE(i interface{}) (map[string]interface{}, error) { + var m = map[string]interface{}{} + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = val + } + return m, nil + case map[string]interface{}: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) + } +} + +// ToStringMapIntE casts an interface to a map[string]int{} type. 
+func ToStringMapIntE(i interface{}) (map[string]int, error) { + var m = map[string]int{} + if i == nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) + } + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToInt(val) + } + return m, nil + case map[string]interface{}: + for k, val := range v { + m[k] = ToInt(val) + } + return m, nil + case map[string]int: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + } + + if reflect.TypeOf(i).Kind() != reflect.Map { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) + } + + mVal := reflect.ValueOf(m) + v := reflect.ValueOf(i) + for _, keyVal := range v.MapKeys() { + val, err := ToIntE(v.MapIndex(keyVal).Interface()) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) + } + mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) + } + return m, nil +} + +// ToStringMapInt64E casts an interface to a map[string]int64{} type. +func ToStringMapInt64E(i interface{}) (map[string]int64, error) { + var m = map[string]int64{} + if i == nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) + } + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToInt64(val) + } + return m, nil + case map[string]interface{}: + for k, val := range v { + m[k] = ToInt64(val) + } + return m, nil + case map[string]int64: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + } + + if reflect.TypeOf(i).Kind() != reflect.Map { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) + } + mVal := reflect.ValueOf(m) + v := reflect.ValueOf(i) + for _, keyVal := range v.MapKeys() { + val, err := ToInt64E(v.MapIndex(keyVal).Interface()) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) + } + mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) + } + return m, nil +} + +// ToSliceE casts an interface to a []interface{} type. +func ToSliceE(i interface{}) ([]interface{}, error) { + var s []interface{} + + switch v := i.(type) { + case []interface{}: + return append(s, v...), nil + case []map[string]interface{}: + for _, u := range v { + s = append(s, u) + } + return s, nil + default: + return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) + } +} + +// ToBoolSliceE casts an interface to a []bool type. +func ToBoolSliceE(i interface{}) ([]bool, error) { + if i == nil { + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } + + switch v := i.(type) { + case []bool: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]bool, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToBoolE(s.Index(j).Interface()) + if err != nil { + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } + a[j] = val + } + return a, nil + default: + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } +} + +// ToStringSliceE casts an interface to a []string type. 
+func ToStringSliceE(i interface{}) ([]string, error) { + var a []string + + switch v := i.(type) { + case []interface{}: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []string: + return v, nil + case []int8: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []int64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float32: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []float64: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case string: + return strings.Fields(v), nil + case []error: + for _, err := range i.([]error) { + a = append(a, err.Error()) + } + return a, nil + case interface{}: + str, err := ToStringE(v) + if err != nil { + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } + return []string{str}, nil + default: + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } +} + +// ToIntSliceE casts an interface to a []int type. +func ToIntSliceE(i interface{}) ([]int, error) { + if i == nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + + switch v := i.(type) { + case []int: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]int, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToIntE(s.Index(j).Interface()) + if err != nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + a[j] = val + } + return a, nil + default: + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } +} + +// ToDurationSliceE casts an interface to a []time.Duration type. +func ToDurationSliceE(i interface{}) ([]time.Duration, error) { + if i == nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + + switch v := i.(type) { + case []time.Duration: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]time.Duration, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToDurationE(s.Index(j).Interface()) + if err != nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + a[j] = val + } + return a, nil + default: + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } +} + +// StringToDate attempts to parse a string into a time.Time type using a +// predefined list of formats. If no suitable format is found, an error is +// returned. +func StringToDate(s string) (time.Time, error) { + return parseDateWith(s, time.UTC, timeFormats) +} + +// StringToDateInDefaultLocation casts an empty interface to a time.Time, +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. 
+func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { + return parseDateWith(s, location, timeFormats) +} + +type timeFormatType int + +const ( + timeFormatNoTimezone timeFormatType = iota + timeFormatNamedTimezone + timeFormatNumericTimezone + timeFormatNumericAndNamedTimezone + timeFormatTimeOnly +) + +type timeFormat struct { + format string + typ timeFormatType +} + +func (f timeFormat) hasTimezone() bool { + // We don't include the formats with only named timezones, see + // https://github.com/golang/go/issues/19694#issuecomment-289103522 + return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone +} + +var ( + timeFormats = []timeFormat{ + {time.RFC3339, timeFormatNumericTimezone}, + {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, timeFormatNumericTimezone}, + {time.RFC1123, timeFormatNamedTimezone}, + {time.RFC822Z, timeFormatNumericTimezone}, + {time.RFC822, timeFormatNamedTimezone}, + {time.RFC850, timeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", timeFormatNoTimezone}, + {time.ANSIC, timeFormatNoTimezone}, + {time.UnixDate, timeFormatNamedTimezone}, + {time.RubyDate, timeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + {"2006-01-02", timeFormatNoTimezone}, + {"02 Jan 2006", timeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + {time.Kitchen, timeFormatTimeOnly}, + {time.Stamp, timeFormatTimeOnly}, + {time.StampMilli, timeFormatTimeOnly}, + {time.StampMicro, timeFormatTimeOnly}, + {time.StampNano, timeFormatTimeOnly}, + } +) + +func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { + + for _, format := range formats { + if d, e = time.Parse(format.format, s); e == nil { + + // Some time formats have a zone name, but no offset, so it gets + // put in that zone name (not the default one passed in to us), but + // without that zone's offset. So set the location manually. + if format.typ <= timeFormatNamedTimezone { + if location == nil { + location = time.Local + } + year, month, day := d.Date() + hour, min, sec := d.Clock() + d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) + } + + return + } + } + return d, fmt.Errorf("unable to parse date: %s", s) +} + +// jsonStringToObject attempts to unmarshall a string as JSON into +// the object passed as pointer. +func jsonStringToObject(s string, v interface{}) error { + data := []byte(s) + return json.Unmarshal(data, v) +} + +// toInt returns the int value of v if v or v's underlying type +// is an int. +// Note that this will return false for int64 etc. types. 
+func toInt(v interface{}) (int, bool) { + switch v := v.(type) { + case int: + return v, true + case time.Weekday: + return int(v), true + case time.Month: + return int(v), true + default: + return 0, false + } +} + +func trimZeroDecimal(s string) string { + var foundZero bool + for i := len(s); i > 0; i-- { + switch s[i-1] { + case '.': + if foundZero { + return s[:i-1] + } + case '0': + foundZero = true + default: + return s + } + } + return s +} diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go new file mode 100644 index 000000000..1524fc82c --- /dev/null +++ b/vendor/github.com/spf13/cast/timeformattype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. + +package cast + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[timeFormatNoTimezone-0] + _ = x[timeFormatNamedTimezone-1] + _ = x[timeFormatNumericTimezone-2] + _ = x[timeFormatNumericAndNamedTimezone-3] + _ = x[timeFormatTimeOnly-4] +} + +const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" + +var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i timeFormatType) String() string { + if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { + return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] +} diff --git a/vendor/k8s.io/helm/LICENSE b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt similarity index 99% rename from vendor/k8s.io/helm/LICENSE rename to vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt index 393b7a33b..55ede8a42 100644 --- a/vendor/k8s.io/helm/LICENSE +++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright The Helm Authors. + Copyright 2015 xeipuuv Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
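The spf13/cast package vendored in the hunks above arrives with the `helm.sh/helm/v3` dependency bump. As a quick orientation for reviewers, here is a minimal sketch of the conversion behaviour, including the `trimZeroDecimal` and `errNegativeNotAllowed` paths. The program itself is hypothetical; it only assumes the exported `ToIntE`, `ToUint64E`, and `StringToDate` helpers shown above.

```go
// Sketch only: exercises the vendored spf13/cast helpers from the hunks above.
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// trimZeroDecimal strips an all-zero fraction, so "42.00" still parses
	// as an integer instead of failing strconv.ParseInt.
	n, err := cast.ToIntE("42.00")
	fmt.Println(n, err) // 42 <nil>

	// The unsigned variants reject negative input (errNegativeNotAllowed).
	if _, err := cast.ToUint64E(-1); err != nil {
		fmt.Println(err)
	}

	// StringToDate walks the timeFormats table in order until a layout parses.
	t, err := cast.StringToDate("2006-01-02 15:04:05")
	fmt.Println(t, err)
}
```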
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md
new file mode 100644
index 000000000..a4f5f1458
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md
@@ -0,0 +1,41 @@
+# gojsonpointer
+An implementation of JSON Pointer - Go language
+
+## Usage
+    jsonText := `{
+        "name": "Bobby B",
+        "occupation": {
+            "title" : "King",
+            "years" : 15,
+            "heir" : "Joffrey B"
+        }
+    }`
+
+    var jsonDocument map[string]interface{}
+    json.Unmarshal([]byte(jsonText), &jsonDocument)
+
+    //create a JSON pointer
+    pointerString := "/occupation/title"
+    pointer, _ := NewJsonPointer(pointerString)
+
+    //SET a new value for the "title" in the document
+    pointer.Set(jsonDocument, "Supreme Leader of Westeros")
+
+    //GET the new "title" from the document
+    title, _, _ := pointer.Get(jsonDocument)
+    fmt.Println(title) //outputs "Supreme Leader of Westeros"
+
+    //DELETE the "heir" from the document
+    deletePointer, _ := NewJsonPointer("/occupation/heir")
+    deletePointer.Delete(jsonDocument)
+
+    b, _ := json.Marshal(jsonDocument)
+    fmt.Println(string(b))
+    //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`
+
+
+## References
+https://tools.ietf.org/html/rfc6901
+
+### Note
+The "4. Evaluation" section of the reference above, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...', is not implemented.
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
new file mode 100644
index 000000000..798c1f1c5
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
@@ -0,0 +1,211 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author           xeipuuv
+// author-github    https://github.com/xeipuuv
+// author-mail      xeipuuv@gmail.com
+//
+// repository-name  gojsonpointer
+// repository-desc  An implementation of JSON Pointer - Go language
+//
+// description      Main and unique file.
+// +// created 25-02-2013 + +package gojsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +const ( + const_empty_pointer = `` + const_pointer_separator = `/` + + const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` +) + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +type JsonPointer struct { + referenceTokens []string +} + +// NewJsonPointer parses the given string JSON pointer and returns an object +func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { + + // Pointer to the root of the document + if len(jsonPointerString) == 0 { + // Keep referenceTokens nil + return + } + if jsonPointerString[0] != '/' { + return p, errors.New(const_invalid_start) + } + + p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) + return +} + +// Uses the pointer to retrieve a value from a JSON document +func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + + is := &implStruct{mode: "GET", inDocument: document} + p.implementation(is) + return is.getOutNode, is.getOutKind, is.outError + +} + +// Uses the pointer to update a value from a JSON document +func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { + + is := &implStruct{mode: "SET", inDocument: document, setInValue: value} + p.implementation(is) + return document, is.outError + +} + +// Uses the pointer to delete a value from a JSON document +func (p *JsonPointer) Delete(document interface{}) (interface{}, error) { + is := &implStruct{mode: "DEL", inDocument: document} + p.implementation(is) + return document, is.outError +} + +// Both Get and Set functions use the same implementation to avoid code duplication +func (p *JsonPointer) implementation(i *implStruct) { + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + i.getOutNode = i.inDocument + i.outError = nil + i.getOutKind = kind + i.outError = nil + return + } + + node := i.inDocument + + previousNodes := make([]interface{}, len(p.referenceTokens)) + previousTokens := make([]string, len(p.referenceTokens)) + + for ti, token := range p.referenceTokens { + + isLastToken := ti == len(p.referenceTokens)-1 + previousNodes[ti] = node + previousTokens[ti] = token + + switch v := node.(type) { + + case map[string]interface{}: + decodedToken := decodeReferenceToken(token) + if _, ok := v[decodedToken]; ok { + node = v[decodedToken] + if isLastToken && i.mode == "SET" { + v[decodedToken] = i.setInValue + } else if isLastToken && i.mode == "DEL" { + delete(v, decodedToken) + } + } else if isLastToken && i.mode == "SET" { + v[decodedToken] = i.setInValue + } else { + i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) + i.getOutKind = reflect.Map + i.getOutNode = nil + return + } + + case []interface{}: + tokenIndex, err := strconv.Atoi(token) + if err != nil { + i.outError = fmt.Errorf("Invalid array index '%s'", token) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + if tokenIndex < 0 || tokenIndex >= len(v) { + i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + + node = v[tokenIndex] + if isLastToken && i.mode == "SET" { + v[tokenIndex] = i.setInValue + } else if isLastToken && i.mode == "DEL" { + 
v[tokenIndex] = v[len(v)-1] + v[len(v)-1] = nil + v = v[:len(v)-1] + previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v + } + + default: + i.outError = fmt.Errorf("Invalid token reference '%s'", token) + i.getOutKind = reflect.ValueOf(node).Kind() + i.getOutNode = nil + return + } + + } + + i.getOutNode = node + i.getOutKind = reflect.ValueOf(node).Kind() + i.outError = nil +} + +// Pointer to string representation function +func (p *JsonPointer) String() string { + + if len(p.referenceTokens) == 0 { + return const_empty_pointer + } + + pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +func decodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~1`, `/`, -1) + step2 := strings.Replace(step1, `~0`, `~`, -1) + return step2 +} + +func encodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~`, `~0`, -1) + step2 := strings.Replace(step1, `/`, `~1`, -1) + return step2 +} diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt new file mode 100644 index 000000000..55ede8a42 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md new file mode 100644 index 000000000..9ab6e1eb1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/README.md @@ -0,0 +1,10 @@ +# gojsonreference +An implementation of JSON Reference - Go language + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go new file mode 100644 index 000000000..645729130 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go @@ -0,0 +1,147 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package gojsonreference + +import ( + "errors" + "net/url" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonpointer" +) + +const ( + const_fragment_char = `#` +) + +func NewJsonReference(jsonReferenceString string) (JsonReference, error) { + + var r JsonReference + err := r.parse(jsonReferenceString) + return r, err + +} + +type JsonReference struct { + referenceUrl *url.URL + referencePointer gojsonpointer.JsonPointer + + HasFullUrl bool + HasUrlPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +func (r *JsonReference) GetUrl() *url.URL { + return r.referenceUrl +} + +func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { + return &r.referencePointer +} + +func (r *JsonReference) String() string { + + if r.referenceUrl != nil { + return r.referenceUrl.String() + } + + if r.HasFragmentOnly { + return const_fragment_char + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +func (r *JsonReference) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) +} + +// "Constructor", parses the given string JSON reference +func (r *JsonReference) parse(jsonReferenceString string) (err error) { + + r.referenceUrl, err = url.Parse(jsonReferenceString) + if err != nil { + return + } + refUrl := r.referenceUrl + + if refUrl.Scheme != "" && refUrl.Host != "" { + r.HasFullUrl = true + } else { + if refUrl.Path != "" { + r.HasUrlPathOnly = true + } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refUrl.Scheme == "file" + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, and if it + // doesn't then its first component will be treated as the host by the + // Go runtime + if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) + } + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path) + } + + // invalid json-pointer error means url has no json-pointer fragment. simply ignore error + r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) + + return +} + +// Creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { + if child.GetUrl() == nil { + return nil, errors.New("childUrl is nil!") + } + + if r.GetUrl() == nil { + return nil, errors.New("parentUrl is nil!") + } + + // Get a copy of the parent url to make sure we do not modify the original. + // URL reference resolving fails if the fragment of the child is empty, but the parent's is not. 
+ // The fragment of the child must be used, so the fragment of the parent is manually removed. + parentUrl := *r.GetUrl() + parentUrl.Fragment = "" + + ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) + if err != nil { + return nil, err + } + return &ref, err +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore new file mode 100644 index 000000000..68e993ce3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.gitignore @@ -0,0 +1,3 @@ +*.sw[nop] +*.iml +.vscode/ diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml new file mode 100644 index 000000000..3289001cd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - "1.11" + - "1.12" + - "1.13" +before_install: + - go get github.com/xeipuuv/gojsonreference + - go get github.com/xeipuuv/gojsonpointer + - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt new file mode 100644 index 000000000..55ede8a42 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md new file mode 100644 index 000000000..758f26df0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/README.md @@ -0,0 +1,466 @@ +[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) +[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) +[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema) + +# gojsonschema + +## Description + +An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07. 
+
+References :
+
+* http://json-schema.org
+* http://json-schema.org/latest/json-schema-core.html
+* http://json-schema.org/latest/json-schema-validation.html
+
+## Installation
+
+```
+go get github.com/xeipuuv/gojsonschema
+```
+
+Dependencies :
+* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
+* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
+* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
+
+## Usage
+
+### Example
+
+```go
+
+package main
+
+import (
+    "fmt"
+    "github.com/xeipuuv/gojsonschema"
+)
+
+func main() {
+
+    schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+    documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
+
+    result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+    if err != nil {
+        panic(err.Error())
+    }
+
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. see errors :\n")
+        for _, desc := range result.Errors() {
+            fmt.Printf("- %s\n", desc)
+        }
+    }
+}
+
+```
+
+#### Loaders
+
+There are various ways to load your JSON data.
+In order to load your schemas and documents,
+first declare an appropriate loader :
+
+* Web / HTTP, using a reference :
+
+```go
+loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
+```
+
+* Local file, using a reference :
+
+```go
+loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+```
+
+References use the URI scheme; the prefix (file://) and a full path to the file are required.
+
+* JSON strings :
+
+```go
+loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
+```
+
+* Custom Go types :
+
+```go
+m := map[string]interface{}{"type": "string"}
+loader := gojsonschema.NewGoLoader(m)
+```
+
+And
+
+```go
+type Root struct {
+    Users []User `json:"users"`
+}
+
+type User struct {
+    Name string `json:"name"`
+}
+
+...
+
+data := Root{}
+data.Users = append(data.Users, User{"John"})
+data.Users = append(data.Users, User{"Sophia"})
+data.Users = append(data.Users, User{"Bill"})
+
+loader := gojsonschema.NewGoLoader(data)
+```
+
+#### Validation
+
+Once the loaders are set, validation is easy :
+
+```go
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+```
+
+Alternatively, you might want to load a schema only once and perform multiple validations :
+
+```go
+schema, err := gojsonschema.NewSchema(schemaLoader)
+...
+result1, err := schema.Validate(documentLoader1)
+...
+result2, err := schema.Validate(documentLoader2)
+...
+// etc ...
+```
+
+To check the result :
+
+```go
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. see errors :\n")
+        for _, err := range result.Errors() {
+            // Err implements the ResultError interface
+            fmt.Printf("- %s\n", err)
+        }
+    }
+```
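+
+Schemas and documents do not have to live in files or strings; the package also ships a `NewBytesLoader` for JSON already held as a byte slice (see jsonLoader.go later in this patch). A minimal sketch, with purely illustrative schema and document literals:
+
+```go
+schemaLoader := gojsonschema.NewBytesLoader([]byte(`{"type": "string"}`))
+documentLoader := gojsonschema.NewBytesLoader([]byte(`"hello"`))
+
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+if err != nil {
+    panic(err.Error())
+}
+fmt.Println(result.Valid()) // true
+```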
+
+## Loading local schemas
+
+By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
+
+```go
+    sl := gojsonschema.NewSchemaLoader()
+    loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
+    err := sl.AddSchema("http://some_host.com/string.json", loader1)
+```
+
+Alternatively, if your schema already has an `$id`, you can use the `AddSchemas` function
+```go
+    loader2 := gojsonschema.NewStringLoader(`{
+        "$id" : "http://some_host.com/maxlength.json",
+        "maxLength" : 5
+    }`)
+    err = sl.AddSchemas(loader2)
+```
+
+The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
+```go
+    loader3 := gojsonschema.NewStringLoader(`{
+        "$id" : "http://some_host.com/main.json",
+        "allOf" : [
+            { "$ref" : "http://some_host.com/string.json" },
+            { "$ref" : "http://some_host.com/maxlength.json" }
+        ]
+    }`)
+
+    schema, err := sl.Compile(loader3)
+
+    documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
+
+    result, err := schema.Validate(documentLoader)
+```
+
+It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
+
+```go
+err = sl.AddSchemas(loader3)
+schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
+```
+
+Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
+
+## Using a specific draft
+By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
+
+Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Draft = gojsonschema.Draft7
+sl.AutoDetect = false
+```
+
+If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas.
+
+## Meta-schema validation
+Schemas that are added using `AddSchema`, `AddSchemas` and `Compile` can be validated against their meta-schema by setting the `Validate` property.
+
+The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Validate = true
+err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
+     "$id" : "http://some_host.com/invalid.json",
+     "$schema": "http://json-schema.org/draft-07/schema#",
+     "multipleOf" : true
+}`))
+```
+
+Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
+
+Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
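+
+Putting the draft and validation settings together, a minimal sketch (the schema and document literals are only illustrative) that compiles a schema strictly as draft-07 with meta-schema validation enabled:
+
+```go
+sl := gojsonschema.NewSchemaLoader()
+sl.Draft = gojsonschema.Draft7 // pin the draft
+sl.AutoDetect = false          // do not fall back to $schema detection
+sl.Validate = true             // validate added schemas against the draft-07 meta-schema
+
+schema, err := sl.Compile(gojsonschema.NewStringLoader(`{"maxLength": 5}`))
+if err != nil {
+    panic(err.Error())
+}
+
+result, err := schema.Validate(gojsonschema.NewStringLoader(`"hello"`))
+if err != nil {
+    panic(err.Error())
+}
+fmt.Println(result.Valid()) // true: "hello" has length 5
+```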
+
+## Working with Errors
+
+The library handles string error codes, which you can customize by creating your own gojsonschema.locale and setting it
+```go
+gojsonschema.Locale = YourCustomLocale{}
+```
+
+However, each error contains additional contextual information.
+
+Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
+
+**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type-check against the concrete error types listed below.
+
+Note: An error of RequiredType has an err.Type() return value of "required"
+
+    "required": RequiredError
+    "invalid_type": InvalidTypeError
+    "number_any_of": NumberAnyOfError
+    "number_one_of": NumberOneOfError
+    "number_all_of": NumberAllOfError
+    "number_not": NumberNotError
+    "missing_dependency": MissingDependencyError
+    "internal": InternalError
+    "const": ConstError
+    "enum": EnumError
+    "array_no_additional_items": ArrayNoAdditionalItemsError
+    "array_min_items": ArrayMinItemsError
+    "array_max_items": ArrayMaxItemsError
+    "unique": ItemsMustBeUniqueError
+    "contains" : ArrayContainsError
+    "array_min_properties": ArrayMinPropertiesError
+    "array_max_properties": ArrayMaxPropertiesError
+    "additional_property_not_allowed": AdditionalPropertyNotAllowedError
+    "invalid_property_pattern": InvalidPropertyPatternError
+    "invalid_property_name": InvalidPropertyNameError
+    "string_gte": StringLengthGTEError
+    "string_lte": StringLengthLTEError
+    "pattern": DoesNotMatchPatternError
+    "multiple_of": MultipleOfError
+    "number_gte": NumberGTEError
+    "number_gt": NumberGTError
+    "number_lte": NumberLTEError
+    "number_lt": NumberLTError
+    "condition_then" : ConditionThenError
+    "condition_else" : ConditionElseError
+
+**err.Value()**: *interface{}* Returns the value given
+
+**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
+
+**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
+
+**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
+
+**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
+
+**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
+
+Note that in most cases, err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e.
+```
+{{.field}} must be greater than or equal to {{.min}}
+```
+
+The library allows you to specify custom template functions, should you require more complex error message handling.
+```go
+gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
+    "allcaps": func(s string) string {
+        return strings.ToUpper(s)
+    },
+}
+```
+
+Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
+```
+{{allcaps .field}} must be greater than or equal to {{.min}}
+```
+
+The above error message would then be rendered with the `field` value in capital letters. For example:
+```
+"PASSWORD must be greater than or equal to 8"
+```
+
+Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
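+
+For illustration, a short sketch that walks a validation result using the accessors described above (`result` is assumed to come from one of the earlier validation examples):
+
+```go
+for _, e := range result.Errors() {
+    // e implements the ResultError interface described above
+    fmt.Printf("type=%s field=%s value=%v\n", e.Type(), e.Field(), e.Value())
+
+    // Details() holds the template replacements, e.g. "min" for number_gte errors
+    if min, ok := e.Details()["min"]; ok {
+        fmt.Printf("  expected minimum: %v\n", min)
+    }
+}
+```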
+
+## Formats
+JSON Schema allows for an optional "format" property to validate instances against well-known formats. gojsonschema ships with most of the formats defined in the spec, which you can use like this:
+
+```json
+{"type": "string", "format": "email"}
+```
+
+Not all formats defined in draft-07 are available. Implemented formats are:
+
+* `date`
+* `time`
+* `date-time`
+* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames.
+* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
+* `idn-email`. Same caveat as `email`.
+* `ipv4`
+* `ipv6`
+* `uri`. Includes unicode support.
+* `uri-reference`. Includes unicode support.
+* `iri`
+* `iri-reference`
+* `uri-template`
+* `uuid`
+* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
+* `json-pointer`
+* `relative-json-pointer`
+
+`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific
+unicode enabled formats for the sake of interoperability as other implementations might not support unicode in the regular formats.
+
+The validation code for `uri`, `idn-email` and their relatives uses mostly standard library code.
+
+For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
+
+```go
+// Define the format checker
+type RoleFormatChecker struct {}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f RoleFormatChecker) IsFormat(input interface{}) bool {
+
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    return strings.HasPrefix(asString, "ROLE_")
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
+```
+
+Now to use in your json schema:
+```json
+{"type": "string", "format": "role"}
+```
+
+Another example would be to check if the provided integer matches an id in a database:
+
+JSON schema:
+```json
+{"type": "integer", "format": "ValidUserId"}
+```
+
+```go
+// Define the format checker
+type ValidUserIdFormatChecker struct {}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
+
+    asFloat64, ok := input.(float64) // Numbers are always float64 here
+    if !ok {
+        return false
+    }
+
+    // XXX
+    // do the magic on the database looking for the int(asFloat64)
+
+    return true
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
+```
+
+Formats can also be removed, for example if you want to override one of the formats that is defined by default.
+
+```go
+gojsonschema.FormatCheckers.Remove("hostname")
+```
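+
+A minimal end-to-end sketch using the custom `role` checker defined above (the document literal is only illustrative):
+
+```go
+gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
+
+schemaLoader := gojsonschema.NewStringLoader(`{"type": "string", "format": "role"}`)
+documentLoader := gojsonschema.NewStringLoader(`"ROLE_ADMIN"`)
+
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+if err != nil {
+    panic(err.Error())
+}
+fmt.Println(result.Valid()) // true: "ROLE_ADMIN" carries the ROLE_ prefix
+```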
+
+## Additional custom validation
+After the validation has run and you have the results, you may add additional
+errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead
+of having to add special exceptions for your own errors. Below is an example.
+
+```go
+type AnswerInvalidError struct {
+    gojsonschema.ResultErrorFields
+}
+
+func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
+    err := AnswerInvalidError{}
+    err.SetContext(context)
+    err.SetType("custom_invalid_error")
+    // It is important to use SetDescriptionFormat() here: the format is parsed
+    // to produce the description, so a description set directly on err would be
+    // overridden by it.
+    err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
+    err.SetValue(value)
+    err.SetDetails(details)
+
+    return &err
+}
+
+func validateWithCustomError(schemaLoader, documentLoader gojsonschema.JSONLoader) (*gojsonschema.Result, error) {
+    result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+    if err != nil {
+        return nil, err
+    }
+
+    if true { // some validation
+        jsonContext := gojsonschema.NewJsonContext("question", nil)
+        errDetail := gojsonschema.ErrorDetails{
+            "answer": 42,
+        }
+        result.AddError(
+            newAnswerInvalidError(
+                gojsonschema.NewJsonContext("answer", jsonContext),
+                52,
+                errDetail,
+            ),
+            errDetail,
+        )
+    }
+
+    return result, err
+}
+```
+
+This is especially useful if you want to add validation beyond what the
+json schema drafts can provide, such as business-specific logic.
+
+## Uses
+
+gojsonschema uses the following test suite :
+
+https://github.com/json-schema/JSON-Schema-Test-Suite
diff --git a/vendor/github.com/xeipuuv/gojsonschema/draft.go b/vendor/github.com/xeipuuv/gojsonschema/draft.go
new file mode 100644
index 000000000..61298e7aa
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/draft.go
@@ -0,0 +1,125 @@
+// Copyright 2018 johandorland ( https://github.com/johandorland )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package gojsonschema + +import ( + "errors" + "math" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +// Draft is a JSON-schema draft version +type Draft int + +// Supported Draft versions +const ( + Draft4 Draft = 4 + Draft6 Draft = 6 + Draft7 Draft = 7 + Hybrid Draft = math.MaxInt32 +) + +type draftConfig struct { + Version Draft + MetaSchemaURL string + MetaSchema string +} +type draftConfigs []draftConfig + +var drafts draftConfigs + +func init() { + drafts = []draftConfig{ + { + Version: Draft4, + MetaSchemaURL: "http://json-schema.org/draft-04/schema", + MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`, + }, + { + Version: Draft6, + MetaSchemaURL: "http://json-schema.org/draft-06/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`, + }, + { + Version: Draft7, + MetaSchemaURL: "http://json-schema.org/draft-07/schema", + MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`, + }, + } +} + +func (dc draftConfigs) GetMetaSchema(url string) string { + for _, config := range dc { + if config.MetaSchemaURL == url { + return config.MetaSchema + } + } + return "" +} +func (dc draftConfigs) GetDraftVersion(url string) *Draft { + for _, config := range dc { + if config.MetaSchemaURL == url { + return &config.Version + } + } + return nil +} +func (dc draftConfigs) GetSchemaURL(draft Draft) string { + for _, config := range dc { + if config.Version == draft { + return config.MetaSchemaURL + } + } + return "" +} + +func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { + + if isKind(documentNode, reflect.Bool) { + return "", nil, nil + } + + if !isKind(documentNode, reflect.Map) { + return "", nil, errors.New("schema is invalid") + } + + m := documentNode.(map[string]interface{}) + + if existsMapKey(m, KEY_SCHEMA) { + if !isKind(m[KEY_SCHEMA], 
reflect.String) {
+            return "", nil, errors.New(formatErrorDescription(
+                Locale.MustBeOfType(),
+                ErrorDetails{
+                    "key":  KEY_SCHEMA,
+                    "type": TYPE_STRING,
+                },
+            ))
+        }
+
+        schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string))
+
+        if err != nil {
+            return "", nil, err
+        }
+
+        schema := schemaReference.String()
+
+        return schema, drafts.GetDraftVersion(schema), nil
+    }
+
+    return "", nil, nil
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go
new file mode 100644
index 000000000..e4e9814f3
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go
@@ -0,0 +1,364 @@
+package gojsonschema
+
+import (
+    "bytes"
+    "sync"
+    "text/template"
+)
+
+var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
+
+// template.Template is not thread-safe for writing, so some locking is done.
+// sync.RWMutex is used for efficient locking when new templates are created.
+type errorTemplate struct {
+    *template.Template
+    sync.RWMutex
+}
+
+type (
+
+    // FalseError. ErrorDetails: -
+    FalseError struct {
+        ResultErrorFields
+    }
+
+    // RequiredError indicates that a required field is missing
+    // ErrorDetails: property string
+    RequiredError struct {
+        ResultErrorFields
+    }
+
+    // InvalidTypeError indicates that a field has the incorrect type
+    // ErrorDetails: expected, given
+    InvalidTypeError struct {
+        ResultErrorFields
+    }
+
+    // NumberAnyOfError is produced in case of a failing "anyOf" validation
+    // ErrorDetails: -
+    NumberAnyOfError struct {
+        ResultErrorFields
+    }
+
+    // NumberOneOfError is produced in case of a failing "oneOf" validation
+    // ErrorDetails: -
+    NumberOneOfError struct {
+        ResultErrorFields
+    }
+
+    // NumberAllOfError is produced in case of a failing "allOf" validation
+    // ErrorDetails: -
+    NumberAllOfError struct {
+        ResultErrorFields
+    }
+
+    // NumberNotError is produced if a "not" validation failed
+    // ErrorDetails: -
+    NumberNotError struct {
+        ResultErrorFields
+    }
+
+    // MissingDependencyError is produced in case of a "missing dependency" problem
+    // ErrorDetails: dependency
+    MissingDependencyError struct {
+        ResultErrorFields
+    }
+
+    // InternalError indicates an internal error
+    // ErrorDetails: error
+    InternalError struct {
+        ResultErrorFields
+    }
+
+    // ConstError indicates a const error
+    // ErrorDetails: allowed
+    ConstError struct {
+        ResultErrorFields
+    }
+
+    // EnumError indicates an enum error
+    // ErrorDetails: allowed
+    EnumError struct {
+        ResultErrorFields
+    }
+
+    // ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed
+    // ErrorDetails: -
+    ArrayNoAdditionalItemsError struct {
+        ResultErrorFields
+    }
+
+    // ArrayMinItemsError is produced if an array contains fewer items than the allowed minimum
+    // ErrorDetails: min
+    ArrayMinItemsError struct {
+        ResultErrorFields
+    }
+
+    // ArrayMaxItemsError is produced if an array contains more items than the allowed maximum
+    // ErrorDetails: max
+    ArrayMaxItemsError struct {
+        ResultErrorFields
+    }
+
+    // ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items
+    // ErrorDetails: type, i, j
+    ItemsMustBeUniqueError struct {
+        ResultErrorFields
+    }
+
+    // ArrayContainsError is produced if an array contains invalid items
+    // ErrorDetails: -
+    ArrayContainsError struct {
+        ResultErrorFields
+    }
+
+    // ArrayMinPropertiesError is produced if an object contains fewer properties than the allowed minimum
+    // ErrorDetails: min
+    ArrayMinPropertiesError struct {
+        ResultErrorFields
+    }
+
+    // ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum
+    // ErrorDetails: max
+    ArrayMaxPropertiesError struct {
+        ResultErrorFields
+    }
+
+    // AdditionalPropertyNotAllowedError is produced if an object has additional properties that are not allowed
+    // ErrorDetails: property
+    AdditionalPropertyNotAllowedError struct {
+        ResultErrorFields
+    }
+
+    // InvalidPropertyPatternError is produced if an invalid pattern was found
+    // ErrorDetails: property, pattern
+    InvalidPropertyPatternError struct {
+        ResultErrorFields
+    }
+
+    // InvalidPropertyNameError is produced if an invalidly named property was found
+    // ErrorDetails: property
+    InvalidPropertyNameError struct {
+        ResultErrorFields
+    }
+
+    // StringLengthGTEError is produced if a string is shorter than the minimum required length
+    // ErrorDetails: min
+    StringLengthGTEError struct {
+        ResultErrorFields
+    }
+
+    // StringLengthLTEError is produced if a string is longer than the maximum allowed length
+    // ErrorDetails: max
+    StringLengthLTEError struct {
+        ResultErrorFields
+    }
+
+    // DoesNotMatchPatternError is produced if a string does not match the defined pattern
+    // ErrorDetails: pattern
+    DoesNotMatchPatternError struct {
+        ResultErrorFields
+    }
+
+    // DoesNotMatchFormatError is produced if a string does not match the defined format
+    // ErrorDetails: format
+    DoesNotMatchFormatError struct {
+        ResultErrorFields
+    }
+
+    // MultipleOfError is produced if a number is not a multiple of the defined multipleOf
+    // ErrorDetails: multiple
+    MultipleOfError struct {
+        ResultErrorFields
+    }
+
+    // NumberGTEError is produced if a number is lower than the allowed minimum
+    // ErrorDetails: min
+    NumberGTEError struct {
+        ResultErrorFields
+    }
+
+    // NumberGTError is produced if a number is lower than or equal to the specified minimum, and exclusiveMinimum is set
+    // ErrorDetails: min
+    NumberGTError struct {
+        ResultErrorFields
+    }
+
+    // NumberLTEError is produced if a number is higher than the allowed maximum
+    // ErrorDetails: max
+    NumberLTEError struct {
+        ResultErrorFields
+    }
+
+    // NumberLTError is produced if a number is higher than or equal to the specified maximum, and exclusiveMaximum is set
+    // ErrorDetails: max
+    NumberLTError struct {
+        ResultErrorFields
+    }
+
+    // ConditionThenError is produced if a condition's "then" validation is invalid
+    // ErrorDetails: -
+    ConditionThenError struct {
+        ResultErrorFields
+    }
+
+    // ConditionElseError is produced if a condition's "else" condition is invalid
+    // ErrorDetails: -
+    ConditionElseError struct {
+        ResultErrorFields
+    }
+)
+
+// newError takes a ResultError type and sets the type, context, description, details, value, and field
+func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) {
+    var t string
+    var d string
+    switch err.(type) {
+    case *FalseError:
+        t = "false"
+        d = locale.False()
+    case *RequiredError:
+        t = "required"
+        d = locale.Required()
+    case *InvalidTypeError:
+        t = "invalid_type"
+        d = locale.InvalidType()
+    case *NumberAnyOfError:
+        t = "number_any_of"
+        d = locale.NumberAnyOf()
+    case *NumberOneOfError:
+        t = "number_one_of"
+        d = locale.NumberOneOf()
+    case *NumberAllOfError:
+        t = "number_all_of"
+        d = locale.NumberAllOf()
+    case *NumberNotError:
+        t = "number_not"
+        d = locale.NumberNot()
+    case *MissingDependencyError:
+        t = "missing_dependency"
+        d = locale.MissingDependency()
+    case *InternalError:
t = "internal" + d = locale.Internal() + case *ConstError: + t = "const" + d = locale.Const() + case *EnumError: + t = "enum" + d = locale.Enum() + case *ArrayNoAdditionalItemsError: + t = "array_no_additional_items" + d = locale.ArrayNoAdditionalItems() + case *ArrayMinItemsError: + t = "array_min_items" + d = locale.ArrayMinItems() + case *ArrayMaxItemsError: + t = "array_max_items" + d = locale.ArrayMaxItems() + case *ItemsMustBeUniqueError: + t = "unique" + d = locale.Unique() + case *ArrayContainsError: + t = "contains" + d = locale.ArrayContains() + case *ArrayMinPropertiesError: + t = "array_min_properties" + d = locale.ArrayMinProperties() + case *ArrayMaxPropertiesError: + t = "array_max_properties" + d = locale.ArrayMaxProperties() + case *AdditionalPropertyNotAllowedError: + t = "additional_property_not_allowed" + d = locale.AdditionalPropertyNotAllowed() + case *InvalidPropertyPatternError: + t = "invalid_property_pattern" + d = locale.InvalidPropertyPattern() + case *InvalidPropertyNameError: + t = "invalid_property_name" + d = locale.InvalidPropertyName() + case *StringLengthGTEError: + t = "string_gte" + d = locale.StringGTE() + case *StringLengthLTEError: + t = "string_lte" + d = locale.StringLTE() + case *DoesNotMatchPatternError: + t = "pattern" + d = locale.DoesNotMatchPattern() + case *DoesNotMatchFormatError: + t = "format" + d = locale.DoesNotMatchFormat() + case *MultipleOfError: + t = "multiple_of" + d = locale.MultipleOf() + case *NumberGTEError: + t = "number_gte" + d = locale.NumberGTE() + case *NumberGTError: + t = "number_gt" + d = locale.NumberGT() + case *NumberLTEError: + t = "number_lte" + d = locale.NumberLTE() + case *NumberLTError: + t = "number_lt" + d = locale.NumberLT() + case *ConditionThenError: + t = "condition_then" + d = locale.ConditionThen() + case *ConditionElseError: + t = "condition_else" + d = locale.ConditionElse() + } + + err.SetType(t) + err.SetContext(context) + err.SetValue(value) + err.SetDetails(details) + err.SetDescriptionFormat(d) + details["field"] = err.Field() + + if _, exists := details["context"]; !exists && context != nil { + details["context"] = context.String() + } + + err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) +} + +// formatErrorDescription takes a string in the default text/template +// format and converts it to a string with replacements. The fields come +// from the ErrorDetails struct and vary for each type of error. 
+func formatErrorDescription(s string, details ErrorDetails) string {
+
+    var tpl *template.Template
+    var descrAsBuffer bytes.Buffer
+    var err error
+
+    errorTemplates.RLock()
+    tpl = errorTemplates.Lookup(s)
+    errorTemplates.RUnlock()
+
+    if tpl == nil {
+        errorTemplates.Lock()
+        tpl = errorTemplates.New(s)
+
+        if ErrorTemplateFuncs != nil {
+            tpl.Funcs(ErrorTemplateFuncs)
+        }
+
+        tpl, err = tpl.Parse(s)
+        errorTemplates.Unlock()
+
+        if err != nil {
+            return err.Error()
+        }
+    }
+
+    err = tpl.Execute(&descrAsBuffer, details)
+    if err != nil {
+        return err.Error()
+    }
+
+    return descrAsBuffer.String()
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
new file mode 100644
index 000000000..873ffc7d7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
@@ -0,0 +1,368 @@
+package gojsonschema
+
+import (
+    "net"
+    "net/mail"
+    "net/url"
+    "regexp"
+    "strings"
+    "sync"
+    "time"
+)
+
+type (
+    // FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+    FormatChecker interface {
+        // IsFormat checks if input has the correct format and type
+        IsFormat(input interface{}) bool
+    }
+
+    // FormatCheckerChain holds the formatters
+    FormatCheckerChain struct {
+        formatters map[string]FormatChecker
+    }
+
+    // EmailFormatChecker verifies email address formats
+    EmailFormatChecker struct{}
+
+    // IPV4FormatChecker verifies IP addresses in the IPv4 format
+    IPV4FormatChecker struct{}
+
+    // IPV6FormatChecker verifies IP addresses in the IPv6 format
+    IPV6FormatChecker struct{}
+
+    // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+    //
+    // Valid formats:
+    //     Partial Time: HH:MM:SS
+    //     Full Date: YYYY-MM-DD
+    //     Full Time: HH:MM:SSZ-07:00
+    //     Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
+    //
+    // Where
+    //     YYYY = 4DIGIT year
+    //     MM = 2DIGIT month ; 01-12
+    //     DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+    //     HH = 2DIGIT hour ; 00-23
+    //     MM = 2DIGIT ; 00-59
+    //     SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+    //     T = Literal
+    //     Z = Literal
+    //
+    // Note: Nanoseconds are also supported in all formats
+    //
+    // http://tools.ietf.org/html/rfc3339#section-5.6
+    DateTimeFormatChecker struct{}
+
+    // DateFormatChecker verifies date formats
+    //
+    // Valid format:
+    //     Full Date: YYYY-MM-DD
+    //
+    // Where
+    //     YYYY = 4DIGIT year
+    //     MM = 2DIGIT month ; 01-12
+    //     DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+    DateFormatChecker struct{}
+
+    // TimeFormatChecker verifies time formats
+    //
+    // Valid formats:
+    //     Partial Time: HH:MM:SS
+    //     Full Time: HH:MM:SSZ-07:00
+    //
+    // Where
+    //     HH = 2DIGIT hour ; 00-23
+    //     MM = 2DIGIT ; 00-59
+    //     SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+    //     T = Literal
+    //     Z = Literal
+    TimeFormatChecker struct{}
+
+    // URIFormatChecker validates a URI with a valid Scheme per RFC3986
+    URIFormatChecker struct{}
+
+    // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
+    URIReferenceFormatChecker struct{}
+
+    // URITemplateFormatChecker validates a URI template per RFC6570
+    URITemplateFormatChecker struct{}
+
+    // HostnameFormatChecker validates a hostname is in the correct format
+    HostnameFormatChecker struct{}
+
+    // UUIDFormatChecker validates a UUID is in the correct format
+    UUIDFormatChecker struct{}
+
+    // RegexFormatChecker validates a regex is in the correct format
+    RegexFormatChecker struct{}
+
+    // JSONPointerFormatChecker validates a JSON Pointer per RFC6901
+    JSONPointerFormatChecker struct{}
+
+    // RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format
+    RelativeJSONPointerFormatChecker struct{}
+)
+
+var (
+    // FormatCheckers holds the valid formatters, and is a public variable
+    // so library users can add custom formatters
+    FormatCheckers = FormatCheckerChain{
+        formatters: map[string]FormatChecker{
+            "date":                  DateFormatChecker{},
+            "time":                  TimeFormatChecker{},
+            "date-time":             DateTimeFormatChecker{},
+            "hostname":              HostnameFormatChecker{},
+            "email":                 EmailFormatChecker{},
+            "idn-email":             EmailFormatChecker{},
+            "ipv4":                  IPV4FormatChecker{},
+            "ipv6":                  IPV6FormatChecker{},
+            "uri":                   URIFormatChecker{},
+            "uri-reference":         URIReferenceFormatChecker{},
+            "iri":                   URIFormatChecker{},
+            "iri-reference":         URIReferenceFormatChecker{},
+            "uri-template":          URITemplateFormatChecker{},
+            "uuid":                  UUIDFormatChecker{},
+            "regex":                 RegexFormatChecker{},
+            "json-pointer":          JSONPointerFormatChecker{},
+            "relative-json-pointer": RelativeJSONPointerFormatChecker{},
+        },
+    }
+
+    // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
+    rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
+
+    // Use a regex to make sure curly brackets are balanced properly after validating it as a URI
+    rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$")
+
+    rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
+
+    rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$")
+
+    rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$")
+
+    lock = new(sync.RWMutex)
+)
+
+// Add adds a FormatChecker to the FormatCheckerChain
+// The name used will be the value used for the format key in your json schema
+func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
+    lock.Lock()
+    c.formatters[name] = f
+    lock.Unlock()
+
+    return c
+}
+
+// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
+func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
+    lock.Lock()
+    delete(c.formatters, name)
+    lock.Unlock()
+
+    return c
+}
+
+// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
+func (c *FormatCheckerChain) Has(name string) bool {
+    lock.RLock()
+    _, ok := c.formatters[name]
+    lock.RUnlock()
+
+    return ok
+}
+
+// IsFormat will check an input against a FormatChecker with the given name
+// to see if it is the correct format
+func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
+    lock.RLock()
+    f, ok := c.formatters[name]
+    lock.RUnlock()
+
+    // If a format is unrecognized it should always pass validation
+    if !ok {
+        return true
+    }
+
+    return f.IsFormat(input)
+}
+
+// IsFormat checks if input is a correctly formatted e-mail address
+func (f EmailFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    _, err := mail.ParseAddress(asString)
+    return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted IPv4-address
+func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    // Credit: https://github.com/asaskevich/govalidator
+    ip := net.ParseIP(asString)
+    return ip != nil && strings.Contains(asString, ".")
+}
+
+// IsFormat checks if input is a correctly formatted IPv6-address
+func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    // Credit: https://github.com/asaskevich/govalidator
+    ip := net.ParseIP(asString)
+    return ip != nil && strings.Contains(asString, ":")
+}
+
+// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
+func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    formats := []string{
+        "15:04:05",
+        "15:04:05Z07:00",
+        "2006-01-02",
+        time.RFC3339,
+        time.RFC3339Nano,
+    }
+
+    for _, format := range formats {
+        if _, err := time.Parse(format, asString); err == nil {
+            return true
+        }
+    }
+
+    return false
+}
+
+// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
+func (f DateFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+    _, err := time.Parse("2006-01-02", asString)
+    return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00)
+func (f TimeFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    if _, err := time.Parse("15:04:05Z07:00", asString); err == nil {
+        return true
+    }
+
+    _, err := time.Parse("15:04:05", asString)
+    return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted URI with a valid Scheme per RFC3986
+func (f URIFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    u, err := url.Parse(asString)
+
+    if err != nil || u.Scheme == "" {
+        return false
+    }
+
+    return !strings.Contains(asString, `\`)
+}
+
+// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
+func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    _, err := url.Parse(asString)
+    return err == nil && !strings.Contains(asString, `\`)
+}
+
+// IsFormat checks if input is a correctly formatted URI template per RFC6570
+func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    u, err := url.Parse(asString)
+    if err != nil || strings.Contains(asString, `\`) {
+        return false
+    }
+
+    return rxURITemplate.MatchString(u.Path)
+}
+
+// IsFormat checks if input is a correctly formatted hostname
+func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    return rxHostname.MatchString(asString) && len(asString) < 256
+}
+
+// IsFormat checks if input is a correctly formatted UUID
+func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    return rxUUID.MatchString(asString)
+}
+
+// IsFormat checks if input is a correctly formatted regular expression
+func (f RegexFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    if asString == "" {
+        return true
+    }
+    _, err := regexp.Compile(asString)
+    return err == nil
+}
+
+// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
+func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
+    asString, ok := input.(string)
+    if !ok {
+        return false
+    }
+
+    return rxJSONPointer.MatchString(asString)
+}
+
+// IsFormat checks if
input is a correctly formatted relative JSON Pointer +func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { + asString, ok := input.(string) + if !ok { + return false + } + + return rxRelJSONPointer.MatchString(asString) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml new file mode 100644 index 000000000..ab6fb867c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml @@ -0,0 +1,13 @@ +package: github.com/xeipuuv/gojsonschema +license: Apache 2.0 +import: +- package: github.com/xeipuuv/gojsonschema + +- package: github.com/xeipuuv/gojsonpointer + +- package: github.com/xeipuuv/gojsonreference + +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go new file mode 100644 index 000000000..4ef7a8d03 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go @@ -0,0 +1,37 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Very simple log wrapper. +// Used for debugging/testing purposes. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "log" +) + +const internalLogEnabled = false + +func internalLog(format string, v ...interface{}) { + log.Printf(format, v...) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go new file mode 100644 index 000000000..0e979707b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go @@ -0,0 +1,73 @@ +// Copyright 2013 MongoDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author tolsen +// author-github https://github.com/tolsen +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
+// +// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context +// +// created 04-09-2013 + +package gojsonschema + +import "bytes" + +// JsonContext implements a persistent linked-list of strings +type JsonContext struct { + head string + tail *JsonContext +} + +// NewJsonContext creates a new JsonContext +func NewJsonContext(head string, tail *JsonContext) *JsonContext { + return &JsonContext{head, tail} +} + +// String displays the context in reverse. +// This plays well with the data structure's persistent nature with +// Cons and a json document's tree structure. +func (c *JsonContext) String(del ...string) string { + byteArr := make([]byte, 0, c.stringLen()) + buf := bytes.NewBuffer(byteArr) + c.writeStringToBuffer(buf, del) + + return buf.String() +} + +func (c *JsonContext) stringLen() int { + length := 0 + if c.tail != nil { + length = c.tail.stringLen() + 1 // add 1 for "." + } + + length += len(c.head) + return length +} + +func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { + if c.tail != nil { + c.tail.writeStringToBuffer(buf, del) + + if len(del) > 0 { + buf.WriteString(del[0]) + } else { + buf.WriteString(".") + } + } + + buf.WriteString(c.head) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go new file mode 100644 index 000000000..5d88af263 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go @@ -0,0 +1,386 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Different strategies to load JSON files. +// Includes References (file and HTTP), JSON strings and Go types. 
+// +// created 01-02-2015 + +package gojsonschema + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +var osFS = osFileSystem(os.Open) + +// JSONLoader defines the JSON loader interface +type JSONLoader interface { + JsonSource() interface{} + LoadJSON() (interface{}, error) + JsonReference() (gojsonreference.JsonReference, error) + LoaderFactory() JSONLoaderFactory +} + +// JSONLoaderFactory defines the JSON loader factory interface +type JSONLoaderFactory interface { + // New creates a new JSON loader for the given source + New(source string) JSONLoader +} + +// DefaultJSONLoaderFactory is the default JSON loader factory +type DefaultJSONLoaderFactory struct { +} + +// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem +type FileSystemJSONLoaderFactory struct { + fs http.FileSystem +} + +// New creates a new JSON loader for the given source +func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// New creates a new JSON loader for the given source +func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: f.fs, + source: source, + } +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +// Opens a file with the given name +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) +} + +// JSON Reference loader +// references are used to load JSONs from files and HTTP + +type jsonReferenceLoader struct { + fs http.FileSystem + source string +} + +func (l *jsonReferenceLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { + return &FileSystemJSONLoaderFactory{ + fs: l.fs, + } +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. +func NewReferenceLoader(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } +} + +func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { + + var err error + + reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) + if err != nil { + return nil, err + } + + refToURL := reference + refToURL.GetUrl().Fragment = "" + + var document interface{} + + if reference.HasFileScheme { + + filename := strings.TrimPrefix(refToURL.String(), "file://") + filename, err = url.QueryUnescape(filename) + + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, use slashes + // instead of backslashes, and have spaces escaped + filename = strings.TrimPrefix(filename, "/") + filename = filepath.FromSlash(filename) + } + + document, err = l.loadFromFile(filename) + if err != nil { + return nil, err + } + + } else { + + document, err = l.loadFromHTTP(refToURL.String()) + if err != nil { + return nil, err + } + + } + + return document, nil + +} + +func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { + + // returned cached versions for metaschemas for drafts 4, 6 and 7 + // for performance and allow for easier offline use + if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" { + return decodeJSONUsingNumber(strings.NewReader(metaSchema)) + } + + resp, err := http.Get(address) + if err != nil { + return nil, err + } + + // must return HTTP Status 200 OK + if resp.StatusCode != http.StatusOK { + return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) + } + + bodyBuff, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) +} + +func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + bodyBuff, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) + +} + +// JSON string loader + +type jsonStringLoader struct { + source string +} + +func (l *jsonStringLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewStringLoader creates a new JSONLoader, taking a string as source +func NewStringLoader(source string) JSONLoader { + return &jsonStringLoader{source: source} +} + +func (l *jsonStringLoader) LoadJSON() (interface{}, error) { + + return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string))) + +} + +// JSON bytes loader + +type jsonBytesLoader struct { + source []byte +} + +func (l *jsonBytesLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source +func NewBytesLoader(source []byte) JSONLoader { + return &jsonBytesLoader{source: source} +} + +func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { + return 
decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) +} + +// JSON Go (types) loader +// used to load JSONs from the code as maps, interface{}, structs ... + +type jsonGoLoader struct { + source interface{} +} + +func (l *jsonGoLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewGoLoader creates a new JSONLoader from a given Go struct +func NewGoLoader(source interface{}) JSONLoader { + return &jsonGoLoader{source: source} +} + +func (l *jsonGoLoader) LoadJSON() (interface{}, error) { + + // convert it to a compliant JSON first to avoid types "mismatches" + + jsonBytes, err := json.Marshal(l.JsonSource()) + if err != nil { + return nil, err + } + + return decodeJSONUsingNumber(bytes.NewReader(jsonBytes)) + +} + +type jsonIOLoader struct { + buf *bytes.Buffer +} + +// NewReaderLoader creates a new JSON loader using the provided io.Reader +func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) +} + +// NewWriterLoader creates a new JSON loader using the provided io.Writer +func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) +} + +func (l *jsonIOLoader) JsonSource() interface{} { + return l.buf.String() +} + +func (l *jsonIOLoader) LoadJSON() (interface{}, error) { + return decodeJSONUsingNumber(l.buf) +} + +func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// JSON raw loader +// In case the JSON is already marshalled to interface{} use this loader +// This is used for testing as otherwise there is no guarantee the JSON is marshalled +// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber +type jsonRawLoader struct { + source interface{} +} + +// NewRawLoader creates a new JSON raw loader for the given source +func NewRawLoader(source interface{}) JSONLoader { + return &jsonRawLoader{source: source} +} +func (l *jsonRawLoader) JsonSource() interface{} { + return l.source +} +func (l *jsonRawLoader) LoadJSON() (interface{}, error) { + return l.source, nil +} +func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} +func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { + + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go new file mode 100644 index 000000000..a416225cd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go @@ -0,0 +1,472 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const string and messages. +// +// created 01-01-2015 + +package gojsonschema + +type ( + // locale is an interface for defining custom error strings + locale interface { + + // False returns a format-string for "false" schema validation errors + False() string + + // Required returns a format-string for "required" schema validation errors + Required() string + + // InvalidType returns a format-string for "invalid type" schema validation errors + InvalidType() string + + // NumberAnyOf returns a format-string for "anyOf" schema validation errors + NumberAnyOf() string + + // NumberOneOf returns a format-string for "oneOf" schema validation errors + NumberOneOf() string + + // NumberAllOf returns a format-string for "allOf" schema validation errors + NumberAllOf() string + + // NumberNot returns a format-string to format a NumberNotError + NumberNot() string + + // MissingDependency returns a format-string for "missing dependency" schema validation errors + MissingDependency() string + + // Internal returns a format-string for internal errors + Internal() string + + // Const returns a format-string to format a ConstError + Const() string + + // Enum returns a format-string to format an EnumError + Enum() string + + // ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema + ArrayNotEnoughItems() string + + // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError + ArrayNoAdditionalItems() string + + // ArrayMinItems returns a format-string to format an ArrayMinItemsError + ArrayMinItems() string + + // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError + ArrayMaxItems() string + + // Unique returns a format-string to format an ItemsMustBeUniqueError + Unique() string + + // ArrayContains returns a format-string to format an ArrayContainsError + ArrayContains() string + + // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError + ArrayMinProperties() string + + // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError + ArrayMaxProperties() string + + // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError + AdditionalPropertyNotAllowed() string + + // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError + InvalidPropertyPattern() string + + // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError + InvalidPropertyName() string + + // StringGTE returns a format-string to format an StringLengthGTEError + StringGTE() string + + // StringLTE returns a format-string to format an StringLengthLTEError + StringLTE() string + + // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError + DoesNotMatchPattern() string 
+
+		// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError
+		DoesNotMatchFormat() string
+
+		// MultipleOf returns a format-string to format an MultipleOfError
+		MultipleOf() string
+
+		// NumberGTE returns a format-string to format an NumberGTEError
+		NumberGTE() string
+
+		// NumberGT returns a format-string to format an NumberGTError
+		NumberGT() string
+
+		// NumberLTE returns a format-string to format an NumberLTEError
+		NumberLTE() string
+
+		// NumberLT returns a format-string to format an NumberLTError
+		NumberLT() string
+
+		// Schema validations
+
+		// RegexPattern returns a format-string to format a regex-pattern error
+		RegexPattern() string
+
+		// GreaterThanZero returns a format-string to format an error where a number must be greater than zero
+		GreaterThanZero() string
+
+		// MustBeOfA returns a format-string to format an error where a value is of the wrong type
+		MustBeOfA() string
+
+		// MustBeOfAn returns a format-string to format an error where a value is of the wrong type
+		MustBeOfAn() string
+
+		// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error
+		CannotBeUsedWithout() string
+
+		// CannotBeGT returns a format-string to format an error where a value is greater than allowed
+		CannotBeGT() string
+
+		// MustBeOfType returns a format-string to format an error where a value does not match the required type
+		MustBeOfType() string
+
+		// MustBeValidRegex returns a format-string to format an error where a regex is invalid
+		MustBeValidRegex() string
+
+		// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format
+		MustBeValidFormat() string
+
+		// MustBeGTEZero returns a format-string to format an error where a value must be greater than or equal to 0
+		MustBeGTEZero() string
+
+		// KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed
+		KeyCannotBeGreaterThan() string
+
+		// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type
+		KeyItemsMustBeOfType() string
+
+		// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique
+		KeyItemsMustBeUnique() string
+
+		// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error
+		ReferenceMustBeCanonical() string
+
+		// NotAValidType returns a format-string to format an invalid type error
+		NotAValidType() string
+
+		// Duplicated returns a format-string to format an error where types are duplicated
+		Duplicated() string
+
+		// HttpBadStatus returns a format-string for errors when loading a schema using HTTP
+		HttpBadStatus() string
+
+		// ParseError returns a format-string for JSON parsing errors
+		ParseError() string
+
+		// ConditionThen returns a format-string for ConditionThenError errors
+		ConditionThen() string
+
+		// ConditionElse returns a format-string for ConditionElseError errors
+		ConditionElse() string
+
+		// ErrorFormat returns a format string for errors
+		ErrorFormat() string
+	}
+
+	// DefaultLocale is the default locale for this package
+	DefaultLocale struct{}
+)
+
+// False returns a format-string for "false" schema validation errors
+func (l DefaultLocale) False() string {
+	return "False always fails validation"
+}
+
+// Required returns a format-string for "required" schema validation errors
+func (l DefaultLocale) Required() string {
+	return `{{.property}} is required`
+}
+
+// InvalidType returns a format-string for
"invalid type" schema validation errors +func (l DefaultLocale) InvalidType() string { + return `Invalid type. Expected: {{.expected}}, given: {{.given}}` +} + +// NumberAnyOf returns a format-string for "anyOf" schema validation errors +func (l DefaultLocale) NumberAnyOf() string { + return `Must validate at least one schema (anyOf)` +} + +// NumberOneOf returns a format-string for "oneOf" schema validation errors +func (l DefaultLocale) NumberOneOf() string { + return `Must validate one and only one schema (oneOf)` +} + +// NumberAllOf returns a format-string for "allOf" schema validation errors +func (l DefaultLocale) NumberAllOf() string { + return `Must validate all the schemas (allOf)` +} + +// NumberNot returns a format-string to format a NumberNotError +func (l DefaultLocale) NumberNot() string { + return `Must not validate the schema (not)` +} + +// MissingDependency returns a format-string for "missing dependency" schema validation errors +func (l DefaultLocale) MissingDependency() string { + return `Has a dependency on {{.dependency}}` +} + +// Internal returns a format-string for internal errors +func (l DefaultLocale) Internal() string { + return `Internal Error {{.error}}` +} + +// Const returns a format-string to format a ConstError +func (l DefaultLocale) Const() string { + return `{{.field}} does not match: {{.allowed}}` +} + +// Enum returns a format-string to format an EnumError +func (l DefaultLocale) Enum() string { + return `{{.field}} must be one of the following: {{.allowed}}` +} + +// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError +func (l DefaultLocale) ArrayNoAdditionalItems() string { + return `No additional items allowed on array` +} + +// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema +func (l DefaultLocale) ArrayNotEnoughItems() string { + return `Not enough items on array to match positional list of schema` +} + +// ArrayMinItems returns a format-string to format an ArrayMinItemsError +func (l DefaultLocale) ArrayMinItems() string { + return `Array must have at least {{.min}} items` +} + +// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError +func (l DefaultLocale) ArrayMaxItems() string { + return `Array must have at most {{.max}} items` +} + +// Unique returns a format-string to format an ItemsMustBeUniqueError +func (l DefaultLocale) Unique() string { + return `{{.type}} items[{{.i}},{{.j}}] must be unique` +} + +// ArrayContains returns a format-string to format an ArrayContainsError +func (l DefaultLocale) ArrayContains() string { + return `At least one of the items must match` +} + +// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError +func (l DefaultLocale) ArrayMinProperties() string { + return `Must have at least {{.min}} properties` +} + +// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError +func (l DefaultLocale) ArrayMaxProperties() string { + return `Must have at most {{.max}} properties` +} + +// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError +func (l DefaultLocale) AdditionalPropertyNotAllowed() string { + return `Additional property {{.property}} is not allowed` +} + +// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError +func (l DefaultLocale) InvalidPropertyPattern() string { + return `Property "{{.property}}" does not match pattern {{.pattern}}` +} 
+
+// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError
+func (l DefaultLocale) InvalidPropertyName() string {
+	return `Property name of "{{.property}}" does not match`
+}
+
+// StringGTE returns a format-string to format an StringLengthGTEError
+func (l DefaultLocale) StringGTE() string {
+	return `String length must be greater than or equal to {{.min}}`
+}
+
+// StringLTE returns a format-string to format an StringLengthLTEError
+func (l DefaultLocale) StringLTE() string {
+	return `String length must be less than or equal to {{.max}}`
+}
+
+// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError
+func (l DefaultLocale) DoesNotMatchPattern() string {
+	return `Does not match pattern '{{.pattern}}'`
+}
+
+// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError
+func (l DefaultLocale) DoesNotMatchFormat() string {
+	return `Does not match format '{{.format}}'`
+}
+
+// MultipleOf returns a format-string to format an MultipleOfError
+func (l DefaultLocale) MultipleOf() string {
+	return `Must be a multiple of {{.multiple}}`
+}
+
+// NumberGTE returns the format string to format a NumberGTEError
+func (l DefaultLocale) NumberGTE() string {
+	return `Must be greater than or equal to {{.min}}`
+}
+
+// NumberGT returns the format string to format a NumberGTError
+func (l DefaultLocale) NumberGT() string {
+	return `Must be greater than {{.min}}`
+}
+
+// NumberLTE returns the format string to format a NumberLTEError
+func (l DefaultLocale) NumberLTE() string {
+	return `Must be less than or equal to {{.max}}`
+}
+
+// NumberLT returns the format string to format a NumberLTError
+func (l DefaultLocale) NumberLT() string {
+	return `Must be less than {{.max}}`
+}
+
+// Schema validators
+
+// RegexPattern returns a format-string to format a regex-pattern error
+func (l DefaultLocale) RegexPattern() string {
+	return `Invalid regex pattern '{{.pattern}}'`
+}
+
+// GreaterThanZero returns a format-string to format an error where a number must be greater than zero
+func (l DefaultLocale) GreaterThanZero() string {
+	return `{{.number}} must be strictly greater than 0`
+}
+
+// MustBeOfA returns a format-string to format an error where a value is of the wrong type
+func (l DefaultLocale) MustBeOfA() string {
+	return `{{.x}} must be of a {{.y}}`
+}
+
+// MustBeOfAn returns a format-string to format an error where a value is of the wrong type
+func (l DefaultLocale) MustBeOfAn() string {
+	return `{{.x}} must be of an {{.y}}`
+}
+
+// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error
+func (l DefaultLocale) CannotBeUsedWithout() string {
+	return `{{.x}} cannot be used without {{.y}}`
+}
+
+// CannotBeGT returns a format-string to format an error where a value is greater than allowed
+func (l DefaultLocale) CannotBeGT() string {
+	return `{{.x}} cannot be greater than {{.y}}`
+}
+
+// MustBeOfType returns a format-string to format an error where a value does not match the required type
+func (l DefaultLocale) MustBeOfType() string {
+	return `{{.key}} must be of type {{.type}}`
+}
+
+// MustBeValidRegex returns a format-string to format an error where a regex is invalid
+func (l DefaultLocale) MustBeValidRegex() string {
+	return `{{.key}} must be a valid regex`
+}
+
+// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format
+func (l DefaultLocale) MustBeValidFormat() string {
+	return `{{.key}} must be a valid format {{.given}}`
+}
+
+// MustBeGTEZero returns a format-string to format an error where a value must be greater than or equal to 0
+func (l DefaultLocale) MustBeGTEZero() string {
+	return `{{.key}} must be greater than or equal to 0`
+}
+
+// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed
+func (l DefaultLocale) KeyCannotBeGreaterThan() string {
+	return `{{.key}} cannot be greater than {{.y}}`
+}
+
+// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type
+func (l DefaultLocale) KeyItemsMustBeOfType() string {
+	return `{{.key}} items must be {{.type}}`
+}
+
+// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique
+func (l DefaultLocale) KeyItemsMustBeUnique() string {
+	return `{{.key}} items must be unique`
+}
+
+// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error
+func (l DefaultLocale) ReferenceMustBeCanonical() string {
+	return `Reference {{.reference}} must be canonical`
+}
+
+// NotAValidType returns a format-string to format an invalid type error
+func (l DefaultLocale) NotAValidType() string {
+	return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}`
+}
+
+// Duplicated returns a format-string to format an error where types are duplicated
+func (l DefaultLocale) Duplicated() string {
+	return `{{.type}} type is duplicated`
+}
+
+// HttpBadStatus returns a format-string for errors when loading a schema using HTTP
+func (l DefaultLocale) HttpBadStatus() string {
+	return `Could not read schema from HTTP, response status is {{.status}}`
+}
+
+// ErrorFormat returns a format string for errors
+// Replacement options: field, description, context, value
+func (l DefaultLocale) ErrorFormat() string {
+	return `{{.field}}: {{.description}}`
+}
+
+// ParseError returns a format-string for JSON parsing errors
+func (l DefaultLocale) ParseError() string {
+	return `Expected: {{.expected}}, given: Invalid JSON`
+}
+
+// ConditionThen returns a format-string for ConditionThenError errors
+// If/Else
+func (l DefaultLocale) ConditionThen() string {
+	return `Must validate "then" as "if" was valid`
+}
+
+// ConditionElse returns a format-string for ConditionElseError errors
+func (l DefaultLocale) ConditionElse() string {
+	return `Must validate "else" as "if" was not valid`
+}
+
+// constants
+const (
+	STRING_NUMBER = "number"
+	STRING_ARRAY_OF_STRINGS = "array of strings"
+	STRING_ARRAY_OF_SCHEMAS = "array of schemas"
+	STRING_SCHEMA = "valid schema"
+	STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
+	STRING_PROPERTIES = "properties"
+	STRING_DEPENDENCY = "dependency"
+	STRING_PROPERTY = "property"
+	STRING_UNDEFINED = "undefined"
+	STRING_CONTEXT_ROOT = "(root)"
+	STRING_ROOT_SCHEMA_PROPERTY = "(root)"
+)
diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go
new file mode 100644
index 000000000..0a0179148
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/result.go
@@ -0,0 +1,220 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Result and ResultError implementations. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "fmt" + "strings" +) + +type ( + // ErrorDetails is a map of details specific to each error. + // While the values will vary, every error will contain a "field" value + ErrorDetails map[string]interface{} + + // ResultError is the interface that library errors must implement + ResultError interface { + // Field returns the field name without the root context + // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName + Field() string + // SetType sets the error-type + SetType(string) + // Type returns the error-type + Type() string + // SetContext sets the JSON-context for the error + SetContext(*JsonContext) + // Context returns the JSON-context of the error + Context() *JsonContext + // SetDescription sets a description for the error + SetDescription(string) + // Description returns the description of the error + Description() string + // SetDescriptionFormat sets the format for the description in the default text/template format + SetDescriptionFormat(string) + // DescriptionFormat returns the format for the description in the default text/template format + DescriptionFormat() string + // SetValue sets the value related to the error + SetValue(interface{}) + // Value returns the value related to the error + Value() interface{} + // SetDetails sets the details specific to the error + SetDetails(ErrorDetails) + // Details returns details about the error + Details() ErrorDetails + // String returns a string representation of the error + String() string + } + + // ResultErrorFields holds the fields for each ResultError implementation. + // ResultErrorFields implements the ResultError interface, so custom errors + // can be defined by just embedding this type + ResultErrorFields struct { + errorType string // A string with the type of error (i.e. invalid_type) + context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... + description string // A human readable error message + descriptionFormat string // A format for human readable error message + value interface{} // Value given by the JSON file that is the source of the error + details ErrorDetails + } + + // Result holds the result of a validation + Result struct { + errors []ResultError + // Scores how well the validation matched. Useful in generating + // better error messages for anyOf and oneOf. + score int + } +) + +// Field returns the field name without the root context +// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName
+func (v *ResultErrorFields) Field() string {
+	return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
+}
+
+// SetType sets the error-type
+func (v *ResultErrorFields) SetType(errorType string) {
+	v.errorType = errorType
+}
+
+// Type returns the error-type
+func (v *ResultErrorFields) Type() string {
+	return v.errorType
+}
+
+// SetContext sets the JSON-context for the error
+func (v *ResultErrorFields) SetContext(context *JsonContext) {
+	v.context = context
+}
+
+// Context returns the JSON-context of the error
+func (v *ResultErrorFields) Context() *JsonContext {
+	return v.context
+}
+
+// SetDescription sets a description for the error
+func (v *ResultErrorFields) SetDescription(description string) {
+	v.description = description
+}
+
+// Description returns the description of the error
+func (v *ResultErrorFields) Description() string {
+	return v.description
+}
+
+// SetDescriptionFormat sets the format for the description in the default text/template format
+func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) {
+	v.descriptionFormat = descriptionFormat
+}
+
+// DescriptionFormat returns the format for the description in the default text/template format
+func (v *ResultErrorFields) DescriptionFormat() string {
+	return v.descriptionFormat
+}
+
+// SetValue sets the value related to the error
+func (v *ResultErrorFields) SetValue(value interface{}) {
+	v.value = value
+}
+
+// Value returns the value related to the error
+func (v *ResultErrorFields) Value() interface{} {
+	return v.value
+}
+
+// SetDetails sets the details specific to the error
+func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
+	v.details = details
+}
+
+// Details returns details about the error
+func (v *ResultErrorFields) Details() ErrorDetails {
+	return v.details
+}
+
+// String returns a string representation of the error
+func (v ResultErrorFields) String() string {
+	// as a fallback, the value is displayed go style
+	valueString := fmt.Sprintf("%v", v.value)
+
+	// marshal the go value to json
+	if v.value == nil {
+		valueString = TYPE_NULL
+	} else {
+		if vs, err := marshalToJSONString(v.value); err == nil {
+			if vs == nil {
+				valueString = TYPE_NULL
+			} else {
+				valueString = *vs
+			}
+		}
+	}
+
+	return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
+		"context": v.context.String(),
+		"description": v.description,
+		"value": valueString,
+		"field": v.Field(),
+	})
+}
+
+// Valid indicates if no errors were found
+func (v *Result) Valid() bool {
+	return len(v.errors) == 0
+}
+
+// Errors returns the errors that were found
+func (v *Result) Errors() []ResultError {
+	return v.errors
+}
+
+// AddError appends a fully filled error to the error set
+// SetDescription() will be called with the result of the parsed err.DescriptionFormat()
+func (v *Result) AddError(err ResultError, details ErrorDetails) {
+	if _, exists := details["context"]; !exists && err.Context() != nil {
+		details["context"] = err.Context().String()
+	}
+
+	err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
+
+	v.errors = append(v.errors, err)
+}
+
+func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) {
+	newError(err, context, value, Locale, details)
+	v.errors = append(v.errors, err)
+	v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
+}
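For orientation, a sketch of how the Result API above is typically consumed by callers; Validate is defined elsewhere in the vendored package (validation.go, outside this hunk), and the schema and document literals are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
	document := gojsonschema.NewStringLoader(`{"age": 7}`)

	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		log.Fatal(err) // the schema itself failed to load or compile
	}

	if !result.Valid() {
		for _, e := range result.Errors() {
			// Field() trims the "(root)." prefix; Description() is the
			// rendered locale template, e.g. "name is required".
			fmt.Printf("%s: %s\n", e.Field(), e.Description())
		}
	}
}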
+ +// Used to copy errors from a sub-schema to the main one +func (v *Result) mergeErrors(otherResult *Result) { + v.errors = append(v.errors, otherResult.Errors()...) + v.score += otherResult.score +} + +func (v *Result) incrementScore() { + v.score++ +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go new file mode 100644 index 000000000..9e93cd795 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go @@ -0,0 +1,1087 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines Schema, the main entry to every subSchema. +// Contains the parsing logic and error checking. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "math/big" + "reflect" + "regexp" + "text/template" + + "github.com/xeipuuv/gojsonreference" +) + +var ( + // Locale is the default locale to use + // Library users can overwrite with their own implementation + Locale locale = DefaultLocale{} + + // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. + ErrorTemplateFuncs template.FuncMap +) + +// NewSchema instances a schema using the given JSONLoader +func NewSchema(l JSONLoader) (*Schema, error) { + return NewSchemaLoader().Compile(l) +} + +// Schema holds a schema +type Schema struct { + documentReference gojsonreference.JsonReference + rootSchema *subSchema + pool *schemaPool + referencePool *schemaReferencePool +} + +func (d *Schema) parse(document interface{}, draft Draft) error { + d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft} + return d.parseSchema(document, d.rootSchema) +} + +// SetRootSchemaName sets the root-schema name +func (d *Schema) SetRootSchemaName(name string) { + d.rootSchema.property = name +} + +// Parses a subSchema +// +// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring +// Not much magic involved here, most of the job is to validate the key names and their values, +// then the values are copied into subSchema struct +// +func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { + + if currentSchema.draft == nil { + if currentSchema.parent == nil { + return errors.New("Draft not set") + } + currentSchema.draft = currentSchema.parent.draft + } + + // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}" + if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) { + b := documentNode.(bool) + currentSchema.pass = &b + return nil + } + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.ParseError(), + ErrorDetails{ + "expected": STRING_SCHEMA, + }, + )) + } + + m := documentNode.(map[string]interface{}) + + if currentSchema.parent == nil { + currentSchema.ref = &d.documentReference + currentSchema.id = &d.documentReference + } + + if currentSchema.id == nil && currentSchema.parent != nil { + currentSchema.id = currentSchema.parent.id + } + + // In draft 6 the id keyword was renamed to $id + // Hybrid mode uses the old id by default + var keyID string + + switch *currentSchema.draft { + case Draft4: + keyID = KEY_ID + case Hybrid: + keyID = KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + default: + keyID = KEY_ID_NEW + } + if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": keyID, + }, + )) + } + if k, ok := m[keyID].(string); ok { + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + if currentSchema == d.rootSchema { + currentSchema.id = &jsonReference + } else { + ref, err := currentSchema.parent.id.Inherits(jsonReference) + if err != nil { + return err + } + currentSchema.id = ref + } + } + + // definitions + if existsMapKey(m, KEY_DEFINITIONS) { + if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) { + for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { + if isKind(dv, reflect.Map, reflect.Bool) { + + newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema} + + err := d.parseSchema(dv, newSchema) + + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + + } + + // title + if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_TITLE, + }, + )) + } + if k, ok := m[KEY_TITLE].(string); ok { + currentSchema.title = &k + } + + // description + if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_DESCRIPTION, + }, + )) + } + if k, ok := m[KEY_DESCRIPTION].(string); ok { + currentSchema.description = &k + } + + // $ref + if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { + return errors.New(formatErrorDescription( + 
Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_REF, + }, + )) + } + + if k, ok := m[KEY_REF].(string); ok { + + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + + currentSchema.ref = &jsonReference + + if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { + currentSchema.refSchema = sch + } else { + err := d.parseReference(documentNode, currentSchema) + + if err != nil { + return err + } + + return nil + } + } + + // type + if existsMapKey(m, KEY_TYPE) { + if isKind(m[KEY_TYPE], reflect.String) { + if k, ok := m[KEY_TYPE].(string); ok { + err := currentSchema.types.Add(k) + if err != nil { + return err + } + } + } else { + if isKind(m[KEY_TYPE], reflect.Slice) { + arrayOfTypes := m[KEY_TYPE].([]interface{}) + for _, typeInArray := range arrayOfTypes { + if reflect.ValueOf(typeInArray).Kind() != reflect.String { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + if err := currentSchema.types.Add(typeInArray.(string)); err != nil { + return err + } + } + + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + } + } + + // properties + if existsMapKey(m, KEY_PROPERTIES) { + err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) + if err != nil { + return err + } + } + + // additionalProperties + if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { + if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { + currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) + } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalProperties = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_PROPERTIES, + }, + )) + } + } + + // patternProperties + if existsMapKey(m, KEY_PATTERN_PROPERTIES) { + if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { + patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) + if len(patternPropertiesMap) > 0 { + currentSchema.patternProperties = make(map[string]*subSchema) + for k, v := range patternPropertiesMap { + _, err := regexp.MatchString(k, "") + if err != nil { + return errors.New(formatErrorDescription( + Locale.RegexPattern(), + ErrorDetails{"pattern": k}, + )) + } + newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err = d.parseSchema(v, newSchema) + if err != nil { + return errors.New(err.Error()) + } + currentSchema.patternProperties[k] = newSchema + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // propertyNames + if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 { + if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) { + newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertyNames = newSchema + err := 
d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema)
+			if err != nil {
+				return err
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_SCHEMA,
+					"given": KEY_PROPERTY_NAMES,
+				},
+			))
+		}
+	}
+
+	// dependencies
+	if existsMapKey(m, KEY_DEPENDENCIES) {
+		err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema)
+		if err != nil {
+			return err
+		}
+	}
+
+	// items
+	if existsMapKey(m, KEY_ITEMS) {
+		if isKind(m[KEY_ITEMS], reflect.Slice) {
+			for _, itemElement := range m[KEY_ITEMS].([]interface{}) {
+				if isKind(itemElement, reflect.Map, reflect.Bool) {
+					newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+					newSchema.ref = currentSchema.ref
+					currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema)
+					err := d.parseSchema(itemElement, newSchema)
+					if err != nil {
+						return err
+					}
+				} else {
+					return errors.New(formatErrorDescription(
+						Locale.InvalidType(),
+						ErrorDetails{
+							"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+							"given": KEY_ITEMS,
+						},
+					))
+				}
+				currentSchema.itemsChildrenIsSingleSchema = false
+			}
+		} else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) {
+			newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+			newSchema.ref = currentSchema.ref
+			currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema)
+			err := d.parseSchema(m[KEY_ITEMS], newSchema)
+			if err != nil {
+				return err
+			}
+			currentSchema.itemsChildrenIsSingleSchema = true
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+					"given": KEY_ITEMS,
+				},
+			))
+		}
+	}
+
+	// additionalItems
+	if existsMapKey(m, KEY_ADDITIONAL_ITEMS) {
+		if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) {
+			currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool)
+		} else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) {
+			newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.additionalItems = newSchema
+			err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema)
+			if err != nil {
+				return errors.New(err.Error())
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+					"given": KEY_ADDITIONAL_ITEMS,
+				},
+			))
+		}
+	}
+
+	// validation : number / integer
+
+	if existsMapKey(m, KEY_MULTIPLE_OF) {
+		multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF])
+		if multipleOfValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.InvalidType(),
+				ErrorDetails{
+					"expected": STRING_NUMBER,
+					"given": KEY_MULTIPLE_OF,
+				},
+			))
+		}
+		if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 {
+			return errors.New(formatErrorDescription(
+				Locale.GreaterThanZero(),
+				ErrorDetails{"number": KEY_MULTIPLE_OF},
+			))
+		}
+		currentSchema.multipleOf = multipleOfValue
+	}
+
+	if existsMapKey(m, KEY_MINIMUM) {
+		minimumValue := mustBeNumber(m[KEY_MINIMUM])
+		if minimumValue == nil {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfA(),
+				ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER},
+			))
+		}
+		currentSchema.minimum = minimumValue
+	}
+
+	if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) {
+		switch *currentSchema.draft {
+		case Draft4:
+			if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
+				return errors.New(formatErrorDescription(
+					Locale.InvalidType(),
+					ErrorDetails{
+						"expected": TYPE_BOOLEAN,
+						"given": KEY_EXCLUSIVE_MINIMUM,
+					},
+
)) + } + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MINIMUM].(bool) { + currentSchema.exclusiveMinimum = currentSchema.minimum + currentSchema.minimum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { + currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MINIMUM, + }, + )) + } + } + } + + if existsMapKey(m, KEY_MAXIMUM) { + maximumValue := mustBeNumber(m[KEY_MAXIMUM]) + if maximumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.maximum = maximumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { + switch *currentSchema.draft { + case Draft4: + if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + case Hybrid: + if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { + currentSchema.exclusiveMaximum = currentSchema.maximum + currentSchema.maximum = nil + } + } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + default: + if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { + currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_NUMBER, + "given": KEY_EXCLUSIVE_MAXIMUM, + }, + )) + } + } + } + + // validation : string + + if existsMapKey(m, KEY_MIN_LENGTH) { + minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) + if minLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, + )) + } + if 
*minLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_LENGTH}, + )) + } + currentSchema.minLength = minLengthIntegerValue + } + + if existsMapKey(m, KEY_MAX_LENGTH) { + maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) + if maxLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *maxLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_LENGTH}, + )) + } + currentSchema.maxLength = maxLengthIntegerValue + } + + if currentSchema.minLength != nil && currentSchema.maxLength != nil { + if *currentSchema.minLength > *currentSchema.maxLength { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, + )) + } + } + + if existsMapKey(m, KEY_PATTERN) { + if isKind(m[KEY_PATTERN], reflect.String) { + regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) + if err != nil { + return errors.New(formatErrorDescription( + Locale.MustBeValidRegex(), + ErrorDetails{"key": KEY_PATTERN}, + )) + } + currentSchema.pattern = regexpObject + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, + )) + } + } + + if existsMapKey(m, KEY_FORMAT) { + formatString, ok := m[KEY_FORMAT].(string) + if !ok { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING}, + )) + } + currentSchema.format = formatString + } + + // validation : object + + if existsMapKey(m, KEY_MIN_PROPERTIES) { + minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) + if minPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *minPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_PROPERTIES}, + )) + } + currentSchema.minProperties = minPropertiesIntegerValue + } + + if existsMapKey(m, KEY_MAX_PROPERTIES) { + maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) + if maxPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *maxPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_PROPERTIES}, + )) + } + currentSchema.maxProperties = maxPropertiesIntegerValue + } + + if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { + if *currentSchema.minProperties > *currentSchema.maxProperties { + return errors.New(formatErrorDescription( + Locale.KeyCannotBeGreaterThan(), + ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, + )) + } + } + + if existsMapKey(m, KEY_REQUIRED) { + if isKind(m[KEY_REQUIRED], reflect.Slice) { + requiredValues := m[KEY_REQUIRED].([]interface{}) + for _, requiredValue := range requiredValues { + if isKind(requiredValue, reflect.String) { + if isStringInSlice(currentSchema.required, requiredValue.(string)) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_REQUIRED}, + )) + } + currentSchema.required = append(currentSchema.required, requiredValue.(string)) + } else 
{ + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeOfType(), + ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, + )) + } + } + + // validation : array + + if existsMapKey(m, KEY_MIN_ITEMS) { + minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) + if minItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *minItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_ITEMS}, + )) + } + currentSchema.minItems = minItemsIntegerValue + } + + if existsMapKey(m, KEY_MAX_ITEMS) { + maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) + if maxItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *maxItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_ITEMS}, + )) + } + currentSchema.maxItems = maxItemsIntegerValue + } + + if existsMapKey(m, KEY_UNIQUE_ITEMS) { + if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { + currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 { + newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.contains = newSchema + err := d.parseSchema(m[KEY_CONTAINS], newSchema) + if err != nil { + return err + } + } + + // validation : all + + if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 { + is, err := marshalWithoutNumber(m[KEY_CONST]) + if err != nil { + return err + } + currentSchema._const = is + } + + if existsMapKey(m, KEY_ENUM) { + if isKind(m[KEY_ENUM], reflect.Slice) { + for _, v := range m[KEY_ENUM].([]interface{}) { + is, err := marshalWithoutNumber(v) + if err != nil { + return err + } + if isStringInSlice(currentSchema.enum, *is) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_ENUM}, + )) + } + currentSchema.enum = append(currentSchema.enum, *is) + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, + )) + } + } + + // validation : subSchema + + if existsMapKey(m, KEY_ONE_OF) { + if isKind(m[KEY_ONE_OF], reflect.Slice) { + for _, v := range m[KEY_ONE_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.oneOf = append(currentSchema.oneOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ANY_OF) { + if isKind(m[KEY_ANY_OF], reflect.Slice) { + for _, v := range m[KEY_ANY_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.anyOf = append(currentSchema.anyOf, newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return 
errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_ALL_OF) {
+		if isKind(m[KEY_ALL_OF], reflect.Slice) {
+			for _, v := range m[KEY_ALL_OF].([]interface{}) {
+				newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema.allOf = append(currentSchema.allOf, newSchema)
+				err := d.parseSchema(v, newSchema)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_ALL_OF, "y": TYPE_ARRAY},
+			))
+		}
+	}
+
+	if existsMapKey(m, KEY_NOT) {
+		if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) {
+			newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref}
+			currentSchema.not = newSchema
+			err := d.parseSchema(m[KEY_NOT], newSchema)
+			if err != nil {
+				return err
+			}
+		} else {
+			return errors.New(formatErrorDescription(
+				Locale.MustBeOfAn(),
+				ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT},
+			))
+		}
+	}
+
+	if *currentSchema.draft >= Draft7 {
+		if existsMapKey(m, KEY_IF) {
+			if isKind(m[KEY_IF], reflect.Map, reflect.Bool) {
+				newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema._if = newSchema
+				err := d.parseSchema(m[KEY_IF], newSchema)
+				if err != nil {
+					return err
+				}
+			} else {
+				return errors.New(formatErrorDescription(
+					Locale.MustBeOfAn(),
+					ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT},
+				))
+			}
+		}
+
+		if existsMapKey(m, KEY_THEN) {
+			if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) {
+				newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema._then = newSchema
+				err := d.parseSchema(m[KEY_THEN], newSchema)
+				if err != nil {
+					return err
+				}
+			} else {
+				return errors.New(formatErrorDescription(
+					Locale.MustBeOfAn(),
+					ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT},
+				))
+			}
+		}
+
+		if existsMapKey(m, KEY_ELSE) {
+			if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) {
+				newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref}
+				currentSchema._else = newSchema
+				err := d.parseSchema(m[KEY_ELSE], newSchema)
+				if err != nil {
+					return err
+				}
+			} else {
+				return errors.New(formatErrorDescription(
+					Locale.MustBeOfAn(),
+					ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT},
+				))
+			}
+		}
+	}
+
+	return nil
+}
+
+func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error {
+	var (
+		refdDocumentNode interface{}
+		dsp *schemaPoolDocument
+		err error
+	)
+
+	newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
+
+	d.referencePool.Add(currentSchema.ref.String(), newSchema)
+
+	dsp, err = d.pool.GetDocument(*currentSchema.ref)
+	if err != nil {
+		return err
+	}
+	newSchema.id = currentSchema.ref
+
+	refdDocumentNode = dsp.Document
+	newSchema.draft = dsp.Draft
+
+	if err != nil {
+		return err
+	}
+
+	if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+			ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT},
+		))
+	}
+
+	err = d.parseSchema(refdDocumentNode, newSchema)
+	if err != nil {
+		return err
+	}
+
+	currentSchema.refSchema = newSchema
+
+	return nil
+
+}
+
+func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error {
+
+	if !isKind(documentNode, reflect.Map) {
+		return errors.New(formatErrorDescription(
+			Locale.MustBeOfType(),
+
ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + for k := range m { + schemaProperty := k + newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} + currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema) + err := d.parseSchema(m[k], newSchema) + if err != nil { + return err + } + } + + return nil +} + +func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + currentSchema.dependencies = make(map[string]interface{}) + + for k := range m { + switch reflect.ValueOf(m[k]).Kind() { + + case reflect.Slice: + values := m[k].([]interface{}) + var valuesToRegister []string + + for _, value := range values { + if !isKind(value, reflect.String) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + valuesToRegister = append(valuesToRegister, value.(string)) + currentSchema.dependencies[k] = valuesToRegister + } + + case reflect.Map, reflect.Bool: + depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err := d.parseSchema(m[k], depSchema) + if err != nil { + return err + } + currentSchema.dependencies[k] = depSchema + + default: + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + + } + + return nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go new file mode 100644 index 000000000..20db0c1f9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go @@ -0,0 +1,206 @@ +// Copyright 2018 johandorland ( https://github.com/johandorland ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gojsonschema + +import ( + "bytes" + "errors" + + "github.com/xeipuuv/gojsonreference" +) + +// SchemaLoader is used to load schemas +type SchemaLoader struct { + pool *schemaPool + AutoDetect bool + Validate bool + Draft Draft +} + +// NewSchemaLoader creates a new SchemaLoader +func NewSchemaLoader() *SchemaLoader { + + ps := &SchemaLoader{ + pool: &schemaPool{ + schemaPoolDocuments: make(map[string]*schemaPoolDocument), + }, + AutoDetect: true, + Validate: false, + Draft: Hybrid, + } + ps.pool.autoDetect = &ps.AutoDetect + + return ps +} + +func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error { + + var ( + schema string + err error + ) + if sl.AutoDetect { + schema, _, err = parseSchemaURL(documentNode) + if err != nil { + return err + } + } + + // If no explicit "$schema" is used, use the default metaschema associated with the draft used + if schema == "" { + if sl.Draft == Hybrid { + return nil + } + schema = drafts.GetSchemaURL(sl.Draft) + } + + // Disable validation when loading the metaschema to prevent an infinite recursive loop + sl.Validate = false + + metaSchema, err := sl.Compile(NewReferenceLoader(schema)) + + if err != nil { + return err + } + + sl.Validate = true + + result := metaSchema.validateDocument(documentNode) + + if !result.Valid() { + var res bytes.Buffer + for _, err := range result.Errors() { + res.WriteString(err.String()) + res.WriteString("\n") + } + return errors.New(res.String()) + } + + return nil +} + +// AddSchemas adds an arbitrary number of schemas to the schema cache. As this function does not require +// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema +func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error { + emptyRef, _ := gojsonreference.NewJsonReference("") + + for _, loader := range loaders { + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + // Use the recursive parse directly, so that the schema gets added to the pool only by its $id + // and not by the ref of the document, which is empty here + if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil { + return err + } + } + + return nil +} + +// AddSchema adds a schema under the provided URL to the schema cache +func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error { + + ref, err := gojsonreference.NewJsonReference(url) + + if err != nil { + return err + } + + doc, err := loader.LoadJSON() + + if err != nil { + return err + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return err + } + } + + return sl.pool.parseReferences(doc, ref, true) +} + +// Compile loads and compiles a schema +func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) { + + ref, err := rootSchema.JsonReference() + + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = sl.pool + d.pool.jsonLoaderFactory = rootSchema.LoaderFactory() + d.documentReference = ref + d.referencePool = newSchemaReferencePool() + + var doc interface{} + if ref.String() != "" { + // Get document from schema pool + spd, err := d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + doc = spd.Document + } else { + // Load JSON directly + doc, err = rootSchema.LoadJSON() + if err != nil { + return nil, err + } + // References need only be parsed if loading JSON directly, + // as pool.GetDocument already does this for us if loading by 
reference + err = sl.pool.parseReferences(doc, ref, true) + if err != nil { + return nil, err + } + } + + if sl.Validate { + if err := sl.validateMetaschema(doc); err != nil { + return nil, err + } + } + + draft := sl.Draft + if sl.AutoDetect { + _, detectedDraft, err := parseSchemaURL(doc) + if err != nil { + return nil, err + } + if detectedDraft != nil { + draft = *detectedDraft + } + } + + err = d.parse(doc, draft) + if err != nil { + return nil, err + } + + return &d, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go new file mode 100644 index 000000000..35b1cc630 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go @@ -0,0 +1,215 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines resources pooling. +// Eases referencing and avoids downloading the same resource twice. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "reflect" + + "github.com/xeipuuv/gojsonreference" +) + +type schemaPoolDocument struct { + Document interface{} + Draft *Draft +} + +type schemaPool struct { + schemaPoolDocuments map[string]*schemaPoolDocument + jsonLoaderFactory JSONLoaderFactory + autoDetect *bool +} + +func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { + + var ( + draft *Draft + err error + reference = ref.String() + ) + // Only the root document should be added to the schema pool if pooled is true + if _, ok := p.schemaPoolDocuments[reference]; pooled && ok { + return fmt.Errorf("Reference already exists: \"%s\"", reference) + } + + if *p.autoDetect { + _, draft, err = parseSchemaURL(document) + if err != nil { + return err + } + } + + err = p.parseReferencesRecursive(document, ref, draft) + + if pooled { + p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft} + } + + return err +} + +func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { + // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. + // For $ref references it takes into account the $id scope it is in and replaces + // the reference by the absolute resolved reference + + // When encountering errors it fails silently. Error handling is done when the schema + // is syntactically parsed and any error encountered here should also come up there. 
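+	// For illustration (values invented for this comment): inside a document carrying
+	// "$id": "http://example.com/root.json", a sibling "$ref": "item.json" is rewritten
+	// below to the absolute "http://example.com/item.json" before the document is pooled.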
+ switch m := document.(type) { + case []interface{}: + for _, v := range m { + p.parseReferencesRecursive(v, ref, draft) + } + case map[string]interface{}: + localRef := &ref + + keyID := KEY_ID_NEW + if existsMapKey(m, KEY_ID) { + keyID = KEY_ID + } + if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string)) + if err == nil { + localRef, err = ref.Inherits(jsonReference) + if err == nil { + if _, ok := p.schemaPoolDocuments[localRef.String()]; ok { + return fmt.Errorf("Reference already exists: \"%s\"", localRef.String()) + } + p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft} + } + } + } + + if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) { + jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string)) + if err == nil { + absoluteRef, err := localRef.Inherits(jsonReference) + if err == nil { + m[KEY_REF] = absoluteRef.String() + } + } + } + + for k, v := range m { + // const and enums should be interpreted literally, so ignore them + if k == KEY_CONST || k == KEY_ENUM { + continue + } + // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc. + // Therefore don't treat it like a schema. + if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES { + if child, ok := v.(map[string]interface{}); ok { + for _, v := range child { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } else { + p.parseReferencesRecursive(v, *localRef, draft) + } + } + } + return nil +} + +func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { + + var ( + spd *schemaPoolDocument + draft *Draft + ok bool + err error + ) + + if internalLogEnabled { + internalLog("Get Document ( %s )", reference.String()) + } + + // Create a deep copy, so we can remove the fragment part later on without altering the original + refToURL, _ := gojsonreference.NewJsonReference(reference.String()) + + // First check if the given fragment is a location independent identifier + // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3 + + if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok { + if internalLogEnabled { + internalLog(" From pool") + } + return spd, nil + } + + // If the given reference is not a location independent identifier, + // strip the fragment and look for a document with its base URI + + refToURL.GetUrl().Fragment = "" + + if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok { + document, _, err := reference.GetPointer().Get(cachedSpd.Document) + + if err != nil { + return nil, err + } + + if internalLogEnabled { + internalLog(" From pool") + } + + spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft} + p.schemaPoolDocuments[reference.String()] = spd + + return spd, nil + } + + // It is not possible to load anything remotely that is not canonical... 
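+	// (Illustrative examples, not from the library docs: "http://example.com/schema.json"
+	// has a full URL and is canonical, so it can be fetched; a bare fragment such as
+	// "#/definitions/foo" has no base URL and is rejected here.)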
+ if !reference.IsCanonical() { + return nil, errors.New(formatErrorDescription( + Locale.ReferenceMustBeCanonical(), + ErrorDetails{"reference": reference.String()}, + )) + } + + jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) + document, err := jsonReferenceLoader.LoadJSON() + + if err != nil { + return nil, err + } + + // add the whole document to the pool for potential re-use + p.parseReferences(document, refToURL, true) + + _, draft, _ = parseSchemaURL(document) + + // resolve the potential fragment and also cache it + document, _, err = reference.GetPointer().Get(document) + + if err != nil { + return nil, err + } + + return &schemaPoolDocument{Document: document, Draft: draft}, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go new file mode 100644 index 000000000..6e5e1b5cd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go @@ -0,0 +1,68 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Pool of referenced schemas. +// +// created 25-06-2013 + +package gojsonschema + +import ( + "fmt" +) + +type schemaReferencePool struct { + documents map[string]*subSchema +} + +func newSchemaReferencePool() *schemaReferencePool { + + p := &schemaReferencePool{} + p.documents = make(map[string]*subSchema) + + return p +} + +func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + } + + if sch, ok := p.documents[ref]; ok { + if internalLogEnabled { + internalLog(fmt.Sprintf(" From pool")) + } + return sch, true + } + + return nil, false +} + +func (p *schemaReferencePool) Add(ref string, sch *subSchema) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + } + if _, ok := p.documents[ref]; !ok { + p.documents[ref] = sch + } +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go new file mode 100644 index 000000000..36b447a29 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go @@ -0,0 +1,83 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Helper structure to handle schema types, and the combination of them. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "strings" +) + +type jsonSchemaType struct { + types []string +} + +// IsTyped reports whether the schema is typed, that is, contains at least one type. +// When not typed, the schema does not need any type validation +func (t *jsonSchemaType) IsTyped() bool { + return len(t.types) > 0 +} + +func (t *jsonSchemaType) Add(etype string) error { + + if !isStringInSlice(JSON_TYPES, etype) { + return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) + } + + if t.Contains(etype) { + return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) + } + + t.types = append(t.types, etype) + + return nil +} + +func (t *jsonSchemaType) Contains(etype string) bool { + + for _, v := range t.types { + if v == etype { + return true + } + } + + return false +} + +func (t *jsonSchemaType) String() string { + + if len(t.types) == 0 { + return STRING_UNDEFINED // should never happen + } + + // Displayed as a list [type1,type2,...] + if len(t.types) > 1 { + return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) + } + + // Only one type: name only + return t.types[0] +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go new file mode 100644 index 000000000..ec779812c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go @@ -0,0 +1,149 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines the structure of a sub-subSchema. +// A sub-subSchema can contain other sub-schemas. 
+// +// created 27-02-2013 + +package gojsonschema + +import ( + "github.com/xeipuuv/gojsonreference" + "math/big" + "regexp" +) + +// Constants +const ( + KEY_SCHEMA = "$schema" + KEY_ID = "id" + KEY_ID_NEW = "$id" + KEY_REF = "$ref" + KEY_TITLE = "title" + KEY_DESCRIPTION = "description" + KEY_TYPE = "type" + KEY_ITEMS = "items" + KEY_ADDITIONAL_ITEMS = "additionalItems" + KEY_PROPERTIES = "properties" + KEY_PATTERN_PROPERTIES = "patternProperties" + KEY_ADDITIONAL_PROPERTIES = "additionalProperties" + KEY_PROPERTY_NAMES = "propertyNames" + KEY_DEFINITIONS = "definitions" + KEY_MULTIPLE_OF = "multipleOf" + KEY_MINIMUM = "minimum" + KEY_MAXIMUM = "maximum" + KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" + KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" + KEY_MIN_LENGTH = "minLength" + KEY_MAX_LENGTH = "maxLength" + KEY_PATTERN = "pattern" + KEY_FORMAT = "format" + KEY_MIN_PROPERTIES = "minProperties" + KEY_MAX_PROPERTIES = "maxProperties" + KEY_DEPENDENCIES = "dependencies" + KEY_REQUIRED = "required" + KEY_MIN_ITEMS = "minItems" + KEY_MAX_ITEMS = "maxItems" + KEY_UNIQUE_ITEMS = "uniqueItems" + KEY_CONTAINS = "contains" + KEY_CONST = "const" + KEY_ENUM = "enum" + KEY_ONE_OF = "oneOf" + KEY_ANY_OF = "anyOf" + KEY_ALL_OF = "allOf" + KEY_NOT = "not" + KEY_IF = "if" + KEY_THEN = "then" + KEY_ELSE = "else" +) + +type subSchema struct { + draft *Draft + + // basic subSchema meta properties + id *gojsonreference.JsonReference + title *string + description *string + + property string + + // Quick pass/fail for boolean schemas + pass *bool + + // Types associated with the subSchema + types jsonSchemaType + + // Reference url + ref *gojsonreference.JsonReference + // Schema referenced + refSchema *subSchema + + // hierarchy + parent *subSchema + itemsChildren []*subSchema + itemsChildrenIsSingleSchema bool + propertiesChildren []*subSchema + + // validation : number / integer + multipleOf *big.Rat + maximum *big.Rat + exclusiveMaximum *big.Rat + minimum *big.Rat + exclusiveMinimum *big.Rat + + // validation : string + minLength *int + maxLength *int + pattern *regexp.Regexp + format string + + // validation : object + minProperties *int + maxProperties *int + required []string + + dependencies map[string]interface{} + additionalProperties interface{} + patternProperties map[string]*subSchema + propertyNames *subSchema + + // validation : array + minItems *int + maxItems *int + uniqueItems bool + contains *subSchema + + additionalItems interface{} + + // validation : all + _const *string //const is a golang keyword + enum []string + + // validation : subSchema + oneOf []*subSchema + anyOf []*subSchema + allOf []*subSchema + not *subSchema + _if *subSchema // if/else are golang keywords + _then *subSchema + _else *subSchema +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go new file mode 100644 index 000000000..0e6fd5173 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/types.go @@ -0,0 +1,62 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const types for schema and JSON. +// +// created 28-02-2013 + +package gojsonschema + +// Type constants +const ( + TYPE_ARRAY = `array` + TYPE_BOOLEAN = `boolean` + TYPE_INTEGER = `integer` + TYPE_NUMBER = `number` + TYPE_NULL = `null` + TYPE_OBJECT = `object` + TYPE_STRING = `string` +) + +// JSON_TYPES hosts the list of types that are supported in JSON +var JSON_TYPES []string + +// SCHEMA_TYPES hosts the list of types that are supported in schemas +var SCHEMA_TYPES []string + +func init() { + JSON_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_NULL, + TYPE_OBJECT, + TYPE_STRING} + + SCHEMA_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_OBJECT, + TYPE_STRING} +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go new file mode 100644 index 000000000..a17d22e3b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go @@ -0,0 +1,197 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Various utility functions. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" +) + +func isKind(what interface{}, kinds ...reflect.Kind) bool { + target := what + if isJSONNumber(what) { + // JSON Numbers are strings! + target = *mustBeNumber(what) + } + targetKind := reflect.ValueOf(target).Kind() + for _, kind := range kinds { + if targetKind == kind { + return true + } + } + return false +} + +func existsMapKey(m map[string]interface{}, k string) bool { + _, ok := m[k] + return ok +} + +func isStringInSlice(s []string, what string) bool { + for i := range s { + if s[i] == what { + return true + } + } + return false +} + +// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s. 
+func indexStringInSlice(s []string, what string) int { + for i := range s { + if s[i] == what { + return i + } + } + return -1 +} + +func marshalToJSONString(value interface{}) (*string, error) { + + mBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + sBytes := string(mBytes) + return &sBytes, nil +} + +func marshalWithoutNumber(value interface{}) (*string, error) { + + // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber + // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 + // One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber + // so that these differences in representation are removed + + jsonString, err := marshalToJSONString(value) + if err != nil { + return nil, err + } + + var document interface{} + + err = json.Unmarshal([]byte(*jsonString), &document) + if err != nil { + return nil, err + } + + return marshalToJSONString(document) +} + +func isJSONNumber(what interface{}) bool { + + switch what.(type) { + + case json.Number: + return true + } + + return false +} + +func checkJSONInteger(what interface{}) (isInt bool) { + + jsonNumber := what.(json.Number) + + bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber)) + + return isValidNumber && bigFloat.IsInt() + +} + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 = 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) // -9007199254740991.0 = -(2^53 - 1) +) + +func mustBeInteger(what interface{}) *int { + + if isJSONNumber(what) { + + number := what.(json.Number) + + isInt := checkJSONInteger(number) + + if isInt { + + int64Value, err := number.Int64() + if err != nil { + return nil + } + + int32Value := int(int64Value) + return &int32Value + } + + } + + return nil +} + +func mustBeNumber(what interface{}) *big.Rat { + + if isJSONNumber(what) { + number := what.(json.Number) + float64Value, success := new(big.Rat).SetString(string(number)) + if success { + return float64Value + } + } + + return nil + +} + +func convertDocumentNode(val interface{}) interface{} { + + if lval, ok := val.([]interface{}); ok { + + res := []interface{}{} + for _, v := range lval { + res = append(res, convertDocumentNode(v)) + } + + return res + + } + + if mval, ok := val.(map[interface{}]interface{}); ok { + + res := map[string]interface{}{} + + for k, v := range mval { + res[k.(string)] = convertDocumentNode(v) + } + + return res + + } + + return val +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go new file mode 100644 index 000000000..74091bca1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go @@ -0,0 +1,858 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
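The marshalWithoutNumber helper above is what makes the const/enum and uniqueItems comparisons in validation.go insensitive to numeric representation. A standalone sketch of the effect it compensates for (values invented for this example):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Decoding with UseNumber keeps numbers as their literal strings,
	// so 1.00 and 1 come back as distinct json.Number values.
	dec := json.NewDecoder(bytes.NewReader([]byte(`[1.00, 1]`)))
	dec.UseNumber()
	var v []interface{}
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(v[0] == v[1]) // false: json.Number("1.00") != json.Number("1")

	// Re-encoding and decoding without UseNumber collapses both to float64(1),
	// which is the normalization marshalWithoutNumber performs.
	raw, _ := json.Marshal(v) // json.Number marshals as its literal
	var w []interface{}
	_ = json.Unmarshal(raw, &w)
	fmt.Println(w[0] == w[1]) // true: both are float64(1)
}
```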
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Extends Schema and subSchema, implements the validation phase. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "math/big" + "reflect" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +// Validate loads and validates a JSON schema +func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { + // load schema + schema, err := NewSchema(ls) + if err != nil { + return nil, err + } + return schema.Validate(ld) +} + +// Validate loads and validates a JSON document +func (v *Schema) Validate(l JSONLoader) (*Result, error) { + root, err := l.LoadJSON() + if err != nil { + return nil, err + } + return v.validateDocument(root), nil +} + +func (v *Schema) validateDocument(root interface{}) *Result { + result := &Result{} + context := NewJsonContext(STRING_CONTEXT_ROOT, nil) + v.rootSchema.validateRecursive(v.rootSchema, root, result, context) + return result +} + +func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result { + result := &Result{} + v.validateRecursive(v, document, result, context) + return result +} + +// Walker function to validate the json recursively against the subSchema +func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateRecursive %s", context.String()) + internalLog(" %v", currentNode) + } + + // Handle true/false schema as early as possible as all other fields will be nil + if currentSubSchema.pass != nil { + if !*currentSubSchema.pass { + result.addInternalError( + new(FalseError), + context, + currentNode, + ErrorDetails{}, + ) + } + return + } + + // Handle referenced schemas, returns directly when a $ref is found + if currentSubSchema.refSchema != nil { + v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) + return + } + + // Check for null value + if currentNode == nil { + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_NULL, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) + v.validateCommon(currentSubSchema, currentNode, result, context) + + } else { // Not a null value + + if isJSONNumber(currentNode) { + + value := currentNode.(json.Number) + + isInt := checkJSONInteger(value) + + validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER)) + + if currentSubSchema.types.IsTyped() && !validType { + + givenType := TYPE_INTEGER + if !isInt { + givenType = TYPE_NUMBER + } + + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": givenType, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } else { + + rValue := reflect.ValueOf(currentNode) + rKind := rValue.Kind() 
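+	// Kind dispatch for the decoded value (json.Number was handled above, so numbers
+	// never reach this switch): []interface{} is a JSON array, map[string]interface{}
+	// is a JSON object, and bool/string are the remaining JSON primitives.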
+ + switch rKind { + + // Slice => JSON array + + case reflect.Slice: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_ARRAY, + }, + ) + return + } + + castCurrentNode := currentNode.([]interface{}) + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateArray(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + // Map => JSON object + + case reflect.Map: + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_OBJECT, + }, + ) + return + } + + castCurrentNode, ok := currentNode.(map[string]interface{}) + if !ok { + castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) + } + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateObject(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + for _, pSchema := range currentSubSchema.propertiesChildren { + nextNode, ok := castCurrentNode[pSchema.property] + if ok { + subContext := NewJsonContext(pSchema.property, context) + v.validateRecursive(pSchema, nextNode, result, subContext) + } + } + + // Simple JSON values: string, number, boolean + + case reflect.Bool: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_BOOLEAN, + }, + ) + return + } + + value := currentNode.(bool) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + case reflect.String: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { + result.addInternalError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_STRING, + }, + ) + return + } + + value := currentNode.(string) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } + + } + + } + + result.incrementScore() +} + +// The different kinds of validation follow here: subSchema / common / array / object / string... 
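For a failed anyOf/oneOf, validateSchema below keeps the highest-scoring branch and merges its errors into the result, on the assumption that this was the branch the user meant to match. A small sketch of that behaviour through the public API (schema and document invented for this example):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
		"oneOf": [
			{"type": "object", "required": ["name"]},
			{"type": "string"}
		]
	}`)
	// An object missing "name": neither branch validates, but the object
	// branch matches most closely, so its errors are merged into the result.
	doc := gojsonschema.NewStringLoader(`{"age": 3}`)

	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(result.Valid()) // false
	for _, e := range result.Errors() {
		fmt.Println(e) // oneOf failure plus the closest branch's errors
	}
}
```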
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateSchema %s", context.String()) + internalLog(" %v", currentNode) + } + + if len(currentSubSchema.anyOf) > 0 { + + validatedAnyOf := false + var bestValidationResult *Result + + for _, anyOfSchema := range currentSubSchema.anyOf { + if !validatedAnyOf { + validationResult := anyOfSchema.subValidateWithContext(currentNode, context) + validatedAnyOf = validationResult.Valid() + + if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + } + if !validatedAnyOf { + + result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) + + if bestValidationResult != nil { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + } + + if len(currentSubSchema.oneOf) > 0 { + + nbValidated := 0 + var bestValidationResult *Result + + for _, oneOfSchema := range currentSubSchema.oneOf { + validationResult := oneOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + + if nbValidated != 1 { + + result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) + + if nbValidated == 0 { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + + } + + if len(currentSubSchema.allOf) > 0 { + nbValidated := 0 + + for _, allOfSchema := range currentSubSchema.allOf { + validationResult := allOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } + result.mergeErrors(validationResult) + } + + if nbValidated != len(currentSubSchema.allOf) { + result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.not != nil { + validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if isKind(currentNode, reflect.Map) { + for elementKey := range currentNode.(map[string]interface{}) { + if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { + switch dependency := dependency.(type) { + + case []string: + for _, dependOnKey := range dependency { + if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { + result.addInternalError( + new(MissingDependencyError), + context, + currentNode, + ErrorDetails{"dependency": dependOnKey}, + ) + } + } + + case *subSchema: + dependency.validateRecursive(dependency, currentNode, result, context) + } + } + } + } + } + + if currentSubSchema._if != nil { + validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context) + if currentSubSchema._then != nil && validationResultIf.Valid() { + validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context) + if !validationResultThen.Valid() { + 
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultThen) + } + } + if currentSubSchema._else != nil && !validationResultIf.Valid() { + validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context) + if !validationResultElse.Valid() { + result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{}) + result.mergeErrors(validationResultElse) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateCommon %s", context.String()) + internalLog(" %v", value) + } + + // const: + if currentSubSchema._const != nil { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if *vString != *currentSubSchema._const { + result.addInternalError(new(ConstError), + context, + value, + ErrorDetails{ + "allowed": *currentSubSchema._const, + }, + ) + } + } + + // enum: + if len(currentSubSchema.enum) > 0 { + vString, err := marshalWithoutNumber(value) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if !isStringInSlice(currentSubSchema.enum, *vString) { + result.addInternalError( + new(EnumError), + context, + value, + ErrorDetails{ + "allowed": strings.Join(currentSubSchema.enum, ", "), + }, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateArray %s", context.String()) + internalLog(" %v", value) + } + + nbValues := len(value) + + // "items" held a single schema: every element of the instance array is validated against it + if currentSubSchema.itemsChildrenIsSingleSchema { + for i := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else { + if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { + + nbItems := len(currentSubSchema.itemsChildren) + + // while we have both schemas and values, check them against each other + for i := 0; i != nbItems && i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + + if nbItems < nbValues { + // we have fewer schemas than elements in the instance array, + // but that might be ok if "additionalItems" is specified. 
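+	// Illustrative example (not from the test suite): with "items": [{"type": "string"}]
+	// and "additionalItems": {"type": "number"}, the instance ["a", 1, 2] is valid:
+	// index 0 is checked against items[0], the remaining elements against additionalItems.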
+ + switch currentSubSchema.additionalItems.(type) { + case bool: + if !currentSubSchema.additionalItems.(bool) { + result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) + } + case *subSchema: + additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) + for i := nbItems; i != nbValues; i++ { + subContext := NewJsonContext(strconv.Itoa(i), context) + validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } + } + } + } + + // minItems & maxItems + if currentSubSchema.minItems != nil { + if nbValues < int(*currentSubSchema.minItems) { + result.addInternalError( + new(ArrayMinItemsError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minItems}, + ) + } + } + if currentSubSchema.maxItems != nil { + if nbValues > int(*currentSubSchema.maxItems) { + result.addInternalError( + new(ArrayMaxItemsError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxItems}, + ) + } + } + + // uniqueItems: + if currentSubSchema.uniqueItems { + var stringifiedItems = make(map[string]int) + for j, v := range value { + vString, err := marshalWithoutNumber(v) + if err != nil { + result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err}) + } + if i, ok := stringifiedItems[*vString]; ok { + result.addInternalError( + new(ItemsMustBeUniqueError), + context, + value, + ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j}, + ) + } + stringifiedItems[*vString] = j + } + } + + // contains: + + if currentSubSchema.contains != nil { + validatedOne := false + var bestValidationResult *Result + + for i, v := range value { + subContext := NewJsonContext(strconv.Itoa(i), context) + + validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext) + if validationResult.Valid() { + validatedOne = true + break + } else { + if bestValidationResult == nil || validationResult.score > bestValidationResult.score { + bestValidationResult = validationResult + } + } + } + if !validatedOne { + result.addInternalError( + new(ArrayContainsError), + context, + value, + ErrorDetails{}, + ) + if bestValidationResult != nil { + result.mergeErrors(bestValidationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) { + + if internalLogEnabled { + internalLog("validateObject %s", context.String()) + internalLog(" %v", value) + } + + // minProperties & maxProperties: + if currentSubSchema.minProperties != nil { + if len(value) < int(*currentSubSchema.minProperties) { + result.addInternalError( + new(ArrayMinPropertiesError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minProperties}, + ) + } + } + if currentSubSchema.maxProperties != nil { + if len(value) > int(*currentSubSchema.maxProperties) { + result.addInternalError( + new(ArrayMaxPropertiesError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxProperties}, + ) + } + } + + // required: + for _, requiredProperty := range currentSubSchema.required { + _, ok := value[requiredProperty] + if ok { + result.incrementScore() + } else { + result.addInternalError( + new(RequiredError), + context, + value, + ErrorDetails{"property": requiredProperty}, + ) + } + } + + // additionalProperty & patternProperty: + for pk := range value { + + // Check whether this property is described by "properties" + found := false + for _, spValue := range 
currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + // Check whether this property is described by "patternProperties" + ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + // If it is described by neither "properties" nor "patternProperties", it must pass "additionalProperties" + if !found && !ppMatch { + switch ap := currentSubSchema.additionalProperties.(type) { + case bool: + // Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema + if !ap { + result.addInternalError( + new(AdditionalPropertyNotAllowedError), + context, + value[pk], + ErrorDetails{"property": pk}, + ) + + } + case *subSchema: + validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context)) + result.mergeErrors(validationResult) + } + } + } + + // propertyNames: + if currentSubSchema.propertyNames != nil { + for pk := range value { + validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context) + if !validationResult.Valid() { + result.addInternalError(new(InvalidPropertyNameError), + context, + value, ErrorDetails{ + "property": pk, + }) + result.mergeErrors(validationResult) + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool { + + if internalLogEnabled { + internalLog("validatePatternProperty %s", context.String()) + internalLog(" %s %v", key, value) + } + + validated := false + + for pk, pv := range currentSubSchema.patternProperties { + if matches, _ := regexp.MatchString(pk, key); matches { + validated = true + subContext := NewJsonContext(key, context) + validationResult := pv.subValidateWithContext(value, subContext) + result.mergeErrors(validationResult) + } + } + + if !validated { + return false + } + + result.incrementScore() + return true +} + +func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore JSON numbers + if isJSONNumber(value) { + return + } + + // Ignore non-strings + if !isKind(value, reflect.String) { + return + } + + if internalLogEnabled { + internalLog("validateString %s", context.String()) + internalLog(" %v", value) + } + + stringValue := value.(string) + + // minLength & maxLength: + if currentSubSchema.minLength != nil { + if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { + result.addInternalError( + new(StringLengthGTEError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minLength}, + ) + } + } + if currentSubSchema.maxLength != nil { + if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { + result.addInternalError( + new(StringLengthLTEError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxLength}, + ) + } + } + + // pattern: + if currentSubSchema.pattern != nil { + if !currentSubSchema.pattern.MatchString(stringValue) { + result.addInternalError( + new(DoesNotMatchPatternError), + context, + value, + ErrorDetails{"pattern": currentSubSchema.pattern}, + ) + + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) 
validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { + + // Ignore non numbers + if !isJSONNumber(value) { + return + } + + if internalLogEnabled { + internalLog("validateNumber %s", context.String()) + internalLog(" %v", value) + } + + number := value.(json.Number) + float64Value, _ := new(big.Rat).SetString(string(number)) + + // multipleOf: + if currentSubSchema.multipleOf != nil { + if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() { + result.addInternalError( + new(MultipleOfError), + context, + number, + ErrorDetails{ + "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf), + }, + ) + } + } + + // maximum & exclusiveMaximum: + if currentSubSchema.maximum != nil { + if float64Value.Cmp(currentSubSchema.maximum) == 1 { + result.addInternalError( + new(NumberLTEError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.maximum), + }, + ) + } + } + if currentSubSchema.exclusiveMaximum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 { + result.addInternalError( + new(NumberLTError), + context, + number, + ErrorDetails{ + "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum), + }, + ) + } + } + + // minimum & exclusiveMinimum: + if currentSubSchema.minimum != nil { + if float64Value.Cmp(currentSubSchema.minimum) == -1 { + result.addInternalError( + new(NumberGTEError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.minimum), + }, + ) + } + } + if currentSubSchema.exclusiveMinimum != nil { + if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 { + result.addInternalError( + new(NumberGTError), + context, + number, + ErrorDetails{ + "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum), + }, + ) + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { + result.addInternalError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} diff --git a/vendor/github.com/xlab/treeprint/.gitignore b/vendor/github.com/xlab/treeprint/.gitignore new file mode 100644 index 000000000..7c9305d66 --- /dev/null +++ b/vendor/github.com/xlab/treeprint/.gitignore @@ -0,0 +1,3 @@ +vendor/** +.idea +**/**.iml diff --git a/vendor/github.com/xlab/treeprint/LICENSE b/vendor/github.com/xlab/treeprint/LICENSE new file mode 100644 index 000000000..5ab533ad2 --- /dev/null +++ b/vendor/github.com/xlab/treeprint/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) +Copyright © 2016 Maxim Kupriianov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/xlab/treeprint/README.md b/vendor/github.com/xlab/treeprint/README.md new file mode 100644 index 000000000..59fb121fc --- /dev/null +++ b/vendor/github.com/xlab/treeprint/README.md @@ -0,0 +1,154 @@ +treeprint [![GoDoc](https://godoc.org/github.com/xlab/treeprint?status.svg)](https://godoc.org/github.com/xlab/treeprint) ![test coverage](https://img.shields.io/badge/coverage-68.6%25-green.svg) +========= + +Package `treeprint` provides a simple ASCII tree composing tool. + +If you are familiar with the [tree](http://mama.indstate.edu/users/ice/tree/) utility, a recursive directory listing command that produces a depth-indented listing of files, then you have the idea of what it would look like. + +On my system the command yields the following + +``` + $ tree +. +├── LICENSE +├── README.md +├── treeprint.go +└── treeprint_test.go + +0 directories, 4 files +``` + +and I'd like to have the same format for my Go data structures when I print them. + +## Installation + +``` +$ go get github.com/xlab/treeprint +``` + +## Concept of work + +The general idea is that you initialise a new tree with `treeprint.New()` and then add nodes and +branches into it. Use `AddNode()` when you want to add a node on the same level as the target or +use `AddBranch()` when you want to go a level deeper. So `tree.AddBranch().AddNode().AddNode()` would +create a new level with two distinct nodes on it. Thus `tree.AddNode().AddNode()` is a flat thing and +`tree.AddBranch().AddBranch().AddBranch()` is a high thing. Use `String()` or `Bytes()` on a branch +to render a subtree, or use it on the root to print the whole tree. + +The utility will yield Unicode-friendly trees. The output is predictable and there are no platform-dependent exceptions, so if you have issues with displaying the tree in the console, all platform-related transformations can be done after the tree has been rendered: [an example](https://github.com/xlab/treeprint/issues/2#issuecomment-324944141) for Asian locales. + +## Use cases + +### When you want to render a complex data structure: + +```go +func main() { + // to add a custom root name use `treeprint.NewWithRoot()` instead + tree := treeprint.New() + + // create a new branch in the root + one := tree.AddBranch("one") + + // add some nodes + one.AddNode("subnode1").AddNode("subnode2") + + // create a new sub-branch + one.AddBranch("two"). + AddNode("subnode1").AddNode("subnode2"). // add some nodes + AddBranch("three"). // add a new sub-branch + AddNode("subnode1").AddNode("subnode2") // add some nodes too + + // add one more node that should surround the inner branch + one.AddNode("subnode3") + + // add a new node to the root + tree.AddNode("outernode") + + fmt.Println(tree.String()) +} +``` + +Will give you: + +``` +. 
+├── one +│   ├── subnode1 +│   ├── subnode2 +│   ├── two +│   │   ├── subnode1 +│   │   ├── subnode2 +│   │   └── three +│   │       ├── subnode1 +│   │       └── subnode2 +│   └── subnode3 +└── outernode +``` + +### Another case: when you have to make a tree where any leaf may have some meta-data (as the `tree` utility can): + +```go +func main() { + // to add a custom root name use `treeprint.NewWithRoot()` instead + tree := treeprint.New() + + tree.AddNode("Dockerfile") + tree.AddNode("Makefile") + tree.AddNode("aws.sh") + tree.AddMetaBranch(" 204", "bin"). + AddNode("dbmaker").AddNode("someserver").AddNode("testtool") + tree.AddMetaBranch(" 374", "deploy"). + AddNode("Makefile").AddNode("bootstrap.sh") + tree.AddMetaNode("122K", "testtool.a") + + fmt.Println(tree.String()) +} +``` + +Output: + +``` +. +├── Dockerfile +├── Makefile +├── aws.sh +├── [ 204] bin +│   ├── dbmaker +│   ├── someserver +│   └── testtool +├── [ 374] deploy +│   ├── Makefile +│   └── bootstrap.sh +└── [122K] testtool.a +``` + +### Iterating over the tree nodes + +```go +tree := New() + +one := tree.AddBranch("one") +one.AddNode("one-subnode1").AddNode("one-subnode2") +one.AddBranch("two").AddNode("two-subnode1").AddNode("two-subnode2"). + AddBranch("three").AddNode("three-subnode1").AddNode("three-subnode2") +tree.AddNode("outernode") + +// if you need to iterate over the whole tree +// call `VisitAll` from your top root node. +tree.VisitAll(func(item *node) { + if len(item.Nodes) > 0 { + // branch nodes + fmt.Println(item.Value) // will output one, two, three + } else { + // leaf nodes + fmt.Println(item.Value) // will output one-*, two-*, three-* and outernode + } +}) + +``` +Yay! So it works. + +## License +MIT diff --git a/vendor/github.com/xlab/treeprint/helpers.go b/vendor/github.com/xlab/treeprint/helpers.go new file mode 100644 index 000000000..a091a5a0f --- /dev/null +++ b/vendor/github.com/xlab/treeprint/helpers.go @@ -0,0 +1,47 @@ +package treeprint + +import ( + "reflect" + "strings" +) + +func isEmpty(v *reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func tagSpec(tag string) (name string, omit bool) { + parts := strings.Split(tag, ",") + if len(parts) < 2 { + return tag, false + } + if parts[1] == "omitempty" { + return parts[0], true + } + return parts[0], false +} + +func filterTags(tag reflect.StructTag) string { + tags := strings.Split(string(tag), " ") + filtered := make([]string, 0, len(tags)) + for i := range tags { + if strings.HasPrefix(tags[i], "tree:") { + continue + } + filtered = append(filtered, tags[i]) + } + return strings.Join(filtered, " ") +} diff --git a/vendor/github.com/xlab/treeprint/struct.go b/vendor/github.com/xlab/treeprint/struct.go new file mode 100644 index 000000000..4d5cc8254 --- /dev/null +++ b/vendor/github.com/xlab/treeprint/struct.go @@ -0,0 +1,322 @@ +package treeprint + +import ( + "fmt" + "reflect" + "strings" +) + +type StructTreeOption int + +const ( + StructNameTree StructTreeOption = iota + StructValueTree + StructTagTree + StructTypeTree + StructTypeSizeTree +) + +func 
FromStruct(v interface{}, opt ...StructTreeOption) (Tree, error) { + var treeOpt StructTreeOption + if len(opt) > 0 { + treeOpt = opt[0] + } + switch treeOpt { + case StructNameTree: + tree := New() + err := nameTree(tree, v) + return tree, err + case StructValueTree: + tree := New() + err := valueTree(tree, v) + return tree, err + case StructTagTree: + tree := New() + err := tagTree(tree, v) + return tree, err + case StructTypeTree: + tree := New() + err := typeTree(tree, v) + return tree, err + case StructTypeSizeTree: + tree := New() + err := typeSizeTree(tree, v) + return tree, err + default: + err := fmt.Errorf("treeprint: invalid StructTreeOption %v", treeOpt) + return nil, err + } +} + +type FmtFunc func(name string, v interface{}) (string, bool) + +func FromStructWithMeta(v interface{}, fmtFunc FmtFunc) (Tree, error) { + if fmtFunc == nil { + tree := New() + err := nameTree(tree, v) + return tree, err + } + tree := New() + err := metaTree(tree, v, fmtFunc) + return tree, err +} + +func Repr(v interface{}) string { + tree := New() + vType := reflect.TypeOf(v) + vValue := reflect.ValueOf(v) + _, val, isStruct := getValue(vType, &vValue) + if !isStruct { + return fmt.Sprintf("%+v", val.Interface()) + } + err := valueTree(tree, val.Interface()) + if err != nil { + return err.Error() + } + return tree.String() +} + +func nameTree(tree Tree, v interface{}) error { + typ, val, err := checkType(v) + if err != nil { + return err + } + fields := typ.NumField() + for i := 0; i < fields; i++ { + field := typ.Field(i) + fieldValue := val.Field(i) + name, skip, omit := getMeta(field.Name, field.Tag) + if skip || omit && isEmpty(&fieldValue) { + continue + } + typ, val, isStruct := getValue(field.Type, &fieldValue) + if !isStruct { + tree.AddNode(name) + continue + } else if subNum := typ.NumField(); subNum == 0 { + tree.AddNode(name) + continue + } + branch := tree.AddBranch(name) + if err := nameTree(branch, val.Interface()); err != nil { + err := fmt.Errorf("%v on struct branch %s", err, name) + return err + } + } + return nil +} + +func getMeta(fieldName string, tag reflect.StructTag) (name string, skip, omit bool) { + if tagStr := tag.Get("tree"); len(tagStr) > 0 { + name, omit = tagSpec(tagStr) + } + if name == "-" { + return fieldName, true, omit + } + if len(name) == 0 { + name = fieldName + } else if trimmed := strings.TrimSpace(name); len(trimmed) == 0 { + name = fieldName + } + return +} + +func valueTree(tree Tree, v interface{}) error { + typ, val, err := checkType(v) + if err != nil { + return err + } + fields := typ.NumField() + for i := 0; i < fields; i++ { + field := typ.Field(i) + fieldValue := val.Field(i) + name, skip, omit := getMeta(field.Name, field.Tag) + if skip || omit && isEmpty(&fieldValue) { + continue + } + typ, val, isStruct := getValue(field.Type, &fieldValue) + if !isStruct { + tree.AddMetaNode(val.Interface(), name) + continue + } else if subNum := typ.NumField(); subNum == 0 { + tree.AddMetaNode(val.Interface(), name) + continue + } + branch := tree.AddBranch(name) + if err := valueTree(branch, val.Interface()); err != nil { + err := fmt.Errorf("%v on struct branch %s", err, name) + return err + } + } + return nil +} + +func tagTree(tree Tree, v interface{}) error { + typ, val, err := checkType(v) + if err != nil { + return err + } + fields := typ.NumField() + for i := 0; i < fields; i++ { + field := typ.Field(i) + fieldValue := val.Field(i) + name, skip, omit := getMeta(field.Name, field.Tag) + if skip || omit && isEmpty(&fieldValue) { + continue + } + 
filteredTag := filterTags(field.Tag) + typ, val, isStruct := getValue(field.Type, &fieldValue) + if !isStruct { + tree.AddMetaNode(filteredTag, name) + continue + } else if subNum := typ.NumField(); subNum == 0 { + tree.AddMetaNode(filteredTag, name) + continue + } + branch := tree.AddMetaBranch(filteredTag, name) + if err := tagTree(branch, val.Interface()); err != nil { + err := fmt.Errorf("%v on struct branch %s", err, name) + return err + } + } + return nil +} + +func typeTree(tree Tree, v interface{}) error { + typ, val, err := checkType(v) + if err != nil { + return err + } + fields := typ.NumField() + for i := 0; i < fields; i++ { + field := typ.Field(i) + fieldValue := val.Field(i) + name, skip, omit := getMeta(field.Name, field.Tag) + if skip || omit && isEmpty(&fieldValue) { + continue + } + typ, val, isStruct := getValue(field.Type, &fieldValue) + typename := fmt.Sprintf("%T", val.Interface()) + if !isStruct { + tree.AddMetaNode(typename, name) + continue + } else if subNum := typ.NumField(); subNum == 0 { + tree.AddMetaNode(typename, name) + continue + } + branch := tree.AddMetaBranch(typename, name) + if err := typeTree(branch, val.Interface()); err != nil { + err := fmt.Errorf("%v on struct branch %s", err, name) + return err + } + } + return nil +} + +func typeSizeTree(tree Tree, v interface{}) error { + typ, val, err := checkType(v) + if err != nil { + return err + } + fields := typ.NumField() + for i := 0; i < fields; i++ { + field := typ.Field(i) + fieldValue := val.Field(i) + name, skip, omit := getMeta(field.Name, field.Tag) + if skip || omit && isEmpty(&fieldValue) { + continue + } + typ, val, isStruct := getValue(field.Type, &fieldValue) + typesize := typ.Size() + if !isStruct { + tree.AddMetaNode(typesize, name) + continue + } else if subNum := typ.NumField(); subNum == 0 { + tree.AddMetaNode(typesize, name) + continue + } + branch := tree.AddMetaBranch(typesize, name) + if err := typeSizeTree(branch, val.Interface()); err != nil { + err := fmt.Errorf("%v on struct branch %s", err, name) + return err + } + } + return nil +} + +func metaTree(tree Tree, v interface{}, fmtFunc FmtFunc) error { + typ, val, err := checkType(v) + if err != nil { + return err + } + fields := typ.NumField() + for i := 0; i < fields; i++ { + field := typ.Field(i) + fieldValue := val.Field(i) + name, skip, omit := getMeta(field.Name, field.Tag) + if skip || omit && isEmpty(&fieldValue) { + continue + } + typ, val, isStruct := getValue(field.Type, &fieldValue) + formatted, show := fmtFunc(name, val.Interface()) + if !isStruct { + if show { + tree.AddMetaNode(formatted, name) + continue + } + tree.AddNode(name) + continue + } else if subNum := typ.NumField(); subNum == 0 { + if show { + tree.AddMetaNode(formatted, name) + continue + } + tree.AddNode(name) + continue + } + var branch Tree + if show { + branch = tree.AddMetaBranch(formatted, name) + } else { + branch = tree.AddBranch(name) + } + if err := metaTree(branch, val.Interface(), fmtFunc); err != nil { + err := fmt.Errorf("%v on struct branch %s", err, name) + return err + } + } + return nil +} + +func getValue(typ reflect.Type, val *reflect.Value) (reflect.Type, *reflect.Value, bool) { + switch typ.Kind() { + case reflect.Ptr: + typ = typ.Elem() + if typ.Kind() == reflect.Struct { + elem := val.Elem() + return typ, &elem, true + } + case reflect.Struct: + return typ, val, true + } + return typ, val, false +} + +func checkType(v interface{}) (reflect.Type, *reflect.Value, error) { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + 
switch typ.Kind() { + case reflect.Ptr: + typ = typ.Elem() + if typ.Kind() != reflect.Struct { + err := fmt.Errorf("treeprint: %T is not a struct we could work with", v) + return nil, nil, err + } + val = val.Elem() + case reflect.Struct: + default: + err := fmt.Errorf("treeprint: %T is not a struct we could work with", v) + return nil, nil, err + } + return typ, &val, nil +} diff --git a/vendor/github.com/xlab/treeprint/treeprint.go b/vendor/github.com/xlab/treeprint/treeprint.go new file mode 100644 index 000000000..fc8204b71 --- /dev/null +++ b/vendor/github.com/xlab/treeprint/treeprint.go @@ -0,0 +1,294 @@ +// Package treeprint provides a simple ASCII tree composing tool. +package treeprint + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Value defines any value +type Value interface{} + +// MetaValue defines any meta value +type MetaValue interface{} + +// NodeVisitor function type for iterating over nodes +type NodeVisitor func(item *Node) + +// Tree represents a tree structure with leaf-nodes and branch-nodes. +type Tree interface { + // AddNode adds a new Node to a branch. + AddNode(v Value) Tree + // AddMetaNode adds a new Node with the meta value provided to a branch. + AddMetaNode(meta MetaValue, v Value) Tree + // AddBranch adds a new branch Node (a level deeper). + AddBranch(v Value) Tree + // AddMetaBranch adds a new branch Node (a level deeper) with the meta value provided. + AddMetaBranch(meta MetaValue, v Value) Tree + // Branch converts a leaf-Node to a branch-Node; + // applying this to a branch-Node has no effect. + Branch() Tree + // FindByMeta finds a Node whose meta value matches the provided one by reflect.DeepEqual, + // returns nil if not found. + FindByMeta(meta MetaValue) Tree + // FindByValue finds a Node whose value matches the provided one by reflect.DeepEqual, + // returns nil if not found. + FindByValue(value Value) Tree + // FindLastNode returns the last Node of a tree. + FindLastNode() Tree + // String renders the tree or subtree as a string. + String() string + // Bytes renders the tree or subtree as a byte slice. + Bytes() []byte + + SetValue(value Value) + SetMetaValue(meta MetaValue) + + // VisitAll iterates over the tree, branches and nodes. + // If you need to iterate over the whole tree, use the root Node. + // Note this method uses a depth-first approach. 
+ VisitAll(fn NodeVisitor) +} + +type Node struct { + Root *Node + Meta MetaValue + Value Value + Nodes []*Node +} + +func (n *Node) FindLastNode() Tree { + ns := n.Nodes + if len(ns) == 0 { + return nil + } + return ns[len(ns)-1] +} + +func (n *Node) AddNode(v Value) Tree { + n.Nodes = append(n.Nodes, &Node{ + Root: n, + Value: v, + }) + return n +} + +func (n *Node) AddMetaNode(meta MetaValue, v Value) Tree { + n.Nodes = append(n.Nodes, &Node{ + Root: n, + Meta: meta, + Value: v, + }) + return n +} + +func (n *Node) AddBranch(v Value) Tree { + branch := &Node{ + Root: n, + Value: v, + } + n.Nodes = append(n.Nodes, branch) + return branch +} + +func (n *Node) AddMetaBranch(meta MetaValue, v Value) Tree { + branch := &Node{ + Root: n, + Meta: meta, + Value: v, + } + n.Nodes = append(n.Nodes, branch) + return branch +} + +func (n *Node) Branch() Tree { + n.Root = nil + return n +} + +func (n *Node) FindByMeta(meta MetaValue) Tree { + for _, node := range n.Nodes { + if reflect.DeepEqual(node.Meta, meta) { + return node + } + if v := node.FindByMeta(meta); v != nil { + return v + } + } + return nil +} + +func (n *Node) FindByValue(value Value) Tree { + for _, node := range n.Nodes { + if reflect.DeepEqual(node.Value, value) { + return node + } + // recurse by value so nested nodes are matched against Value, not Meta + if v := node.FindByValue(value); v != nil { + return v + } + } + return nil +} + +func (n *Node) Bytes() []byte { + buf := new(bytes.Buffer) + level := 0 + var levelsEnded []int + if n.Root == nil { + if n.Meta != nil { + buf.WriteString(fmt.Sprintf("[%v] %v", n.Meta, n.Value)) + } else { + buf.WriteString(fmt.Sprintf("%v", n.Value)) + } + buf.WriteByte('\n') + } else { + edge := EdgeTypeMid + if len(n.Nodes) == 0 { + edge = EdgeTypeEnd + levelsEnded = append(levelsEnded, level) + } + printValues(buf, 0, levelsEnded, edge, n) + } + if len(n.Nodes) > 0 { + printNodes(buf, level, levelsEnded, n.Nodes) + } + return buf.Bytes() +} + +func (n *Node) String() string { + return string(n.Bytes()) +} + +func (n *Node) SetValue(value Value) { + n.Value = value +} + +func (n *Node) SetMetaValue(meta MetaValue) { + n.Meta = meta +} + +func (n *Node) VisitAll(fn NodeVisitor) { + for _, node := range n.Nodes { + fn(node) + + if len(node.Nodes) > 0 { + node.VisitAll(fn) + continue + } + } +} + +func printNodes(wr io.Writer, + level int, levelsEnded []int, nodes []*Node) { + + for i, node := range nodes { + edge := EdgeTypeMid + if i == len(nodes)-1 { + levelsEnded = append(levelsEnded, level) + edge = EdgeTypeEnd + } + printValues(wr, level, levelsEnded, edge, node) + if len(node.Nodes) > 0 { + printNodes(wr, level+1, levelsEnded, node.Nodes) + } + } +} + +func printValues(wr io.Writer, + level int, levelsEnded []int, edge EdgeType, node *Node) { + + for i := 0; i < level; i++ { + if isEnded(levelsEnded, i) { + fmt.Fprint(wr, strings.Repeat(" ", IndentSize+1)) + continue + } + fmt.Fprintf(wr, "%s%s", EdgeTypeLink, strings.Repeat(" ", IndentSize)) + } + + val := renderValue(level, node) + meta := node.Meta + + if meta != nil { + fmt.Fprintf(wr, "%s [%v] %v\n", edge, meta, val) + return + } + fmt.Fprintf(wr, "%s %v\n", edge, val) +} + +func isEnded(levelsEnded []int, level int) bool { + for _, l := range levelsEnded { + if l == level { + return true + } + } + return false +} + +func renderValue(level int, node *Node) Value { + lines := strings.Split(fmt.Sprintf("%v", node.Value), "\n") + + // If value does not contain multiple lines, return itself. 
+ if len(lines) < 2 { + return node.Value + } + + // If value contains multiple lines, + // generate a padding and prefix each line with it. + pad := padding(level, node) + + for i := 1; i < len(lines); i++ { + lines[i] = fmt.Sprintf("%s%s", pad, lines[i]) + } + + return strings.Join(lines, "\n") +} + +// padding returns a padding for the multiline values with correctly placed link edges. +// It is generated by traversing the tree upwards (from leaf to the root of the tree) +// and, on each level, checking if the Node is the last one of its siblings. +// If a Node is the last one, the padding on that level should be empty (there's nothing to link to below it). +// If a Node is not the last one, the padding on that level should be the link edge so the sibling below is correctly connected. +func padding(level int, node *Node) string { + links := make([]string, level+1) + + for node.Root != nil { + if isLast(node) { + links[level] = strings.Repeat(" ", IndentSize+1) + } else { + links[level] = fmt.Sprintf("%s%s", EdgeTypeLink, strings.Repeat(" ", IndentSize)) + } + level-- + node = node.Root + } + + return strings.Join(links, "") +} + +// isLast checks if the Node is the last one in the slice of its parent's children +func isLast(n *Node) bool { + return n == n.Root.FindLastNode() +} + +type EdgeType string + +var ( + EdgeTypeLink EdgeType = "│" + EdgeTypeMid EdgeType = "├──" + EdgeTypeEnd EdgeType = "└──" +) + +// IndentSize is the number of spaces per tree level. +var IndentSize = 3 + +// New generates a new tree +func New() Tree { + return &Node{Value: "."} +} + +// NewWithRoot generates a new tree with the given root value +func NewWithRoot(root Value) Tree { + return &Node{Value: root} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go new file mode 100644 index 000000000..92b8cf73c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// DefaultClient is the default Client and is used by Get, Head, Post and PostForm. +// Please be careful of initialization order - for example, if you change +// the global propagator, the DefaultClient might still be using the old one. +var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} + +// Get is a convenient replacement for http.Get that adds a span around the request. +func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Head is a convenient replacement for http.Head that adds a span around the request. +func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Post is a convenient replacement for http.Post that adds a span around the request. +func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return DefaultClient.Do(req) +} + +// PostForm is a convenient replacement for http.PostForm that adds a span around the request. +func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go new file mode 100644 index 000000000..303e5505e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span. 
+const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Server HTTP metrics. +const ( + RequestCount = "http.server.request_count" // Incoming request count total + RequestContentLength = "http.server.request_content_length" // Incoming request bytes total + ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total + ServerLatency = "http.server.duration" // Incoming end to end duration, microseconds +) + +// Filter is a predicate used to determine whether a given http.request should +// be traced. A Filter must return true if the request should be traced. +type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(Version())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 000000000..e4fa1b8d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,208 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +const ( + instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// config represents the configuration options available for the http.Handler +// and http.Transport types. +type config struct { + ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + PublicEndpoint bool + PublicEndpointFn func(*http.Request) bool + ReadEvent bool + WriteEvent bool + Filters []Filter + SpanNameFormatter func(string, *http.Request) string + ClientTrace func(context.Context) *httptrace.ClientTrace + + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it. 
+func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, it can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + c.Meter = c.MeterProvider.Meter( + instrumentationName, + metric.WithInstrumentationVersion(Version()), + ) + + return c +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.TracerProvider = provider + } + }) +} + +// WithMeterProvider specifies a meter provider to use for creating a meter. +// If none is specified, the global provider is used. +func WithMeterProvider(provider metric.MeterProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.MeterProvider = provider + } + }) +} + +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. +func WithPublicEndpoint() Option { + return optionFunc(func(c *config) { + c.PublicEndpoint = true + }) +} + +// WithPublicEndpointFn runs with every request, and allows conditionally +// configuring the Handler to link the span with an incoming span context. If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(*http.Request) bool) Option { + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) Option { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagators = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithFilter adds a filter to the list of filters used by the handler. +// If any filter indicates to exclude a request then the request will not be +// traced. All filters must allow a request to be traced for a Span to be created. +// If no filters are provided then all requests are traced. +// Filters will be invoked for each processed request; it is advised to make them +// simple and fast. +func WithFilter(f Filter) Option { + return optionFunc(func(c *config) { + c.Filters = append(c.Filters, f) + }) +} + +type event int + +// Different types of events that can be recorded, see WithMessageEvents. +const ( + ReadEvents event = iota + WriteEvents +) + +// WithMessageEvents configures the Handler to record the specified events +// (span.AddEvent) on spans. By default only summary attributes are added at the +// end of the request. 
+// +// Valid events are: +// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read +// using the ReadBytesKey +// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write +// using the WriteBytesKey +func WithMessageEvents(events ...event) Option { + return optionFunc(func(c *config) { + for _, e := range events { + switch e { + case ReadEvents: + c.ReadEvent = true + case WriteEvents: + c.WriteEvent = true + } + } + }) +} + +// WithSpanNameFormatter takes a function that will be called on every +// request and the returned string will become the Span Name. +func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +// WithClientTrace takes a function that returns client trace instance that will be +// applied to the requests sent through the otelhttp Transport. +func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + return optionFunc(func(c *config) { + c.ClientTrace = f + }) +} + +// WithServerName returns an Option that sets the name of the (virtual) server +// handling requests. +func WithServerName(server string) Option { + return optionFunc(func(c *config) { + c.ServerName = server + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go new file mode 100644 index 000000000..38c7f01c7 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otelhttp provides an http.Handler and functions that are intended +// to be used to add tracing by wrapping existing handlers (with Handler) and +// routes WithRouteTag. +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go new file mode 100644 index 000000000..b2fbe0784 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -0,0 +1,275 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "io" + "net/http" + "time" + + "github.com/felixge/httpsnoop" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +// middleware is an http middleware which wraps the next handler in a span. +type middleware struct { + operation string + server string + + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + counters map[string]metric.Int64Counter + valueRecorders map[string]metric.Float64Histogram + publicEndpoint bool + publicEndpointFn func(*http.Request) bool +} + +func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation +} + +// NewHandler wraps the passed handler in a span named after the operation and +// enriches it with metrics. +func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { + return NewMiddleware(operation, opts...)(handler) +} + +// NewMiddleware returns a tracing and metrics instrumentation middleware. +// The handler returned by the middleware wraps a handler +// in a span named after the operation and enriches it with metrics. +func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { + h := middleware{ + operation: operation, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), + WithSpanNameFormatter(defaultHandlerFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) + h.configure(c) + h.createMeasures() + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.serveHTTP(w, r, next) + }) + } +} + +func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.meter = c.Meter + h.propagators = c.Propagators + h.spanStartOptions = c.SpanStartOptions + h.readEvent = c.ReadEvent + h.writeEvent = c.WriteEvent + h.filters = c.Filters + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +func (h *middleware) createMeasures() { + h.counters = make(map[string]metric.Int64Counter) + h.valueRecorders = make(map[string]metric.Float64Histogram) + + requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength) + handleErr(err) + + responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength) + handleErr(err) + + serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency) + handleErr(err) + + h.counters[RequestContentLength] = requestBytesCounter + h.counters[ResponseContentLength] = responseBytesCounter + h.valueRecorders[ServerLatency] = serverLatencyMeasure +} + +// serveHTTP sets up tracing and calls the given next http.Handler with the span +// context injected into the request context. 
+func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request + next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), + } + if h.server != "" { + hostAttr := semconv.NetHostName(h.server) + opts = append(opts, trace.WithAttributes(hostAttr)) + } + opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + + tracer := h.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) + defer span.End() + + readRecordFunc := func(int64) {} + if h.readEvent { + readRecordFunc = func(n int64) { + span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) + } + } + + var bw bodyWrapper + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + if r.Body != nil && r.Body != http.NoBody { + bw.ReadCloser = r.Body + bw.record = readRecordFunc + r.Body = &bw + } + + writeRecordFunc := func(int64) {} + if h.writeEvent { + writeRecordFunc = func(n int64) { + span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n))) + } + } + + rww := &respWriterWrapper{ + ResponseWriter: w, + record: writeRecordFunc, + ctx: ctx, + props: h.propagators, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, + // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom). + + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc { + return rww.Header + }, + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return rww.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return rww.WriteHeader + }, + }) + + labeler := &Labeler{} + ctx = injectLabeler(ctx, labeler) + + next.ServeHTTP(w, r.WithContext(ctx)) + + setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) + + // Add metrics + attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) + if rww.statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) + } + o := metric.WithAttributes(attributes...) + h.counters[RequestContentLength].Add(ctx, bw.read, o) + h.counters[ResponseContentLength].Add(ctx, rww.written, o) + + // Use floating point division here for higher precision (instead of Millisecond method). 
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) +} + +func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { + attributes := []attribute.KeyValue{} + + // TODO: Consider adding an event after each read and write, possibly as an + // option (defaulting to off), so as to not create needlessly verbose spans. + if read > 0 { + attributes = append(attributes, ReadBytesKey.Int64(read)) + } + if rerr != nil && rerr != io.EOF { + attributes = append(attributes, ReadErrorKey.String(rerr.Error())) + } + if wrote > 0 { + attributes = append(attributes, WroteBytesKey.Int64(wrote)) + } + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) + + if werr != nil && werr != io.EOF { + attributes = append(attributes, WriteErrorKey.String(werr.Error())) + } + span.SetAttributes(attributes...) +} + +// WithRouteTag annotates spans and metrics with the provided route name +// with HTTP route attribute. +func WithRouteTag(route string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + attr := semconv.HTTPRouteKey.String(route) + + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(attr) + + labeler, _ := LabelerFromContext(r.Context()) + labeler.Add(attr) + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go new file mode 100644 index 000000000..edf4ce3d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +// Generate semconvutil package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go new file mode 100644 index 000000000..d3dede9eb --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -0,0 +1,552 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconvutil/httpconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "fmt" + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// HTTPClientResponse returns trace attributes for an HTTP response received by a +// client from a server. It will return the following attributes if the related +// values are defined in resp: "http.status_code", +// "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event; +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. A complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) +func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { + return hc.ClientResponse(resp) +} + +// HTTPClientRequest returns trace attributes for an HTTP request made by a client. +// The following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". +func HTTPClientRequest(req *http.Request) []attribute.KeyValue { + return hc.ClientRequest(req) +} + +// HTTPClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func HTTPClientStatus(code int) (codes.Code, string) { + return hc.ClientStatus(code) +} + +// HTTPServerRequest returns trace attributes for an HTTP request received by a +// server. 
+// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if their related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequest(server, req) +} + +// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "net.host.name". The following attributes are +// returned if their related values are defined in req: "net.host.port". +func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequestMetrics(server, req) +} + +// HTTPServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func HTTPServerStatus(code int) (codes.Code, string) { + return hc.ServerStatus(code) +} + +// HTTPRequestHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// capture and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. 
+func HTTPRequestHeader(h http.Header) []attribute.KeyValue { + return hc.RequestHeader(h) +} + +// HTTPResponseHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// capture and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func HTTPResponseHeader(h http.Header) []attribute.KeyValue { + return hc.ResponseHeader(h) +} + +// httpConv are the HTTP semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type httpConv struct { + NetConv *netConv + + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPResponseContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key +} + +var hc = &httpConv{ + NetConv: nc, + + EnduserIDKey: semconv.EnduserIDKey, + HTTPClientIPKey: semconv.HTTPClientIPKey, + HTTPFlavorKey: semconv.HTTPFlavorKey, + HTTPMethodKey: semconv.HTTPMethodKey, + HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, + HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, + HTTPRouteKey: semconv.HTTPRouteKey, + HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, + HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, + HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, + HTTPTargetKey: semconv.HTTPTargetKey, + HTTPURLKey: semconv.HTTPURLKey, + HTTPUserAgentKey: semconv.HTTPUserAgentKey, +} + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. The following attributes are returned if the related values +// are defined in resp: "http.status_code", "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event; +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. A complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) +func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + var n int + if resp.StatusCode > 0 { + n++ + } + if resp.ContentLength > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + if resp.StatusCode > 0 { + attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) + } + if resp.ContentLength > 0 { + attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) + } + return attrs +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". 
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+	n := 4 // URL, peer name, proto, and method.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	peer, p := firstHostPort(h, req.Header.Get("Host"))
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+	if port > 0 {
+		n++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	if req.ContentLength > 0 {
+		n++
+	}
+	userID, _, hasUserID := req.BasicAuth()
+	if hasUserID {
+		n++
+	}
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.flavor(req.Proto))
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, c.HTTPURLKey.String(u))
+
+	attrs = append(attrs, c.NetConv.PeerName(peer))
+	if port > 0 {
+		attrs = append(attrs, c.NetConv.PeerPort(port))
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+	}
+
+	if l := req.ContentLength; l > 0 {
+		attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+	}
+
+	if hasUserID {
+		attrs = append(attrs, c.EnduserIDKey.String(userID))
+	}
+
+	return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	// TODO: This currently does not add the specification required
+	// `http.target` attribute. It has too high of a cardinality to safely be
+	// added. An alternate should be added, or this comment removed, when it is
+	// addressed by the specification. If it is ultimately decided to continue
+	// not including the attribute, the HTTPTargetKey field of the httpConv
+	// should be removed as well.
+
+	n := 4 // Method, scheme, proto, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
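+		// If the primary server name carries no port, the port (if any)
+		// is still taken from req.Host below.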
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		n++
+	}
+	peer, peerPort := splitHostPort(req.RemoteAddr)
+	if peer != "" {
+		n++
+		if peerPort > 0 {
+			n++
+		}
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	userID, _, hasUserID := req.BasicAuth()
+	if hasUserID {
+		n++
+	}
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		n++
+	}
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.scheme(req.TLS != nil))
+	attrs = append(attrs, c.flavor(req.Proto))
+	attrs = append(attrs, c.NetConv.HostName(host))
+
+	if hostPort > 0 {
+		attrs = append(attrs, c.NetConv.HostPort(hostPort))
+	}
+
+	if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+		// file-path that would be interpreted with a sock family.
+		attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+		if peerPort > 0 {
+			attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+		}
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+	}
+
+	if hasUserID {
+		attrs = append(attrs, c.EnduserIDKey.String(userID))
+	}
+
+	if clientIP != "" {
+		attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+	}
+
+	return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+	// TODO: This currently does not add the specification required
+	// `http.target` attribute. It has too high of a cardinality to safely be
+	// added. An alternate should be added, or this comment removed, when it is
+	// addressed by the specification. If it is ultimately decided to continue
+	// not including the attribute, the HTTPTargetKey field of the httpConv
+	// should be removed as well.
+
+	n := 4 // Method, scheme, proto, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.flavor(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + return attrs +} + +func (c *httpConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *httpConv) flavor(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. +func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *httpConv) ClientStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. 
Status codes in the 400-499 range are not
+// returned as errors.
+func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 500 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
new file mode 100644
index 000000000..bde889343
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -0,0 +1,368 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/netconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+	"net"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// NetTransport returns a trace attribute describing the transport protocol of the
+// passed network. See net.Dial for information about acceptable network
+// values.
+func NetTransport(network string) attribute.KeyValue {
+	return nc.Transport(network)
+}
+
+// NetClient returns trace attributes for a client network connection to address.
+// See net.Dial for information about acceptable address values, address should
+// be the same as the one used to create conn. If conn is nil, only network
+// peer attributes will be returned that describe address. Otherwise, the
+// socket level information about conn will also be included.
+func NetClient(address string, conn net.Conn) []attribute.KeyValue {
+	return nc.Client(address, conn)
+}
+
+// NetServer returns trace attributes for a network listener listening at address.
+// See net.Listen for information about acceptable address values, address
+// should be the same as the one used to create ln. If ln is nil, only network
+// host attributes will be returned that describe address. Otherwise, the
+// socket level information about ln will also be included.
+func NetServer(address string, ln net.Listener) []attribute.KeyValue {
+	return nc.Server(address, ln)
+}
+
+// netConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
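+// A single package-level instance, nc, backs the exported NetTransport,
+// NetClient, and NetServer helpers above.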
+type netConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +var nc = &netConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetSockFamilyKey: semconv.NetSockFamilyKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetSockHostAddrKey: semconv.NetSockHostAddrKey, + NetSockHostPortKey: semconv.NetSockHostPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, +} + +func (c *netConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *netConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(int(p))) + } + return attrs +} + +// Server returns attributes for a network listener listening at address. See +// net.Listen for information about acceptable address values, address should +// be the same as the one used to create ln. If ln is nil, only network host +// attributes will be returned that describe address. Otherwise, the socket +// level information about ln will also be included. +func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { + if ln == nil { + return c.Host(address) + } + + lAddr := ln.Addr() + if lAddr == nil { + return c.Host(address) + } + + hostName, hostPort := splitHostPort(address) + sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) + network := lAddr.Network() + sockFamily := family(network, sockHostAddr) + + n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) + n += positiveInt(hostPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if hostName != "" { + attr = append(attr, c.HostName(hostName)) + if hostPort > 0 { + // Only if net.host.name is set should net.host.port be. + attr = append(attr, c.HostPort(hostPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. 
+ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func (c *netConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *netConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { + if conn == nil { + return c.Peer(address) + } + + lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() + + var network string + switch { + case lAddr != nil: + network = lAddr.Network() + case rAddr != nil: + network = rAddr.Network() + default: + return c.Peer(address) + } + + peerName, peerPort := splitHostPort(address) + var ( + sockFamily string + sockPeerAddr string + sockPeerPort int + sockHostAddr string + sockHostPort int + ) + + if lAddr != nil { + sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) + } + + if rAddr != nil { + sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) + } + + switch { + case sockHostAddr != "": + sockFamily = family(network, sockHostAddr) + case sockPeerAddr != "": + sockFamily = family(network, sockPeerAddr) + } + + n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) + n += positiveInt(peerPort, sockPeerPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if peerName != "" { + attr = append(attr, c.PeerName(peerName)) + if peerPort > 0 { + // Only if net.peer.name is set should net.peer.port be. + attr = append(attr, c.PeerPort(peerPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockPeerAddr != "" { + attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) + if sockPeerPort > 0 { + // Only if net.sock.peer.addr is set should net.sock.peer.port be. + attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) + } + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +func nonZeroStr(strs ...string) int { + var n int + for _, str := range strs { + if str != "" { + n++ + } + } + return n +} + +func positiveInt(ints ...int) int { + var n int + for _, i := range ints { + if i > 0 { + n++ + } + } + return n +} + +// Peer returns attributes for a network peer address. 
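+// For example, Peer("10.1.2.3:8080") returns net.peer.name "10.1.2.3" and
+// net.peer.port 8080, while an empty or unparsable address yields no
+// attributes.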
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+	h, p := splitHostPort(address)
+	var n int
+	if h != "" {
+		n++
+		if p > 0 {
+			n++
+		}
+	}
+
+	if n == 0 {
+		return nil
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.PeerName(h))
+	if p > 0 {
+		attrs = append(attrs, c.PeerPort(int(p)))
+	}
+	return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+	return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+	return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+	return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+	return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+	port = -1
+
+	if strings.HasPrefix(hostport, "[") {
+		addrEnd := strings.LastIndex(hostport, "]")
+		if addrEnd < 0 {
+			// Invalid hostport.
+			return
+		}
+		if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+			host = hostport[1:addrEnd]
+			return
+		}
+	} else {
+		if i := strings.LastIndex(hostport, ":"); i < 0 {
+			host = hostport
+			return
+		}
+	}
+
+	host, pStr, err := net.SplitHostPort(hostport)
+	if err != nil {
+		return
+	}
+
+	p, err := strconv.ParseUint(pStr, 10, 16)
+	if err != nil {
+		return
+	}
+	return host, int(p)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 000000000..26a51a180
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+	mu         sync.Mutex
+	attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.attributes = append(l.attributes, ls...)
+}
+
+// Get returns a copy of the attributes added to the Labeler.
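+//
+// Handlers wrapped by this package can retrieve the request's Labeler with
+// LabelerFromContext (below) and add attributes to it, for example (sketch;
+// the attribute key is illustrative):
+//
+//	labeler, _ := LabelerFromContext(r.Context())
+//	labeler.Add(attribute.String("user.tier", "gold"))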
+func (l *Labeler) Get() []attribute.KeyValue {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	ret := make([]attribute.KeyValue, len(l.attributes))
+	copy(ret, l.attributes)
+	return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+func injectLabeler(ctx context.Context, l *Labeler) context.Context {
+	return context.WithValue(ctx, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+	l, ok := ctx.Value(labelerContextKey).(*Labeler)
+	if !ok {
+		l = &Labeler{}
+	}
+	return l, ok
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 000000000..e835cac12
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,193 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptrace"
+
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span.
+type Transport struct {
+	rt http.RoundTripper
+
+	tracer            trace.Tracer
+	propagators       propagation.TextMapPropagator
+	spanStartOptions  []trace.SpanStartOption
+	filters           []Filter
+	spanNameFormatter func(string, *http.Request) string
+	clientTrace       func(context.Context) *httptrace.ClientTrace
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span and injects the span context into the outbound request headers.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+	if base == nil {
+		base = http.DefaultTransport
+	}
+
+	t := Transport{
+		rt: base,
+	}
+
+	defaultOpts := []Option{
+		WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+		WithSpanNameFormatter(defaultTransportFormatter),
+	}
+
+	c := newConfig(append(defaultOpts, opts...)...)
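+	// Note: user-supplied options were appended after the defaults above, so
+	// they take precedence when both configure the same setting.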
+	t.applyConfig(c)
+
+	return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+	t.tracer = c.Tracer
+	t.propagators = c.Propagators
+	t.spanStartOptions = c.SpanStartOptions
+	t.filters = c.Filters
+	t.spanNameFormatter = c.SpanNameFormatter
+	t.clientTrace = c.ClientTrace
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+	return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+	for _, f := range t.filters {
+		if !f(r) {
+			// Simply pass through to the base RoundTripper if a filter rejects the request.
+			return t.rt.RoundTrip(r)
+		}
+	}
+
+	tracer := t.tracer
+
+	if tracer == nil {
+		if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+			tracer = newTracer(span.TracerProvider())
+		} else {
+			tracer = newTracer(otel.GetTracerProvider())
+		}
+	}
+
+	opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+	ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+	if t.clientTrace != nil {
+		ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+	}
+
+	r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
+	span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+	t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+	res, err := t.rt.RoundTrip(r)
+	if err != nil {
+		span.RecordError(err)
+		span.SetStatus(codes.Error, err.Error())
+		span.End()
+		return res, err
+	}
+
+	span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
+	span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+	res.Body = newWrappedBody(span, res.Body)
+
+	return res, err
+}
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser {
+	// Successful protocol switch responses will have a body that implements
+	// io.ReadWriteCloser. Ensure this interface type continues to be
+	// satisfied if that is the case.
+	if _, ok := body.(io.ReadWriteCloser); ok {
+		return &wrappedBody{span: span, body: body}
+	}
+
+	// Remove the implementation of the io.ReadWriteCloser and only implement
+	// the io.ReadCloser.
+	return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+	span trace.Span
+	body io.ReadCloser
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+	// This will not panic given the guard in newWrappedBody.
+ n, err := wb.body.(io.Writer).Write(p) + if err != nil { + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +func (wb *wrappedBody) Read(b []byte) (int, error) { + n, err := wb.body.Read(b) + + switch err { + case nil: + // nothing to do here but fall through to the return + case io.EOF: + wb.span.End() + default: + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +func (wb *wrappedBody) Close() error { + wb.span.End() + if wb.body != nil { + return wb.body.Close() + } + return nil +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go new file mode 100644 index 000000000..6eace875c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// Version is the current release version of the otelhttp instrumentation. +func Version() string { + return "0.45.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. +func SemVersion() string { + return Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go new file mode 100644 index 000000000..11a35ed16 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + + "go.opentelemetry.io/otel/propagation" +) + +var _ io.ReadCloser = &bodyWrapper{} + +// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. 
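+// The record callback is invoked with the number of bytes read on every call
+// to Read, so callers can update metrics incrementally.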
+type bodyWrapper struct {
+	io.ReadCloser
+	record func(n int64) // must not be nil
+
+	read int64
+	err  error
+}
+
+func (w *bodyWrapper) Read(b []byte) (int, error) {
+	n, err := w.ReadCloser.Read(b)
+	n1 := int64(n)
+	w.read += n1
+	w.err = err
+	w.record(n1)
+	return n, err
+}
+
+func (w *bodyWrapper) Close() error {
+	return w.ReadCloser.Close()
+}
+
+var _ http.ResponseWriter = &respWriterWrapper{}
+
+// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
+// that may be useful when using it in real life situations.
+type respWriterWrapper struct {
+	http.ResponseWriter
+	record func(n int64) // must not be nil
+
+	// used to inject the header
+	ctx context.Context
+
+	props propagation.TextMapPropagator
+
+	written     int64
+	statusCode  int
+	err         error
+	wroteHeader bool
+}
+
+func (w *respWriterWrapper) Header() http.Header {
+	return w.ResponseWriter.Header()
+}
+
+func (w *respWriterWrapper) Write(p []byte) (int, error) {
+	if !w.wroteHeader {
+		w.WriteHeader(http.StatusOK)
+	}
+	n, err := w.ResponseWriter.Write(p)
+	n1 := int64(n)
+	w.record(n1)
+	w.written += n1
+	w.err = err
+	return n, err
+}
+
+// WriteHeader persists the initial statusCode for span attribution.
+// All calls to WriteHeader are propagated to the underlying ResponseWriter,
+// but only the statusCode from the first call is persisted.
+// Blocking subsequent calls to WriteHeader would alter the expected behavior
+// and suppress the net/http warning logs that help developers notice
+// incorrect handler implementations.
+func (w *respWriterWrapper) WriteHeader(statusCode int) {
+	if !w.wroteHeader {
+		w.wroteHeader = true
+		w.statusCode = statusCode
+	}
+	w.ResponseWriter.WriteHeader(statusCode)
+}
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
new file mode 100644
index 000000000..ae6a3bcf1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -0,0 +1,5 @@
+ot
+fo
+te
+collison
+consequentially
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
new file mode 100644
index 000000000..4afbb1fb3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -0,0 +1,10 @@
+# https://github.com/codespell-project/codespell
+[codespell]
+builtin = clear,rare,informal
+check-filenames =
+check-hidden =
+ignore-words = .codespellignore
+interactive = 1
+skip = .git,go.mod,go.sum,semconv,venv,.tools
+uri-ignore-words-list = *
+write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 000000000..314766e91
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 000000000..f3355c852
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,25 @@
+.DS_Store
+Thumbs.db
+
+.tools/
+venv/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+go.work
+go.work.sum
+
+gen/
+
+/example/dice/dice
+/example/fib/fib
+/example/fib/traces.txt
+/example/jaeger/jaeger
+/example/namedtracer/namedtracer
+/example/opencensus/opencensus
+/example/passthrough/passthrough
+/example/prometheus/prometheus
+/example/zipkin/zipkin
+/example/otel-collector/otel-collector
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
new file mode 100644
index 000000000..38a1f5698
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "opentelemetry-proto"]
+	path = exporters/otlp/internal/opentelemetry-proto
+	url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 000000000..6e8eeec00
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,281 @@
+# See https://github.com/golangci/golangci-lint#config-file
+run:
+  issues-exit-code: 1 #Default
+  tests: true #Default
+
+linters:
+  # Disable everything by default so upgrades do not include new "default
+  # enabled" linters.
+  disable-all: true
+  # Specifically enable linters we want to use.
+  enable:
+    - depguard
+    - errcheck
+    - godot
+    - gofmt
+    - goimports
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - typecheck
+    - unused
+
+issues:
+  # Maximum issues count per one linter.
+  # Set to 0 to disable.
+  # Default: 50
+  # Setting to unlimited so the linter is only run once to debug all issues.
+  max-issues-per-linter: 0
+  # Maximum count of issues with the same text.
+  # Set to 0 to disable.
+  # Default: 3
+  # Setting to unlimited so the linter is only run once to debug all issues.
+  max-same-issues: 0
+  # Excluding configuration per-path, per-linter, per-text and per-source.
+  exclude-rules:
+    # TODO: Having appropriate comments for exported objects helps development,
+    # even for objects in internal packages. Appropriate comments for all
+    # exported objects should be added and this exclusion removed.
+    - path: '.*internal/.*'
+      text: "exported (method|function|type|const) (.+) should have comment or be unexported"
+      linters:
+        - revive
+    # Yes, they are, but it's okay in a test.
+    - path: _test\.go
+      text: "exported func.*returns unexported type.*which can be annoying to use"
+      linters:
+        - revive
+    # Example test functions should be treated like main.
+    - path: example.*_test\.go
+      text: "calls to (.+) only in main[(][)] or init[(][)] functions"
+      linters:
+        - revive
+  include:
+    # revive exported should have comment or be unexported.
+    - EXC0012
+    # revive package comment should be of the form ...
+    - EXC0013
+
+linters-settings:
+  depguard:
+    rules:
+      non-tests:
+        files:
+          - "!$test"
+          - "!**/*test/*.go"
+          - "!**/internal/matchers/*.go"
+        deny:
+          - pkg: "testing"
+          - pkg: "github.com/stretchr/testify"
+          - pkg: "crypto/md5"
+          - pkg: "crypto/sha1"
+          - pkg: "crypto/**/pkix"
+      otlp-internal:
+        files:
+          - "!**/exporters/otlp/internal/**/*.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
+            desc: Do not use cross-module internal packages.
+      otlptrace-internal:
+        files:
+          - "!**/exporters/otlp/otlptrace/*.go"
+          - "!**/exporters/otlp/otlptrace/internal/**.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
+            desc: Do not use cross-module internal packages.
+      otlpmetric-internal:
+        files:
+          - "!**/exporters/otlp/otlpmetric/internal/*.go"
+          - "!**/exporters/otlp/otlpmetric/internal/**/*.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
+            desc: Do not use cross-module internal packages.
+ otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/example/*.go" + - "**/example/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 000000000..40d62fa2e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 000000000..3202496c3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 000000000..3e5c35b5d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,2737 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.19.0/0.42.0/0.0.7] 2023-09-28 + +This release contains the first stable release of the OpenTelemetry Go [metric SDK]. +Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. 
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+  The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`.
(#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+  The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package.
(#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
+  Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
+  Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl.
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+  OpenTelemetry dropped support for the Jaeger exporter in July 2023.
+  Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+  or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+  Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+  The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+  The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
+- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shut down. (#4154)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+  Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
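+
+A minimal sketch of the global wiring, assuming only `go.opentelemetry.io/otel` and the metric SDK (the meter and instrument names are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	mp := sdkmetric.NewMeterProvider()
+	defer func() { _ = mp.Shutdown(context.Background()) }()
+
+	// Register the SDK provider globally; otel.Meter then resolves
+	// through otel.GetMeterProvider().
+	otel.SetMeterProvider(mp)
+	meter := otel.Meter("example.io/app")
+
+	counter, _ := meter.Int64Counter("app.requests")
+	counter.Add(context.Background(), 1)
+}
+```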
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+  This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+  Use `go.opentelemetry.io/otel` instead. (#4039)
+
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+  - The `AddConfig` used to hold configuration for addition measurements
+  - `NewAddConfig` used to create a new `AddConfig`
+  - `AddOption` used to configure an `AddConfig`
+  - The `RecordConfig` used to hold configuration for recorded measurements
+  - `NewRecordConfig` used to create a new `RecordConfig`
+  - `RecordOption` used to configure a `RecordConfig`
+  - The `ObserveConfig` used to hold configuration for observed measurements
+  - `NewObserveConfig` used to create a new `ObserveConfig`
+  - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+  They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+  This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+  - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic gRPC errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue` (see the sketch after this section). (#3971)
+  - The `Int64Counter.Add` method now accepts `...AddOption`
+  - The `Float64Counter.Add` method now accepts `...AddOption`
+  - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Int64Histogram.Record` method now accepts `...RecordOption`
+  - The `Float64Histogram.Record` method now accepts `...RecordOption`
+  - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+  - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+  - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
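+
+A minimal sketch against the API as of this release (the options later moved to `go.opentelemetry.io/otel/metric` when `metric/instrument` was deprecated; the histogram name and value are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/instrument"
+)
+
+func record(ctx context.Context, meter metric.Meter) error {
+	hist, err := meter.Float64Histogram("http.server.duration")
+	if err != nil {
+		return err
+	}
+	// Attributes are supplied through RecordOption values instead of
+	// trailing attribute.KeyValue arguments.
+	hist.Record(ctx, 12.5, instrument.WithAttributes(
+		attribute.String("http.method", "GET"),
+	))
+	return nil
+}
+```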
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+  It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+  Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource` (see the sketch after this list). (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
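+
+A minimal sketch of host ID detection, assuming only `go.opentelemetry.io/otel/sdk/resource`:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func main() {
+	// Detect the host ID (host.id) alongside other selected detectors.
+	res, err := resource.New(context.Background(),
+		resource.WithHostID(),
+		resource.WithOS(),
+	)
+	if err != nil {
+		fmt.Println("resource detection:", err)
+	}
+	fmt.Println(res)
+}
+```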
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+  The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+  Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+  The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+  - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeNameKey` -> `OTelScopeNameKey`
+    - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+    - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+    - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+    - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+    - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+    - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+    - `OtelStatusCodeError` -> `OTelStatusCodeError`
+  - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeName` -> `OTelScopeName`
+    - `OtelScopeVersion` -> `OTelScopeVersion`
+    - `OtelLibraryName` -> `OTelLibraryName`
+    - `OtelLibraryVersion` -> `OTelLibraryVersion`
+    - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+  See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric` (see the sketch after this list). (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+  - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+  - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout within which an export is attempted.
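+
+A minimal sketch of scope-level attributes, assuming only the API packages named above (the provider values and attribute are illustrative):
+
+```go
+package main
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func scoped(mp metric.MeterProvider, tp trace.TracerProvider) {
+	// Attach attributes to the instrumentation scope of a Meter and a Tracer.
+	meter := mp.Meter("example.io/pkg",
+		metric.WithInstrumentationAttributes(attribute.String("tenant", "a")),
+	)
+	tracer := tp.Tracer("example.io/pkg",
+		trace.WithInstrumentationAttributes(attribute.String("tenant", "a")),
+	)
+	_, _ = meter, tracer
+}
+```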
+
+### Changed
+
+- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+  This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+  Use the equivalent unit string instead. (#3776)
+  - Use `"1"` instead of `unit.Dimensionless`
+  - Use `"By"` instead of `unit.Bytes`
+  - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+  These functions ensure semantic convention type correctness (see the sketch below). (#3675)
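+
+A minimal sketch of the typed helpers, assuming only `go.opentelemetry.io/otel/semconv/v1.17.0` (the values are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+func main() {
+	// Typed creation functions replace hand-built attribute.KeyValue pairs.
+	attrs := []attribute.KeyValue{
+		semconv.HTTPMethod("GET"),
+		semconv.HTTPStatusCode(200),
+		semconv.NetPeerName("example.com"),
+	}
+	fmt.Println(attrs)
+}
+```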
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
+  - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+  This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+  The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+  - `Float64Counter` replaces the `syncfloat64.Counter`
+  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+  - `Float64Histogram` replaces the `syncfloat64.Histogram`
+  - `Int64Counter` replaces the `syncint64.Counter`
+  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+  - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
+### Changed
+
+- Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+  This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+  Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed (see the sketch after this section).
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
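+
+A minimal sketch of callback registration as the API later stabilized in `go.opentelemetry.io/otel/metric` (the gauge name is illustrative):
+
+```go
+package main
+
+import (
+	"context"
+	"runtime"
+
+	"go.opentelemetry.io/otel/metric"
+)
+
+func register(meter metric.Meter) (metric.Registration, error) {
+	goroutines, err := meter.Int64ObservableGauge("process.goroutines")
+	if err != nil {
+		return nil, err
+	}
+	// The named Callback receives an Observer and reports values through it;
+	// the returned Registration can later Unregister the callback.
+	return meter.RegisterCallback(
+		func(_ context.Context, o metric.Observer) error {
+			o.ObserveInt64(goroutines, int64(runtime.NumGoroutine()))
+			return nil
+		},
+		goroutines,
+	)
+}
+```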
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead (see the sketch after this list). (#3530)
+  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Float64Histogram`
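+
+A minimal sketch of direct instrument creation on a `Meter`, assuming only `go.opentelemetry.io/otel/metric` (the instrument names are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+)
+
+func instruments(ctx context.Context, meter metric.Meter) error {
+	// Instruments are created directly on the Meter rather than
+	// through per-type InstrumentProviders.
+	requests, err := meter.Int64Counter("app.requests")
+	if err != nil {
+		return err
+	}
+	latency, err := meter.Float64Histogram("app.latency")
+	if err != nil {
+		return err
+	}
+	requests.Add(ctx, 1)
+	latency.Record(ctx, 3.2)
+	return nil
+}
+```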
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+  This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it (see the sketch after this list). (#3387)
+- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+  - `OTEL_EXPORTER_OTLP_INSECURE`
+  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` function allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
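+
+A minimal sketch of a view registered on the provider, assuming only `go.opentelemetry.io/otel/sdk/metric` (the stream rename mirrors the package documentation's example):
+
+```go
+package main
+
+import (
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func provider(reader sdkmetric.Reader) *sdkmetric.MeterProvider {
+	// Rename the output stream of any instrument named "latency".
+	view := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "latency"},
+		sdkmetric.Stream{Name: "request.latency"},
+	)
+	// Views now register on the MeterProvider and apply to all Readers.
+	return sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(reader),
+		sdkmetric.WithView(view),
+	)
+}
+```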
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+  Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+  The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevent panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation (see the sketch after this list).
+  By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
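+
+A minimal sketch of wiring the exporter to a non-default registry, assuming `go.opentelemetry.io/otel/exporters/prometheus`, the metric SDK, and `github.com/prometheus/client_golang`:
+
+```go
+package main
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+	"go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func setup() (*sdkmetric.MeterProvider, error) {
+	// Register with a private registry instead of the default one.
+	registry := prom.NewRegistry()
+	exporter, err := prometheus.New(prometheus.WithRegisterer(registry))
+	if err != nil {
+		return nil, err
+	}
+	// The exporter acts as a Reader for the metric SDK.
+	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)), nil
+}
+```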
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+  It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+  This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+  Instead the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+  Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured (a sketch follows this list). (#3175)
+- Update the minimum supported go version to go1.18. Removes support for go1.17. (#3179)
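+
+A minimal sketch of initializing the revised SDK, assuming the stdout metric exporter for illustration (the meter and counter names are ours):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	exporter, err := stdoutmetric.New()
+	if err != nil {
+		panic(err)
+	}
+	// A MeterProvider drives one or more Readers; the periodic reader
+	// collects and exports on an interval.
+	provider := sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter)),
+	)
+	defer func() { _ = provider.Shutdown(context.Background()) }()
+
+	meter := provider.Meter("example.io/app")
+	counter, _ := meter.Int64Counter("app.events")
+	counter.Add(context.Background(), 1)
+}
+```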
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shutdown even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
+  of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move metric no-op implementation from `nonrecording` to `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
+
+### Fixed
+
+- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the Exporters configuration in the TracerProviders message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
+  Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
+
+### Changed
+
+- Drop oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits, this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
+  To enable use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span-processor with environment variables.
+  The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
+- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback of the internal state.
+  To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use sync.Map to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporter/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`; use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`; use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind` and `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`.
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The No-op implementations of sync and async instruments are no longer exported; new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
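+
+  A minimal sketch of the flush behavior (the stdout exporter is used only for illustration):
+
+  ```go
+  package main
+
+  import (
+  	"context"
+  	"log"
+
+  	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+  	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+  )
+
+  func main() {
+  	exp, err := stdouttrace.New()
+  	if err != nil {
+  		log.Fatal(err)
+  	}
+  	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+
+  	// After this change, ForceFlush exports every span still queued
+  	// in the batch span processor, not just a partial batch.
+  	if err := tp.ForceFlush(context.Background()); err != nil {
+  		log.Printf("force flush failed: %v", err)
+  	}
+  }
+  ```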
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275).
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has the `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from the `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - ValueRecorder becomes Histogram
+  - ValueObserver becomes Gauge
+  - SumObserver becomes CounterObserver
+  - UpDownSumObserver becomes UpDownCounterObserver
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into the `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, and pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is now harmonized such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
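+
+  A short sketch of the migration to the typed constructors (names and values are illustrative):
+
+  ```go
+  package main
+
+  import (
+  	"fmt"
+
+  	"go.opentelemetry.io/otel/attribute"
+  )
+
+  func main() {
+  	// Instead of the deprecated attribute.Any("ids", []string{"a", "b"}),
+  	// use the typed constructors added in this release.
+  	attrs := []attribute.KeyValue{
+  		attribute.String("service", "checkout"),
+  		attribute.Int("retries", 3),
+  		attribute.StringSlice("ids", []string{"a", "b"}),
+  	}
+  	fmt.Println(attrs)
+  }
+  ```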
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in resources.go. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added API `LinkFromContext` to return a Link that encapsulates the SpanContext from the provided context and also encapsulates attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
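+
+  A minimal sketch of the replacement function (the header value is illustrative):
+
+  ```go
+  package main
+
+  import (
+  	"fmt"
+
+  	"go.opentelemetry.io/otel/trace"
+  )
+
+  func main() {
+  	// Decode a W3C tracestate header value into a TraceState.
+  	ts, err := trace.ParseTraceState("key1=value1,key2=value2")
+  	if err != nil {
+  		panic(err)
+  	}
+  	fmt.Println(ts.Get("key1"), ts.Len())
+  }
+  ```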
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed the `DroppedAttributeCount` field from the `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using `WithNewRoot`, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- The OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for the OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the OTLP/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description |
+    | ---------------- | ----------- |
+    | 1 | Cancelled |
+    | 4 | Deadline Exceeded |
+    | 8 | Resource Exhausted |
+    | 10 | Aborted |
+    | 11 | Out of Range |
+    | 14 | Unavailable |
+    | 15 | Data Loss |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds the `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added the `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offering convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offering convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offering convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using the `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+  This method returns the status of a span using the new `Status` type. (#1874)
+- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated; use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated; use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated; use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to use them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
+  The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+  The `IsRecording` method returns if the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed the `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
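+
+  One way to wire an exporter without the removed helpers is to construct it and the `TracerProvider` explicitly (a sketch; the Jaeger exporter is just one example):
+
+  ```go
+  package main
+
+  import (
+  	"context"
+  	"log"
+
+  	"go.opentelemetry.io/otel"
+  	"go.opentelemetry.io/otel/exporters/jaeger"
+  	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+  )
+
+  func main() {
+  	// Construct the exporter and provider explicitly instead of
+  	// using the removed pipeline helpers.
+  	exp, err := jaeger.New(jaeger.WithCollectorEndpoint())
+  	if err != nil {
+  		log.Fatal(err)
+  	}
+  	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+  	defer tp.Shutdown(context.Background())
+  	otel.SetTracerProvider(tp)
+  }
+  ```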
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if the channel is closed to fix the Jaeger exporter test panic from closing a closed channel. (#1870, #1973)
+- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
+
+### Security
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+  - `process.pid`
+  - `process.executable.name`
+  - `process.executable.path`
+  - `process.command_args`
+  - `process.owner`
+  - `process.runtime.name`
+  - `process.runtime.version`
+  - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+  - `OTEL_EXPORTER_OTLP_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
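+
+  A sketch of exercising a few of these variables (it uses the current per-signal package layout and made-up endpoint, header, and timeout values for illustration):
+
+  ```go
+  package main
+
+  import (
+  	"context"
+  	"os"
+
+  	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+  )
+
+  func main() {
+  	// Read at exporter construction time when no equivalent
+  	// programmatic options are passed.
+  	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "https://collector.example.com:4317")
+  	os.Setenv("OTEL_EXPORTER_OTLP_HEADERS", "api-key=secret")
+  	os.Setenv("OTEL_EXPORTER_OTLP_TIMEOUT", "10000") // milliseconds
+
+  	exp, err := otlptracegrpc.New(context.Background())
+  	if err != nil {
+  		panic(err)
+  	}
+  	defer exp.Shutdown(context.Background())
+  }
+  ```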
+- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the OTLP/gRPC exporter. (#1821)
+- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
+  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using an internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+  The Span's SpanContext can now self-identify as being remote or not.
+  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+  This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
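+
+  A sketch of a custom sampler reading the parent from the context now carried by `SamplingParameters` (the sampling policy itself is hypothetical):
+
+  ```go
+  package sampler
+
+  import (
+  	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+  	"go.opentelemetry.io/otel/trace"
+  )
+
+  // remoteOnlySampler records and samples only spans with a remote parent.
+  type remoteOnlySampler struct{}
+
+  func (remoteOnlySampler) ShouldSample(p sdktrace.SamplingParameters) sdktrace.SamplingResult {
+  	// The parent span is recovered from the context held in ParentContext.
+  	psc := trace.SpanContextFromContext(p.ParentContext)
+  	decision := sdktrace.Drop
+  	if psc.IsRemote() {
+  		decision = sdktrace.RecordAndSample
+  	}
+  	return sdktrace.SamplingResult{Decision: decision, Tracestate: psc.TraceState()}
+  }
+
+  func (remoteOnlySampler) Description() string { return "remoteOnlySampler" }
+  ```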
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+  It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
+- Modify Zipkin Exporter default service name to use the default resource's serviceName instead of empty. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The `prometheus.InstallNewPipeline` example is moved from comment to example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+  This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+  The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself; instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+  This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+  To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Setting error status while recording error with Span from oteltest package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+  Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+  If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+  This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
+  The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+  To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values are redundant with other internal functions and
+  are not intended for end user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+  This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+  If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+  The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+  The type is no longer used by the exporter to configure anything.
+  All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, and this focuses the exporter configuration on the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobuf. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
+- Added non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
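+
+  A minimal sketch of the resulting usage pattern (the error and message are illustrative):
+
+  ```go
+  package main
+
+  import (
+  	"context"
+  	"errors"
+
+  	"go.opentelemetry.io/otel"
+  	"go.opentelemetry.io/otel/codes"
+  )
+
+  func main() {
+  	_, span := otel.Tracer("example").Start(context.Background(), "operation")
+  	defer span.End()
+
+  	// The description is only kept because the code is Error; for
+  	// codes.Ok or codes.Unset it would be discarded.
+  	err := errors.New("downstream timeout")
+  	span.RecordError(err)
+  	span.SetStatus(codes.Error, err.Error())
+  }
+  ```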
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
+
+### Removed
+
+- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
+- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+  These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed `jaeger.WithProcess` configuration option. (#1673)
+- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in global trace delegate implementation. (#1686)
+- Reduced excess memory usage by global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
+- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+  | OS | Go Version | Architecture |
+  | ------- | ---------- | ------------ |
+  | Ubuntu | 1.15 | amd64 |
+  | Ubuntu | 1.14 | amd64 |
+  | Ubuntu | 1.15 | 386 |
+  | Ubuntu | 1.14 | 386 |
+  | MacOS | 1.15 | amd64 |
+  | MacOS | 1.14 | amd64 |
+  | Windows | 1.15 | amd64 |
+  | Windows | 1.14 | amd64 |
+  | Windows | 1.15 | 386 |
+  | Windows | 1.14 | 386 |
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation
+  `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
+- Prevent end-users from implementing some interfaces (#1575)
+
+  ```
+  "otel/exporters/otlp/otlphttp".Option
+  "otel/exporters/stdout".Option
+  "otel/oteltest".Option
+  "otel/trace".TracerOption
+  "otel/trace".SpanOption
+  "otel/trace".EventOption
+  "otel/trace".LifeCycleOption
+  "otel/trace".InstrumentationOption
+  "otel/sdk/resource".Option
+  "otel/sdk/trace".ParentBasedSamplerOption
+  "otel/sdk/trace".ReadOnlySpan
+  "otel/sdk/trace".ReadWriteSpan
+  ```
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+  This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now set up explicitly to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added codeql workflow to GitHub Actions (#1428)
+- Added Gosec workflow to GitHub Actions (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
+- Remove duplicate checkout from GitHub Actions workflow (#1407)
+- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
+- Metric `exact` aggregator includes per-point timestamps (#1412)
+- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` is now passed a `TraceState` from the parent `SpanContext` (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+  - `WithExporter()` and `Start()` to configure Push behavior
+  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+  - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
+- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into the `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+  - `ID` has been renamed to `TraceID`.
+  - `IDFromHex` has been renamed to `TraceIDFromHex`.
+  - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+  - supports OTLP v0.6.0 (#1230, #1354)
+  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+  This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
+  `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000. (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+  It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+  `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer. (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add `Shutdown` method to `sdk/trace/provider`; shutdown processors in the order they were registered. (#1227)
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+  They now are `Unset`, `Error`, and `Ok`.
+  They no longer track the gRPC codes. (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+  The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
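+
+  A sketch of the replacement pattern, composing propagators explicitly (shown with the current package layout for illustration):
+
+  ```go
+  package main
+
+  import (
+  	"go.opentelemetry.io/otel"
+  	"go.opentelemetry.io/otel/propagation"
+  )
+
+  func main() {
+  	// A composite TextMapPropagator fills the role of the removed
+  	// Propagators interface.
+  	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
+  		propagation.TraceContext{},
+  		propagation.Baggage{},
+  	))
+  }
+  ```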
+  The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+  This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add a reconnecting UDP connection type to the Jaeger exporter.
+  This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
+  It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+  This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+  This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+  This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move `tools` package under `internal`. (#1141)
+- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+  The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153)
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+  recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+  Style Guide to reflect this styling rule. (#1172)
+- Don't consider an unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+  `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+  `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+  This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
+- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES`. (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify Callback Function Naming.
+  Rename `*Callback` with `*Func`. (#1061)
+- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface (a sketch of this interface follows the list).
+  This interface still supports the export of `SpanData`, but only as a slice.
+  Implementations are also now required to return any error from `ExportSpans` if one occurs, as well as implement a `Shutdown` method for exporter clean-up. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+  If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+  This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
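+
+As a rough illustration of the replacement `Exporter` contract, here is a hedged sketch assuming an interface of the shape `ExportSpans(context.Context, []*SpanData) error` plus `Shutdown(context.Context) error`, which is what the entry above describes; the concrete type and field names are illustrative, not the exact upstream definitions of this release.
+
+```go
+package example
+
+import (
+    "context"
+    "fmt"
+
+    exporttrace "go.opentelemetry.io/otel/sdk/export/trace"
+)
+
+// consoleExporter is a hypothetical Exporter implementation.
+type consoleExporter struct{}
+
+// ExportSpans receives spans only as a slice and must surface any export error.
+func (e *consoleExporter) ExportSpans(ctx context.Context, spans []*exporttrace.SpanData) error {
+    for _, sd := range spans {
+        if _, err := fmt.Println(sd.Name); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// Shutdown performs exporter clean-up; this sketch holds no resources.
+func (e *consoleExporter) Shutdown(ctx context.Context) error {
+    return nil
+}
+```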
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+  Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+  This move includes moving the `grpc` example to `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+  The functionality this method provided was limited compared to what a user can provide themselves.
+  It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+  These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+  These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+  This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+  This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+  - `value.Bool` was replaced with `kv.BoolValue`.
+  - `value.Int64` was replaced with `kv.Int64Value`.
+  - `value.Uint64` was replaced with `kv.Uint64Value`.
+  - `value.Float64` was replaced with `kv.Float64Value`.
+  - `value.Int32` was replaced with `kv.Int32Value`.
+  - `value.Uint32` was replaced with `kv.Uint32Value`.
+  - `value.Float32` was replaced with `kv.Float32Value`.
+  - `value.String` was replaced with `kv.StringValue`.
+  - `value.Int` was replaced with `kv.IntValue`.
+  - `value.Uint` was replaced with `kv.UintValue`.
+  - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel`, which it was synonymous with. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- GitHub action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+  References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+  Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute; a sketch of setting it on a span follows this list. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add HTTP content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
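+
+The semantic-convention keys added here are ordinary key-value constants that can be attached to any span. A minimal sketch using today's package layout follows; this release still housed the keys in `api/standard`, and the versioned `semconv` import below is an assumption to adjust to whatever revision is vendored.
+
+```go
+package main
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel"
+    semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+)
+
+func main() {
+    _, span := otel.Tracer("example").Start(context.Background(), "call-billing")
+    defer span.End()
+
+    // peer.service names the remote service this span talks to.
+    span.SetAttributes(semconv.PeerServiceKey.String("billing"))
+}
+```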
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+  This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+  If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+  Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+  This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+  - `"api/standard".FaaSName` -> `FaaSNameKey`
+  - `"api/standard".FaaSID` -> `FaaSIDKey`
+  - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+  - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+  The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+  If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling-only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+  This removes the behavior of changing the debug flag into a set sampling bit.
+  Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+  This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic GitHub workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+  This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
+  This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+  There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+  With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+  All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+  Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling to replace any one-off error handlers with the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Make the argument order of the Histogram and DDSketch `New()` functions consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standard` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks. (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows the user to specify a custom HTTP client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase instance size CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
+- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reusing one when multiple observations occur in the same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+  This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+  This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly (a sketch follows at the end of this release's notes). (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+  Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+  The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor specific Stackdriver exporter. It is now hosted on 3rd party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+  The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+  This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
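+
+As a sketch of the global shortcut functions added in #538: the snippet below uses the modern import path, since the `api/global` package was later folded into the root `otel` package, so the names are adjusted from what this release actually shipped.
+
+```go
+package main
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel"
+)
+
+func main() {
+    // otel.Tracer is the present-day descendant of the global shortcut:
+    // it fetches a Tracer from the globally registered TracerProvider.
+    tracer := otel.Tracer("example/app")
+
+    _, span := tracer.Start(context.Background(), "startup")
+    defer span.End()
+}
+```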
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+  The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+  This resulted in all subsequent releases not becoming the default latest.
+  A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+  Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+  Consequently, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+  This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+  This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK. (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Renames `circle-ci` target to more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+  Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+  This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+  This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- The `core.Value` type now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing them. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the OpenTelemetry Go library.
+It contains the API and SDK for trace and meter.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD
+[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
+[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
+[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
+[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
+[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
+[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
+[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
+[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
+[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
+[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
+[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
+[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
+[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
+[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
+[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
+[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
+[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
+[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
+[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
+[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
+[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
+[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
+[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
+[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
+[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
+[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
+[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
+[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
+[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
+[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
+[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
+[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
+[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
+[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
+[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
+[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
+[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
+[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
+[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
+[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
+[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
+[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
+[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
+[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
+[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
+[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
+[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
+[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
+[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
+[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
+[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
+[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
+[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
+[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
+[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
+[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
+[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
+[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
+[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
+[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
+[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
+[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
+[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
+[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
+[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
+[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
+[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
+[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
+[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
+[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+[Go 1.20]: https://go.dev/doc/go1.20
+[Go 1.19]: https://go.dev/doc/go1.19
+[Go 1.18]: https://go.dev/doc/go1.18
+
+[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
+[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
new file mode 100644
index 000000000..623740007
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -0,0 +1,17 @@
+#####################################################
+#
+# List of approvers for this repository
+#
+#####################################################
+#
+# Learn about membership in OpenTelemetry community:
+# https://github.com/open-telemetry/community/blob/main/community-membership.md
+#
+#
+# Learn about CODEOWNERS file format:
+# https://help.github.com/en/articles/about-code-owners
+#
+
+* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+
+CODEOWNERS @MrAlias @MadVikingGod @pellared
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
new file mode 100644
index 000000000..a00dbca7b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -0,0 +1,620 @@
+# Contributing to opentelemetry-go
+
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` to run the tests instead of `go test`.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+Additionally, there is a `codespell` target that checks for common
+typos in the code. It is not run by default, but you can run it
+manually with `make codespell`. It will set up a virtual environment
+in `venv` and install `codespell` there.
+
+If after running `make precommit` the output of `git status` contains
+`nothing to commit, working tree clean` then it means that everything
+is up-to-date and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+go get -d go.opentelemetry.io/otel
+```
+
+(This may print some warning about "build constraints exclude all Go
+files", just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of a redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This would put the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
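+
+As an illustration only (both the description and the PR number below are
+invented for the example), such a changelog entry might look like:
+
+```markdown
+- Fix goroutine leak in the span processor shutdown path. (#1234)
+```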
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+  tag it as `work-in-progress`, or mark it as
+  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure the CLA is signed and CI is clear.
+
+### How to Get PRs Merged
+
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+  This is not enforced through automation, but needs to be validated by the
+  maintainer merging.
+  * The qualified approvals need to be from [Approver]s/[Maintainer]s
+    affiliated with different companies. Two qualified approvals from
+    [Approver]s or [Maintainer]s affiliated with the same company counts as a
+    single qualified approval.
+  * PRs introducing changes that have already been discussed and consensus
+    reached only need one qualified approval. The discussion and resolution
+    needs to be linked to the PR.
+  * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+  * All PR comments and suggestions are resolved.
+  * All GitHub Pull Request reviews with a status of "Request changes" have
+    been addressed. Another review by the objecting reviewer with a different
+    status can be submitted to clear the original review, or the review can be
+    dismissed by a [Maintainer] when the issues from the original review have
+    been addressed.
+  * Any comments or reviews that cannot be resolved between the PR author and
+    reviewers can be submitted to the community [Approver]s and [Maintainer]s
+    during the weekly SIG meeting. If consensus is reached among the
+    [Approver]s and [Maintainer]s during the SIG meeting the objections to the
+    PR may be dismissed or resolved or the PR closed by a [Maintainer].
+  * Any substantive changes to the PR require existing Approval reviews be
+    cleared unless the approver explicitly states that their approval persists
+    across changes. This includes changes resulting from other feedback.
+    [Approver]s and [Maintainer]s can help in clearing reviews and they should
+    be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+  * To ensure this does not block the PR, it should be configured to allow
+    maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+  reasonable time to review.
+  * Trivial changes[^2] do not have to wait for one day and may be merged with
+    a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* An urgent fix can take exception as long as it has been actively communicated
+  among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+  status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+  changes, documentation corrections or updates, dependency updates, etc.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+
+It's especially valuable to read through the [library
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conforms to the specification, but the interface and structure are
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
+
+You can install and run a "local Go Doc site" in the following way:
+
+ ```sh
+ go install golang.org/x/pkgsite/cmd/pkgsite@latest
+ pkgsite
+ ```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind, the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices,
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you plan
+to submit. This check will need to pass for your changes to be able to be
+merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed, documentation explaining the reasons
+why needs to be included.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, the strong
+type system of Go restricts the function design options. There are a few ways
+to solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with
+the specific type name this configuration applies to if there are multiple
+`config`s in the package. This type must contain configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+	// options ...
+}
+```
+
+In general the `config` type will not need to be used externally to the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config`s are not shared across package
+boundaries, meaning a `config` from one package should not be directly used by
+another. The one exception is the API packages. The configs from the base API,
+e.g. `go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
+by the SDK, and therefore it is expected that these are exported.
+
+When a `config` is exported we want to maintain forward and backward
+compatibility; to achieve this, no fields should be exported, but they should
+instead be accessed by methods.
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function wraps any default settings and loops over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+	// Set default values for config.
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	// Perform any validation here.
+	return config
+}
+```
+
+If validation of the `config` options is also performed, this can additionally
+return an error that is expected to be handled by the instantiation function
+or propagated to the user.
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+	apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or in the
+special case of a boolean option, `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithOption sets a T to have an option included.
+func WithOption() Option {
+	return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+	return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+	MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+	c.MyType = o.MyType
+	return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+	return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+	return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+	return optionFunc(func(c config) config {
+		c.MyType = t
+		return c
+	})
+}
+```
+
+#### Instantiation
+
+Use this configuration pattern to configure instantiation with a `NewT`
+function.
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
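+
+For illustration, a sketch of such a constructor (the required `name`
+parameter and the fields set on `T` are hypothetical; `newConfig` is the
+helper described above):
+
+```go
+// NewT returns a T configured by the required name and any passed options.
+func NewT(name string, options ...Option) T {
+	cfg := newConfig(options...)
+	// Construct the T from the resolved configuration (fields are
+	// hypothetical).
+	return T{name: name, config: cfg}
+}
+```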
+
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct` types that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+	Weight      float64
+	Color       string
+	MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+	applyDog(config) config
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+	applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+	BirdOption
+	DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func (o weightOption) applyBird(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+	c.Color = string(o)
+	return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+	c.MaxAltitude = float64(o)
+	return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is self-documenting by naming method parameters. Therefore,
+where appropriate, methods of every exported interface type should have
+their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
+
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow the SDK one version before the
+API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API, the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems happened with libraries that upgraded to v2 when an application did
+not, and those applications would then produce no telemetry.
+
+More detail of the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed, it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add or it can
+be a super-set of the original interface. For example, if you wanted to add
+a `Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+	Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+	Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if c, ok := e.(Closer); ok {
+		c.Close()
+	}
+	/* ... */
+}
+```
+
+Alternatively, a new type that is the super-set of an `Exporter` can be created.
+
+```go
+type ClosingExporter interface {
+	Exporter
+	Close()
+}
+```
+
+This new type can be used similarly to the simple interface above in that a
+passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
+and the `Close` method called.
+
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set
+interface and will duplicate the pattern. For this reason, the simple targeted
+interface that defines the specific functionality should be preferred.
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions (a short sketch follows the Internal packages
+notes below).
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. This creates a coupling
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed, it will fail to upgrade[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+  - This package manages global state for all of opentelemetry-go. It needs to
+    be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+  - This package provides values in a `context.Context` that need to be
+    recognized by `go.opentelemetry.io/otel/baggage` and
+    `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
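+
+As a minimal sketch of the Testing guidance above, the following test carries
+`ConcurrentSafe` in its name and waits for every goroutine it starts, so none
+leak (the `counter` type is hypothetical, and the `sync`/`testing` imports are
+elided to match the fragments above):
+
+```go
+func TestCounterConcurrentSafe(t *testing.T) {
+	var c counter // hypothetical concurrency-safe type under test
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			c.Inc()
+		}()
+	}
+	wg.Wait() // all goroutines started above finish before the test returns
+}
+```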
+ +[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 + +## Approvers and Maintainers + +### Approvers + +- [Evan Torrie](https://github.com/evantorrie), Verizon Media +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [David Ashpole](https://github.com/dashpole), Google +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS + +### Maintainers + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Robert PajÄ…k](https://github.com/pellared), Splunk +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +### Emeritus + +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). + +[Approver]: #approvers +[Maintainer]: #maintainers +[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl +[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 000000000..5c311706b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,291 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default +ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +GOTMPL = $(TOOLS)/gotmpl +$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl + +GORELEASE = $(TOOLS)/gorelease +$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease + +.PHONY: tools +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +# Virtualized python tools via docker + +# The directory where the virtual environment is created. +VENVDIR := venv + +# The directory where the python tools are installed. +PYTOOLS := $(VENVDIR)/bin + +# The pip executable in the virtual environment. +PIP := $(PYTOOLS)/pip + +# The directory in the docker image where the current directory is mounted. +WORKDIR := /workdir + +# The python image to use for the virtual environment. +PYTHONIMAGE := python:3.11.3-slim-bullseye + +# Run the python image with the current directory mounted. +DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) + +# Create a virtual environment for Python tools. +$(PYTOOLS): +# The `--upgrade` flag is needed to ensure that the virtual environment is +# created with the latest pip version. + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + +# Install python packages into the virtual environment. +$(PYTOOLS)/%: | $(PYTOOLS) + @$(DOCKERPY) $(PIP) install -r requirements.txt + +CODESPELL = $(PYTOOLS)/codespell +$(CODESPELL): PACKAGE=codespell + +# Generate + +.PHONY: generate +generate: go-generate vanity-import-fix + +.PHONY: go-generate +go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) +go-generate/%: DIR=$* +go-generate/%: | $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." 
\ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... + +.PHONY: vanity-import-fix +vanity-import-fix: | $(PORTO) + @$(PORTO) --include-internal -w . + +# Generate go.work file for local development. +.PHONY: go-work +go-work: | $(CROSSLINK) + $(CROSSLINK) work --root=$(shell pwd) + +# Build + +.PHONY: build + +build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: | $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: | $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: | $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.20 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint + +.PHONY: vanity-import-check +vanity-import-check: | $(PORTO) + @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + +.PHONY: misspell +misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: codespell +codespell: | $(CODESPELL) + @$(DOCKERPY) $(CODESPELL) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! 
-path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +DEPENDABOT_CONFIG = .github/dependabot.yml +.PHONY: dependabot-check +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) + [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: gorelease +gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) +gorelease/%: DIR=$* +gorelease/%:| $(GORELEASE) + @echo "gorelease in $(DIR):" \ + && cd $(DIR) \ + && $(GORELEASE) \ + || echo "" + +.PHONY: prerelease +prerelease: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 000000000..634326ef8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,111 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). 
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
+
+## Project Status
+
+| Signal  | Status     | Project               |
+|---------|------------|-----------------------|
+| Traces  | Stable     | N/A                   |
+| Metrics | Mixed [1]  | [Go: Metric SDK (GA)] |
+| Logs    | Frozen [2] | N/A                   |
+
+[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34
+
+- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta.
+- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK.
+  No Logs Pull Requests are currently being accepted.
+
+Progress and status specific to this repository are tracked in our
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](VERSIONING.md).
+
+### Compatibility
+
+OpenTelemetry-Go ensures compatibility with the current supported versions of
+the [Go language](https://golang.org/doc/devel/release#policy):
+
+> Each major Go release is supported until there are two newer major releases.
+> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+For versions of Go that are no longer supported upstream, opentelemetry-go will
+stop ensuring compatibility with these versions in the following manner:
+
+- A minor release of opentelemetry-go will be made to add support for the new
+  supported release of Go.
+- The following minor release of opentelemetry-go will remove compatibility
+  testing for the oldest (now archived upstream) version of Go. This, and
+  future, releases of opentelemetry-go may include features only supported by
+  the currently supported versions of Go.
+
+Currently, this project supports the following environments.
+
+| OS      | Go Version | Architecture |
+|---------|------------|--------------|
+| Ubuntu  | 1.21       | amd64        |
+| Ubuntu  | 1.20       | amd64        |
+| Ubuntu  | 1.21       | 386          |
+| Ubuntu  | 1.20       | 386          |
+| MacOS   | 1.21       | amd64        |
+| MacOS   | 1.20       | amd64        |
+| Windows | 1.21       | amd64        |
+| Windows | 1.20       | amd64        |
+| Windows | 1.21       | 386          |
+| Windows | 1.20       | 386          |
+
+While this project should work for other systems, no compatibility guarantees
+are made for those systems currently.
+
+## Getting Started
+
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed
+traces and metrics from your application and send them to an observability
+platform. This project allows you to do just that for applications written in
+Go. There are two steps to this process: instrument your application, and
+configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application,
+it first needs to be instrumented. The easiest way to do this is by using an
+instrumentation library for your code. Be sure to check out [the officially
+supported instrumentation
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
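+
+If you instead instrument your code directly, using the
+`go.opentelemetry.io/otel` package described next, a minimal sketch might look
+like the following; the instrumentation scope name `example.com/myapp` and the
+`work` function are placeholders, and with no SDK configured the returned
+tracer is a harmless no-op:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+)
+
+func work(ctx context.Context) {
+	// Obtain a Tracer from the global TracerProvider and record a span.
+	tracer := otel.Tracer("example.com/myapp")
+	ctx, span := tracer.Start(ctx, "work")
+	defer span.End()
+	_ = ctx // pass ctx to downstream calls so child spans nest correctly
+}
+
+func main() {
+	work(context.Background())
+}
+```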
+
+If you need to extend the telemetry an instrumentation library provides, or
+want to build your own instrumentation for your application directly, you will
+need to use the
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The included [examples](./example/) are a good way to see some
+practical uses of this process.
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter                              | Metrics | Traces |
+|---------------------------------------|:-------:|:------:|
+| [OTLP](./exporters/otlp/)             |    ✓    |   ✓    |
+| [Prometheus](./exporters/prometheus/) |    ✓    |        |
+| [stdout](./exporters/stdout/)         |    ✓    |   ✓    |
+| [Zipkin](./exporters/zipkin/)         |         |   ✓    |
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 000000000..82ce3ee46
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,139 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.21.0" # Change to the release version you are generating.
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release, which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=<module set>
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff ...prerelease_<module set>_<new tag>
+   ```
+
+   This should have changed the version for all modules to be `<new tag>`.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```
+   git merge prerelease_<module set>_<new tag>
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the `<last tag>`.
+
+     ```
+     git --no-pager log --pretty=oneline "<last tag>..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET=<module set> COMMIT=<commit hash>
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream <new tag>
+   git push upstream <submodules-path/new tag>
+   ...
+   ```
+
+## Release
+
+Finally create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
+[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 000000000..412f1e362
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository.
This policy +is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver + 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. + * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. + + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. + * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). 
+    * If a module is version `v2` or higher, the
+      major version of the module must be included as a `/vN` at the end of the
+      module paths used in `go.mod` files (e.g., `module
+      go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+      go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+      package import path (e.g., `import
+      "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+      the paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+      is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+      it is that the module name now includes the `/v2`, so include `/v2`
+      whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API and telemetry will
+    be versioned with a major version greater than `v0`.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * Stable contrib modules cannot depend on experimental modules from this
+    project.
+  * All stable contrib modules of the same major version with this project
+    will use the same entire version as this project.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead,
+      the only change that will have been included is an update of that
+      module's dependency on this project's stable APIs.
+    * When an experimental module in contrib becomes stable, a new stable
+      module version will be released and will include this now stable
+      module. The new stable module version will be an increment of the minor
+      version number and will be applied to all existing stable contrib
+      modules, this project's modules, and the newly stable module being
+      released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules, the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy, the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+The `otel/trace`, `otel/baggage`, and `otel/sdk/trace` modules have now
+reached a point where they should be considered for a stable release. The
+`otel/metric` and `otel/sdk/metric` modules are still under active
+development, and the `otel` module depends on both `otel/trace` and
+`otel/metric`.
+
+The `otel` package is refactored to remove its dependency on `otel/metric` so
+it can be released as stable as well. With that done, the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable
+release. They are then released as version `v1.1.0` (the minor version is
+incremented to indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
new file mode 100644
index 000000000..dafe7424d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 000000000..fe2bc5766
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '\'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
+		// grow to a size such that most attribute encodings will not allocate
+		// new memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','. Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary. Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultAttrEncoder{}
+
+	// encoderIDCounter is for generating IDs for other attribute encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once for each type of attribute encoder, preferably in an init() function
+// or in a var definition.
+func NewEncoderID() EncoderID {
+	return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns an attribute encoder that encodes attributes in such
+// a way that each escaped attribute's key is followed by an equal sign and
+// then by an escaped attribute's value. All key-value pairs are separated by
+// a comma.
+//
+// Escaping is done by prepending a backslash before either a backslash, equal
+// sign or a comma.
+func DefaultEncoder() Encoder {
+	defaultEncoderOnce.Do(func() {
+		defaultEncoderInstance = &defaultAttrEncoder{
+			pool: sync.Pool{
+				New: func() interface{} {
+					return &bytes.Buffer{}
+				},
+			},
+		}
+	})
+	return defaultEncoderInstance
+}
+
+// Encode is a part of an implementation of the Encoder interface.
+func (d *defaultAttrEncoder) Encode(iter Iterator) string {
+	buf := d.pool.Get().(*bytes.Buffer)
+	defer d.pool.Put(buf)
+	buf.Reset()
+
+	for iter.Next() {
+		i, keyValue := iter.IndexedAttribute()
+		if i > 0 {
+			_, _ = buf.WriteRune(',')
+		}
+		copyAndEscape(buf, string(keyValue.Key))
+
+		_, _ = buf.WriteRune('=')
+
+		if keyValue.Value.Type() == STRING {
+			copyAndEscape(buf, keyValue.Value.AsString())
+		} else {
+			_, _ = buf.WriteString(keyValue.Value.Emit())
+		}
+	}
+	return buf.String()
+}
+
+// ID is a part of an implementation of the Encoder interface.
+func (*defaultAttrEncoder) ID() EncoderID {
+	return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+	for _, ch := range val {
+		switch ch {
+		case '=', ',', escapeChar:
+			_, _ = buf.WriteRune(escapeChar)
+		}
+		_, _ = buf.WriteRune(ch)
+	}
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`. Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool {
+	return id.value != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
new file mode 100644
index 000000000..638c213d5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. +func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. +func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 000000000..841b271fb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. 
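+//
+// A sketch of the suggested replacement pattern (variable names are
+// illustrative, not from the original source):
+//
+//	for iter.Next() {
+//		idx, kv := iter.IndexedAttribute()
+//		_, _ = idx, kv
+//	}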
+func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. +func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. +func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 000000000..0656a04e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). 
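+//
+// A sketch (the key name is illustrative):
+//
+//	kv := attribute.Key("feature.enabled").Bool(true)
+//	// equivalent to attribute.Bool("feature.enabled", true)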
+func (k Key) Bool(v bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: BoolValue(v),
+	}
+}
+
+// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- BoolSlice(name, value).
+func (k Key) BoolSlice(v []bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: BoolSliceValue(v),
+	}
+}
+
+// Int creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int(name, value).
+func (k Key) Int(v int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntValue(v),
+	}
+}
+
+// IntSlice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- IntSlice(name, value).
+func (k Key) IntSlice(v []int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntSliceValue(v),
+	}
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64Value(v),
+	}
+}
+
+// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64Slice(name, value).
+func (k Key) Int64Slice(v []int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64SliceValue(v),
+	}
+}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64Value(v),
+	}
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64SliceValue(v),
+	}
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringValue(v),
+	}
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringSliceValue(v),
+	}
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+	return len(k) != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 000000000..1ddf3ce05
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,86 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" +) + +// KeyValue holds a key and value pair. +type KeyValue struct { + Key Key + Value Value +} + +// Valid returns if kv is a valid OpenTelemetry attribute. +func (kv KeyValue) Valid() bool { + return kv.Key.Defined() && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. +func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. +func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. +func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. +func Stringer(k string, v fmt.Stringer) KeyValue { + return Key(k).String(v.String()) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go new file mode 100644 index 000000000..9f9303d4f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -0,0 +1,429 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "reflect" + "sort" + "sync" +) + +type ( + // Set is the representation for a distinct attribute set. It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. 
+	//
+	// This type supports the Equivalent method of comparison using values of
+	// type Distinct.
+	Set struct {
+		equivalent Distinct
+	}
+
+	// Distinct wraps a variable-size array of KeyValue, constructed with keys
+	// in sorted order. This can be used as a map key or for equality checking
+	// between Sets.
+	Distinct struct {
+		iface interface{}
+	}
+
+	// Sortable implements sort.Interface, used for sorting KeyValue. This is
+	// an exported type to support a memory optimization. A pointer to one of
+	// these is needed for the call to sort.Stable(), which the caller may
+	// provide in order to avoid an allocation. See NewSetWithSortable().
+	Sortable []KeyValue
+)
+
+var (
+	// keyValueType is used in computeDistinctReflect.
+	keyValueType = reflect.TypeOf(KeyValue{})
+
+	// emptySet is returned for empty attribute sets.
+	emptySet = &Set{
+		equivalent: Distinct{
+			iface: [0]KeyValue{},
+		},
+	}
+
+	// sortables is a pool of Sortables used to create Sets when a user does
+	// not provide one.
+	sortables = sync.Pool{
+		New: func() interface{} { return new(Sortable) },
+	}
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided so callers can avoid allocating empty sets.
+func EmptySet() *Set {
+	return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (d Distinct) reflectValue() reflect.Value {
+	return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+	return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+	if l == nil || !l.equivalent.Valid() {
+		return 0
+	}
+	return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return KeyValue{}, false
+	}
+	value := l.equivalent.reflectValue()
+
+	if idx >= 0 && idx < value.Len() {
+		// Note: The Go compiler successfully avoids an allocation for
+		// the interface{} conversion here:
+		return value.Index(idx).Interface().(KeyValue), true
+	}
+
+	return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
+func (l *Set) Value(k Key) (Value, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return Value{}, false
+	}
+	rValue := l.equivalent.reflectValue()
+	vlen := rValue.Len()
+
+	idx := sort.Search(vlen, func(idx int) bool {
+		return rValue.Index(idx).Interface().(KeyValue).Key >= k
+	})
+	if idx >= vlen {
+		return Value{}, false
+	}
+	keyValue := rValue.Index(idx).Interface().(KeyValue)
+	if k == keyValue.Key {
+		return keyValue.Value, true
+	}
+	return Value{}, false
+}
+
+// HasValue tests whether a key is defined in this set.
+func (l *Set) HasValue(k Key) bool {
+	if l == nil {
+		return false
+	}
+	_, ok := l.Value(k)
+	return ok
+}
+
+// Iter returns an iterator for visiting the attributes in this set.
+func (l *Set) Iter() Iterator {
+	return Iterator{
+		storage: l,
+		idx:     -1,
+	}
+}
+
+// ToSlice returns the set of attributes belonging to this set, sorted, where
+// keys appear no more than once.
+func (l *Set) ToSlice() []KeyValue {
+	iter := l.Iter()
+	return iter.ToSlice()
+}
+
+// Equivalent returns a value that may be used as a map key. The Distinct type
+// guarantees that the result will equal the equivalent Distinct value of any
+// attribute set with the same elements as this, where sets are made unique by
+// choosing the last value in the input for any given key.
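+//
+// A sketch (keys and values are illustrative): element order at construction
+// does not matter, so these two sets share one Distinct value:
+//
+//	s1 := attribute.NewSet(attribute.String("a", "x"), attribute.Int("b", 1))
+//	s2 := attribute.NewSet(attribute.Int("b", 1), attribute.String("a", "x"))
+//	_ = s1.Equivalent() == s2.Equivalent() // true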
+func (l *Set) Equivalent() Distinct {
+	if l == nil || !l.equivalent.Valid() {
+		return emptySet.equivalent
+	}
+	return l.equivalent
+}
+
+// Equals returns true if the argument set is equivalent to this set.
+func (l *Set) Equals(o *Set) bool {
+	return l.Equivalent() == o.Equivalent()
+}
+
+// Encoded returns the encoded form of this set, according to encoder.
+func (l *Set) Encoded(encoder Encoder) string {
+	if l == nil || encoder == nil {
+		return ""
+	}
+
+	return encoder.Encode(l.Iter())
+}
+
+func empty() Set {
+	return Set{
+		equivalent: emptySet.equivalent,
+	}
+}
+
+// NewSet returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// Except for empty sets, this method adds an additional allocation compared
+// with calls that include a Sortable.
+func NewSet(kvs ...KeyValue) Set {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty()
+	}
+	srt := sortables.Get().(*Sortable)
+	s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
+	sortables.Put(srt)
+	return s
+}
+
+// NewSetWithSortable returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Sortable option as a memory optimization.
+func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty()
+	}
+	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+	return s
+}
+
+// NewSetWithFiltered returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Filter to include/exclude attribute keys from the
+// return value. Excluded keys are returned as a slice of attribute values.
+func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+	srt := sortables.Get().(*Sortable)
+	s, filtered := NewSetWithSortableFiltered(kvs, srt, filter)
+	sortables.Put(srt)
+	return s, filtered
+}
+
+// NewSetWithSortableFiltered returns a new Set.
+//
+// Duplicate keys are eliminated by taking the last value. This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+// - Last-value-wins semantics
+// - Caller sees the reordering, but doesn't lose values
+// - Repeated calls preserve last-value-wins semantics.
+//
+// Note that methods are defined on Set, although this returns Set. Callers
+// can avoid memory allocations by:
+//
+// - allocating a Sortable for use as a temporary in this method
+// - allocating a Set for storing the return value of this constructor.
+//
+// The result maintains a cache of encoded attributes, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second []KeyValue return value is a list of attributes that were
+// excluded by the Filter (if non-nil).
+func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+
+	*tmp = kvs
+
+	// Stable sort so the following de-duplication can implement
+	// last-value-wins semantics.
+	sort.Stable(tmp)
+
+	*tmp = nil
+
+	position := len(kvs) - 1
+	offset := position - 1
+
+	// The requirements stated above require that the stable
+	// result be placed at the end of the input slice, while
+	// overwritten values are swapped to the beginning.
+	//
+	// De-duplicate with last-value-wins semantics. Preserve
+	// duplicate values at the beginning of the input slice.
+	for ; offset >= 0; offset-- {
+		if kvs[offset].Key == kvs[position].Key {
+			continue
+		}
+		position--
+		kvs[offset], kvs[position] = kvs[position], kvs[offset]
+	}
+	if filter != nil {
+		return filterSet(kvs[position:], filter)
+	}
+	return Set{
+		equivalent: computeDistinct(kvs[position:]),
+	}, nil
+}
+
+// filterSet reorders kvs so that included keys are contiguous at the end of
+// the slice, while excluded keys precede the included keys.
+func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+	var excluded []KeyValue
+
+	// Move attributes that do not match the filter so they're adjacent before
+	// calling computeDistinct().
+	distinctPosition := len(kvs)
+
+	// Swap indistinct keys forward and distinct keys toward the
+	// end of the slice.
+	offset := len(kvs) - 1
+	for ; offset >= 0; offset-- {
+		if filter(kvs[offset]) {
+			distinctPosition--
+			kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
+			continue
+		}
+	}
+	excluded = kvs[:distinctPosition]
+
+	return Set{
+		equivalent: computeDistinct(kvs[distinctPosition:]),
+	}, excluded
+}
+
+// Filter returns a filtered copy of this Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+func (l *Set) Filter(re Filter) (Set, []KeyValue) {
+	if re == nil {
+		return Set{
+			equivalent: l.equivalent,
+		}, nil
+	}
+
+	// Note: This could be refactored to avoid the temporary slice
+	// allocation, if it proves to be expensive.
+	return filterSet(l.ToSlice(), re)
+}
+
+// computeDistinct returns a Distinct using either the fixed- or
+// reflect-oriented code path, depending on the size of the input. The input
+// slice is assumed to already be sorted and de-duplicated.
+func computeDistinct(kvs []KeyValue) Distinct {
+	iface := computeDistinctFixed(kvs)
+	if iface == nil {
+		iface = computeDistinctReflect(kvs)
+	}
+	return Distinct{
+		iface: iface,
+	}
+}
+
+// computeDistinctFixed computes a Distinct for small slices. It returns nil
+// if the input is too large for this code path.
+func computeDistinctFixed(kvs []KeyValue) interface{} {
+	switch len(kvs) {
+	case 1:
+		ptr := new([1]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 2:
+		ptr := new([2]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 3:
+		ptr := new([3]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 4:
+		ptr := new([4]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 5:
+		ptr := new([5]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 6:
+		ptr := new([6]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 7:
+		ptr := new([7]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 8:
+		ptr := new([8]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 9:
+		ptr := new([9]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 10:
+		ptr := new([10]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	default:
+		return nil
+	}
+}
+
+// computeDistinctReflect computes a Distinct using reflection, working for
+// any size input.
+func computeDistinctReflect(kvs []KeyValue) interface{} {
+	at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
+	for i, keyValue := range kvs {
+		*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
+	}
+	return at.Interface()
+}
+
+// MarshalJSON returns the JSON encoding of the Set.
+func (l *Set) MarshalJSON() ([]byte, error) {
+	return json.Marshal(l.equivalent.iface)
+}
+
+// MarshalLog is the marshaling function used by the logging system to
+// represent this Set.
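+//
+// A sketch of the returned shape (keys and values illustrative): a set
+// holding {svc=api, n=3} marshals to map[string]string{"svc": "api", "n": "3"}.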
+func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. +func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 000000000..e584b2477 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 000000000..cb21dd5c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,270 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. + INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. 
+	FLOAT64SLICE
+	// STRINGSLICE is a slice of strings Type Value.
+	STRINGSLICE
+)
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+	return Value{
+		vtype:   BOOL,
+		numeric: internal.BoolToRaw(v),
+	}
+}
+
+// BoolSliceValue creates a BOOLSLICE Value.
+func BoolSliceValue(v []bool) Value {
+	return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
+}
+
+// IntValue creates an INT64 Value.
+func IntValue(v int) Value {
+	return Int64Value(int64(v))
+}
+
+// IntSliceValue creates an INT64SLICE Value from a slice of int.
+func IntSliceValue(v []int) Value {
+	var int64Val int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+	for i, val := range v {
+		cp.Elem().Index(i).SetInt(int64(val))
+	}
+	return Value{
+		vtype: INT64SLICE,
+		slice: cp.Elem().Interface(),
+	}
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+	return Value{
+		vtype:   INT64,
+		numeric: internal.Int64ToRaw(v),
+	}
+}
+
+// Int64SliceValue creates an INT64SLICE Value.
+func Int64SliceValue(v []int64) Value {
+	return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+	return Value{
+		vtype:   FLOAT64,
+		numeric: internal.Float64ToRaw(v),
+	}
+}
+
+// Float64SliceValue creates a FLOAT64SLICE Value.
+func Float64SliceValue(v []float64) Value {
+	return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+	return Value{
+		vtype:    STRING,
+		stringly: v,
+	}
+}
+
+// StringSliceValue creates a STRINGSLICE Value.
+func StringSliceValue(v []string) Value {
+	return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
+}
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+	return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+	return internal.RawToBool(v.numeric)
+}
+
+// AsBoolSlice returns the []bool value. Make sure that the Value's type is
+// BOOLSLICE.
+func (v Value) AsBoolSlice() []bool {
+	if v.vtype != BOOLSLICE {
+		return nil
+	}
+	return v.asBoolSlice()
+}
+
+func (v Value) asBoolSlice() []bool {
+	return attribute.AsBoolSlice(v.slice)
+}
+
+// AsInt64 returns the int64 value. Make sure that the Value's type is
+// INT64.
+func (v Value) AsInt64() int64 {
+	return internal.RawToInt64(v.numeric)
+}
+
+// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
+// INT64SLICE.
+func (v Value) AsInt64Slice() []int64 {
+	if v.vtype != INT64SLICE {
+		return nil
+	}
+	return v.asInt64Slice()
+}
+
+func (v Value) asInt64Slice() []int64 {
+	return attribute.AsInt64Slice(v.slice)
+}
+
+// AsFloat64 returns the float64 value. Make sure that the Value's
+// type is FLOAT64.
+func (v Value) AsFloat64() float64 {
+	return internal.RawToFloat64(v.numeric)
+}
+
+// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
+// FLOAT64SLICE.
+func (v Value) AsFloat64Slice() []float64 {
+	if v.vtype != FLOAT64SLICE {
+		return nil
+	}
+	return v.asFloat64Slice()
+}
+
+func (v Value) asFloat64Slice() []float64 {
+	return attribute.AsFloat64Slice(v.slice)
+}
+
+// AsString returns the string value. Make sure that the Value's type
+// is STRING.
+func (v Value) AsString() string {
+	return v.stringly
+}
+
+// AsStringSlice returns the []string value. Make sure that the Value's type is
+// STRINGSLICE.
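+//
+// A sketch (values illustrative):
+//
+//	v := attribute.StringSliceValue([]string{"a", "b"})
+//	_ = v.AsStringSlice() // []string{"a", "b"}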
+func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. +func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + return fmt.Sprint(v.asInt64Slice()) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + return fmt.Sprint(v.asFloat64Slice()) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + return fmt.Sprint(v.asStringSlice()) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 000000000..9e6b3b7b5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,552 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "strings" + + "go.opentelemetry.io/otel/internal/baggage" +) + +const ( + maxMembers = 180 + maxBytesPerMembers = 4096 + maxBytesPerBaggageString = 8192 + + listDelimiter = "," + keyValueDelimiter = "=" + propertyDelimiter = ";" + + keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` + valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` + keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` +) + +var ( + keyRe = regexp.MustCompile(`^` + keyDef + `$`) + valueRe = regexp.MustCompile(`^` + valueDef + `$`) + propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) +) + +var ( + errInvalidKey = errors.New("invalid key") + errInvalidValue = errors.New("invalid value") + errInvalidProperty = errors.New("invalid baggage list-member property") + errInvalidMember = errors.New("invalid baggage list-member") + errMemberNumber = errors.New("too many list-members in baggage-string") + errMemberBytes = errors.New("list-member too large") + errBaggageBytes = errors.New("baggage-string too large") +) + +// Property is an additional metadata entry for a baggage list-member. +type Property struct { + key, value string + + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + hasValue bool +} + +// NewKeyProperty returns a new Property for key. +// +// If key is invalid, an error will be returned. +func NewKeyProperty(key string) (Property, error) { + if !keyRe.MatchString(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + p := Property{key: key} + return p, nil +} + +// NewKeyValueProperty returns a new Property for key with value. +// +// If key or value are invalid, an error will be returned. +func NewKeyValueProperty(key, value string) (Property, error) { + if !keyRe.MatchString(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + p := Property{ + key: key, + value: value, + hasValue: true, + } + return p, nil +} + +func newInvalidProperty() Property { + return Property{} +} + +// parseProperty attempts to decode a Property from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +func parseProperty(property string) (Property, error) { + if property == "" { + return newInvalidProperty(), nil + } + + match := propertyRe.FindStringSubmatch(property) + if len(match) != 4 { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) + } + + var p Property + if match[1] != "" { + p.key = match[1] + } else { + p.key = match[2] + p.value = match[3] + p.hasValue = true + } + + return p, nil +} + +// validate ensures p conforms to the W3C Baggage specification, returning an +// error otherwise. +func (p Property) validate() error { + errFunc := func(err error) error { + return fmt.Errorf("invalid property: %w", err) + } + + if !keyRe.MatchString(p.key) { + return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) + } + if p.hasValue && !valueRe.MatchString(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } + if !p.hasValue && p.value != "" { + return errFunc(errors.New("inconsistent value")) + } + return nil +} + +// Key returns the Property key. 
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. The returned boolean reports whether the
+// value is set: it is false when the Property has no value, in which case the
+// returned string is empty. This distinguishes an unset value from a value
+// that was set to the empty string.
+func (p Property) Value() (string, bool) {
+	return p.value, p.hasValue
+}
+
+// String encodes Property into a string compliant with the W3C Baggage
+// specification.
+func (p Property) String() string {
+	if p.hasValue {
+		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
+	}
+	return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+	if len(iProps) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(iProps))
+	for i, p := range iProps {
+		props[i] = Property{
+			key:      p.Key,
+			value:    p.Value,
+			hasValue: p.HasValue,
+		}
+	}
+	return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+	if len(p) == 0 {
+		return nil
+	}
+
+	iProps := make([]baggage.Property, len(p))
+	for i, prop := range p {
+		iProps[i] = baggage.Property{
+			Key:      prop.key,
+			Value:    prop.value,
+			HasValue: prop.hasValue,
+		}
+	}
+	return iProps
+}
+
+func (p properties) Copy() properties {
+	if len(p) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(p))
+	copy(props, p)
+	return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+	for _, prop := range p {
+		if err := prop.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// String encodes properties into a string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+	props := make([]string, len(p))
+	for i, prop := range p {
+		props[i] = prop.String()
+	}
+	return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+	key, value string
+	properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments. The key will be
+// used directly while the value will be URL decoded after validation. An error
+// is returned if the created Member would be invalid according to the W3C
+// Baggage specification.
+func NewMember(key, value string, props ...Property) (Member, error) {
+	m := Member{
+		key:        key,
+		value:      value,
+		properties: properties(props).Copy(),
+		hasData:    true,
+	}
+	if err := m.validate(); err != nil {
+		return newInvalidMember(), err
+	}
+	decodedValue, err := url.QueryUnescape(value)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	m.value = decodedValue
+	return m, nil
+}
+
+func newInvalidMember() Member {
+	return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+	if n := len(member); n > maxBytesPerMembers {
+		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+	}
+
+	var (
+		key, value string
+		props      properties
+	)
+
+	keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+	if found {
+		// Parse the member properties.
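+		// For example (an illustrative input, not from the spec): in
+		// `key=val;p1;p2=v2`, keyValue is `key=val` and properties is
+		// `p1;p2=v2`, which splits into the props `p1` and `p2=v2`.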
+ for _, pStr := range strings.Split(properties, propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } + } + // Parse the member key/value pair. + + // Take into account a value can contain equal signs (=). + k, v, found := strings.Cut(keyValue, keyValueDelimiter) + if !found { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) + } + // "Leading and trailing whitespaces are allowed but MUST be trimmed + // when converting the header into a data structure." + key = strings.TrimSpace(k) + var err error + value, err = url.QueryUnescape(strings.TrimSpace(v)) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", err, value) + } + if !keyRe.MatchString(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + return Member{key: key, value: value, properties: props, hasData: true}, nil +} + +// validate ensures m conforms to the W3C Baggage specification. +// A key is just an ASCII string, but a value must be URL encoded UTF-8, +// returning an error otherwise. +func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) + } + + if !keyRe.MatchString(m.key) { + return fmt.Errorf("%w: %q", errInvalidKey, m.key) + } + if !valueRe.MatchString(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } + return m.properties.validate() +} + +// Key returns the Member key. +func (m Member) Key() string { return m.key } + +// Value returns the Member value. +func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a string compliant with the W3C Baggage +// specification. +func (m Member) String() string { + // A key is just an ASCII string, but a value is URL encoded UTF-8. + s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) + if len(m.properties) > 0 { + s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if it results in a +// Baggage exceeding limits set in that specification. +// +// It expects all the provided members to have already been validated. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if !m.hasData { + return Baggage{}, errInvalidMember + } + + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplication. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. 
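+//
+// Illustrative usage (a sketch): parsing a header value and reading a
+// list-member back out:
+//
+//	b, err := Parse("userId=alice,isProduction=false;ttl=3")
+//	if err == nil {
+//		_ = b.Member("userId").Value() // "alice"
+//	}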
+//
+// If there are duplicate list-members contained in baggage, the last one
+// defined (reading left-to-right) will be the only one kept. This diverges
+// from the W3C Baggage specification which allows duplicate list-members, but
+// conforms to the OpenTelemetry Baggage specification.
+func Parse(bStr string) (Baggage, error) {
+	if bStr == "" {
+		return Baggage{}, nil
+	}
+
+	if n := len(bStr); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	b := make(baggage.List)
+	for _, memberStr := range strings.Split(bStr, listDelimiter) {
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return Baggage{}, err
+		}
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// OpenTelemetry does not allow for duplicate list-members, but the W3C
+	// specification does. Now that we have deduplicated, ensure the baggage
+	// does not exceed list-member limits.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	return Baggage{b}, nil
+}
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key, the returned Member
+// will be a zero-value Member.
+// The returned member is not validated, as we assume the validation happened
+// when it was added to the Baggage.
+func (b Baggage) Member(key string) Member {
+	v, ok := b.list[key]
+	if !ok {
+		// We do not need to distinguish between a missing list-member and a
+		// zero-valued one: a zero-valued Member is invalid according to the
+		// W3C Baggage specification (it has an empty key).
+		return newInvalidMember()
+	}
+
+	return Member{
+		key:        key,
+		value:      v.Value,
+		properties: fromInternalProperties(v.Properties),
+		hasData:    true,
+	}
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members does not have significance.
+//
+// The returned members are not validated, as we assume the validation happened
+// when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+	if len(b.list) == 0 {
+		return nil
+	}
+
+	members := make([]Member, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+			hasData:    true,
+		})
+	}
+	return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+	if !member.hasData {
+		return b, errInvalidMember
+	}
+
+	n := len(b.list)
+	if _, ok := b.list[member.key]; !ok {
+		n++
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		// Do not copy if we are just going to overwrite.
+		if k == member.key {
+			continue
+		}
+		list[k] = v
+	}
+
+	list[member.key] = baggage.Item{
+		Value:      member.value,
+		Properties: member.properties.asInternal(),
+	}
+
+	return Baggage{list: list}, nil
+}
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
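+//
+// Together with SetMember this gives an immutable update pattern; an
+// illustrative sketch:
+//
+//	m, _ := NewMember("userId", "alice")
+//	b, _ = b.SetMember(m) // returns a new Baggage; the receiver is unchanged
+//	b = b.DeleteMember("userId")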
+func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a string compliant with the W3C Baggage +// specification. The returned string will be invalid if the Baggage contains +// any invalid list-members. +func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String()) + } + return strings.Join(members, listDelimiter) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 000000000..24b34b756 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. + return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go new file mode 100644 index 000000000..4545100df --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides functionality for storing and retrieving +baggage items in Go context. 
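+
+An illustrative round trip (a sketch):
+
+	b, _ := Parse("userId=alice")
+	ctx := ContextWithBaggage(context.Background(), b)
+	_ = FromContext(ctx).Member("userId").Value() // "alice"
+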
For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 000000000..587ebae4e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+const (
+	// Unset is the default status code.
+	Unset Code = 0
+
+	// Error indicates the operation contains an error.
+	//
+	// NOTE: The error code in OTLP is 2.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Error Code = 1
+
+	// Ok indicates the operation has been validated by an application
+	// developer or operator to have completed successfully, or to contain
+	// no error.
+	//
+	// NOTE: The Ok code in OTLP is 1.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Ok Code = 2
+
+	maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
+type Code uint32
+
+var codeToStr = map[Code]string{
+	Unset: "Unset",
+	Error: "Error",
+	Ok:    "Ok",
+}
+
+var strToCode = map[string]Code{
+	`"Unset"`: Unset,
+	`"Error"`: Error,
+	`"Ok"`:    Ok,
+}
+
+// String returns the Code as a string.
+func (c Code) String() string {
+	return codeToStr[c]
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+//
+// This is based on the functionality in the gRPC codes package:
+// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+	if string(b) == "null" {
+		return nil
+	}
+	if c == nil {
+		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+	}
+
+	var x interface{}
+	if err := json.Unmarshal(b, &x); err != nil {
+		return err
+	}
+	switch x.(type) {
+	case string:
+		if jc, ok := strToCode[string(b)]; ok {
+			*c = jc
+			return nil
+		}
+		return fmt.Errorf("invalid code: %q", string(b))
+	case float64:
+		if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+			if ci >= maxCode {
+				return fmt.Errorf("invalid code: %q", ci)
+			}
+
+			*c = Code(ci)
+			return nil
+		}
+		return fmt.Errorf("invalid code: %q", string(b))
+	default:
+		return fmt.Errorf("invalid code: %q", string(b))
+	}
+}
+
+// MarshalJSON returns c as the JSON encoding of c.
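+//
+// Together with UnmarshalJSON this round-trips the symbolic name rather than
+// the numeric value; an illustrative sketch:
+//
+//	c := Ok
+//	data, _ := json.Marshal(&c) // []byte(`"Ok"`)
+//	var got Code
+//	_ = json.Unmarshal(data, &got) // got == Ok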
+func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 000000000..4e328fbb4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 000000000..daa36c89d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. 
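+
+As an illustrative sketch (ctx is assumed to be an existing context.Context),
+instrumentation obtains named handles from this package and works against the
+no-op defaults until an SDK is registered:
+
+	tracer := otel.Tracer("example.com/hypothetical/instrumentation")
+	ctx, span := tracer.Start(ctx, "operation")
+	defer span.End()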
+*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 000000000..72fad8541 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 000000000..9a58fb1d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +top_dir='.' +if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... 
| grep '^main|' | cut -f 2- -d '|')
+	cd "${p}"
+done
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
new file mode 100644
index 000000000..4115fe3bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+	// Compile-time check global.ErrDelegator implements ErrorHandler.
+	_ ErrorHandler = (*global.ErrDelegator)(nil)
+	// Compile-time check global.ErrLogger implements ErrorHandler.
+	_ ErrorHandler = (*global.ErrLogger)(nil)
+)
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandler returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called all ErrorHandler previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
+
+// Handle is a convenience function for ErrorHandler().Handle(err).
+func Handle(err error) { global.Handle(err) }
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
new file mode 100644
index 000000000..622c3ee3f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -0,0 +1,111 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package attribute provides several helper functions for some commonly used
+logic of processing attributes.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+	"reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements
+// as the slice.
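+//
+// A fixed-size array, unlike a slice, is comparable, which is what lets the
+// returned value be stored inside comparable attribute values; an
+// illustrative sketch:
+//
+//	v := BoolSliceValue([]bool{true, false})     // dynamic type [2]bool
+//	_ = v == BoolSliceValue([]bool{true, false}) // true: arrays compare by value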
+func BoolSliceValue(v []bool) interface{} {
+	var zero bool
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
+	return cp.Elem().Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same
+// elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+	var zero int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+	return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same
+// elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+	var zero float64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+	return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same
+// elements as the slice.
+func StringSliceValue(v []string) interface{} {
+	var zero string
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+	return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as
+// the array.
+func AsBoolSlice(v interface{}) []bool {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero bool
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements
+// as the array.
+func AsInt64Slice(v interface{}) []int64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero int64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements
+// as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero float64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements
+// as the array.
+func AsStringSlice(v interface{}) []string {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero string
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]string)
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
new file mode 100644
index 000000000..b96e5408e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 000000000..4469700d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. 
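+//
+// An illustrative sketch (not upstream documentation): a bridge could
+// observe every read of the baggage list:
+//
+//	ctx = ContextWithGetHook(ctx, func(ctx context.Context, l List) List {
+//		// synchronize external (e.g. OpenTracing) state here
+//		return l
+//	})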
+// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. +func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 000000000..f532f07e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal // import "go.opentelemetry.io/otel/internal" + +//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go +//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go +//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go + +//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go +//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go +//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go +//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go +//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go new file mode 100644 index 000000000..5e9b83047 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" +) + +var ( + // GlobalErrorHandler provides an ErrorHandler that can be used + // throughout an OpenTelemetry instrumented project. When a user + // specified ErrorHandler is registered (`SetErrorHandler`) all calls to + // `Handle` and will be delegated to the registered ErrorHandler. + GlobalErrorHandler = defaultErrorHandler() + + // Compile-time check that delegator implements ErrorHandler. + _ ErrorHandler = (*ErrDelegator)(nil) + // Compile-time check that errLogger implements ErrorHandler. + _ ErrorHandler = (*ErrLogger)(nil) +) + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. 
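+	//
+	// An illustrative implementation (a sketch, not part of this package)
+	// could simply count the errors it is handed:
+	//
+	//	type countingHandler struct{ n atomic.Int64 }
+	//
+	//	func (h *countingHandler) Handle(error) { h.n.Add(1) }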
+ Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +func (d *ErrDelegator) Handle(err error) { + d.getDelegate().Handle(err) +} + +func (d *ErrDelegator) getDelegate() ErrorHandler { + return *d.delegate.Load() +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} + +func defaultErrorHandler() *ErrDelegator { + d := &ErrDelegator{} + d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d +} + +// ErrLogger logs errors if no delegate is set, otherwise they are delegated. +type ErrLogger struct { + l *log.Logger +} + +// Handle logs err if no delegate is set, otherwise it is delegated. +func (h *ErrLogger) Handle(err error) { + h.l.Print(err) +} + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return GlobalErrorHandler +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + GlobalErrorHandler.setDelegate(h) +} + +// Handle is a convenience function for ErrorHandler().Handle(err). +func Handle(err error) { + GetErrorHandler().Handle(err) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go new file mode 100644 index 000000000..a33eded87 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -0,0 +1,359 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value //metric.Float64ObservableCounter +} + +var _ unwrapper = (*afCounter)(nil) +var _ metric.Float64ObservableCounter = (*afCounter)(nil) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) 
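+	// On success the SDK instrument is published below via the atomic
+	// delegate, so a concurrent Unwrap or observation sees either the
+	// placeholder state or the fully constructed instrument, never a
+	// partially initialized one.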
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value //metric.Float64ObservableUpDownCounter +} + +var _ unwrapper = (*afUpDownCounter)(nil) +var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value //metric.Float64ObservableGauge +} + +var _ unwrapper = (*afGauge)(nil) +var _ metric.Float64ObservableGauge = (*afGauge)(nil) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value //metric.Int64ObservableCounter +} + +var _ unwrapper = (*aiCounter)(nil) +var _ metric.Int64ObservableCounter = (*aiCounter)(nil) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value //metric.Int64ObservableUpDownCounter +} + +var _ unwrapper = (*aiUpDownCounter)(nil) +var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value //metric.Int64ObservableGauge +} + +var _ unwrapper = (*aiGauge)(nil) +var _ metric.Int64ObservableGauge = (*aiGauge)(nil) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) 
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value //metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value //metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value //metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) + } +} + +type siCounter struct { + embedded.Int64Counter + + name string + opts []metric.Int64CounterOption + + delegate atomic.Value //metric.Int64Counter +} + +var _ metric.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Counter).Add(ctx, x, opts...) + } +} + +type siUpDownCounter struct { + embedded.Int64UpDownCounter + + name string + opts []metric.Int64UpDownCounterOption + + delegate atomic.Value //metric.Int64UpDownCounter +} + +var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) 
+ } +} + +type siHistogram struct { + embedded.Int64Histogram + + name string + opts []metric.Int64HistogramOption + + delegate atomic.Value //metric.Int64Histogram +} + +var _ metric.Int64Histogram = (*siHistogram)(nil) + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Int64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Histogram).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go new file mode 100644 index 000000000..c6f305a2b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" +) + +// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// +// The default logger uses stdr which is backed by the standard `log.Logger` +// interface. This logger will only show messages at the Error Level. +var globalLogger atomic.Pointer[logr.Logger] + +func init() { + SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +} + +// SetLogger overrides the globalLogger with l. +// +// To see Warn messages use a logger with `l.V(1).Enabled() == true` +// To see Info messages use a logger with `l.V(4).Enabled() == true` +// To see Debug messages use a logger with `l.V(8).Enabled() == true`. +func SetLogger(l logr.Logger) { + globalLogger.Store(&l) +} + +func getLogger() logr.Logger { + return *globalLogger.Load() +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less than 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + getLogger().V(4).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + getLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + getLogger().V(8).Info(msg, keysAndValues...) +} + +// Warn prints messages about warnings in the API or SDK. +// Not an error but is likely more important than an informational event. +func Warn(msg string, keysAndValues ...interface{}) { + getLogger().V(1).Info(msg, keysAndValues...) 
+} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 000000000..0097db478 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) 
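+	// Storing the real meter first lets new instrument requests bypass the
+	// placeholder path; the code below then replays every instrument and
+	// callback that was created before the SDK was installed.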
+ m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for e := m.registry.Front(); e != nil; e = e.Next() { + r := e.Value.(*registration) + r.setDelegate(meter) + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(f, insts...) + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + reg := ®istration{instruments: insts, function: f} + e := m.registry.PushBack(reg) + reg.unreg = func() error { + m.mtx.Lock() + _ = m.registry.Remove(e) + m.mtx.Unlock() + return nil + } + return reg, nil +} + +type wrapped interface { + unwrap() metric.Observable +} + +func unwrapInstruments(instruments []metric.Observable) []metric.Observable { + out := make([]metric.Observable, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(wrapped); ok { + out = append(out, in.unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +type registration struct { + embedded.Registration + + instruments []metric.Observable + function metric.Callback + + unreg func() error + unregMu sync.Mutex +} + +func (c *registration) setDelegate(m metric.Meter) { + insts := unwrapInstruments(c.instruments) + + c.unregMu.Lock() + defer c.unregMu.Unlock() + + if c.unreg == nil { + // Unregister already called. + return + } + + reg, err := m.RegisterCallback(c.function, insts...) + if err != nil { + GetErrorHandler().Handle(err) + } + + c.unreg = reg.Unregister +} + +func (c *registration) Unregister() error { + c.unregMu.Lock() + defer c.unregMu.Unlock() + if c.unreg == nil { + // Unregister already called. 
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 000000000..06bac35c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set; otherwise it defaults to delegating the
+// calls to the default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+	return &textMapPropagator{
+		noop: propagation.NewCompositeTextMapPropagator(),
+	}
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+	if delegate == nil {
+		return
+	}
+
+	p.mtx.Lock()
+	p.once.Do(func() { p.delegate = delegate })
+	p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	if p.delegate != nil {
+		return p.delegate
+	}
+	return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+	p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+	return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+	return p.effectiveDelegate().Fields()
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 000000000..7985005bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type (
+	tracerProviderHolder struct {
+		tp trace.TracerProvider
+	}
+
+	propagatorsHolder struct {
+		tm propagation.TextMapPropagator
+	}
+
+	meterProviderHolder struct {
+		mp metric.MeterProvider
+	}
+)
+
+var (
+	globalTracer        = defaultTracerValue()
+	globalPropagators   = defaultPropagatorsValue()
+	globalMeterProvider = defaultMeterProvider()
+
+	delegateTraceOnce             sync.Once
+	delegateTextMapPropagatorOnce sync.Once
+	delegateMeterOnce             sync.Once
+)
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+	return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+	current := TracerProvider()
+
+	if _, cOk := current.(*tracerProvider); cOk {
+		if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+			// Do not assign the default delegating TracerProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in tracer provider"),
+				"Setting tracer provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateTraceOnce.Do(func() {
+		if def, ok := current.(*tracerProvider); ok {
+			def.setDelegate(tp)
+		}
+	})
+	globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+	return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+	current := TextMapPropagator()
+
+	if _, cOk := current.(*textMapPropagator); cOk {
+		if _, pOk := p.(*textMapPropagator); pOk && current == p {
+			// Do not assign the default delegating TextMapPropagator to
+			// delegate to itself.
+			Error(
+				errors.New("no delegate configured in text map propagator"),
+				"Setting text map propagator to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	// For the textMapPropagator already returned by TextMapPropagator,
+	// delegate to p.
+	delegateTextMapPropagatorOnce.Do(func() {
+		if def, ok := current.(*textMapPropagator); ok {
+			def.SetDelegate(p)
+		}
+	})
+	// Return p when subsequent calls to TextMapPropagator are made.
+	globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+	return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	current := MeterProvider()
+	if _, cOk := current.(*meterProvider); cOk {
+		if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+			// Do not assign the default delegating MeterProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in meter provider"),
+				"Setting meter provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateMeterOnce.Do(func() {
+		if def, ok := current.(*meterProvider); ok {
+			def.setDelegate(mp)
+		}
+	})
+	globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultTracerValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+	return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+	return v
+}
+
+func defaultMeterProvider() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(meterProviderHolder{mp: &meterProvider{}})
+	return v
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 000000000..5f008d098
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,192 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior. Meaning, the Span
+remains a no-op Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+	mtx      sync.Mutex
+	tracers  map[il]*tracer
+	delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.tracers) == 0 {
+		return
+	}
+
+	for _, t := range p.tracers {
+		t.setDelegate(provider)
+	}
+
+	p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Tracer(name, opts...)
+	}
+
+	// At this moment it is guaranteed that no SDK is installed; save the
+	// tracer in the tracers map.
+
+	c := trace.NewTracerConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+	}
+
+	if p.tracers == nil {
+		p.tracers = make(map[il]*tracer)
+	}
+
+	if val, ok := p.tracers[key]; ok {
+		return val
+	}
+
+	t := &tracer{name: name, opts: opts, provider: p}
+	p.tracers[key] = t
+	return t
+}
+
+type il struct {
+	name    string
+	version string
+}
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+	name     string
+	opts     []trace.TracerOption
+	provider *tracerProvider
+
+	delegate atomic.Value
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{}
+
+// setDelegate configures t to delegate all Tracer functionality to Tracers
+// created by provider.
+//
+// All subsequent calls to the Tracer methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (t *tracer) setDelegate(provider trace.TracerProvider) {
+	t.delegate.Store(provider.Tracer(t.name, t.opts...))
+}
+
+// Start implements trace.Tracer by forwarding the call to t.delegate if
+// set; otherwise it forwards the call to a NoopTracer.
+func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	delegate := t.delegate.Load()
+	if delegate != nil {
+		return delegate.(trace.Tracer).Start(ctx, name, opts...)
+	}
+
+	s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
+	ctx = trace.ContextWithSpan(ctx, s)
+	return ctx, s
+}
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+	sc     trace.SpanContext
+	tracer *tracer
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 000000000..e07e79400 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + return int64(r) +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + return (*float64)(unsafe.Pointer(r)) +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + return (*int64)(unsafe.Pointer(r)) +} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 000000000..c4f8acd5d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "github.com/go-logr/logr" + + "go.opentelemetry.io/otel/internal/global" +) + +// SetLogger configures the logger used internally to opentelemetry. 
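+//
+// A minimal usage sketch, assuming github.com/go-logr/stdr as the logr
+// implementation (any logr.Logger works the same way; the prefix string is
+// only illustrative):
+//
+//	import (
+//		"log"
+//		"os"
+//
+//		"github.com/go-logr/stdr"
+//
+//		"go.opentelemetry.io/otel"
+//	)
+//
+//	func init() {
+//		// Route OpenTelemetry's internal logs to the standard library logger.
+//		otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
+//	}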
+func SetLogger(logger logr.Logger) {
+	global.SetLogger(logger)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 000000000..f95517195
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, then an implementation-defined default name will be
+// used instead.
+//
+// If this is called before a global MeterProvider is registered, the returned
+// Meter will be a No-op implementation of a Meter. When a global MeterProvider
+// is registered for the first time, the returned Meter, and all the
+// instruments it has created or will create, are recreated automatically from
+// the new MeterProvider.
+//
+// This is short for GetMeterProvider().Meter(name).
+func Meter(name string, opts ...metric.MeterOption) metric.Meter {
+	return GetMeterProvider().Meter(name, opts...)
+}
+
+// GetMeterProvider returns the registered global meter provider.
+//
+// If no global MeterProvider has been registered, a No-op MeterProvider
+// implementation is returned. When a global MeterProvider is registered for
+// the first time, the returned MeterProvider, and all the Meters it has
+// created or will create, are recreated automatically from the new
+// MeterProvider.
+func GetMeterProvider() metric.MeterProvider {
+	return global.MeterProvider()
+}
+
+// SetMeterProvider registers mp as the global MeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	global.SetMeterProvider(mp)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
new file mode 100644
index 000000000..072baa8e8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -0,0 +1,271 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+	Observable
+
+	float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableCounter
+
+	Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+	var config Float64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+	applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableUpDownCounter
+
+	Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableUpDownCounterConfig returns a new
+// [Float64ObservableUpDownCounterConfig] with all opts applied.
+func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
+	var config Float64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableUpDownCounterOption applies options to a
+// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableUpDownCounterOption.
+type Float64ObservableUpDownCounterOption interface {
+	applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
+}
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableGauge
+
+	Float64Observable
+}
+
+// Float64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record float64 values.
+type Float64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
+// with all opts applied.
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
+	var config Float64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableGaugeOption applies options to a
+// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableGaugeOption.
+type Float64ObservableGaugeOption interface {
+	applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
+}
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Observer
+
+	// Observe records the float64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Float64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+	Float64ObservableCounterOption
+	Float64ObservableUpDownCounterOption
+	Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+	cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithFloat64Callback adds the callback to be called for an instrument.
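+//
+// For example, a minimal sketch that registers a callback at instrument
+// creation (assumes an existing metric.Meter named meter; readCelsius is a
+// hypothetical helper returning the current reading as a float64):
+//
+//	gauge, err := meter.Float64ObservableGauge("temperature.celsius",
+//		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
+//			// Called once per collection cycle.
+//			o.Observe(readCelsius())
+//			return nil
+//		}),
+//	)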
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+	return float64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 000000000..9bd6ebf02
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,269 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+	Observable
+
+	int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableCounter
+
+	Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+	var config Int64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+	applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to
+// be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableUpDownCounter
+
+	Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
+	var config Int64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
+type Int64ObservableUpDownCounterOption interface {
+	applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableGauge
+
+	Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+	var config Int64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+	applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Observer
+
+	// Observe records the int64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Int64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface {
+	Int64ObservableCounterOption
+	Int64ObservableUpDownCounterOption
+	Int64ObservableGaugeOption
+}
+
+type int64CallbackOpt struct {
+	cback Int64Callback
+}
+
+func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithInt64Callback adds the callback to be called for an instrument.
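+//
+// For example, a minimal sketch for a cumulative counter (assumes an existing
+// metric.Meter named meter and a package-level totalHandled atomic.Int64 that
+// the rest of the program increments):
+//
+//	reqs, err := meter.Int64ObservableCounter("requests.handled",
+//		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
+//			// Counters report the cumulative total, not a delta.
+//			o.Observe(totalHandled.Load())
+//			return nil
+//		}),
+//	)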
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption { + return int64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 000000000..778ad2d74 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. 
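+//
+// Meter options such as this one are applied when a Meter is obtained; a
+// small sketch (the instrumentation name, version, and schema URL shown are
+// only illustrative values):
+//
+//	meter := otel.Meter("example.com/instrumentation",
+//		metric.WithInstrumentationVersion("v0.1.0"),
+//		metric.WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
+//	)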
+func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 000000000..ae24e448d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. 
+conveyed about all the synchronous measurements made during a collection cycle,
+a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
+when just the most recent measurement needs to be conveyed about an
+asynchronous measurement, a Gauge ([Int64ObservableGauge] and
+[Float64ObservableGauge]) should be used.
+
+See the [OpenTelemetry documentation] for more information about instruments
+and their intended use.
+
+# Measurements
+
+Measurements are made by recording values and information about the values with
+an instrument. How these measurements are recorded depends on the instrument.
+
+Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are recorded using the instrument methods directly. All
+counter instruments have an Add method that is used to measure an increment
+value, and all histogram instruments have a Record method to measure a data
+point.
+
+Asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) record measurements within a callback function. The
+callback is registered with the Meter which ensures the callback is called once
+per collection cycle. A callback can be registered two ways: during the
+instrument's creation using an option, or later using the RegisterCallback
+method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+  - The measurement process is known when the instrument is created
+  - Only that instrument will make a measurement within the callback
+  - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter] that
+created the instrument to register a [Callback].
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/metric/embedded"
+
+	type MeterProvider struct {
+		embedded.MeterProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to be a panic,
+they need to embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type MeterProvider struct {
+		metric.MeterProvider
+		// ...
+	}
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+	import "go.opentelemetry.io/otel/metric/noop"
+
+	type MeterProvider struct {
+		noop.MeterProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 000000000..ae0bdbd2e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,234 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry metric API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
+package embedded // import "go.opentelemetry.io/otel/metric/embedded"
+
+// MeterProvider is embedded in
+// [go.opentelemetry.io/otel/metric.MeterProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type MeterProvider interface{ meterProvider() }
+
+// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 000000000..cdca00058 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,334 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. 
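+//
+// A hypothetical usage sketch (the meter, instrument name, and description
+// text are illustrative only):
+//
+//	counter, err := meter.Int64Counter(
+//		"requests.handled",
+//		metric.WithDescription("Number of requests handled by the server."),
+//	)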
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+	applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+	attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+	config := AddConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyAdd(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+	applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+	attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+	config := RecordConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyRecord(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+	applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+	attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+	config := ObserveConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyObserve(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+	AddOption
+	RecordOption
+	ObserveOption
+}
+
+type attrOpt struct {
+	set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+	// NewMergeIterator uses the first value for any duplicates.
+	iter := attribute.NewMergeIterator(&b, &a)
+	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for iter.Next() {
+		merged = append(merged, iter.Attribute())
+	}
+	return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+// WithAttributeSet sets the attribute Set with which a measurement is made.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+	return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+//	cp := make([]attribute.KeyValue, len(attributes))
+//	copy(cp, attributes)
+//	WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance sensitive code.
+//
+// See [WithAttributeSet] for information about how multiple WithAttributes are
+// merged.
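+//
+// A hypothetical usage sketch (the counter, context, and attribute keys and
+// values are illustrative only):
+//
+//	counter.Add(ctx, 1, metric.WithAttributes(
+//		attribute.String("http.method", "GET"),
+//		attribute.Int("http.status_code", 200),
+//	))
+//
+// For hot paths, the set can be built once and reused via WithAttributeSet:
+//
+//	attrs := attribute.NewSet(attribute.String("http.method", "GET"))
+//	counter.Add(ctx, 1, metric.WithAttributeSet(attrs))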
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
+	cp := make([]attribute.KeyValue, len(attributes))
+	copy(cp, attributes)
+	return attrOpt{set: attribute.NewSet(cp...)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 000000000..2520bc74a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,212 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or package.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type MeterProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.MeterProvider
+
+	// Meter returns a new Meter with the provided name and configuration.
+	//
+	// A Meter should be scoped at most to a single package. The name needs to
+	// be unique so it does not collide with other names used by an
+	// application or by other applications. To achieve this, the import path
+	// of the instrumentation package is recommended to be used as the name.
+	//
+	// If the name is empty, then an implementation-defined default name will
+	// be used instead.
+	Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Meter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Meter
+
+	// Int64Counter returns a new Int64Counter instrument identified by name
+	// and configured with options. The instrument is used to synchronously
+	// record increasing int64 measurements during a computational operation.
+	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record int64 measurements during a computational
+	// operation.
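+	//
+	// A hypothetical usage sketch (the instrument name and call sites are
+	// illustrative only):
+	//
+	//	active, _ := meter.Int64UpDownCounter("connections.active")
+	//	active.Add(ctx, 1)  // connection opened
+	//	active.Add(ctx, -1) // connection closed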
+	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+	// Int64Histogram returns a new Int64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of int64 measurements during a
+	// computational operation.
+	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+	// Int64ObservableCounter returns a new Int64ObservableCounter identified
+	// by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+	// Float64Counter returns a new Float64Counter instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record increasing float64 measurements during a
+	// computational operation.
+	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record float64 measurements during a computational
+	// operation.
+	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+	// Float64Histogram returns a new Float64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of float64 measurements during a
+	// computational operation.
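+	//
+	// A hypothetical usage sketch (the instrument name, unit, and elapsed
+	// duration are illustrative only):
+	//
+	//	hist, _ := meter.Float64Histogram("request.duration", metric.WithUnit("s"))
+	//	hist.Record(ctx, elapsed.Seconds())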
+	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+	// Float64ObservableCounter returns a new Float64ObservableCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+	// Float64ObservableUpDownCounter returns a new
+	// Float64ObservableUpDownCounter instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	//
+	// The function f needs to be concurrent safe.
+	RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. Meaning, it should not report measurements for an instrument with
+// the same attributes as another Callback will report.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
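+//
+// A hypothetical Callback sketch (the gauge, pool, and omitted error handling
+// are illustrative only):
+//
+//	gauge, _ := meter.Int64ObservableGauge("pool.size")
+//	reg, _ := meter.RegisterCallback(
+//		func(ctx context.Context, o metric.Observer) error {
+//			o.ObserveInt64(gauge, int64(pool.Size()))
+//			return nil
+//		},
+//		gauge,
+//	)
+//	// Call reg.Unregister() when the observations are no longer needed.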
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Observer
+
+	// ObserveFloat64 records the float64 value for obsrv.
+	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+	// ObserveInt64 records the int64 value for obsrv.
+	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Registration
+
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 000000000..f0b063721
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,179 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64CounterConfig contains options for synchronous counter instruments that
+// record float64 values.
+type Float64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+	var config Float64CounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+	applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64UpDownCounterConfig contains options for synchronous counter
+// instruments that record float64 values.
+type Float64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
+// with all opts applied.
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
+	var config Float64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64UpDownCounterOption applies options to a
+// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
+// can be used as a Float64UpDownCounterOption.
+type Float64UpDownCounterOption interface {
+	applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr float64, options ...RecordOption)
+}
+
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
+type Float64HistogramConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
+// opts applied.
+func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
+	var config Float64HistogramConfig
+	for _, o := range opts {
+		config = o.applyFloat64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64HistogramOption.
+type Float64HistogramOption interface {
+	applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 000000000..6f508eb66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,179 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64CounterConfig contains options for synchronous counter instruments that
+// record int64 values.
+type Int64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
+// applied.
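+//
+// A hypothetical sketch of how an implementation might consume these options
+// (the variable names are illustrative only):
+//
+//	cfg := metric.NewInt64CounterConfig(opts...)
+//	desc, unit := cfg.Description(), cfg.Unit()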
+func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
+	var config Int64CounterConfig
+	for _, o := range opts {
+		config = o.applyInt64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64CounterOption applies options to an [Int64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64CounterOption.
+type Int64CounterOption interface {
+	applyInt64Counter(Int64CounterConfig) Int64CounterConfig
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64UpDownCounterConfig contains options for synchronous counter
+// instruments that record int64 values.
+type Int64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
+// all opts applied.
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
+	var config Int64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64UpDownCounterOption applies options to an [Int64UpDownCounterConfig].
+// See [InstrumentOption] for other options that can be used as an
+// Int64UpDownCounterOption.
+type Int64UpDownCounterOption interface {
+	applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+	var config Int64HistogramConfig
+	for _, o := range opts {
+		config = o.applyInt64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// Int64HistogramOption applies options to an [Int64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64HistogramOption.
+type Int64HistogramOption interface {
+	applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 000000000..d29aaa32c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+	return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 000000000..303cdf1cb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 000000000..c119eb285
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 000000000..c94438f73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
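+	//
+	// A hypothetical round-trip sketch using the HeaderCarrier defined below
+	// (the traceparent value is the W3C specification's example):
+	//
+	//	carrier := propagation.HeaderCarrier(http.Header{})
+	//	carrier.Set("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
+	//	_ = carrier.Get("traceparent")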
+ Get(key string) string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Set stores the key-value pair.
+ Set(key string, value string)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Keys lists the keys stored in this carrier.
+ Keys() []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile-time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+ return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+ c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+ keys := make([]string, 0, len(c))
+ for k := range c {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+ return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+ http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+ keys := make([]string, 0, len(hc))
+ for k := range hc {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Inject sets cross-cutting concerns from the Context into the carrier.
+ Inject(ctx context.Context, carrier TextMapCarrier)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Extract reads cross-cutting concerns from the carrier into a Context.
+ Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Fields returns the keys whose values are set with Inject.
+ Fields() []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
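+
+ // Illustrative usage sketch (assuming the otel package from this same
+ // module): a composite of the propagators in this package is typically
+ // installed globally,
+ //
+ //   otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
+ //           propagation.TraceContext{}, propagation.Baggage{},
+ //   ))
+ //
+ // after which instrumentation calls Inject and Extract with a carrier
+ // such as HeaderCarrier or MapCarrier.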
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+ for _, i := range p {
+ i.Inject(ctx, carrier)
+ }
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ for _, i := range p {
+ ctx = i.Extract(ctx, carrier)
+ }
+ return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+ unique := make(map[string]struct{})
+ for _, i := range p {
+ for _, k := range i.Fields() {
+ unique[k] = struct{}{}
+ }
+ }
+
+ fields := make([]string, 0, len(unique))
+ for k := range unique {
+ fields = append(fields, k)
+ }
+ return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+ return compositeTextMapPropagator(p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 000000000..902692da0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "regexp"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ supportedVersion = 0
+ maxVersion = 254
+ traceparentHeader = "traceparent"
+ tracestateHeader = "tracestate"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/)
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var _ TextMapPropagator = TraceContext{}
+var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+
+// Inject sets tracecontext from the Context into the carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+ sc := trace.SpanContextFromContext(ctx)
+ if !sc.IsValid() {
+ return
+ }
+
+ if ts := sc.TraceState().String(); ts != "" {
+ carrier.Set(tracestateHeader, ts)
+ }
+
+ // Clear all flags other than the trace-context supported sampling bit.
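+ // For example (illustrative values), a sampled span with trace ID
+ // 4bf92f3577b34da6a3ce929d0e0e4736 and span ID 00f067aa0ba902b7 yields
+ // the header value "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01":
+ // version 00, the trace ID, the span ID, and the sampled flag 01.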
+ flags := sc.TraceFlags() & trace.FlagsSampled
+
+ h := fmt.Sprintf("%.2x-%s-%s-%s",
+ supportedVersion,
+ sc.TraceID(),
+ sc.SpanID(),
+ flags)
+ carrier.Set(traceparentHeader, h)
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ sc := tc.extract(carrier)
+ if !sc.IsValid() {
+ return ctx
+ }
+ return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+ h := carrier.Get(traceparentHeader)
+ if h == "" {
+ return trace.SpanContext{}
+ }
+
+ matches := traceCtxRegExp.FindStringSubmatch(h)
+
+ if len(matches) == 0 {
+ return trace.SpanContext{}
+ }
+
+ if len(matches) < 5 { // four subgroups plus the overall match
+ return trace.SpanContext{}
+ }
+
+ if len(matches[1]) != 2 {
+ return trace.SpanContext{}
+ }
+ ver, err := hex.DecodeString(matches[1])
+ if err != nil {
+ return trace.SpanContext{}
+ }
+ version := int(ver[0])
+ if version > maxVersion {
+ return trace.SpanContext{}
+ }
+
+ if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
+ return trace.SpanContext{}
+ }
+
+ if len(matches[2]) != 32 {
+ return trace.SpanContext{}
+ }
+
+ var scc trace.SpanContextConfig
+
+ scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
+ if err != nil {
+ return trace.SpanContext{}
+ }
+
+ if len(matches[3]) != 16 {
+ return trace.SpanContext{}
+ }
+ scc.SpanID, err = trace.SpanIDFromHex(matches[3])
+ if err != nil {
+ return trace.SpanContext{}
+ }
+
+ if len(matches[4]) != 2 {
+ return trace.SpanContext{}
+ }
+ opts, err := hex.DecodeString(matches[4])
+ if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
+ return trace.SpanContext{}
+ }
+ // Clear all flags other than the trace-context supported sampling bit.
+ scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+ // Ignore the error returned here. Failure to parse tracestate MUST NOT
+ // affect the parsing of traceparent according to the W3C tracecontext
+ // specification.
+ scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+ scc.Remote = true
+
+ sc := trace.NewSpanContext(scc)
+ if !sc.IsValid() {
+ return trace.SpanContext{}
+ }
+
+ return sc
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+ return []string{traceparentHeader, tracestateHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
new file mode 100644
index 000000000..ddff45468
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -0,0 +1 @@
+codespell==2.2.5
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 000000000..71a1f7748
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
new file mode 100644
index 000000000..679c40c4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It SHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or
+ // sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It MUST be calculated as two
+ // different counters starting from `1`, one for sent messages and one for
+ // received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different
+// counters starting from `1`, one for sent messages and one for received
+// messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
new file mode 100644
index 000000000..9b8c559de
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 000000000..d5c4b5c13 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 000000000..39a2eab3a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,2010 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). 
If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. 
It represents the full user-agent
+// string provided by the browser
+func BrowserUserAgent(val string) attribute.KeyValue {
+ return BrowserUserAgentKey.String(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://intl.cloud.tencent.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the zone
+ // where the resource is running. Cloud regions often have multiple,
+ // isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
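+ // For example (an illustrative pairing, not from the generated text): a
+ // workload on AWS Lambda would set `cloud.provider` to "aws" and
+ // `cloud.platform` to "aws_lambda", i.e. the CloudProviderAWS and
+ // CloudPlatformAWSLambda values defined below.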
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. 
It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the zone
+// where the resource is running. Cloud regions often have multiple, isolated
+// locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). 
+func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. 
It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `faas.id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+ // conventions. It represents the unique ID of the single function that
+ // this runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so consider setting `faas.id` as a span attribute instead.
+ //
+ // The exact value to use for `faas.id` depends on the cloud provider:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. 
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function in MiB.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // Linux systems, the `machine-id` located in `/etc/machine-id` or
+ // `/var/lib/dbus/machine-id` may be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. 
This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. 
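// A minimal usage sketch (an editorial illustration, not part of the vendored
// file): the Kubernetes helpers above compose the same way as the other
// resource helpers. Assuming go.opentelemetry.io/otel/sdk/resource is
// imported as `resource`, and using the example values from the comments:
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		K8SClusterName("opentelemetry-cluster"),
//		K8SNamespaceName("default"),
//		K8SPodName("opentelemetry-pod-autoconf"),
//		K8SContainerName("redis"),
//	)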
+func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. 
+func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. 
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. 
instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
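// Note that the telemetry.sdk.* attributes above are normally not set by
// hand: the SDK's resource.Default() already carries them, together with a
// fallback service.name. A one-line sketch, assuming the same `resource`
// import as in the earlier examples:
//
//	res := resource.Default() // includes telemetry.sdk.name/language/version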
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OtelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OtelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OtelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OtelScopeName(val string) attribute.KeyValue {
+ return OtelScopeNameKey.String(val)
+}
+
+// OtelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OtelScopeVersion(val string) attribute.KeyValue {
+ return OtelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OtelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It is deprecated; use the
+ // `otel.scope.name` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OtelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It is deprecated; use the
+ // `otel.scope.version` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OtelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use the
+// `otel.scope.name` attribute instead.
+func OtelLibraryName(val string) attribute.KeyValue {
+ return OtelLibraryNameKey.String(val)
+}
+
+// OtelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It is deprecated; use the
+// `otel.scope.version` attribute instead.
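// For new code the scope attributes are the ones to emit; the library keys
// survive only for compatibility with older non-OTLP exporters. A sketch of
// the preferred pair (editorial illustration, using the example values from
// the comments above):
//
//	attrs := []attribute.KeyValue{
//		OtelScopeName("io.opentelemetry.contrib.mongodb"),
//		OtelScopeVersion("1.0.0"),
//	}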
+func OtelLibraryVersion(val string) attribute.KeyValue {
+ return OtelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 000000000..42fc525d1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 000000000..8c4a7299d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,3375 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions.
It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have the same `event.name`,
+ // yet be unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` applicable).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the name of the database being accessed. For + // commands that switch the database, this should be set to the target + // database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented.
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns 
an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the name of the database being accessed. For +// commands that switch the database, this should be set to the target database +// (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once.
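+ //
+ // A hypothetical usage sketch for the Cassandra attributes in this block,
+ // assuming `span` is a trace.Span:
+ //
+ //	span.SetAttributes(
+ //		DBCassandraPageSize(5000),
+ //		DBCassandraConsistencyLevelLocalQuorum,
+ //		DBCassandraTable("mytable"),
+ //	)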
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions.
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
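+ //
+ // A hypothetical usage sketch, assuming `span` is a trace.Span whose data
+ // is exported through a non-OTLP exporter:
+ //
+ //	span.SetAttributes(
+ //		OtelStatusCodeError,
+ //		OtelStatusDescription("resource not found"),
+ //	)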
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. 
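+ //
+ // A hypothetical usage sketch for a datasource-triggered function,
+ // assuming `span` is the incoming trace.Span and reusing the documented
+ // example values:
+ //
+ //	span.SetAttributes(
+ //		FaaSTriggerDatasource,
+ //		FaaSDocumentCollection("myBucketName"),
+ //		FaaSDocumentOperationInsert,
+ //		FaaSDocumentName("myFile.txt"),
+ //	)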
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
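+ //
+ // A hypothetical usage sketch for a timer-triggered function, assuming
+ // `span` is the incoming trace.Span and reusing the documented example
+ // values:
+ //
+ //	span.SetAttributes(
+ //		FaaSTriggerTimer,
+ //		FaaSTime("2020-01-23T13:47:06Z"),
+ //		FaaSCron("0/5 * * * ? *"),
+ //	)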
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
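+ //
+ // A hypothetical usage sketch for the socket-level attributes in this
+ // block, assuming `span` is a trace.Span:
+ //
+ //	span.SetAttributes(
+ //		NetTransportTCP,
+ //		NetSockPeerAddr("127.0.0.1"),
+ //		NetSockPeerPort(16456),
+ //	)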
+ // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents + // more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe.
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
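+//
+// A hypothetical usage sketch, assuming `span` is a trace.Span and reusing
+// the documented example values:
+//
+//	span.SetAttributes(NetAppProtocolName("http"), NetAppProtocolVersion("1.1"))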
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
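+//
+// A hypothetical usage sketch, assuming `span` is a trace.Span and reusing
+// the documented example values:
+//
+//	span.SetAttributes(EnduserID("username"), EnduserRole("admin"))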
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). 
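+ //
+ // A hypothetical usage sketch for the code.* attributes in this block,
+ // assuming `span` is a trace.Span:
+ //
+ //	span.SetAttributes(
+ //		CodeFunction("serveRequest"),
+ //		CodeNamespace("com.example.MyHTTPService"),
+ //		CodeLineNumber(42),
+ //	)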
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the HTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+	// HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+	// semantic conventions. It represents the kind of HTTP protocol used.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: If `net.transport` is not specified, it can be assumed to be
+	// `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+	// assumed.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+
+	// HTTPUserAgentKey is the attribute Key conforming to the
+	// "http.user_agent" semantic conventions. It represents the value of the
+	// [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+	// HTTPRequestContentLengthKey is the attribute Key conforming to the
+	// "http.request_content_length" semantic conventions. It represents the
+	// size of the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+	// HTTPResponseContentLengthKey is the attribute Key conforming to the
+	// "http.response_content_length" semantic conventions. It represents the
+	// size of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+	// HTTP/1.0
+	HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+	// HTTP/1.1
+	HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+	// HTTP/2
+	HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+	// HTTP/3
+	HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+	// SPDY protocol
+	HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+	// QUIC protocol
+	HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
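+//
+// Illustrative usage (an editorial sketch, not part of the upstream generated
+// file; it assumes a span obtained from an OpenTelemetry tracer and this
+// package imported as semconv):
+//
+//	ctx, span := otel.Tracer("example").Start(ctx, "HTTP GET")
+//	defer span.End()
+//	span.SetAttributes(
+//		semconv.HTTPMethod("GET"),
+//		semconv.HTTPStatusCode(200),
+//		semconv.HTTPUserAgent("CERN-LineMode/2.15 libwww/2.17b3"),
+//	)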
+func HTTPUserAgent(val string) attribute.KeyValue {
+	return HTTPUserAgentKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+	return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+	return HTTPResponseContentLengthKey.Int(val)
+}
+
+// Semantic Convention for HTTP Client
+const (
+	// HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+	// conventions. It represents the full HTTP request URL in the form
+	// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+	// not transmitted over HTTP, but if it is known, it should be included
+	// nevertheless.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+	// Note: `http.url` MUST NOT contain credentials passed via URL in the form
+	// of `https://username:password@www.example.com/`. In such a case the
+	// attribute's value should be `https://www.example.com/`.
+	HTTPURLKey = attribute.Key("http.url")
+
+	// HTTPResendCountKey is the attribute Key conforming to the
+	// "http.resend_count" semantic conventions. It represents the ordinal
+	// number of the request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Recommended (if and only if request was retried.)
+	// Stability: stable
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Server Unavailable,
+	// network issues, or any other).
+	HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+	return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of the request resending attempt (for any reason, including redirects).
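+//
+// Illustrative usage (an editorial sketch, not upstream text; span and
+// attempt are assumed to come from the caller's retry loop):
+//
+//	span.SetAttributes(HTTPResendCount(attempt))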
+func HTTPResendCount(val int) attribute.KeyValue {
+	return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It represents the URI scheme identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPTargetKey is the attribute Key conforming to the "http.target"
+	// semantic conventions. It represents the full request target as passed in
+	// an HTTP request line or equivalent.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '/path/12314/?q=ddds'
+	HTTPTargetKey = attribute.Key("http.target")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: 'http.route' MUST NOT be populated when this is not supported by
+	// the HTTP server framework, as the route attribute should have
+	// low-cardinality and the URI path cannot substitute for it.
+	HTTPRouteKey = attribute.Key("http.route")
+
+	// HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+	// semantic conventions. It represents the IP address of the original
+	// client behind all proxies, if known (e.g. from
+	// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '83.164.160.102'
+	// Note: This is not necessarily the same as `net.sock.peer.addr`, which
+	// would identify the network-level peer, which may be a proxy.
+	//
+	// This attribute should be set when a source of information different
+	// from the one used for `net.sock.peer.addr` is available, even if that
+	// other source just confirms the same value as `net.sock.peer.addr`.
+	// Rationale: For `net.sock.peer.addr`, one typically does not know if it
+	// comes from a proxy, reverse proxy, or the actual client. Setting
+	// `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+	// one is at least somewhat confident that the address is not that of
+	// the closest proxy.
+	HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+	return HTTPTargetKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework).
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
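+//
+// Illustrative usage (an editorial sketch, not upstream text; metricsJSON is
+// assumed to hold the JSON-serialized `ItemCollectionMetrics` value, and span
+// is assumed to be a trace.Span):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBItemCollectionMetrics(metricsJSON),
+//	)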
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
+	// represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
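+//
+// Illustrative usage (an editorial sketch, not upstream text; out is assumed
+// to be a DynamoDB ListTables response):
+//
+//	span.SetAttributes(AWSDynamoDBTableCount(len(out.TableNames)))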
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
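+//
+// Illustrative usage (an editorial sketch, not upstream text; out is assumed
+// to be a DynamoDB Scan response whose Count and ScannedCount fields are
+// int64 values):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBCount(int(out.Count)),
+//		AWSDynamoDBScannedCount(int(out.ScannedCount)),
+//	)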
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in
+	// the `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
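+	//
+	// Illustrative usage (an editorial sketch, not upstream text; the
+	// constructors defined below wrap these keys, and sanitizedQuery is
+	// assumed to be a query string with sensitive values removed):
+	//
+	//	span.SetAttributes(
+	//		GraphqlOperationName("findBookByID"),
+	//		GraphqlDocument(sanitizedQuery),
+	//	)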
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+	return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+	return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
+
+// Semantic convention for attributes that describe a messaging destination on
+// a broker
+const (
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If
+	// the broker does not have such a notion, the destination name SHOULD
+	// uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationKindKey is the attribute Key conforming to the
+	// "messaging.destination.kind" semantic conventions. It represents the
+	// kind of message destination.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationKindKey = attribute.Key("messaging.destination.kind")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low cardinality representation of the messaging destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions.
It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have an auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+var (
+	// A message sent to a queue
+	MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+	// A message sent to a topic
+	MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+	return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+	return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+	return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have an auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Semantic convention for attributes that describe a messaging source on a
+// broker
+const (
+	// MessagingSourceNameKey is the attribute Key conforming to the
+	// "messaging.source.name" semantic conventions. It represents the message
+	// source name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Source name SHOULD uniquely identify a specific queue, topic, or
+	// other entity within the broker. If
+	// the broker does not have such a notion, the source name SHOULD uniquely
+	// identify the broker.
+	MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+	// MessagingSourceKindKey is the attribute Key conforming to the
+	// "messaging.source.kind" semantic conventions. It represents the kind of
+	// message source.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingSourceKindKey = attribute.Key("messaging.source.kind")
+
+	// MessagingSourceTemplateKey is the attribute Key conforming to the
+	// "messaging.source.template" semantic conventions. It represents the low
+	// cardinality representation of the messaging source name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/customers/{customerID}'
+	// Note: Source names could be constructed from templates. An example would
+	// be a source name involving a user name or product id.
Although the
+	// source name in this case is of high cardinality, the underlying template
+	// is of low cardinality and can be effectively used for grouping and
+	// aggregation.
+	MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+	// MessagingSourceTemporaryKey is the attribute Key conforming to the
+	// "messaging.source.temporary" semantic conventions. It represents a
+	// boolean that is true if the message source is temporary and might not
+	// exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+	// MessagingSourceAnonymousKey is the attribute Key conforming to the
+	// "messaging.source.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message source is anonymous (could be
+	// unnamed or have an auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+var (
+	// A message received from a queue
+	MessagingSourceKindQueue = MessagingSourceKindKey.String("queue")
+	// A message received from a topic
+	MessagingSourceKindTopic = MessagingSourceKindKey.String("topic")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name.
+func MessagingSourceName(val string) attribute.KeyValue {
+	return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name.
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+	return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+	return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// an auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+	return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// General attributes used in messaging systems.
+const (
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents a string
+	// identifying the messaging system.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+	MessagingSystemKey = attribute.Key("messaging.system")
+
+	// MessagingOperationKey is the attribute Key conforming to the
+	// "messaging.operation" semantic conventions. It represents a string
+	// identifying the kind of messaging operation as defined in the [Operation
+	// names](#operation-names) section above.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: If a custom value is used, it MUST be of low cardinality.
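+	//
+	// Illustrative usage (an editorial sketch, not upstream text): spans
+	// typically combine this key's enum values, defined below, with
+	// MessagingSystem, e.g.
+	//
+	//	span.SetAttributes(MessagingSystem("kafka"), MessagingOperationProcess)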
+	MessagingOperationKey = attribute.Key("messaging.operation")
+
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the span describes an
+	// operation on a batch of messages.)
+	// Stability: stable
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+	// publish
+	MessagingOperationPublish = MessagingOperationKey.String("publish")
+	// receive
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// process
+	MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+	return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+	return MessagingBatchMessageCountKey.Int(val)
+}
+
+// Semantic convention for a consumer of messages received from a messaging
+// system
+const (
+	// MessagingConsumerIDKey is the attribute Key conforming to the
+	// "messaging.consumer.id" semantic conventions. It represents the
+	// identifier for the consumer receiving a message. For Kafka, set it to
+	// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+	// both are present, or only `messaging.kafka.consumer.group`. For brokers,
+	// such as RabbitMQ and Artemis, set it to the `client_id` of the client
+	// consuming the message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'mygroup - client-6'
+	MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+	return MessagingConsumerIDKey.String(val)
+}
+
+// Attributes for RabbitMQ
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If not empty.)
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka, which are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaClientIDKey is the attribute Key conforming to the
+	// "messaging.kafka.client_id" semantic conventions. It represents the
+	// client ID for the Consumer or Producer that is handling the message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client-5'
+	MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+	// "messaging.kafka.source.partition" semantic conventions. It represents
+	// the partition the message is received from.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions.
It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If value is `true`. When
+	// missing, the value is assumed to be `false`.)
+	// Stability: stable
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`, the
+// attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+	return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+	return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// Attributes for Apache RocketMQ
+const (
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// individual.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions.
It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqClientIDKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_id" semantic conventions. It represents the
+	// unique identifier for each client.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myhost@8742@s8083jm'
+	MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delay time level is not specified.)
+	// Stability: stable
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delivery timestamp is not specified.)
+	// Stability: stable
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+	// Stability: stable
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of message besides topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of message, another way to mark message besides message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+	return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds at which the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Semantic conventions for remote procedure calls.
+const (
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	RPCSystemKey = attribute.Key("rpc.system")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called, which must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method
+// being called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+	// the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+	// does not specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If other than the default
+	// version (`1.0`))
+	// Stability: stable
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of request or response. Since protocol allows id to be int,
+	// string, `null` or missing (for notifications), value is expected to be
+	// cast to string for simplicity. Use empty string in case of `null` value.
+	// Omit entirely if this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go new file mode 100644 index 000000000..e6cf89510 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go @@ -0,0 +1,1877 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +import "go.opentelemetry.io/otel/attribute" + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API does not expose a +// clear notion of client and server). This also covers UDP network +// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) +// and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - unix domain + // socket name, IPv4 or IPv6 address. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/tmp/my.sock', '10.1.2.80' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent client address behind + // any intermediaries (e.g. proxies) if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent client port behind any + // intermediaries (e.g. proxies) if it's available. + ClientPortKey = attribute.Key("client.port") + + // ClientSocketAddressKey is the attribute Key conforming to the + // "client.socket.address" semantic conventions. It represents the + // immediate client peer address - unix domain socket name, IPv4 or IPv6 + // address. + // + // Type: string + // RequirementLevel: Recommended (If different than `client.address`.) + // Stability: stable + // Examples: '/tmp/my.sock', '127.0.0.1' + ClientSocketAddressKey = attribute.Key("client.socket.address") + + // ClientSocketPortKey is the attribute Key conforming to the + // "client.socket.port" semantic conventions. It represents the immediate + // client peer port number + // + // Type: int + // RequirementLevel: Recommended (If different than `client.port`.) + // Stability: stable + // Examples: 35555 + ClientSocketPortKey = attribute.Key("client.socket.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// unix domain socket name, IPv4 or IPv6 address. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// ClientSocketAddress returns an attribute KeyValue conforming to the +// "client.socket.address" semantic conventions. It represents the immediate +// client peer address - unix domain socket name, IPv4 or IPv6 address. +func ClientSocketAddress(val string) attribute.KeyValue { + return ClientSocketAddressKey.String(val) +} + +// ClientSocketPort returns an attribute KeyValue conforming to the +// "client.socket.port" semantic conventions. It represents the immediate +// client peer port number +func ClientSocketPort(val int) attribute.KeyValue { + return ClientSocketPortKey.Int(val) +} + +// Describes deprecated HTTP attributes. 
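+// These remain available for backwards compatibility; each description below
+// names its replacement attribute.
+//
+// A minimal migration sketch (editor's illustration, not generated content;
+// it assumes this package is imported as `semconv` and `span` is a started
+// trace.Span):
+//
+//	span.SetAttributes(semconv.HTTPMethod("GET"))    // deprecated attribute
+//	span.SetAttributes(semconv.HTTPRequestMethodGet) // preferred replacement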
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions. It is deprecated; use `http.request.method`
+	// instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions. It is deprecated; use
+	// `http.response.status_code` instead.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It is deprecated; use `url.scheme` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+	// conventions. It is deprecated; use `url.full` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+	HTTPURLKey = attribute.Key("http.url")
+
+	// HTTPTargetKey is the attribute Key conforming to the "http.target"
+	// semantic conventions. It is deprecated; use `url.path` and `url.query`
+	// instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/search?q=OpenTelemetry#SemConv'
+	HTTPTargetKey = attribute.Key("http.target")
+
+	// HTTPRequestContentLengthKey is the attribute Key conforming to the
+	// "http.request_content_length" semantic conventions. It is deprecated;
+	// use `http.request.body.size` instead.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 3495
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+	// HTTPResponseContentLengthKey is the attribute Key conforming to the
+	// "http.response_content_length" semantic conventions. It is deprecated;
+	// use `http.response.body.size` instead.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 3495
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It is deprecated; use `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It is deprecated; use
+// `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It is deprecated; use `url.scheme` instead.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It is deprecated; use `url.full` instead.
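+//
+// A usage sketch (editor's illustration; it assumes this package is imported
+// as `semconv` and `span` is a started trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+//	)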
+func HTTPURL(val string) attribute.KeyValue {
+	return HTTPURLKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It is deprecated; use `url.path` and `url.query`
+// instead.
+func HTTPTarget(val string) attribute.KeyValue {
+	return HTTPTargetKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It is deprecated; use
+// `http.request.body.size` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+	return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It is deprecated; use
+// `http.response.body.size` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+	return HTTPResponseContentLengthKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetSockPeerNameKey is the attribute Key conforming to the
+	// "net.sock.peer.name" semantic conventions. It is deprecated; use
+	// `server.socket.domain` on client spans.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/var/my.sock'
+	NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+	// NetSockPeerAddrKey is the attribute Key conforming to the
+	// "net.sock.peer.addr" semantic conventions. It is deprecated; use
+	// `server.socket.address` on client spans and `client.socket.address` on
+	// server spans.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '192.168.0.1'
+	NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+	// NetSockPeerPortKey is the attribute Key conforming to the
+	// "net.sock.peer.port" semantic conventions. It is deprecated; use
+	// `server.socket.port` on client spans and `client.socket.port` on server
+	// spans.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 65531
+	NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+	// NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+	// semantic conventions. It is deprecated; use `server.address` on client
+	// spans and `client.address` on server spans.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'example.com'
+	NetPeerNameKey = attribute.Key("net.peer.name")
+
+	// NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+	// semantic conventions. It is deprecated; use `server.port` on client
+	// spans and `client.port` on server spans.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	NetPeerPortKey = attribute.Key("net.peer.port")
+
+	// NetHostNameKey is the attribute Key conforming to the "net.host.name"
+	// semantic conventions. It is deprecated; use `server.address`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'example.com'
+	NetHostNameKey = attribute.Key("net.host.name")
+
+	// NetHostPortKey is the attribute Key conforming to the "net.host.port"
+	// semantic conventions. It is deprecated; use `server.port`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	NetHostPortKey = attribute.Key("net.host.port")
+
+	// NetSockHostAddrKey is the attribute Key conforming to the
+	// "net.sock.host.addr" semantic conventions. It is deprecated; use
+	// `server.socket.address`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/var/my.sock'
+	NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+	// NetSockHostPortKey is the attribute Key conforming to the
+	// "net.sock.host.port" semantic conventions. It is deprecated; use
+	// `server.socket.port`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+	// NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions. It is deprecated; use `network.transport`.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	NetTransportKey = attribute.Key("net.transport")
+
+	// NetProtocolNameKey is the attribute Key conforming to the
+	// "net.protocol.name" semantic conventions. It is deprecated; use
+	// `network.protocol.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+	// NetProtocolVersionKey is the attribute Key conforming to the
+	// "net.protocol.version" semantic conventions. It is deprecated; use
+	// `network.protocol.version`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '3.1.1'
+	NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+	// NetSockFamilyKey is the attribute Key conforming to the
+	// "net.sock.family" semantic conventions. It is deprecated; use
+	// `network.transport` and `network.type`.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	NetSockFamilyKey = attribute.Key("net.sock.family")
+)
+
+var (
+	// ip_tcp
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Named or anonymous pipe
+	NetTransportPipe = NetTransportKey.String("pipe")
+	// In-process communication
+	NetTransportInProc = NetTransportKey.String("inproc")
+	// Something else (non IP-based)
+	NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+	// IPv4 address
+	NetSockFamilyInet = NetSockFamilyKey.String("inet")
+	// IPv6 address
+	NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+	// Unix domain socket path
+	NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It is deprecated; use
+// `server.socket.domain` on client spans.
+func NetSockPeerName(val string) attribute.KeyValue {
+	return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It is deprecated; use
+// `server.socket.address` on client spans and `client.socket.address` on
+// server spans.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+	return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It is deprecated; use
+// `server.socket.port` on client spans and `client.socket.port` on server
+// spans.
+func NetSockPeerPort(val int) attribute.KeyValue {
+	return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It is deprecated; use
+// `server.address` on client spans and `client.address` on server spans.
+func NetPeerName(val string) attribute.KeyValue {
+	return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It is deprecated; use `server.port`
+// on client spans and `client.port` on server spans.
+func NetPeerPort(val int) attribute.KeyValue {
+	return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It is deprecated; use
+// `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+	return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It is deprecated; use `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+	return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It is deprecated; use
+// `server.socket.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+	return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It is deprecated; use
+// `server.socket.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+	return NetSockHostPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It is deprecated; use
+// `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+	return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It is deprecated; use
+// `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+	return NetProtocolVersionKey.String(val)
+}
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+	// DestinationDomainKey is the attribute Key conforming to the
+	// "destination.domain" semantic conventions. It represents the domain name
+	// of the destination system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'foo.example.com'
+	// Note: This value may be a host name, a fully qualified domain name, or
+	// another host naming format.
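+	//
+	// A brief usage sketch (editor's illustration, not generated content;
+	// `span` is an assumed started trace.Span):
+	//
+	//	span.SetAttributes(DestinationDomain("foo.example.com"))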
+	DestinationDomainKey = attribute.Key("destination.domain")
+
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the peer
+	// address, for example IP address or UNIX socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.5.3.2'
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the
+	// "destination.port" semantic conventions. It represents the peer port
+	// number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationDomain returns an attribute KeyValue conforming to the
+// "destination.domain" semantic conventions. It represents the domain name of
+// the destination system.
+func DestinationDomain(val string) attribute.KeyValue {
+	return DestinationDomainKey.String(val)
+}
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the peer address,
+// for example IP address or UNIX socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the peer port number
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// Describes HTTP attributes.
+const (
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER` and, except if reporting
+	// a metric, MUST set the exact method received in the request line as the
+	// value of the `http.request.method_original` attribute.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods. If this override is done via environment
+	// variable, then the environment variable MUST be named
+	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+	// list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods; it is
+	// not a list of known methods in addition to the defaults).
+	//
+	// HTTP method names are case-sensitive and the `http.request.method`
+	// attribute value MUST match a known HTTP method name exactly.
+	// Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case insensitive SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so MUST also set
+	// `http.request.method_original` to the original value.
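+	//
+	// A sketch of the `_OTHER` fallback described above (editor's
+	// illustration, not generated content; `span` is an assumed started
+	// trace.Span and PROPFIND stands in for a method outside the default
+	// known set):
+	//
+	//	span.SetAttributes(
+	//		HTTPRequestMethodOther,                // http.request.method=_OTHER
+	//		HTTPRequestMethodOriginal("PROPFIND"), // exact request-line method
+	//	)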
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status
+	// code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+)
+
+var (
+	// CONNECT method
+	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+	// DELETE method
+	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+	// GET method
+	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+	// HEAD method
+	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+	// OPTIONS method
+	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+	// PATCH method
+	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+	// POST method
+	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+	// PUT method
+	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+	// TRACE method
+	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+	// Any HTTP method that the instrumentation has no prior knowledge of
+	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTP Server attributes
+const (
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework, as the route attribute should have low cardinality
+	// and the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name that identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'click', 'exception'
+	EventNameKey = attribute.Key("event.name")
+
+	// EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain that identifies the
+	// business context for the events.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: Events across different domains may have the same `event.name`,
+	// yet be unrelated events.
+	EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+	// Events from browser apps
+	EventDomainBrowser = EventDomainKey.String("browser")
+	// Events from mobile apps
+	EventDomainDevice = EventDomainKey.String("device")
+	// Events from Kubernetes
+	EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes.
+const (
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
+	// semantic conventions. It represents the stream associated with the log.
+	// See below for a list of well-known values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+	// Logs from stdout stream
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Logs from stderr stream
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which the log was emitted.
+const (
+	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
+	// semantic conventions. It represents the basename of the file.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'audit.log'
+	LogFileNameKey = attribute.Key("log.file.name")
+
+	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
+	// semantic conventions. It represents the full path to the file.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/var/log/mysql/audit.log'
+	LogFilePathKey = attribute.Key("log.file.path")
+
+	// LogFileNameResolvedKey is the attribute Key conforming to the
+	// "log.file.name_resolved" semantic conventions. It represents the
+	// basename of the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'uuid.log'
+	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+	// LogFilePathResolvedKey is the attribute Key conforming to the
+	// "log.file.path_resolved" semantic conventions.
It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // TypeKey is the attribute Key conforming to the "type" semantic + // conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + TypeKey = attribute.Key("type") + + // PoolKey is the attribute Key conforming to the "pool" semantic + // conventions. It represents the name of the memory pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + PoolKey = attribute.Key("pool") +) + +var ( + // Heap memory + TypeHeap = TypeKey.String("heap") + // Non-heap memory + TypeNonHeap = TypeKey.String("non_heap") +) + +// Pool returns an attribute KeyValue conforming to the "pool" semantic +// conventions. It represents the name of the memory pool. +func Pool(val string) attribute.KeyValue { + return PoolKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API does not expose a +// clear notion of client and server). This also covers UDP network +// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) +// and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the logical server hostname, matches + // server FQDN if available, and IP or socket address if FQDN is not known. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the logical server port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + ServerPortKey = attribute.Key("server.port") + + // ServerSocketDomainKey is the attribute Key conforming to the + // "server.socket.domain" semantic conventions. It represents the domain + // name of an immediate peer. + // + // Type: string + // RequirementLevel: Recommended (If different than `server.address`.) + // Stability: stable + // Examples: 'proxy.example.com' + // Note: Typically observed from the client side, and represents a proxy or + // other intermediary domain name. + ServerSocketDomainKey = attribute.Key("server.socket.domain") + + // ServerSocketAddressKey is the attribute Key conforming to the + // "server.socket.address" semantic conventions. It represents the physical + // server IP address or Unix socket address. If set from the client, should + // simply use the socket's peer address, and not attempt to find any actual + // server IP (i.e., if set from client, this may represent some proxy + // server instead of the logical server). + // + // Type: string + // RequirementLevel: Recommended (If different than `server.address`.) + // Stability: stable + // Examples: '10.5.3.2' + ServerSocketAddressKey = attribute.Key("server.socket.address") + + // ServerSocketPortKey is the attribute Key conforming to the + // "server.socket.port" semantic conventions. It represents the physical + // server port. + // + // Type: int + // RequirementLevel: Recommended (If different than `server.port`.) + // Stability: stable + // Examples: 16456 + ServerSocketPortKey = attribute.Key("server.socket.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the logical server +// hostname, matches server FQDN if available, and IP or socket address if FQDN +// is not known. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the logical server port number +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// ServerSocketDomain returns an attribute KeyValue conforming to the +// "server.socket.domain" semantic conventions. It represents the domain name +// of an immediate peer. +func ServerSocketDomain(val string) attribute.KeyValue { + return ServerSocketDomainKey.String(val) +} + +// ServerSocketAddress returns an attribute KeyValue conforming to the +// "server.socket.address" semantic conventions. It represents the physical +// server IP address or Unix socket address. If set from the client, should +// simply use the socket's peer address, and not attempt to find any actual +// server IP (i.e., if set from client, this may represent some proxy server +// instead of the logical server). +func ServerSocketAddress(val string) attribute.KeyValue { + return ServerSocketAddressKey.String(val) +} + +// ServerSocketPort returns an attribute KeyValue conforming to the +// "server.socket.port" semantic conventions. It represents the physical server +// port. 
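+//
+// A usage sketch (editor's illustration; it assumes this package is imported
+// as `semconv` and `span` is a started trace.Span):
+//
+//	span.SetAttributes(semconv.ServerSocketPort(16456))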
+func ServerSocketPort(val int) attribute.KeyValue { + return ServerSocketPortKey.Int(val) +} + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API does not expose a clear notion +// of client and server. +const ( + // SourceDomainKey is the attribute Key conforming to the "source.domain" + // semantic conventions. It represents the domain name of the source + // system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'foo.example.com' + // Note: This value may be a host name, a fully qualified domain name, or + // another host naming format. + SourceDomainKey = attribute.Key("source.domain") + + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address, for example IP + // address or Unix socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.5.3.2' + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceDomain returns an attribute KeyValue conforming to the +// "source.domain" semantic conventions. It represents the domain name of the +// source system. +func SourceDomain(val string) attribute.KeyValue { + return SourceDomainKey.String(val) +} + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address, for +// example IP address or Unix socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // Transport Layer](https://osi-model.com/transport-layer/) or + // [Inter-process Communication + // method](https://en.wikipedia.org/wiki/Inter-process_communication). The + // value SHOULD be normalized to lowercase. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI Network + // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The + // value SHOULD be normalized to lowercase. 
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ipv4', 'ipv6'
+	NetworkTypeKey = attribute.Key("network.type")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// Application Layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent. The value SHOULD be normalized to lowercase.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+	// NetworkProtocolVersionKey is the attribute Key conforming to the
+	// "network.protocol.version" semantic conventions. It represents the
+	// version of the application layer protocol used. See note below.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '3.1.1'
+	// Note: `network.protocol.version` refers to the version of the protocol
+	// used and might be different from the protocol client's version. If the
+	// HTTP client used has a version of `0.27.2`, but sends HTTP version
+	// `1.1`, this attribute should be set to `1.1`.
+	NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+)
+
+var (
+	// TCP
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe. See note below
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+	// IPv4
+	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// Application Layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent. The value SHOULD be normalized to lowercase.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions.
It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's different + // than `http.request.method`.) + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. 
This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") +) + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. 
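
A brief usage sketch for the protocol and payload-size helpers above (illustrative only: the package name, tracer name, span name, and byte counts are hypothetical, and the snippet assumes an application whose global tracer provider is already configured):

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    // recordHTTPCall shows that the generated accessors return
    // attribute.KeyValue values, so they can be passed straight to
    // span.SetAttributes in one call.
    func recordHTTPCall(ctx context.Context, reqBytes, respBytes int) {
        _, span := otel.Tracer("example").Start(ctx, "HTTP GET")
        defer span.End()

        span.SetAttributes(
            semconv.NetworkProtocolName("http"),   // normalized to lowercase
            semconv.NetworkProtocolVersion("1.1"), // protocol version, not client version
            semconv.HTTPRequestBodySize(reqBytes),
            semconv.HTTPResponseBodySize(respBytes),
        )
    }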
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. 
+ // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. 
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. Message keys in
+ // Kafka are used for grouping alike messages to ensure they're processed
+ // on the same partition. They differ from `messaging.message.id` in that
+ // they're not unique. If the key is `null`, the attribute MUST NOT be
+ // set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. Message keys in Kafka
+// are used for grouping alike messages to ensure they're processed on the
+// same partition. They differ from `messaging.message.id` in that they're
+// not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message,
+ // which determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages.
Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of the message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark the message besides the
+ // message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
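
An illustrative producer-side sketch of the RocketMQ conventions above (all names are placeholders; note the enum values declared above are ready-made attribute.KeyValue constants, so they can be set directly):

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    // traceRocketmqSend sketches the required and FIFO-conditional
    // attributes on a hypothetical producer span.
    func traceRocketmqSend(ctx context.Context) {
        _, span := otel.Tracer("example").Start(ctx, "myTopic publish")
        defer span.End()

        span.SetAttributes(
            semconv.MessagingRocketmqNamespace("myNamespace"),       // Required
            semconv.MessagingRocketmqClientGroup("myProducerGroup"), // Required
            semconv.MessagingRocketmqMessageTypeFifo,                // enum value, already a KeyValue
            semconv.MessagingRocketmqMessageGroup("myMessageGroup"), // ConditionallyRequired for FIFO
        )
    }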
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message,
+// which determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Attributes describing URL.
+const (
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it should be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case username and
+ // password should be redacted and attribute's value should be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed) and SHOULD NOT be validated or modified except for + // sanitizing purposes. + URLFullKey = attribute.Key("url.full") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + // Note: When missing, the value is assumed to be `/` + URLPathKey = attribute.Key("url.path") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in query string SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") +) + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. 
It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go new file mode 100644 index 000000000..7cf424855 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.21.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go new file mode 100644 index 000000000..30ae34fe4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. 
It SHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. The value SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or
+ // sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received
+ // messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions.
It MUST be calculated as two different
+// counters starting from `1`, one for sent messages and one for received
+// messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
new file mode 100644
index 000000000..93d3c1760
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go new file mode 100644 index 000000000..b6d8935cf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go @@ -0,0 +1,2310 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. 
It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. Cloud regions often
+ // have multiple, isolated locations known as zones to increase
+ // availability. The availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + 
CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability. The
+// availability zone represents the zone where the resource is running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
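
A minimal sketch of describing where a process runs with the cloud helpers above, using the SDK resource package (the region and zone values are placeholders; semconv.SchemaURL is the schema URL constant published alongside this semconv package):

    package example

    import (
        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    // newCloudResource builds a Resource carrying the cloud provider,
    // region, and availability zone of a hypothetical AWS workload.
    func newCloudResource() *resource.Resource {
        return resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.CloudProviderAWS, // enum value, already an attribute.KeyValue
            semconv.CloudRegion("us-east-1"),
            semconv.CloudAvailabilityZone("us-east-1c"),
        )
    }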
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. 
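+ //
+ // As a minimal usage sketch, the ECS helpers defined above combine into
+ // resource attributes like this (the literals are the documented example
+ // values, not real resources):
+ //
+ //    attrs := []attribute.KeyValue{
+ //        CloudProviderAWS,
+ //        CloudPlatformAWSECS,
+ //        AWSECSLaunchtypeFargate,
+ //        AWSECSTaskFamily("opentelemetry-family"),
+ //        AWSECSTaskRevision("8"),
+ //    }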
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
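+//
+// A minimal usage sketch for the log attributes in this block (the names and
+// ARNs are the documented example values, not real log groups):
+//
+//    attrs := []attribute.KeyValue{
+//        AWSLogGroupNames("/aws/lambda/my-function"),
+//        AWSLogGroupARNs("arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"),
+//        AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//    }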
+func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). 
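+ //
+ // A minimal usage sketch for the GCE attributes (the instance name and
+ // hostname are the documented example values, not a real VM):
+ //
+ //    attrs := []attribute.KeyValue{
+ //        CloudProviderGCP,
+ //        CloudPlatformGCPComputeEngine,
+ //        GCPGceInstanceName("instance-1"),
+ //        GCPGceInstanceHostname("my-host1234.example.com"),
+ //    }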
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") + + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") +) + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. 
It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // OCI defines a digest of manifest. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). 
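+ //
+ // A minimal usage sketch for the container attributes defined above
+ // (values are the documented examples, not a real container):
+ //
+ //    attrs := []attribute.KeyValue{
+ //        ContainerName("opentelemetry-autoconf"),
+ //        ContainerID("a3bf90e006b2"),
+ //        ContainerImageName("gcr.io/opentelemetry/operator"),
+ //        ContainerImageTag("0.1"),
+ //    }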
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions.
+// It represents the full command run by the container as a single string. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply; ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions.
+ // It represents the marketing name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions.
It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. 
+ // It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`.
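+//
+// A minimal usage sketch for the host attributes (values are the documented
+// examples, not a real host):
+//
+//    attrs := []attribute.KeyValue{
+//        HostID("fdbf79e8af94cb7f9e8df36789187052"),
+//        HostName("opentelemetry-test"),
+//        HostType("n1-standard-1"),
+//        HostArchAMD64,
+//    }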
+// See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S does not have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+ // which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. 
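+//
+// A minimal usage sketch for pod-level attributes (values are the documented
+// examples, not a real pod):
+//
+//    attrs := []attribute.KeyValue{
+//        K8SNamespaceName("default"),
+//        K8SPodName("opentelemetry-pod-autoconf"),
+//        K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//    }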
+func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. 
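+//
+// A minimal usage sketch for workload attributes on a Deployment-managed pod
+// (names are the documented examples, not a real cluster):
+//
+//    attrs := []attribute.KeyValue{
+//        K8SClusterName("opentelemetry-cluster"),
+//        K8SDeploymentName("opentelemetry"),
+//        K8SReplicaSetName("opentelemetry"),
+//    }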
+const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. 
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. 
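+ //
+ // A minimal usage sketch for the OS attributes (values are the documented
+ // examples, not a real machine):
+ //
+ //    attrs := []attribute.KeyValue{
+ //        OSTypeLinux,
+ //        OSName("Ubuntu"),
+ //        OSVersion("18.04.1"),
+ //        OSDescription("Ubuntu 18.04.1 LTS"),
+ //    }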
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. 
+ // On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
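+//
+// A minimal usage sketch for the process attributes (illustrative values,
+// not a real process):
+//
+//    attrs := []attribute.KeyValue{
+//        ProcessPID(1234),
+//        ProcessExecutableName("otelcol"),
+//        ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+//        ProcessOwner("root"),
+//    }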
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. 
+func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. 
It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). 
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It is deprecated; use the
+ // `otel.scope.name` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It is deprecated; use the
+ // `otel.scope.version` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use
+// the `otel.scope.name` attribute instead.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It is deprecated;
+// use the `otel.scope.version` attribute instead.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
new file mode 100644
index 000000000..66ffd5989
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
new file mode 100644
index 000000000..b5a91450d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
@@ -0,0 +1,2495 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). 
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
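+
+// Example (editor's illustrative sketch, not part of the generated
+// specification): this helper is typically used to annotate the server span
+// of a Lambda invocation. The span variable and the ARN literal below are
+// assumptions made for the example only; any trace.Span from
+// go.opentelemetry.io/otel/trace would do.
+//
+//	span.SetAttributes(
+//		AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
+//	)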
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
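+
+// Example (editor's illustrative sketch, not part of the generated
+// specification): when processing a CloudEvent, the required identifiers can
+// be recorded on the active span. The span variable and the literal values
+// are assumptions made for the example only.
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("my-service"),
+//		CloudeventsEventSpecVersion("1.0"),
+//	)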
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It is used to report the name
+ // of the database being accessed. For commands that switch the database,
+ // this should be set to the target database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented.
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // 
ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It is used to report the name of
+// the database being accessed. For commands that switch the database, this
+// should be set to the target database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
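+
+// Example (editor's illustrative sketch, not part of the generated
+// specification): a database client span would typically combine several of
+// the attributes above. The span variable and the literal values are
+// assumptions made for the example only.
+//
+//	span.SetAttributes(
+//		DBSystemPostgreSQL,
+//		DBName("customers"),
+//		DBOperation("SELECT"),
+//		DBStatement("SELECT * FROM wuser_table"),
+//	)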
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions.
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. 
+ // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the RU
+ // consumed for that operation.
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
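+
+// Example (editor's illustrative sketch, not part of the generated
+// specification): a Cosmos DB client span could combine the enum values and
+// helpers above. The span variable and the literal values are assumptions
+// made for the example only.
+//
+//	span.SetAttributes(
+//		DBCosmosDBOperationTypeQuery,
+//		DBCosmosDBConnectionModeGateway,
+//		DBCosmosDBContainer("anystring"),
+//		DBCosmosDBStatusCode(200),
+//	)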
It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the type
+ // of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions.
It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). 
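+//
+// A minimal usage sketch (not part of the upstream file; "span" is an assumed
+// active trace.Span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		semconv.FaaSTriggerHTTP,
+//		semconv.FaaSColdstart(true),
+//	)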
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
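+//
+// A minimal usage sketch (not part of the upstream file; "span" is an assumed
+// active trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.EnduserID("username"),
+//		semconv.EnduserRole("admin"),
+//		semconv.EnduserScope("read:message, write:files"),
+//	)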
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting of this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
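+//
+// A minimal usage sketch for the common DynamoDB attributes above (not part
+// of the upstream file; "span" is an assumed active trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBTableNames("Users"),
+//		semconv.AWSDynamoDBConsistentRead(true),
+//		semconv.AWSDynamoDBLimit(10),
+//	)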
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
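+//
+// A minimal sketch for a DynamoDB.ListTables span (not part of the upstream
+// file; "span" is an assumed active trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBExclusiveStartTable("Users"),
+//		semconv.AWSDynamoDBTableCount(20),
+//	)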
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
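+//
+// A minimal sketch for a DynamoDB.Scan span (not part of the upstream file;
+// "span" is an assumed active trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBSegment(10),
+//		semconv.AWSDynamoDBTotalSegments(100),
+//		semconv.AWSDynamoDBCount(10),
+//		semconv.AWSDynamoDBScannedCount(50),
+//	)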
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e.
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. 
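+//
+// A minimal sketch for an S3 copy-object span (not part of the upstream file;
+// "span" is an assumed active trace.Span, and the bucket and key values are
+// illustrative):
+//
+//	span.SetAttributes(
+//		semconv.AWSS3Bucket("some-bucket-name"),
+//		semconv.AWSS3Key("someFile.yml"),
+//		semconv.AWSS3CopySource("source-bucket/someFile.yml"),
+//	)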
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client_id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Recommended (If a client id is available) + // Stability: stable + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client_id") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client_id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. 
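+	// Example sketch (assuming an active trace.Span named `span`), using one
+	// of the enum values defined below:
+	//
+	//	span.SetAttributes(semconv.RPCGRPCStatusCodeOk)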
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
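+	// Example sketch for an error response (assuming an active trace.Span
+	// named `span`; the values are taken from the Examples below):
+	//
+	//	span.SetAttributes(semconv.RPCJsonrpcErrorCode(-32700),
+	//		semconv.RPCJsonrpcErrorMessage("Parse error"))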
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// Tech-specific attributes for Connect RPC.
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
+	// Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: ConditionallyRequired (If the response is not
+	// successful and an error code is available.)
+	// Stability: stable
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+	// cancelled
+	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+	// unknown
+	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+	// invalid_argument
+	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+	// deadline_exceeded
+	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+	// not_found
+	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+	// already_exists
+	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+	// permission_denied
+	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+	// resource_exhausted
+	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+	// failed_precondition
+	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+	// aborted
+	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+	// out_of_range
+	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+	// unimplemented
+	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+	// internal
+	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+	// unavailable
+	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+	// data_loss
+	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+	// unauthenticated
+	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
new file mode 100644
index 000000000..caf7249de
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Tracer creates a named tracer that implements the Tracer interface.
+// If the name is an empty string then the provider uses a default name.
+//
+// This is short for GetTracerProvider().Tracer(name, opts...)
+func Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	return GetTracerProvider().Tracer(name, opts...)
+}
+
+// GetTracerProvider returns the registered global trace provider.
+// If none is registered then an instance of NoopTracerProvider is returned.
+//
+// Use the trace provider to create a named tracer. E.g.
+// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 000000000..cb3efbb9a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,333 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. 
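+//
+// A small sketch using option constructors defined later in this file:
+// start options are folded into a SpanConfig by NewSpanStartConfig, e.g.
+//
+//	cfg := NewSpanStartConfig(WithSpanKind(SpanKindServer), WithNewRoot())
+//	_ = cfg.SpanKind() // SpanKindServer
+//	_ = cfg.NewRoot()  // true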
+type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. +type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. 
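+//
+// A small sketch (the attribute key and value are illustrative): event
+// options are folded into an EventConfig by NewEventConfig, which defaults
+// the timestamp to the call time when no timestamp option is passed, e.g.
+//
+//	ec := NewEventConfig(WithAttributes(attribute.String("key", "value")))
+//	_ = ec.Timestamp() // defaulted to time.Now()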
+type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. 
Any
+// existing parent span context will be ignored when defining the Span's trace
+// identifiers.
+func WithNewRoot() SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.newRoot = true
+		return cfg
+	})
+}
+
+// WithSpanKind sets the SpanKind of a Span.
+func WithSpanKind(kind SpanKind) SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.spanKind = kind
+		return cfg
+	})
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) TracerOption {
+	return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+		cfg.instrumentationVersion = version
+		return cfg
+	})
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+	return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+		config.attrs = attribute.NewSet(attr...)
+		return config
+	})
+}
+
+// WithSchemaURL sets the schema URL for the Tracer.
+func WithSchemaURL(schemaURL string) TracerOption {
+	return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+		cfg.schemaURL = schemaURL
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
new file mode 100644
index 000000000..76f9a083c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "context"
+
+type traceContextKeyType int
+
+const currentSpanKey traceContextKeyType = iota
+
+// ContextWithSpan returns a copy of parent with span set as the current Span.
+func ContextWithSpan(parent context.Context, span Span) context.Context {
+	return context.WithValue(parent, currentSpanKey, span)
+}
+
+// ContextWithSpanContext returns a copy of parent with sc as the current
+// Span. The Span implementation that wraps sc is non-recording and performs
+// no operations other than to return sc as the SpanContext from the
+// SpanContext method.
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
+	return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
+}
+
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
+// as a remote SpanContext and as the current Span. The Span implementation
+// that wraps rsc is non-recording and performs no operations other than to
+// return rsc as the SpanContext from the SpanContext method.
+func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context {
+	return ContextWithSpanContext(parent, rsc.WithRemote(true))
+}
+
+// SpanFromContext returns the current Span from ctx.
+//
+// If no Span is currently set in ctx an implementation of a Span that
+// performs no operations is returned.
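+//
+// A small sketch (the event name is illustrative):
+//
+//	span := trace.SpanFromContext(ctx) // a no-op Span if ctx carries none
+//	span.AddEvent("checkpoint")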
+func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpan{} + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpan{} +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 000000000..ab0346f96 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 000000000..88fcb8161 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 000000000..7cf6c7f3e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{} + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns noop implementation of Tracer. +func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{} + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpan{} + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{} + +var _ Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. 
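+// This makes it safe to derive further Tracers from any Span without nil
+// checks, e.g. (a small sketch, with `span` standing in for any Span value
+// and an illustrative tracer name):
+//
+//	tr := span.TracerProvider().Tracer("example.com/pkg")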
+func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
new file mode 100644
index 000000000..4aa94f79f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -0,0 +1,551 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"encoding/json"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+)
+
+const (
+	// FlagsSampled is a bitmask with the sampled bit set. A SpanContext
+	// with the sampling bit set means the span is sampled.
+	FlagsSampled = TraceFlags(0x01)
+
+	errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
+
+	errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equal to 32"
+	errNilTraceID errorConst = "trace-id can't be all zero"
+
+	errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equal to 16"
+	errNilSpanID errorConst = "span-id can't be all zero"
+)
+
+type errorConst string
+
+func (e errorConst) Error() string {
+	return string(e)
+}
+
+// TraceID is a unique identity of a trace.
+// nolint:revive // revive complains about stutter of `trace.TraceID`.
+type TraceID [16]byte
+
+var nilTraceID TraceID
+var _ json.Marshaler = nilTraceID
+
+// IsValid checks whether the TraceID is valid. A valid trace ID does
+// not consist of zeros only.
+func (t TraceID) IsValid() bool {
+	return !bytes.Equal(t[:], nilTraceID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceID
+// as a hex string.
+func (t TraceID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(t.String())
+}
+
+// String returns the hex string representation form of a TraceID.
+func (t TraceID) String() string {
+	return hex.EncodeToString(t[:])
+}
+
+// SpanID is a unique identity of a span in a trace.
+type SpanID [8]byte
+
+var nilSpanID SpanID
+var _ json.Marshaler = nilSpanID
+
+// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// of zeros only.
+func (s SpanID) IsValid() bool {
+	return !bytes.Equal(s[:], nilSpanID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode SpanID
+// as a hex string.
+func (s SpanID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(s.String())
+}
+
+// String returns the hex string representation form of a SpanID.
+func (s SpanID) String() string {
+	return hex.EncodeToString(s[:])
+}
+
+// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
+// the W3C trace-context specification. See more at
+// https://www.w3.org/TR/trace-context/#trace-id
+// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
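+//
+// A small sketch using the W3C example trace-id:
+//
+//	tid, err := TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+//	if err == nil {
+//		_ = tid.IsValid() // true
+//	}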
+func TraceIDFromHex(h string) (TraceID, error) { + t := TraceID{} + if len(h) != 32 { + return t, errInvalidTraceIDLength + } + + if err := decodeHex(h, t[:]); err != nil { + return t, err + } + + if !t.IsValid() { + return t, errNilTraceID + } + return t, nil +} + +// SpanIDFromHex returns a SpanID from a hex string if it is compliant +// with the w3c trace-context specification. +// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. +func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. 
+func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. +func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: methods may be added to this interface in minor releases. +type Span interface { + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // IsRecording returns the recording state of the Span. 
It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
+
+// Tracer is the creator of Spans.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Tracer interface {
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides; it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation; rather, they just use the
+// provided Tracers to instrument code.
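+//
+// A small sketch of the accept-a-provider pattern described below (the
+// tracer name is illustrative):
+//
+//	func newInstrumentation(tp TracerProvider) Tracer {
+//		return tp.Tracer("example.com/pkg")
+//	}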
+// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: methods may be added to this interface in minor releases. +type TracerProvider interface { + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 000000000..ca68a82e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,212 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
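The Span, Tracer, and TracerProvider interfaces documented above, together with the TraceState type in the tracestate.go hunk that follows, are easiest to read alongside a usage sketch. The snippet below is illustrative only and not part of this patch; the tracer name, span name, attribute key, and tracestate entries are invented for the example:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Obtain a Tracer from the globally registered TracerProvider
	// (a no-op provider unless a real one has been installed).
	tracer := otel.GetTracerProvider().Tracer("example/instrumented-pkg")

	ctx, span := tracer.Start(context.Background(), "copy-backup",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(attribute.String("bucket", "etcd-backups")))
	defer span.End()

	if err := doWork(ctx); err != nil {
		// RecordError does not change the span status; set it explicitly.
		span.RecordError(err)
		span.SetStatus(codes.Error, "copy failed")
	}

	// TraceState is immutable: Insert returns a new value, with the new
	// or updated list-member moved to the front per the W3C spec.
	ts, err := trace.ParseTraceState("vendorx=qw1")
	if err != nil {
		log.Fatal(err)
	}
	ts, _ = ts.Insert("vendory", "zx2")
	fmt.Println(ts.String()) // "vendory=zx2,vendorx=qw1"
}

func doWork(context.Context) error { return nil }
```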
+ +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" +) + +const ( + maxListMembers = 32 + + listDelimiter = "," + + // based on the W3C Trace Context specification, see + // https://www.w3.org/TR/trace-context-1/#tracestate-header + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" + errInvalidMember errorConst = "invalid tracestate list-member" + errMemberNumber errorConst = "too many list-members in tracestate" + errDuplicate errorConst = "duplicate list-member in tracestate" +) + +var ( + keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) + valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) + memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) +) + +type member struct { + Key string + Value string +} + +func newMember(key, value string) (member, error) { + if !keyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + } + return member{Key: key, Value: value}, nil +} + +func parseMember(m string) (member, error) { + matches := memberRe.FindStringSubmatch(m) + if len(matches) != 5 { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + + return member{ + Key: matches[1], + Value: matches[4], + }, nil +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return fmt.Sprintf("%s=%s", m.Key, m.Value) +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. It represents an +// immutable list consisting of key/value pairs, each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification. 
+func ParseTraceState(tracestate string) (TraceState, error) {
+	if tracestate == "" {
+		return TraceState{}, nil
+	}
+
+	wrapErr := func(err error) error {
+		return fmt.Errorf("failed to parse tracestate: %w", err)
+	}
+
+	var members []member
+	found := make(map[string]struct{})
+	for _, memberStr := range strings.Split(tracestate, listDelimiter) {
+		if len(memberStr) == 0 {
+			continue
+		}
+
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return TraceState{}, wrapErr(err)
+		}
+
+		if _, ok := found[m.Key]; ok {
+			return TraceState{}, wrapErr(errDuplicate)
+		}
+		found[m.Key] = struct{}{}
+
+		members = append(members, m)
+		if n := len(members); n > maxListMembers {
+			return TraceState{}, wrapErr(errMemberNumber)
+		}
+	}
+
+	return TraceState{list: members}, nil
+}
+
+// MarshalJSON marshals the TraceState into JSON.
+func (ts TraceState) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ts.String())
+}
+
+// String encodes the TraceState into a string compliant with the W3C
+// Trace Context specification. The returned string will be invalid if the
+// TraceState contains any invalid members.
+func (ts TraceState) String() string {
+	members := make([]string, len(ts.list))
+	for i, m := range ts.list {
+		members[i] = m.String()
+	}
+	return strings.Join(members, listDelimiter)
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+	for _, member := range ts.list {
+		if member.Key == key {
+			return member.Value
+		}
+	}
+
+	return ""
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than is allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+
+	cTS := ts.Delete(key)
+	if cTS.Len()+1 <= maxListMembers {
+		cTS.list = append(cTS.list, member{})
+	}
+	// When the number of members exceeds capacity, drop the "right-most".
+	copy(cTS.list[1:], cTS.list)
+	cTS.list[0] = m
+
+	return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+	members := make([]member, ts.Len())
+	copy(members, ts.list)
+	for i, member := range ts.list {
+		if member.Key == key {
+			members = append(members[:i], members[i+1:]...)
+			// TraceState should contain no duplicate members.
+			break
+		}
+	}
+	return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int { + return len(ts.list) +} diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh new file mode 100644 index 000000000..dbb61a422 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. These directories are different than +# directories that contain go.mod files. +printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) +done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go new file mode 100644 index 000000000..ad64e1996 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// Version is the current release version of OpenTelemetry in use. +func Version() string { + return "1.19.0" +} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 000000000..7d2127692 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,55 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module-sets: + stable-v1: + version: v1.19.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/fib + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/trace + experimental-metrics: + version: v0.42.0 + modules: + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/example/view + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + experimental-schema: + version: v0.0.7 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/go.starlark.net/LICENSE b/vendor/go.starlark.net/LICENSE new file mode 100644 index 000000000..a6609a143 --- /dev/null +++ b/vendor/go.starlark.net/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2017 The Bazel Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +3. 
Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/go.starlark.net/internal/compile/compile.go b/vendor/go.starlark.net/internal/compile/compile.go
new file mode 100644
index 000000000..888d95c56
--- /dev/null
+++ b/vendor/go.starlark.net/internal/compile/compile.go
@@ -0,0 +1,1924 @@
+// Package compile defines the Starlark bytecode compiler.
+// It is an internal package of the Starlark interpreter and is not directly accessible to clients.
+//
+// The compiler generates byte code with optional uint32 operands for a
+// virtual machine with the following components:
+//   - a program counter, which is an index into the byte code array.
+//   - an operand stack, whose maximum size is computed for each function by the compiler.
+//   - a stack of active iterators.
+//   - an array of local variables.
+//     The number of local variables and their indices are computed by the resolver.
+//     Locals (possibly including parameters) that are shared with nested functions
+//     are 'cells': their locals array slot will contain a value of type 'cell',
+//     an indirect value in a box that is explicitly read/updated by instructions.
+//   - an array of free variables, for nested functions.
+//     Free variables are a subset of the ancestors' cell variables.
+//     As with locals and cells, these are computed by the resolver.
+//   - an array of global variables, shared among all functions in the same module.
+//     All elements are initially nil.
+//   - two maps of predeclared and universal identifiers.
+//
+// Each function has a line number table that maps each program counter
+// offset to a source position, including the column number.
+//
+// Operands, logically uint32s, are encoded using little-endian 7-bit
+// varints, the top bit indicating that more bytes follow.
+//
+package compile // import "go.starlark.net/internal/compile"
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+
+	"go.starlark.net/resolve"
+	"go.starlark.net/syntax"
+)
+
+// Disassemble causes the assembly code for each function
+// to be printed to stderr as it is generated.
+var Disassemble = false
+
+const debug = false // make code generation verbose, for debugging the compiler
+
+// Increment this to force recompilation of saved bytecode files.
+const Version = 13
+
+type Opcode uint8
+
+// "x DUP x x" is a "stack picture" that describes the state of the
+// stack before and after execution of the instruction.
+//
+// OP<index> indicates an immediate operand that is an index into the
+// specified table: locals, names, freevars, constants.
+const ( + NOP Opcode = iota // - NOP - + + // stack operations + DUP // x DUP x x + DUP2 // x y DUP2 x y x y + POP // x POP - + EXCH // x y EXCH y x + + // binary comparisons + // (order must match Token) + LT + GT + GE + LE + EQL + NEQ + + // binary arithmetic + // (order must match Token) + PLUS + MINUS + STAR + SLASH + SLASHSLASH + PERCENT + AMP + PIPE + CIRCUMFLEX + LTLT + GTGT + + IN + + // unary operators + UPLUS // x UPLUS x + UMINUS // x UMINUS -x + TILDE // x TILDE ~x + + NONE // - NONE None + TRUE // - TRUE True + FALSE // - FALSE False + MANDATORY // - MANDATORY Mandatory [sentinel value for required kwonly args] + + ITERPUSH // iterable ITERPUSH - [pushes the iterator stack] + ITERPOP // - ITERPOP - [pops the iterator stack] + NOT // value NOT bool + RETURN // value RETURN - + SETINDEX // a i new SETINDEX - + INDEX // a i INDEX elem + SETDICT // dict key value SETDICT - + SETDICTUNIQ // dict key value SETDICTUNIQ - + APPEND // list elem APPEND - + SLICE // x lo hi step SLICE slice + INPLACE_ADD // x y INPLACE_ADD z where z is x+y or x.extend(y) + INPLACE_PIPE // x y INPLACE_PIPE z where z is x|y + MAKEDICT // - MAKEDICT dict + + // --- opcodes with an argument must go below this line --- + + // control flow + JMP // - JMP - + CJMP // cond CJMP - + ITERJMP // - ITERJMP elem (and fall through) [acts on topmost iterator] + // or: - ITERJMP - (and jump) + + CONSTANT // - CONSTANT value + MAKETUPLE // x1 ... xn MAKETUPLE tuple + MAKELIST // x1 ... xn MAKELIST list + MAKEFUNC // defaults+freevars MAKEFUNC fn + LOAD // from1 ... fromN module LOAD v1 ... vN + SETLOCAL // value SETLOCAL - + SETGLOBAL // value SETGLOBAL - + LOCAL // - LOCAL value + FREE // - FREE cell + FREECELL // - FREECELL value (content of FREE cell) + LOCALCELL // - LOCALCELL value (content of LOCAL cell) + SETLOCALCELL // value SETLOCALCELL - (set content of LOCAL cell) + GLOBAL // - GLOBAL value + PREDECLARED // - PREDECLARED value + UNIVERSAL // - UNIVERSAL value + ATTR // x ATTR y y = x.name + SETFIELD // x y SETFIELD - x.name = y + UNPACK // iterable UNPACK vn ... v1 + + // n>>8 is #positional args and n&0xff is #named args (pairs). + CALL // fn positional named CALL result + CALL_VAR // fn positional named *args CALL_VAR result + CALL_KW // fn positional named **kwargs CALL_KW result + CALL_VAR_KW // fn positional named *args **kwargs CALL_VAR_KW result + + OpcodeArgMin = JMP + OpcodeMax = CALL_VAR_KW +) + +// TODO(adonovan): add dynamic checks for missing opcodes in the tables below. 
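Before the opcode tables, a word on operands: the package comment above says operands are little-endian 7-bit varints whose top bit means "more bytes follow". Here is a self-contained sketch of that scheme; the function names are illustrative and not part of the package (the file's own addUint32 and argLen helpers, which additionally pad patchable jump operands to a fixed width with NOPs, appear further down):

```go
package main

import "fmt"

// putVarint appends x to code in little-endian 7-bit groups;
// the top bit of each byte signals that more bytes follow.
func putVarint(code []byte, x uint32) []byte {
	for x >= 0x80 {
		code = append(code, byte(x)|0x80)
		x >>= 7
	}
	return append(code, byte(x))
}

// getVarint decodes one operand and reports how many bytes it used.
func getVarint(code []byte) (x uint32, n int) {
	for shift := uint(0); ; shift += 7 {
		b := code[n]
		n++
		x |= uint32(b&0x7f) << shift
		if b < 0x80 {
			return x, n
		}
	}
}

func main() {
	for _, v := range []uint32{1, 127, 128, 300, 1 << 20} {
		enc := putVarint(nil, v)
		dec, n := getVarint(enc)
		fmt.Printf("%7d -> % x -> %d (%d bytes)\n", v, enc, dec, n)
	}
}
```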
+ +var opcodeNames = [...]string{ + AMP: "amp", + APPEND: "append", + ATTR: "attr", + CALL: "call", + CALL_KW: "call_kw ", + CALL_VAR: "call_var", + CALL_VAR_KW: "call_var_kw", + CIRCUMFLEX: "circumflex", + CJMP: "cjmp", + CONSTANT: "constant", + DUP2: "dup2", + DUP: "dup", + EQL: "eql", + EXCH: "exch", + FALSE: "false", + FREE: "free", + FREECELL: "freecell", + GE: "ge", + GLOBAL: "global", + GT: "gt", + GTGT: "gtgt", + IN: "in", + INDEX: "index", + INPLACE_ADD: "inplace_add", + INPLACE_PIPE: "inplace_pipe", + ITERJMP: "iterjmp", + ITERPOP: "iterpop", + ITERPUSH: "iterpush", + JMP: "jmp", + LE: "le", + LOAD: "load", + LOCAL: "local", + LOCALCELL: "localcell", + LT: "lt", + LTLT: "ltlt", + MAKEDICT: "makedict", + MAKEFUNC: "makefunc", + MAKELIST: "makelist", + MAKETUPLE: "maketuple", + MANDATORY: "mandatory", + MINUS: "minus", + NEQ: "neq", + NONE: "none", + NOP: "nop", + NOT: "not", + PERCENT: "percent", + PIPE: "pipe", + PLUS: "plus", + POP: "pop", + PREDECLARED: "predeclared", + RETURN: "return", + SETDICT: "setdict", + SETDICTUNIQ: "setdictuniq", + SETFIELD: "setfield", + SETGLOBAL: "setglobal", + SETINDEX: "setindex", + SETLOCAL: "setlocal", + SETLOCALCELL: "setlocalcell", + SLASH: "slash", + SLASHSLASH: "slashslash", + SLICE: "slice", + STAR: "star", + TILDE: "tilde", + TRUE: "true", + UMINUS: "uminus", + UNIVERSAL: "universal", + UNPACK: "unpack", + UPLUS: "uplus", +} + +const variableStackEffect = 0x7f + +// stackEffect records the effect on the size of the operand stack of +// each kind of instruction. For some instructions this requires computation. +var stackEffect = [...]int8{ + AMP: -1, + APPEND: -2, + ATTR: 0, + CALL: variableStackEffect, + CALL_KW: variableStackEffect, + CALL_VAR: variableStackEffect, + CALL_VAR_KW: variableStackEffect, + CIRCUMFLEX: -1, + CJMP: -1, + CONSTANT: +1, + DUP2: +2, + DUP: +1, + EQL: -1, + FALSE: +1, + FREE: +1, + FREECELL: +1, + GE: -1, + GLOBAL: +1, + GT: -1, + GTGT: -1, + IN: -1, + INDEX: -1, + INPLACE_ADD: -1, + INPLACE_PIPE: -1, + ITERJMP: variableStackEffect, + ITERPOP: 0, + ITERPUSH: -1, + JMP: 0, + LE: -1, + LOAD: -1, + LOCAL: +1, + LOCALCELL: +1, + LT: -1, + LTLT: -1, + MAKEDICT: +1, + MAKEFUNC: 0, + MAKELIST: variableStackEffect, + MAKETUPLE: variableStackEffect, + MANDATORY: +1, + MINUS: -1, + NEQ: -1, + NONE: +1, + NOP: 0, + NOT: 0, + PERCENT: -1, + PIPE: -1, + PLUS: -1, + POP: -1, + PREDECLARED: +1, + RETURN: -1, + SETLOCALCELL: -1, + SETDICT: -3, + SETDICTUNIQ: -3, + SETFIELD: -2, + SETGLOBAL: -1, + SETINDEX: -3, + SETLOCAL: -1, + SLASH: -1, + SLASHSLASH: -1, + SLICE: -3, + STAR: -1, + TRUE: +1, + UMINUS: 0, + UNIVERSAL: +1, + UNPACK: variableStackEffect, + UPLUS: 0, +} + +func (op Opcode) String() string { + if op < OpcodeMax { + if name := opcodeNames[op]; name != "" { + return name + } + } + return fmt.Sprintf("illegal op (%d)", op) +} + +// A Program is a Starlark file in executable form. +// +// Programs are serialized by the Program.Encode method, +// which must be updated whenever this declaration is changed. +type Program struct { + Loads []Binding // name (really, string) and position of each load stmt + Names []string // names of attributes and predeclared variables + Constants []interface{} // = string | int64 | float64 | *big.Int | Bytes + Functions []*Funcode + Globals []Binding // for error messages and tracing + Toplevel *Funcode // module initialization function +} + +// The type of a bytes literal value, to distinguish from text string. +type Bytes string + +// A Funcode is the code of a compiled Starlark function. 
+// +// Funcodes are serialized by the encoder.function method, +// which must be updated whenever this declaration is changed. +type Funcode struct { + Prog *Program + Pos syntax.Position // position of def or lambda token + Name string // name of this function + Doc string // docstring of this function + Code []byte // the byte code + pclinetab []uint16 // mapping from pc to linenum + Locals []Binding // locals, parameters first + Cells []int // indices of Locals that require cells + Freevars []Binding // for tracing + MaxStack int + NumParams int + NumKwonlyParams int + HasVarargs, HasKwargs bool + + // -- transient state -- + + lntOnce sync.Once + lnt []pclinecol // decoded line number table +} + +type pclinecol struct { + pc uint32 + line, col int32 +} + +// A Binding is the name and position of a binding identifier. +type Binding struct { + Name string + Pos syntax.Position +} + +// A pcomp holds the compiler state for a Program. +type pcomp struct { + prog *Program // what we're building + + names map[string]uint32 + constants map[interface{}]uint32 + functions map[*Funcode]uint32 +} + +// An fcomp holds the compiler state for a Funcode. +type fcomp struct { + fn *Funcode // what we're building + + pcomp *pcomp + pos syntax.Position // current position of generated code + loops []loop + block *block +} + +type loop struct { + break_, continue_ *block +} + +type block struct { + insns []insn + + // If the last insn is a RETURN, jmp and cjmp are nil. + // If the last insn is a CJMP or ITERJMP, + // cjmp and jmp are the "true" and "false" successors. + // Otherwise, jmp is the sole successor. + jmp, cjmp *block + + initialstack int // for stack depth computation + + // Used during encoding + index int // -1 => not encoded yet + addr uint32 +} + +type insn struct { + op Opcode + arg uint32 + line, col int32 +} + +// Position returns the source position for program counter pc. +func (fn *Funcode) Position(pc uint32) syntax.Position { + fn.lntOnce.Do(fn.decodeLNT) + + // Binary search to find last LNT entry not greater than pc. + // To avoid dynamic dispatch, this is a specialization of + // sort.Search using this predicate: + // !(i < len(fn.lnt)-1 && fn.lnt[i+1].pc <= pc) + n := len(fn.lnt) + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) + if !(h >= n-1 || fn.lnt[h+1].pc > pc) { + i = h + 1 + } else { + j = h + } + } + + var line, col int32 + if i < n { + line = fn.lnt[i].line + col = fn.lnt[i].col + } + + pos := fn.Pos // copy the (annoyingly inaccessible) filename + pos.Col = col + pos.Line = line + return pos +} + +// decodeLNT decodes the line number table and populates fn.lnt. +// It is called at most once. +func (fn *Funcode) decodeLNT() { + // Conceptually the table contains rows of the form + // (pc uint32, line int32, col int32), sorted by pc. + // We use a delta encoding, since the differences + // between successive pc, line, and column values + // are typically small and positive (though line and + // especially column differences may be negative). + // The delta encoding starts from + // {pc: 0, line: fn.Pos.Line, col: fn.Pos.Col}. + // + // Each entry is packed into one or more 16-bit values: + // Δpc uint4 + // Δline int5 + // Δcol int6 + // incomplete uint1 + // The top 4 bits are the unsigned delta pc. + // The next 5 bits are the signed line number delta. + // The next 6 bits are the signed column number delta. + // The bottom bit indicates that more rows follow because + // one of the deltas was maxed out. 
+ // These field widths were chosen from a sample of real programs, + // and allow >97% of rows to be encoded in a single uint16. + + fn.lnt = make([]pclinecol, 0, len(fn.pclinetab)) // a minor overapproximation + entry := pclinecol{ + pc: 0, + line: fn.Pos.Line, + col: fn.Pos.Col, + } + for _, x := range fn.pclinetab { + entry.pc += uint32(x) >> 12 + entry.line += int32((int16(x) << 4) >> (16 - 5)) // sign extend Δline + entry.col += int32((int16(x) << 9) >> (16 - 6)) // sign extend Δcol + if (x & 1) == 0 { + fn.lnt = append(fn.lnt, entry) + } + } +} + +// bindings converts resolve.Bindings to compiled form. +func bindings(bindings []*resolve.Binding) []Binding { + res := make([]Binding, len(bindings)) + for i, bind := range bindings { + res[i].Name = bind.First.Name + res[i].Pos = bind.First.NamePos + } + return res +} + +// Expr compiles an expression to a program whose toplevel function evaluates it. +func Expr(expr syntax.Expr, name string, locals []*resolve.Binding) *Program { + pos := syntax.Start(expr) + stmts := []syntax.Stmt{&syntax.ReturnStmt{Result: expr}} + return File(stmts, pos, name, locals, nil) +} + +// File compiles the statements of a file into a program. +func File(stmts []syntax.Stmt, pos syntax.Position, name string, locals, globals []*resolve.Binding) *Program { + pcomp := &pcomp{ + prog: &Program{ + Globals: bindings(globals), + }, + names: make(map[string]uint32), + constants: make(map[interface{}]uint32), + functions: make(map[*Funcode]uint32), + } + pcomp.prog.Toplevel = pcomp.function(name, pos, stmts, locals, nil) + + return pcomp.prog +} + +func (pcomp *pcomp) function(name string, pos syntax.Position, stmts []syntax.Stmt, locals, freevars []*resolve.Binding) *Funcode { + fcomp := &fcomp{ + pcomp: pcomp, + pos: pos, + fn: &Funcode{ + Prog: pcomp.prog, + Pos: pos, + Name: name, + Doc: docStringFromBody(stmts), + Locals: bindings(locals), + Freevars: bindings(freevars), + }, + } + + // Record indices of locals that require cells. + for i, local := range locals { + if local.Scope == resolve.Cell { + fcomp.fn.Cells = append(fcomp.fn.Cells, i) + } + } + + if debug { + fmt.Fprintf(os.Stderr, "start function(%s @ %s)\n", name, pos) + } + + // Convert AST to a CFG of instructions. + entry := fcomp.newBlock() + fcomp.block = entry + fcomp.stmts(stmts) + if fcomp.block != nil { + fcomp.emit(NONE) + fcomp.emit(RETURN) + } + + var oops bool // something bad happened + + setinitialstack := func(b *block, depth int) { + if b.initialstack == -1 { + b.initialstack = depth + } else if b.initialstack != depth { + fmt.Fprintf(os.Stderr, "%d: setinitialstack: depth mismatch: %d vs %d\n", + b.index, b.initialstack, depth) + oops = true + } + } + + // Linearize the CFG: + // compute order, address, and initial + // stack depth of each reachable block. + var pc uint32 + var blocks []*block + var maxstack int + var visit func(b *block) + visit = func(b *block) { + if b.index >= 0 { + return // already visited + } + b.index = len(blocks) + b.addr = pc + blocks = append(blocks, b) + + stack := b.initialstack + if debug { + fmt.Fprintf(os.Stderr, "%s block %d: (stack = %d)\n", name, b.index, stack) + } + var cjmpAddr *uint32 + var isiterjmp int + for i, insn := range b.insns { + pc++ + + // Compute size of argument. + if insn.op >= OpcodeArgMin { + switch insn.op { + case ITERJMP: + isiterjmp = 1 + fallthrough + case CJMP: + cjmpAddr = &b.insns[i].arg + pc += 4 + default: + pc += uint32(argLen(insn.arg)) + } + } + + // Compute effect on stack. 
+ se := insn.stackeffect() + if debug { + fmt.Fprintln(os.Stderr, "\t", insn.op, stack, stack+se) + } + stack += se + if stack < 0 { + fmt.Fprintf(os.Stderr, "After pc=%d: stack underflow\n", pc) + oops = true + } + if stack+isiterjmp > maxstack { + maxstack = stack + isiterjmp + } + } + + if debug { + fmt.Fprintf(os.Stderr, "successors of block %d (start=%d):\n", + b.addr, b.index) + if b.jmp != nil { + fmt.Fprintf(os.Stderr, "jmp to %d\n", b.jmp.index) + } + if b.cjmp != nil { + fmt.Fprintf(os.Stderr, "cjmp to %d\n", b.cjmp.index) + } + } + + // Place the jmp block next. + if b.jmp != nil { + // jump threading (empty cycles are impossible) + for b.jmp.insns == nil { + b.jmp = b.jmp.jmp + } + + setinitialstack(b.jmp, stack+isiterjmp) + if b.jmp.index < 0 { + // Successor is not yet visited: + // place it next and fall through. + visit(b.jmp) + } else { + // Successor already visited; + // explicit backward jump required. + pc += 5 + } + } + + // Then the cjmp block. + if b.cjmp != nil { + // jump threading (empty cycles are impossible) + for b.cjmp.insns == nil { + b.cjmp = b.cjmp.jmp + } + + setinitialstack(b.cjmp, stack) + visit(b.cjmp) + + // Patch the CJMP/ITERJMP, if present. + if cjmpAddr != nil { + *cjmpAddr = b.cjmp.addr + } + } + } + setinitialstack(entry, 0) + visit(entry) + + fn := fcomp.fn + fn.MaxStack = maxstack + + // Emit bytecode (and position table). + if Disassemble { + fmt.Fprintf(os.Stderr, "Function %s: (%d blocks, %d bytes)\n", name, len(blocks), pc) + } + fcomp.generate(blocks, pc) + + if debug { + fmt.Fprintf(os.Stderr, "code=%d maxstack=%d\n", fn.Code, fn.MaxStack) + } + + // Don't panic until we've completed printing of the function. + if oops { + panic("internal error") + } + + if debug { + fmt.Fprintf(os.Stderr, "end function(%s @ %s)\n", name, pos) + } + + return fn +} + +func docStringFromBody(body []syntax.Stmt) string { + if len(body) == 0 { + return "" + } + expr, ok := body[0].(*syntax.ExprStmt) + if !ok { + return "" + } + lit, ok := expr.X.(*syntax.Literal) + if !ok { + return "" + } + if lit.Token != syntax.STRING { + return "" + } + return lit.Value.(string) +} + +func (insn *insn) stackeffect() int { + se := int(stackEffect[insn.op]) + if se == variableStackEffect { + arg := int(insn.arg) + switch insn.op { + case CALL, CALL_KW, CALL_VAR, CALL_VAR_KW: + se = -int(2*(insn.arg&0xff) + insn.arg>>8) + if insn.op != CALL { + se-- + } + if insn.op == CALL_VAR_KW { + se-- + } + case ITERJMP: + // Stack effect differs by successor: + // +1 for jmp/false/ok + // 0 for cjmp/true/exhausted + // Handled specially in caller. + se = 0 + case MAKELIST, MAKETUPLE: + se = 1 - arg + case UNPACK: + se = arg - 1 + default: + panic(insn.op) + } + } + return se +} + +// generate emits the linear instruction stream from the CFG, +// and builds the PC-to-line number table. +func (fcomp *fcomp) generate(blocks []*block, codelen uint32) { + code := make([]byte, 0, codelen) + var pclinetab []uint16 + prev := pclinecol{ + pc: 0, + line: fcomp.fn.Pos.Line, + col: fcomp.fn.Pos.Col, + } + + for _, b := range blocks { + if Disassemble { + fmt.Fprintf(os.Stderr, "%d:\n", b.index) + } + pc := b.addr + for _, insn := range b.insns { + if insn.line != 0 { + // Instruction has a source position. Delta-encode it. + // See Funcode.Position for the encoding. 
+ for { + var incomplete uint16 + + // Δpc, uint4 + deltapc := pc - prev.pc + if deltapc > 0x0f { + deltapc = 0x0f + incomplete = 1 + } + prev.pc += deltapc + + // Δline, int5 + deltaline, ok := clip(insn.line-prev.line, -0x10, 0x0f) + if !ok { + incomplete = 1 + } + prev.line += deltaline + + // Δcol, int6 + deltacol, ok := clip(insn.col-prev.col, -0x20, 0x1f) + if !ok { + incomplete = 1 + } + prev.col += deltacol + + entry := uint16(deltapc<<12) | uint16(deltaline&0x1f)<<7 | uint16(deltacol&0x3f)<<1 | incomplete + pclinetab = append(pclinetab, entry) + if incomplete == 0 { + break + } + } + + if Disassemble { + fmt.Fprintf(os.Stderr, "\t\t\t\t\t; %s:%d:%d\n", + filepath.Base(fcomp.fn.Pos.Filename()), insn.line, insn.col) + } + } + if Disassemble { + PrintOp(fcomp.fn, pc, insn.op, insn.arg) + } + code = append(code, byte(insn.op)) + pc++ + if insn.op >= OpcodeArgMin { + if insn.op == CJMP || insn.op == ITERJMP { + code = addUint32(code, insn.arg, 4) // pad arg to 4 bytes + } else { + code = addUint32(code, insn.arg, 0) + } + pc = uint32(len(code)) + } + } + + if b.jmp != nil && b.jmp.index != b.index+1 { + addr := b.jmp.addr + if Disassemble { + fmt.Fprintf(os.Stderr, "\t%d\tjmp\t\t%d\t; block %d\n", + pc, addr, b.jmp.index) + } + code = append(code, byte(JMP)) + code = addUint32(code, addr, 4) + } + } + if len(code) != int(codelen) { + panic("internal error: wrong code length") + } + + fcomp.fn.pclinetab = pclinetab + fcomp.fn.Code = code +} + +// clip returns the value nearest x in the range [min...max], +// and whether it equals x. +func clip(x, min, max int32) (int32, bool) { + if x > max { + return max, false + } else if x < min { + return min, false + } else { + return x, true + } +} + +// addUint32 encodes x as 7-bit little-endian varint. +// TODO(adonovan): opt: steal top two bits of opcode +// to encode the number of complete bytes that follow. +func addUint32(code []byte, x uint32, min int) []byte { + end := len(code) + min + for x >= 0x80 { + code = append(code, byte(x)|0x80) + x >>= 7 + } + code = append(code, byte(x)) + // Pad the operand with NOPs to exactly min bytes. + for len(code) < end { + code = append(code, byte(NOP)) + } + return code +} + +func argLen(x uint32) int { + n := 0 + for x >= 0x80 { + n++ + x >>= 7 + } + return n + 1 +} + +// PrintOp prints an instruction. +// It is provided for debugging. +func PrintOp(fn *Funcode, pc uint32, op Opcode, arg uint32) { + if op < OpcodeArgMin { + fmt.Fprintf(os.Stderr, "\t%d\t%s\n", pc, op) + return + } + + var comment string + switch op { + case CONSTANT: + switch x := fn.Prog.Constants[arg].(type) { + case string: + comment = strconv.Quote(x) + case Bytes: + comment = "b" + strconv.Quote(string(x)) + default: + comment = fmt.Sprint(x) + } + case MAKEFUNC: + comment = fn.Prog.Functions[arg].Name + case SETLOCAL, LOCAL: + comment = fn.Locals[arg].Name + case SETGLOBAL, GLOBAL: + comment = fn.Prog.Globals[arg].Name + case ATTR, SETFIELD, PREDECLARED, UNIVERSAL: + comment = fn.Prog.Names[arg] + case FREE: + comment = fn.Freevars[arg].Name + case CALL, CALL_VAR, CALL_KW, CALL_VAR_KW: + comment = fmt.Sprintf("%d pos, %d named", arg>>8, arg&0xff) + default: + // JMP, CJMP, ITERJMP, MAKETUPLE, MAKELIST, LOAD, UNPACK: + // arg is just a number + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "\t%d\t%-10s\t%d", pc, op, arg) + if comment != "" { + fmt.Fprint(&buf, "\t; ", comment) + } + fmt.Fprintln(&buf) + os.Stderr.Write(buf.Bytes()) +} + +// newBlock returns a new block. 
+func (fcomp *fcomp) newBlock() *block {
+	return &block{index: -1, initialstack: -1}
+}
+
+// emit emits an instruction to the current block.
+func (fcomp *fcomp) emit(op Opcode) {
+	if op >= OpcodeArgMin {
+		panic("missing arg: " + op.String())
+	}
+	insn := insn{op: op, line: fcomp.pos.Line, col: fcomp.pos.Col}
+	fcomp.block.insns = append(fcomp.block.insns, insn)
+	fcomp.pos.Line = 0
+	fcomp.pos.Col = 0
+}
+
+// emit1 emits an instruction with an immediate operand.
+func (fcomp *fcomp) emit1(op Opcode, arg uint32) {
+	if op < OpcodeArgMin {
+		panic("unwanted arg: " + op.String())
+	}
+	insn := insn{op: op, arg: arg, line: fcomp.pos.Line, col: fcomp.pos.Col}
+	fcomp.block.insns = append(fcomp.block.insns, insn)
+	fcomp.pos.Line = 0
+	fcomp.pos.Col = 0
+}
+
+// jump emits a jump to the specified block.
+// On return, the current block is unset.
+func (fcomp *fcomp) jump(b *block) {
+	if b == fcomp.block {
+		panic("self-jump") // unreachable: Starlark has no arbitrary looping constructs
+	}
+	fcomp.block.jmp = b
+	fcomp.block = nil
+}
+
+// condjump emits a conditional jump (CJMP or ITERJMP)
+// to the specified true/false blocks.
+// (For ITERJMP, the cases are jmp/f/ok and cjmp/t/exhausted.)
+// On return, the current block is unset.
+func (fcomp *fcomp) condjump(op Opcode, t, f *block) {
+	if !(op == CJMP || op == ITERJMP) {
+		panic("not a conditional jump: " + op.String())
+	}
+	fcomp.emit1(op, 0) // fill in address later
+	fcomp.block.cjmp = t
+	fcomp.jump(f)
+}
+
+// nameIndex returns the index of the specified name
+// within the name pool, adding it if necessary.
+func (pcomp *pcomp) nameIndex(name string) uint32 {
+	index, ok := pcomp.names[name]
+	if !ok {
+		index = uint32(len(pcomp.prog.Names))
+		pcomp.names[name] = index
+		pcomp.prog.Names = append(pcomp.prog.Names, name)
+	}
+	return index
+}
+
+// constantIndex returns the index of the specified constant
+// within the constant pool, adding it if necessary.
+func (pcomp *pcomp) constantIndex(v interface{}) uint32 {
+	index, ok := pcomp.constants[v]
+	if !ok {
+		index = uint32(len(pcomp.prog.Constants))
+		pcomp.constants[v] = index
+		pcomp.prog.Constants = append(pcomp.prog.Constants, v)
+	}
+	return index
+}
+
+// functionIndex returns the index of the specified function
+// AST within the nestedfun pool, adding it if necessary.
+func (pcomp *pcomp) functionIndex(fn *Funcode) uint32 {
+	index, ok := pcomp.functions[fn]
+	if !ok {
+		index = uint32(len(pcomp.prog.Functions))
+		pcomp.functions[fn] = index
+		pcomp.prog.Functions = append(pcomp.prog.Functions, fn)
+	}
+	return index
+}
+
+// string emits code to push the specified string.
+func (fcomp *fcomp) string(s string) {
+	fcomp.emit1(CONSTANT, fcomp.pcomp.constantIndex(s))
+}
+
+// setPos sets the current source position.
+// It should be called prior to any operation that can fail dynamically.
+// All positions are assumed to belong to the same file.
+func (fcomp *fcomp) setPos(pos syntax.Position) {
+	fcomp.pos = pos
+}
+
+// set emits code to store the top-of-stack value
+// to the specified local, cell, or global variable.
+func (fcomp *fcomp) set(id *syntax.Ident) { + bind := id.Binding.(*resolve.Binding) + switch bind.Scope { + case resolve.Local: + fcomp.emit1(SETLOCAL, uint32(bind.Index)) + case resolve.Cell: + fcomp.emit1(SETLOCALCELL, uint32(bind.Index)) + case resolve.Global: + fcomp.emit1(SETGLOBAL, uint32(bind.Index)) + default: + log.Panicf("%s: set(%s): not global/local/cell (%d)", id.NamePos, id.Name, bind.Scope) + } +} + +// lookup emits code to push the value of the specified variable. +func (fcomp *fcomp) lookup(id *syntax.Ident) { + bind := id.Binding.(*resolve.Binding) + if bind.Scope != resolve.Universal { // (universal lookup can't fail) + fcomp.setPos(id.NamePos) + } + switch bind.Scope { + case resolve.Local: + fcomp.emit1(LOCAL, uint32(bind.Index)) + case resolve.Free: + fcomp.emit1(FREECELL, uint32(bind.Index)) + case resolve.Cell: + fcomp.emit1(LOCALCELL, uint32(bind.Index)) + case resolve.Global: + fcomp.emit1(GLOBAL, uint32(bind.Index)) + case resolve.Predeclared: + fcomp.emit1(PREDECLARED, fcomp.pcomp.nameIndex(id.Name)) + case resolve.Universal: + fcomp.emit1(UNIVERSAL, fcomp.pcomp.nameIndex(id.Name)) + default: + log.Panicf("%s: compiler.lookup(%s): scope = %d", id.NamePos, id.Name, bind.Scope) + } +} + +func (fcomp *fcomp) stmts(stmts []syntax.Stmt) { + for _, stmt := range stmts { + fcomp.stmt(stmt) + } +} + +func (fcomp *fcomp) stmt(stmt syntax.Stmt) { + switch stmt := stmt.(type) { + case *syntax.ExprStmt: + if _, ok := stmt.X.(*syntax.Literal); ok { + // Opt: don't compile doc comments only to pop them. + return + } + fcomp.expr(stmt.X) + fcomp.emit(POP) + + case *syntax.BranchStmt: + // Resolver invariant: break/continue appear only within loops. + switch stmt.Token { + case syntax.PASS: + // no-op + case syntax.BREAK: + b := fcomp.loops[len(fcomp.loops)-1].break_ + fcomp.jump(b) + fcomp.block = fcomp.newBlock() // dead code + case syntax.CONTINUE: + b := fcomp.loops[len(fcomp.loops)-1].continue_ + fcomp.jump(b) + fcomp.block = fcomp.newBlock() // dead code + } + + case *syntax.IfStmt: + // Keep consistent with CondExpr. + t := fcomp.newBlock() + f := fcomp.newBlock() + done := fcomp.newBlock() + + fcomp.ifelse(stmt.Cond, t, f) + + fcomp.block = t + fcomp.stmts(stmt.True) + fcomp.jump(done) + + fcomp.block = f + fcomp.stmts(stmt.False) + fcomp.jump(done) + + fcomp.block = done + + case *syntax.AssignStmt: + switch stmt.Op { + case syntax.EQ: + // simple assignment: x = y + fcomp.expr(stmt.RHS) + fcomp.assign(stmt.OpPos, stmt.LHS) + + case syntax.PLUS_EQ, + syntax.MINUS_EQ, + syntax.STAR_EQ, + syntax.SLASH_EQ, + syntax.SLASHSLASH_EQ, + syntax.PERCENT_EQ, + syntax.AMP_EQ, + syntax.PIPE_EQ, + syntax.CIRCUMFLEX_EQ, + syntax.LTLT_EQ, + syntax.GTGT_EQ: + // augmented assignment: x += y + + var set func() + + // Evaluate "address" of x exactly once to avoid duplicate side-effects. + switch lhs := unparen(stmt.LHS).(type) { + case *syntax.Ident: + // x = ... + fcomp.lookup(lhs) + set = func() { + fcomp.set(lhs) + } + + case *syntax.IndexExpr: + // x[y] = ... + fcomp.expr(lhs.X) + fcomp.expr(lhs.Y) + fcomp.emit(DUP2) + fcomp.setPos(lhs.Lbrack) + fcomp.emit(INDEX) + set = func() { + fcomp.setPos(lhs.Lbrack) + fcomp.emit(SETINDEX) + } + + case *syntax.DotExpr: + // x.f = ... 
+ fcomp.expr(lhs.X) + fcomp.emit(DUP) + name := fcomp.pcomp.nameIndex(lhs.Name.Name) + fcomp.setPos(lhs.Dot) + fcomp.emit1(ATTR, name) + set = func() { + fcomp.setPos(lhs.Dot) + fcomp.emit1(SETFIELD, name) + } + + default: + panic(lhs) + } + + fcomp.expr(stmt.RHS) + + // In-place x+=y and x|=y have special semantics: + // the resulting x aliases the original x. + switch stmt.Op { + case syntax.PLUS_EQ: + fcomp.setPos(stmt.OpPos) + fcomp.emit(INPLACE_ADD) + case syntax.PIPE_EQ: + fcomp.setPos(stmt.OpPos) + fcomp.emit(INPLACE_PIPE) + default: + fcomp.binop(stmt.OpPos, stmt.Op-syntax.PLUS_EQ+syntax.PLUS) + } + set() + } + + case *syntax.DefStmt: + fcomp.function(stmt.Function.(*resolve.Function)) + fcomp.set(stmt.Name) + + case *syntax.ForStmt: + // Keep consistent with ForClause. + head := fcomp.newBlock() + body := fcomp.newBlock() + tail := fcomp.newBlock() + + fcomp.expr(stmt.X) + fcomp.setPos(stmt.For) + fcomp.emit(ITERPUSH) + fcomp.jump(head) + + fcomp.block = head + fcomp.condjump(ITERJMP, tail, body) + + fcomp.block = body + fcomp.assign(stmt.For, stmt.Vars) + fcomp.loops = append(fcomp.loops, loop{break_: tail, continue_: head}) + fcomp.stmts(stmt.Body) + fcomp.loops = fcomp.loops[:len(fcomp.loops)-1] + fcomp.jump(head) + + fcomp.block = tail + fcomp.emit(ITERPOP) + + case *syntax.WhileStmt: + head := fcomp.newBlock() + body := fcomp.newBlock() + done := fcomp.newBlock() + + fcomp.jump(head) + fcomp.block = head + fcomp.ifelse(stmt.Cond, body, done) + + fcomp.block = body + fcomp.loops = append(fcomp.loops, loop{break_: done, continue_: head}) + fcomp.stmts(stmt.Body) + fcomp.loops = fcomp.loops[:len(fcomp.loops)-1] + fcomp.jump(head) + + fcomp.block = done + + case *syntax.ReturnStmt: + if stmt.Result != nil { + fcomp.expr(stmt.Result) + } else { + fcomp.emit(NONE) + } + fcomp.emit(RETURN) + fcomp.block = fcomp.newBlock() // dead code + + case *syntax.LoadStmt: + for i := range stmt.From { + fcomp.string(stmt.From[i].Name) + } + module := stmt.Module.Value.(string) + fcomp.pcomp.prog.Loads = append(fcomp.pcomp.prog.Loads, Binding{ + Name: module, + Pos: stmt.Module.TokenPos, + }) + fcomp.string(module) + fcomp.setPos(stmt.Load) + fcomp.emit1(LOAD, uint32(len(stmt.From))) + for i := range stmt.To { + fcomp.set(stmt.To[len(stmt.To)-1-i]) + } + + default: + start, _ := stmt.Span() + log.Panicf("%s: exec: unexpected statement %T", start, stmt) + } +} + +// assign implements lhs = rhs for arbitrary expressions lhs. +// RHS is on top of stack, consumed. 
+func (fcomp *fcomp) assign(pos syntax.Position, lhs syntax.Expr) { + switch lhs := lhs.(type) { + case *syntax.ParenExpr: + // (lhs) = rhs + fcomp.assign(pos, lhs.X) + + case *syntax.Ident: + // x = rhs + fcomp.set(lhs) + + case *syntax.TupleExpr: + // x, y = rhs + fcomp.assignSequence(pos, lhs.List) + + case *syntax.ListExpr: + // [x, y] = rhs + fcomp.assignSequence(pos, lhs.List) + + case *syntax.IndexExpr: + // x[y] = rhs + fcomp.expr(lhs.X) + fcomp.emit(EXCH) + fcomp.expr(lhs.Y) + fcomp.emit(EXCH) + fcomp.setPos(lhs.Lbrack) + fcomp.emit(SETINDEX) + + case *syntax.DotExpr: + // x.f = rhs + fcomp.expr(lhs.X) + fcomp.emit(EXCH) + fcomp.setPos(lhs.Dot) + fcomp.emit1(SETFIELD, fcomp.pcomp.nameIndex(lhs.Name.Name)) + + default: + panic(lhs) + } +} + +func (fcomp *fcomp) assignSequence(pos syntax.Position, lhs []syntax.Expr) { + fcomp.setPos(pos) + fcomp.emit1(UNPACK, uint32(len(lhs))) + for i := range lhs { + fcomp.assign(pos, lhs[i]) + } +} + +func (fcomp *fcomp) expr(e syntax.Expr) { + switch e := e.(type) { + case *syntax.ParenExpr: + fcomp.expr(e.X) + + case *syntax.Ident: + fcomp.lookup(e) + + case *syntax.Literal: + // e.Value is int64, float64, *bigInt, string + v := e.Value + if e.Token == syntax.BYTES { + v = Bytes(v.(string)) + } + fcomp.emit1(CONSTANT, fcomp.pcomp.constantIndex(v)) + + case *syntax.ListExpr: + for _, x := range e.List { + fcomp.expr(x) + } + fcomp.emit1(MAKELIST, uint32(len(e.List))) + + case *syntax.CondExpr: + // Keep consistent with IfStmt. + t := fcomp.newBlock() + f := fcomp.newBlock() + done := fcomp.newBlock() + + fcomp.ifelse(e.Cond, t, f) + + fcomp.block = t + fcomp.expr(e.True) + fcomp.jump(done) + + fcomp.block = f + fcomp.expr(e.False) + fcomp.jump(done) + + fcomp.block = done + + case *syntax.IndexExpr: + fcomp.expr(e.X) + fcomp.expr(e.Y) + fcomp.setPos(e.Lbrack) + fcomp.emit(INDEX) + + case *syntax.SliceExpr: + fcomp.setPos(e.Lbrack) + fcomp.expr(e.X) + if e.Lo != nil { + fcomp.expr(e.Lo) + } else { + fcomp.emit(NONE) + } + if e.Hi != nil { + fcomp.expr(e.Hi) + } else { + fcomp.emit(NONE) + } + if e.Step != nil { + fcomp.expr(e.Step) + } else { + fcomp.emit(NONE) + } + fcomp.emit(SLICE) + + case *syntax.Comprehension: + if e.Curly { + fcomp.emit(MAKEDICT) + } else { + fcomp.emit1(MAKELIST, 0) + } + fcomp.comprehension(e, 0) + + case *syntax.TupleExpr: + fcomp.tuple(e.List) + + case *syntax.DictExpr: + fcomp.emit(MAKEDICT) + for _, entry := range e.List { + entry := entry.(*syntax.DictEntry) + fcomp.emit(DUP) + fcomp.expr(entry.Key) + fcomp.expr(entry.Value) + fcomp.setPos(entry.Colon) + fcomp.emit(SETDICTUNIQ) + } + + case *syntax.UnaryExpr: + fcomp.expr(e.X) + fcomp.setPos(e.OpPos) + switch e.Op { + case syntax.MINUS: + fcomp.emit(UMINUS) + case syntax.PLUS: + fcomp.emit(UPLUS) + case syntax.NOT: + fcomp.emit(NOT) + case syntax.TILDE: + fcomp.emit(TILDE) + default: + log.Panicf("%s: unexpected unary op: %s", e.OpPos, e.Op) + } + + case *syntax.BinaryExpr: + switch e.Op { + // short-circuit operators + // TODO(adonovan): use ifelse to simplify conditions. 
+ case syntax.OR: + // x or y => if x then x else y + done := fcomp.newBlock() + y := fcomp.newBlock() + + fcomp.expr(e.X) + fcomp.emit(DUP) + fcomp.condjump(CJMP, done, y) + + fcomp.block = y + fcomp.emit(POP) // discard X + fcomp.expr(e.Y) + fcomp.jump(done) + + fcomp.block = done + + case syntax.AND: + // x and y => if x then y else x + done := fcomp.newBlock() + y := fcomp.newBlock() + + fcomp.expr(e.X) + fcomp.emit(DUP) + fcomp.condjump(CJMP, y, done) + + fcomp.block = y + fcomp.emit(POP) // discard X + fcomp.expr(e.Y) + fcomp.jump(done) + + fcomp.block = done + + case syntax.PLUS: + fcomp.plus(e) + + default: + // all other strict binary operator (includes comparisons) + fcomp.expr(e.X) + fcomp.expr(e.Y) + fcomp.binop(e.OpPos, e.Op) + } + + case *syntax.DotExpr: + fcomp.expr(e.X) + fcomp.setPos(e.Dot) + fcomp.emit1(ATTR, fcomp.pcomp.nameIndex(e.Name.Name)) + + case *syntax.CallExpr: + fcomp.call(e) + + case *syntax.LambdaExpr: + fcomp.function(e.Function.(*resolve.Function)) + + default: + start, _ := e.Span() + log.Panicf("%s: unexpected expr %T", start, e) + } +} + +type summand struct { + x syntax.Expr + plusPos syntax.Position +} + +// plus emits optimized code for ((a+b)+...)+z that avoids naive +// quadratic behavior for strings, tuples, and lists, +// and folds together adjacent literals of the same type. +func (fcomp *fcomp) plus(e *syntax.BinaryExpr) { + // Gather all the right operands of the left tree of plusses. + // A tree (((a+b)+c)+d) becomes args=[a +b +c +d]. + args := make([]summand, 0, 2) // common case: 2 operands + for plus := e; ; { + args = append(args, summand{unparen(plus.Y), plus.OpPos}) + left := unparen(plus.X) + x, ok := left.(*syntax.BinaryExpr) + if !ok || x.Op != syntax.PLUS { + args = append(args, summand{x: left}) + break + } + plus = x + } + // Reverse args to syntactic order. + for i, n := 0, len(args)/2; i < n; i++ { + j := len(args) - 1 - i + args[i], args[j] = args[j], args[i] + } + + // Fold sums of adjacent literals of the same type: ""+"", []+[], ()+(). + out := args[:0] // compact in situ + for i := 0; i < len(args); { + j := i + 1 + if code := addable(args[i].x); code != 0 { + for j < len(args) && addable(args[j].x) == code { + j++ + } + if j > i+1 { + args[i].x = add(code, args[i:j]) + } + } + out = append(out, args[i]) + i = j + } + args = out + + // Emit code for an n-ary sum (n > 0). + fcomp.expr(args[0].x) + for _, summand := range args[1:] { + fcomp.expr(summand.x) + fcomp.setPos(summand.plusPos) + fcomp.emit(PLUS) + } + + // If len(args) > 2, use of an accumulator instead of a chain of + // PLUS operations may be more efficient. + // However, no gain was measured on a workload analogous to Bazel loading; + // TODO(adonovan): opt: re-evaluate on a Bazel analysis-like workload. + // + // We cannot use a single n-ary SUM operation + // a b c SUM<3> + // because we need to report a distinct error for each + // individual '+' operation, so three additional operations are + // needed: + // + // ACCSTART => create buffer and append to it + // ACCUM => append to buffer + // ACCEND => get contents of buffer + // + // For string, list, and tuple values, the interpreter can + // optimize these operations by using a mutable buffer. + // For all other types, ACCSTART and ACCEND would behave like + // the identity function and ACCUM behaves like PLUS. + // ACCUM must correctly support user-defined operations + // such as list+foo. 
+ // + // fcomp.emit(ACCSTART) + // for _, summand := range args[1:] { + // fcomp.expr(summand.x) + // fcomp.setPos(summand.plusPos) + // fcomp.emit(ACCUM) + // } + // fcomp.emit(ACCEND) +} + +// addable reports whether e is a statically addable +// expression: a [s]tring, [b]ytes, [l]ist, or [t]uple. +func addable(e syntax.Expr) rune { + switch e := e.(type) { + case *syntax.Literal: + // TODO(adonovan): opt: support INT/FLOAT/BIGINT constant folding. + switch e.Token { + case syntax.STRING: + return 's' + case syntax.BYTES: + return 'b' + } + case *syntax.ListExpr: + return 'l' + case *syntax.TupleExpr: + return 't' + } + return 0 +} + +// add returns an expression denoting the sum of args, +// which are all addable values of the type indicated by code. +// The resulting syntax is degenerate, lacking position, etc. +func add(code rune, args []summand) syntax.Expr { + switch code { + case 's', 'b': + var buf strings.Builder + for _, arg := range args { + buf.WriteString(arg.x.(*syntax.Literal).Value.(string)) + } + tok := syntax.STRING + if code == 'b' { + tok = syntax.BYTES + } + return &syntax.Literal{Token: tok, Value: buf.String()} + case 'l': + var elems []syntax.Expr + for _, arg := range args { + elems = append(elems, arg.x.(*syntax.ListExpr).List...) + } + return &syntax.ListExpr{List: elems} + case 't': + var elems []syntax.Expr + for _, arg := range args { + elems = append(elems, arg.x.(*syntax.TupleExpr).List...) + } + return &syntax.TupleExpr{List: elems} + } + panic(code) +} + +func unparen(e syntax.Expr) syntax.Expr { + if p, ok := e.(*syntax.ParenExpr); ok { + return unparen(p.X) + } + return e +} + +func (fcomp *fcomp) binop(pos syntax.Position, op syntax.Token) { + // TODO(adonovan): simplify by assuming syntax and compiler constants align. + fcomp.setPos(pos) + switch op { + // arithmetic + case syntax.PLUS: + fcomp.emit(PLUS) + case syntax.MINUS: + fcomp.emit(MINUS) + case syntax.STAR: + fcomp.emit(STAR) + case syntax.SLASH: + fcomp.emit(SLASH) + case syntax.SLASHSLASH: + fcomp.emit(SLASHSLASH) + case syntax.PERCENT: + fcomp.emit(PERCENT) + case syntax.AMP: + fcomp.emit(AMP) + case syntax.PIPE: + fcomp.emit(PIPE) + case syntax.CIRCUMFLEX: + fcomp.emit(CIRCUMFLEX) + case syntax.LTLT: + fcomp.emit(LTLT) + case syntax.GTGT: + fcomp.emit(GTGT) + case syntax.IN: + fcomp.emit(IN) + case syntax.NOT_IN: + fcomp.emit(IN) + fcomp.emit(NOT) + + // comparisons + case syntax.EQL, + syntax.NEQ, + syntax.GT, + syntax.LT, + syntax.LE, + syntax.GE: + fcomp.emit(Opcode(op-syntax.EQL) + EQL) + + default: + log.Panicf("%s: unexpected binary op: %s", pos, op) + } +} + +func (fcomp *fcomp) call(call *syntax.CallExpr) { + // TODO(adonovan): opt: Use optimized path for calling methods + // of built-ins: x.f(...) to avoid materializing a closure. + // if dot, ok := call.Fcomp.(*syntax.DotExpr); ok { + // fcomp.expr(dot.X) + // fcomp.args(call) + // fcomp.emit1(CALL_ATTR, fcomp.name(dot.Name.Name)) + // return + // } + + // usual case + fcomp.expr(call.Fn) + op, arg := fcomp.args(call) + fcomp.setPos(call.Lparen) + fcomp.emit1(op, arg) +} + +// args emits code to push a tuple of positional arguments +// and a tuple of named arguments containing alternating keys and values. +// Either or both tuples may be empty (TODO(adonovan): optimize). +func (fcomp *fcomp) args(call *syntax.CallExpr) (op Opcode, arg uint32) { + var callmode int + // Compute the number of each kind of parameter. 
+ var p, n int // number of positional, named arguments + var varargs, kwargs syntax.Expr + for _, arg := range call.Args { + if binary, ok := arg.(*syntax.BinaryExpr); ok && binary.Op == syntax.EQ { + + // named argument (name, value) + fcomp.string(binary.X.(*syntax.Ident).Name) + fcomp.expr(binary.Y) + n++ + continue + } + if unary, ok := arg.(*syntax.UnaryExpr); ok { + if unary.Op == syntax.STAR { + callmode |= 1 + varargs = unary.X + continue + } else if unary.Op == syntax.STARSTAR { + callmode |= 2 + kwargs = unary.X + continue + } + } + + // positional argument + fcomp.expr(arg) + p++ + } + + // Python2 and Python3 both permit named arguments + // to appear both before and after a *args argument: + // f(1, 2, x=3, *[4], y=5, **dict(z=6)) + // + // They also differ in their evaluation order: + // Python2: 1 2 3 5 4 6 (*args and **kwargs evaluated last) + // Python3: 1 2 4 3 5 6 (positional args evaluated before named args) + // Starlark-in-Java historically used a third order: + // Lexical: 1 2 3 4 5 6 (all args evaluated left-to-right) + // + // After discussion in github.com/bazelbuild/starlark#13, the + // spec now requires Starlark to statically reject named + // arguments after *args (e.g. y=5), and to use Python2-style + // evaluation order. This is both easy to implement and + // consistent with lexical order: + // + // f(1, 2, x=3, *[4], **dict(z=6)) # 1 2 3 4 6 + + // *args + if varargs != nil { + fcomp.expr(varargs) + } + + // **kwargs + if kwargs != nil { + fcomp.expr(kwargs) + } + + // TODO(adonovan): avoid this with a more flexible encoding. + if p >= 256 || n >= 256 { + // resolve already checked this; should be unreachable + panic("too many arguments in call") + } + + return CALL + Opcode(callmode), uint32(p<<8 | n) +} + +func (fcomp *fcomp) tuple(elems []syntax.Expr) { + for _, elem := range elems { + fcomp.expr(elem) + } + fcomp.emit1(MAKETUPLE, uint32(len(elems))) +} + +func (fcomp *fcomp) comprehension(comp *syntax.Comprehension, clauseIndex int) { + if clauseIndex == len(comp.Clauses) { + fcomp.emit(DUP) // accumulator + if comp.Curly { + // dict: {k:v for ...} + // Parser ensures that body is of form k:v. + // Python-style set comprehensions {body for vars in x} + // are not supported. + entry := comp.Body.(*syntax.DictEntry) + fcomp.expr(entry.Key) + fcomp.expr(entry.Value) + fcomp.setPos(entry.Colon) + fcomp.emit(SETDICT) + } else { + // list: [body for vars in x] + fcomp.expr(comp.Body) + fcomp.emit(APPEND) + } + return + } + + clause := comp.Clauses[clauseIndex] + switch clause := clause.(type) { + case *syntax.IfClause: + t := fcomp.newBlock() + done := fcomp.newBlock() + fcomp.ifelse(clause.Cond, t, done) + + fcomp.block = t + fcomp.comprehension(comp, clauseIndex+1) + fcomp.jump(done) + + fcomp.block = done + return + + case *syntax.ForClause: + // Keep consistent with ForStmt. + head := fcomp.newBlock() + body := fcomp.newBlock() + tail := fcomp.newBlock() + + fcomp.expr(clause.X) + fcomp.setPos(clause.For) + fcomp.emit(ITERPUSH) + fcomp.jump(head) + + fcomp.block = head + fcomp.condjump(ITERJMP, tail, body) + + fcomp.block = body + fcomp.assign(clause.For, clause.Vars) + fcomp.comprehension(comp, clauseIndex+1) + fcomp.jump(head) + + fcomp.block = tail + fcomp.emit(ITERPOP) + return + } + + start, _ := clause.Span() + log.Panicf("%s: unexpected comprehension clause %T", start, clause) +} + +func (fcomp *fcomp) function(f *resolve.Function) { + // Evaluation of the defaults may fail, so record the position. 
+	fcomp.setPos(f.Pos)
+
+	// To reduce allocation, we emit a combined tuple
+	// for the defaults and the freevars.
+	// The function knows where to split it at run time.
+
+	// Generate tuple of parameter defaults. For:
+	//  def f(p1, p2=dp2, p3=dp3, *, k1, k2=dk2, k3, **kwargs)
+	// the tuple is:
+	//  (dp2, dp3, MANDATORY, dk2, MANDATORY).
+	ndefaults := 0
+	seenStar := false
+	for _, param := range f.Params {
+		switch param := param.(type) {
+		case *syntax.BinaryExpr:
+			fcomp.expr(param.Y)
+			ndefaults++
+		case *syntax.UnaryExpr:
+			seenStar = true // * or *args (also **kwargs)
+		case *syntax.Ident:
+			if seenStar {
+				fcomp.emit(MANDATORY)
+				ndefaults++
+			}
+		}
+	}
+
+	// Capture the cells of the function's
+	// free variables from the lexical environment.
+	for _, freevar := range f.FreeVars {
+		// Don't call fcomp.lookup because we want
+		// the cell itself, not its content.
+		switch freevar.Scope {
+		case resolve.Free:
+			fcomp.emit1(FREE, uint32(freevar.Index))
+		case resolve.Cell:
+			fcomp.emit1(LOCAL, uint32(freevar.Index))
+		}
+	}
+
+	fcomp.emit1(MAKETUPLE, uint32(ndefaults+len(f.FreeVars)))
+
+	funcode := fcomp.pcomp.function(f.Name, f.Pos, f.Body, f.Locals, f.FreeVars)
+
+	if debug {
+		// TODO(adonovan): do compilations sequentially not as a tree,
+		// to make the log easier to read.
+		// Simplify by identifying Toplevel and functionIndex 0.
+		fmt.Fprintf(os.Stderr, "resuming %s @ %s\n", fcomp.fn.Name, fcomp.pos)
+	}
+
+	// def f(a, *, b=1) has only 2 parameters.
+	numParams := len(f.Params)
+	if f.NumKwonlyParams > 0 && !f.HasVarargs {
+		numParams--
+	}
+
+	funcode.NumParams = numParams
+	funcode.NumKwonlyParams = f.NumKwonlyParams
+	funcode.HasVarargs = f.HasVarargs
+	funcode.HasKwargs = f.HasKwargs
+	fcomp.emit1(MAKEFUNC, fcomp.pcomp.functionIndex(funcode))
+}
+
+// ifelse emits a Boolean control flow decision.
+// On return, the current block is unset.
+func (fcomp *fcomp) ifelse(cond syntax.Expr, t, f *block) {
+	switch cond := cond.(type) {
+	case *syntax.UnaryExpr:
+		if cond.Op == syntax.NOT {
+			// if not x then goto t else goto f
+			//    =>
+			// if x then goto f else goto t
+			fcomp.ifelse(cond.X, f, t)
+			return
+		}
+
+	case *syntax.BinaryExpr:
+		switch cond.Op {
+		case syntax.AND:
+			// if x and y then goto t else goto f
+			//    =>
+			// if x then ifelse(y, t, f) else goto f
+			fcomp.expr(cond.X)
+			y := fcomp.newBlock()
+			fcomp.condjump(CJMP, y, f)
+
+			fcomp.block = y
+			fcomp.ifelse(cond.Y, t, f)
+			return
+
+		case syntax.OR:
+			// if x or y then goto t else goto f
+			//    =>
+			// if x then goto t else ifelse(y, t, f)
+			fcomp.expr(cond.X)
+			y := fcomp.newBlock()
+			fcomp.condjump(CJMP, t, y)
+
+			fcomp.block = y
+			fcomp.ifelse(cond.Y, t, f)
+			return
+		case syntax.NOT_IN:
+			// if x not in y then goto t else goto f
+			//    =>
+			// if x in y then goto f else goto t
+			copy := *cond
+			copy.Op = syntax.IN
+			fcomp.expr(&copy)
+			fcomp.condjump(CJMP, f, t)
+			return
+		}
+	}
+
+	// general case
+	fcomp.expr(cond)
+	fcomp.condjump(CJMP, t, f)
+}
diff --git a/vendor/go.starlark.net/internal/compile/serial.go b/vendor/go.starlark.net/internal/compile/serial.go
new file mode 100644
index 000000000..adadabfc2
--- /dev/null
+++ b/vendor/go.starlark.net/internal/compile/serial.go
@@ -0,0 +1,395 @@
+package compile
+
+// This file defines functions to read and write a compile.Program to a file.
+//
+// It is the client's responsibility to avoid version skew between the
+// compiler used to produce a file and the interpreter that consumes it.
+// The version number is provided as a constant.
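+//
+// For example, a host application that caches compiled programs on disk
+// might key each cache entry by (filename, source hash, Version), so that
+// upgrading the compiler transparently invalidates stale entries. (This
+// cache-key scheme is only illustrative; the package prescribes none.)
+//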
+// Incompatible protocol changes should also increment the version number. +// +// Encoding +// +// Program: +// "sky!" [4]byte # magic number +// str uint32le # offset of section +// version varint # must match Version +// filename string +// numloads varint +// loads []Ident +// numnames varint +// names []string +// numconsts varint +// consts []Constant +// numglobals varint +// globals []Ident +// toplevel Funcode +// numfuncs varint +// funcs []Funcode +// []byte # concatenation of all referenced strings +// EOF +// +// Funcode: +// id Ident +// code []byte +// pclinetablen varint +// pclinetab []varint +// numlocals varint +// locals []Ident +// numcells varint +// cells []int +// numfreevars varint +// freevar []Ident +// maxstack varint +// numparams varint +// numkwonlyparams varint +// hasvarargs varint (0 or 1) +// haskwargs varint (0 or 1) +// +// Ident: +// filename string +// line, col varint +// +// Constant: # type data +// type varint # 0=string string +// data ... # 1=bytes string +// # 2=int varint +// # 3=float varint (bits as uint64) +// # 4=bigint string (decimal ASCII text) +// +// The encoding starts with a four-byte magic number. +// The next four bytes are a little-endian uint32 +// that provides the offset of the string section +// at the end of the file, which contains the ordered +// concatenation of all strings referenced by the +// program. This design permits the decoder to read +// the first and second parts of the file into different +// memory allocations: the first (the encoded program) +// is transient, but the second (the strings) persists +// for the life of the Program. +// +// Within the encoded program, all strings are referred +// to by their length. As the encoder and decoder process +// the entire file sequentially, they are in lock step, +// so the start offset of each string is implicit. +// +// Program.Code is represented as a []byte slice to permit +// modification when breakpoints are set. All other strings +// are represented as strings. They all (unsafely) share the +// same backing byte slice. +// +// Aside from the str field, all integers are encoded as varints. + +import ( + "encoding/binary" + "fmt" + "math" + "math/big" + debugpkg "runtime/debug" + "unsafe" + + "go.starlark.net/syntax" +) + +const magic = "!sky" + +// Encode encodes a compiled Starlark program. +func (prog *Program) Encode() []byte { + var e encoder + e.p = append(e.p, magic...) + e.p = append(e.p, "????"...) // string data offset; filled in later + e.int(Version) + e.string(prog.Toplevel.Pos.Filename()) + e.bindings(prog.Loads) + e.int(len(prog.Names)) + for _, name := range prog.Names { + e.string(name) + } + e.int(len(prog.Constants)) + for _, c := range prog.Constants { + switch c := c.(type) { + case string: + e.int(0) + e.string(c) + case Bytes: + e.int(1) + e.string(string(c)) + case int64: + e.int(2) + e.int64(c) + case float64: + e.int(3) + e.uint64(math.Float64bits(c)) + case *big.Int: + e.int(4) + e.string(c.Text(10)) + } + } + e.bindings(prog.Globals) + e.function(prog.Toplevel) + e.int(len(prog.Functions)) + for _, fn := range prog.Functions { + e.function(fn) + } + + // Patch in the offset of the string data section. + binary.LittleEndian.PutUint32(e.p[4:8], uint32(len(e.p))) + + return append(e.p, e.s...) 
+} + +type encoder struct { + p []byte // encoded program + s []byte // strings + tmp [binary.MaxVarintLen64]byte +} + +func (e *encoder) int(x int) { + e.int64(int64(x)) +} + +func (e *encoder) int64(x int64) { + n := binary.PutVarint(e.tmp[:], x) + e.p = append(e.p, e.tmp[:n]...) +} + +func (e *encoder) uint64(x uint64) { + n := binary.PutUvarint(e.tmp[:], x) + e.p = append(e.p, e.tmp[:n]...) +} + +func (e *encoder) string(s string) { + e.int(len(s)) + e.s = append(e.s, s...) +} + +func (e *encoder) bytes(b []byte) { + e.int(len(b)) + e.s = append(e.s, b...) +} + +func (e *encoder) binding(bind Binding) { + e.string(bind.Name) + e.int(int(bind.Pos.Line)) + e.int(int(bind.Pos.Col)) +} + +func (e *encoder) bindings(binds []Binding) { + e.int(len(binds)) + for _, bind := range binds { + e.binding(bind) + } +} + +func (e *encoder) function(fn *Funcode) { + e.binding(Binding{fn.Name, fn.Pos}) + e.string(fn.Doc) + e.bytes(fn.Code) + e.int(len(fn.pclinetab)) + for _, x := range fn.pclinetab { + e.int64(int64(x)) + } + e.bindings(fn.Locals) + e.int(len(fn.Cells)) + for _, index := range fn.Cells { + e.int(index) + } + e.bindings(fn.Freevars) + e.int(fn.MaxStack) + e.int(fn.NumParams) + e.int(fn.NumKwonlyParams) + e.int(b2i(fn.HasVarargs)) + e.int(b2i(fn.HasKwargs)) +} + +func b2i(b bool) int { + if b { + return 1 + } else { + return 0 + } +} + +// DecodeProgram decodes a compiled Starlark program from data. +func DecodeProgram(data []byte) (_ *Program, err error) { + if len(data) < len(magic) { + return nil, fmt.Errorf("not a compiled module: no magic number") + } + if got := string(data[:4]); got != magic { + return nil, fmt.Errorf("not a compiled module: got magic number %q, want %q", + got, magic) + } + defer func() { + if x := recover(); x != nil { + debugpkg.PrintStack() + err = fmt.Errorf("internal error while decoding program: %v", x) + } + }() + + offset := binary.LittleEndian.Uint32(data[4:8]) + d := decoder{ + p: data[8:offset], + s: append([]byte(nil), data[offset:]...), // allocate a copy, which will persist + } + + if v := d.int(); v != Version { + return nil, fmt.Errorf("version mismatch: read %d, want %d", v, Version) + } + + filename := d.string() + d.filename = &filename + + loads := d.bindings() + + names := make([]string, d.int()) + for i := range names { + names[i] = d.string() + } + + // constants + constants := make([]interface{}, d.int()) + for i := range constants { + var c interface{} + switch d.int() { + case 0: + c = d.string() + case 1: + c = Bytes(d.string()) + case 2: + c = d.int64() + case 3: + c = math.Float64frombits(d.uint64()) + case 4: + c, _ = new(big.Int).SetString(d.string(), 10) + } + constants[i] = c + } + + globals := d.bindings() + toplevel := d.function() + funcs := make([]*Funcode, d.int()) + for i := range funcs { + funcs[i] = d.function() + } + + prog := &Program{ + Loads: loads, + Names: names, + Constants: constants, + Globals: globals, + Functions: funcs, + Toplevel: toplevel, + } + toplevel.Prog = prog + for _, f := range funcs { + f.Prog = prog + } + + if len(d.p)+len(d.s) > 0 { + return nil, fmt.Errorf("internal error: unconsumed data during decoding") + } + + return prog, nil +} + +type decoder struct { + p []byte // encoded program + s []byte // strings + filename *string // (indirect to avoid keeping decoder live) +} + +func (d *decoder) int() int { + return int(d.int64()) +} + +func (d *decoder) int64() int64 { + x, len := binary.Varint(d.p[:]) + d.p = d.p[len:] + return x +} + +func (d *decoder) uint64() uint64 { + x, len := 
binary.Uvarint(d.p[:]) + d.p = d.p[len:] + return x +} + +func (d *decoder) string() (s string) { + if slice := d.bytes(); len(slice) > 0 { + // Avoid a memory allocation for each string + // by unsafely aliasing slice. + type string struct { + data *byte + len int + } + ptr := (*string)(unsafe.Pointer(&s)) + ptr.data = &slice[0] + ptr.len = len(slice) + } + return s +} + +func (d *decoder) bytes() []byte { + len := d.int() + r := d.s[:len:len] + d.s = d.s[len:] + return r +} + +func (d *decoder) binding() Binding { + name := d.string() + line := int32(d.int()) + col := int32(d.int()) + return Binding{Name: name, Pos: syntax.MakePosition(d.filename, line, col)} +} + +func (d *decoder) bindings() []Binding { + bindings := make([]Binding, d.int()) + for i := range bindings { + bindings[i] = d.binding() + } + return bindings +} + +func (d *decoder) ints() []int { + ints := make([]int, d.int()) + for i := range ints { + ints[i] = d.int() + } + return ints +} + +func (d *decoder) bool() bool { return d.int() != 0 } + +func (d *decoder) function() *Funcode { + id := d.binding() + doc := d.string() + code := d.bytes() + pclinetab := make([]uint16, d.int()) + for i := range pclinetab { + pclinetab[i] = uint16(d.int()) + } + locals := d.bindings() + cells := d.ints() + freevars := d.bindings() + maxStack := d.int() + numParams := d.int() + numKwonlyParams := d.int() + hasVarargs := d.int() != 0 + hasKwargs := d.int() != 0 + return &Funcode{ + // Prog is filled in later. + Pos: id.Pos, + Name: id.Name, + Doc: doc, + Code: code, + pclinetab: pclinetab, + Locals: locals, + Cells: cells, + Freevars: freevars, + MaxStack: maxStack, + NumParams: numParams, + NumKwonlyParams: numKwonlyParams, + HasVarargs: hasVarargs, + HasKwargs: hasKwargs, + } +} diff --git a/vendor/go.starlark.net/internal/spell/spell.go b/vendor/go.starlark.net/internal/spell/spell.go new file mode 100644 index 000000000..7739fabaa --- /dev/null +++ b/vendor/go.starlark.net/internal/spell/spell.go @@ -0,0 +1,115 @@ +// Package spell file defines a simple spelling checker for use in attribute errors +// such as "no such field .foo; did you mean .food?". +package spell + +import ( + "strings" + "unicode" +) + +// Nearest returns the element of candidates +// nearest to x using the Levenshtein metric, +// or "" if none were promising. +func Nearest(x string, candidates []string) string { + // Ignore underscores and case when matching. + fold := func(s string) string { + return strings.Map(func(r rune) rune { + if r == '_' { + return -1 + } + return unicode.ToLower(r) + }, s) + } + + x = fold(x) + + var best string + bestD := (len(x) + 1) / 2 // allow up to 50% typos + for _, c := range candidates { + d := levenshtein(x, fold(c), bestD) + if d < bestD { + bestD = d + best = c + } + } + return best +} + +// levenshtein returns the non-negative Levenshtein edit distance +// between the byte strings x and y. +// +// If the computed distance exceeds max, +// the function may return early with an approximate value > max. +func levenshtein(x, y string, max int) int { + // This implementation is derived from one by Laurent Le Brun in + // Bazel that uses the single-row space efficiency trick + // described at bitbucket.org/clearer/iosifovich. + + // Let x be the shorter string. + if len(x) > len(y) { + x, y = y, x + } + + // Remove common prefix. 
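+	// (e.g. "fan" vs "fog" reduces to "an" vs "og" before the DP loop runs)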
+ for i := 0; i < len(x); i++ { + if x[i] != y[i] { + x = x[i:] + y = y[i:] + break + } + } + if x == "" { + return len(y) + } + + if d := abs(len(x) - len(y)); d > max { + return d // excessive length divergence + } + + row := make([]int, len(y)+1) + for i := range row { + row[i] = i + } + + for i := 1; i <= len(x); i++ { + row[0] = i + best := i + prev := i - 1 + for j := 1; j <= len(y); j++ { + a := prev + b2i(x[i-1] != y[j-1]) // substitution + b := 1 + row[j-1] // deletion + c := 1 + row[j] // insertion + k := min(a, min(b, c)) + prev, row[j] = row[j], k + best = min(best, k) + } + if best > max { + return best + } + } + return row[len(y)] +} + +func b2i(b bool) int { + if b { + return 1 + } else { + return 0 + } +} + +func min(x, y int) int { + if x < y { + return x + } else { + return y + } +} + +func abs(x int) int { + if x >= 0 { + return x + } else { + return -x + } +} diff --git a/vendor/go.starlark.net/resolve/binding.go b/vendor/go.starlark.net/resolve/binding.go new file mode 100644 index 000000000..6b99f4b97 --- /dev/null +++ b/vendor/go.starlark.net/resolve/binding.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package resolve + +import "go.starlark.net/syntax" + +// This file defines resolver data types saved in the syntax tree. +// We cannot guarantee API stability for these types +// as they are closely tied to the implementation. + +// A Binding contains resolver information about an identifer. +// The resolver populates the Binding field of each syntax.Identifier. +// The Binding ties together all identifiers that denote the same variable. +type Binding struct { + Scope Scope + + // Index records the index into the enclosing + // - {DefStmt,File}.Locals, if Scope==Local + // - DefStmt.FreeVars, if Scope==Free + // - File.Globals, if Scope==Global. + // It is zero if Scope is Predeclared, Universal, or Undefined. + Index int + + First *syntax.Ident // first binding use (iff Scope==Local/Free/Global) +} + +// The Scope of Binding indicates what kind of scope it has. +type Scope uint8 + +const ( + Undefined Scope = iota // name is not defined + Local // name is local to its function or file + Cell // name is function-local but shared with a nested function + Free // name is cell of some enclosing function + Global // name is global to module + Predeclared // name is predeclared for this module (e.g. glob) + Universal // name is universal (e.g. len) +) + +var scopeNames = [...]string{ + Undefined: "undefined", + Local: "local", + Cell: "cell", + Free: "free", + Global: "global", + Predeclared: "predeclared", + Universal: "universal", +} + +func (scope Scope) String() string { return scopeNames[scope] } + +// A Module contains resolver information about a file. +// The resolver populates the Module field of each syntax.File. +type Module struct { + Locals []*Binding // the file's (comprehension-)local variables + Globals []*Binding // the file's global variables +} + +// A Function contains resolver information about a named or anonymous function. +// The resolver populates the Function field of each syntax.DefStmt and syntax.LambdaExpr. 
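+//
+// For example, after resolving
+//
+//	def f(x): return lambda y: x + y
+//
+// the DefStmt's Function lists x among its Locals with Scope Cell
+// (because the lambda captures it), while the LambdaExpr's Function
+// lists the same binding in its FreeVars.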
+type Function struct { + Pos syntax.Position // of DEF or LAMBDA + Name string // name of def, or "lambda" + Params []syntax.Expr // param = ident | ident=expr | * | *ident | **ident + Body []syntax.Stmt // contains synthetic 'return expr' for lambda + + HasVarargs bool // whether params includes *args (convenience) + HasKwargs bool // whether params includes **kwargs (convenience) + NumKwonlyParams int // number of keyword-only optional parameters + Locals []*Binding // this function's local/cell variables, parameters first + FreeVars []*Binding // enclosing cells to capture in closure +} diff --git a/vendor/go.starlark.net/resolve/resolve.go b/vendor/go.starlark.net/resolve/resolve.go new file mode 100644 index 000000000..09b9acdea --- /dev/null +++ b/vendor/go.starlark.net/resolve/resolve.go @@ -0,0 +1,969 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package resolve defines a name-resolution pass for Starlark abstract +// syntax trees. +// +// The resolver sets the Locals and FreeVars arrays of each DefStmt and +// the LocalIndex field of each syntax.Ident that refers to a local or +// free variable. It also sets the Locals array of a File for locals +// bound by top-level comprehensions and load statements. +// Identifiers for global variables do not get an index. +package resolve // import "go.starlark.net/resolve" + +// All references to names are statically resolved. Names may be +// predeclared, global, or local to a function or file. +// File-local variables include those bound by top-level comprehensions +// and by load statements. ("Top-level" means "outside of any function".) +// The resolver maps each global name to a small integer and each local +// name to a small integer; these integers enable a fast and compact +// representation of globals and locals in the evaluator. +// +// As an optimization, the resolver classifies each predeclared name as +// either universal (e.g. None, len) or per-module (e.g. glob in Bazel's +// build language), enabling the evaluator to share the representation +// of the universal environment across all modules. +// +// The lexical environment is a tree of blocks with the file block at +// its root. The file's child blocks may be of two kinds: functions +// and comprehensions, and these may have further children of either +// kind. +// +// Python-style resolution requires multiple passes because a name is +// determined to be local to a function only if the function contains a +// "binding" use of it; similarly, a name is determined to be global (as +// opposed to predeclared) if the module contains a top-level binding use. +// Unlike ordinary top-level assignments, the bindings created by load +// statements are local to the file block. +// A non-binding use may lexically precede the binding to which it is resolved. +// In the first pass, we inspect each function, recording in +// 'uses' each identifier and the environment block in which it occurs. +// If a use of a name is binding, such as a function parameter or +// assignment, we add the name to the block's bindings mapping and add a +// local variable to the enclosing function. +// +// As we finish resolving each function, we inspect all the uses within +// that function and discard ones that were found to be function-local. 
The
+// remaining ones must be either free (local to some lexically enclosing
+// function), or top-level (global, predeclared, or file-local), but we cannot tell
+// which until we have finished inspecting the outermost enclosing
+// function. At that point, we can distinguish local from top-level names
+// (and this is when Python would compute free variables).
+//
+// However, Starlark additionally requires that all references to global
+// names are satisfied by some declaration in the current module;
+// Starlark permits a function to forward-reference a global or file-local
+// that has not been declared yet so long as it is declared before the end
+// of the module. So, instead of re-resolving the unresolved references
+// after each top-level function, we defer this until the end of the module
+// and ensure that all such references are satisfied by some definition.
+//
+// At the end of the module, we visit each of the nested function blocks
+// in bottom-up order, doing a recursive lexical lookup for each
+// unresolved name. If the name is found to be local to some enclosing
+// function, we must create a DefStmt.FreeVar (capture) parameter for
+// each intervening function. We enter these synthetic bindings into
+// the bindings map so that we create at most one freevar per name. If
+// the name was not local, we check that it was defined at module level.
+//
+// We resolve all uses of locals in the module (due to load statements
+// and comprehensions) in a similar way and compute the file's set of
+// local variables.
+//
+// Starlark enforces that all global names are assigned at most once on
+// all control flow paths by forbidding if/else statements and loops at
+// top level. A global may be used before it is defined, leading to a
+// dynamic error. However, the AllowGlobalReassign flag (really: allow
+// top-level reassign) makes the resolver allow multiple assignments to
+// a variable at top-level. It also allows if-, for-, and while-loops at
+// top-level, which in turn may make the evaluator dynamically assign
+// multiple values to a variable at top-level. (These two roles should be
+// separated.)
+
+import (
+	"fmt"
+	"log"
+	"sort"
+	"strings"
+
+	"go.starlark.net/internal/spell"
+	"go.starlark.net/syntax"
+)
+
+const debug = false
+const doesnt = "this Starlark dialect does not "
+
+// global options
+// These features are either not standard Starlark (yet), or deprecated
+// features of the BUILD language, so we put them behind flags.
+var (
+	AllowSet            = false // allow the 'set' built-in
+	AllowGlobalReassign = false // allow reassignment to top-level names; also, allow if/for/while at top-level
+	AllowRecursion      = false // allow while statements and recursive functions
+	LoadBindsGlobally   = false // load creates global not file-local bindings (deprecated)
+
+	// obsolete flags for features that are now standard. No effect.
+	AllowNestedDef = true
+	AllowLambda    = true
+	AllowFloat     = true
+	AllowBitwise   = true
+)
+
+// File resolves the specified file and records information about the
+// module in file.Module.
+//
+// The isPredeclared and isUniversal predicates report whether a name is
+// a pre-declared identifier (visible in the current module) or a
+// universal identifier (visible in every module).
+// Clients should typically pass predeclared.Has for the first and
+// starlark.Universe.Has for the second, where predeclared is the
+// module's StringDict of predeclared names and starlark.Universe is the
+// standard set of built-ins.
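+//
+// A typical call is:
+//
+//	err := resolve.File(f, predeclared.Has, starlark.Universe.Has)
+//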
+// The isUniversal predicate is supplied a parameter to avoid a cyclic
+// dependency upon starlark.Universe, not because users should ever need
+// to redefine it.
+func File(file *syntax.File, isPredeclared, isUniversal func(name string) bool) error {
+	return REPLChunk(file, nil, isPredeclared, isUniversal)
+}
+
+// REPLChunk is a generalization of the File function that supports a
+// non-empty initial global block, as occurs in a REPL.
+func REPLChunk(file *syntax.File, isGlobal, isPredeclared, isUniversal func(name string) bool) error {
+	r := newResolver(isGlobal, isPredeclared, isUniversal)
+	r.stmts(file.Stmts)
+
+	r.env.resolveLocalUses()
+
+	// At the end of the module, resolve all non-local variable references,
+	// computing closures.
+	// Function bodies may contain forward references to later global declarations.
+	r.resolveNonLocalUses(r.env)
+
+	file.Module = &Module{
+		Locals:  r.moduleLocals,
+		Globals: r.moduleGlobals,
+	}
+
+	if len(r.errors) > 0 {
+		return r.errors
+	}
+	return nil
+}
+
+// Expr resolves the specified expression.
+// It returns the local variables bound within the expression.
+//
+// The isPredeclared and isUniversal predicates behave as for the File function.
+func Expr(expr syntax.Expr, isPredeclared, isUniversal func(name string) bool) ([]*Binding, error) {
+	r := newResolver(nil, isPredeclared, isUniversal)
+	r.expr(expr)
+	r.env.resolveLocalUses()
+	r.resolveNonLocalUses(r.env) // globals & universals
+	if len(r.errors) > 0 {
+		return nil, r.errors
+	}
+	return r.moduleLocals, nil
+}
+
+// An ErrorList is a non-empty list of resolver error messages.
+type ErrorList []Error // len > 0
+
+func (e ErrorList) Error() string { return e[0].Error() }
+
+// An Error describes the nature and position of a resolver error.
+type Error struct {
+	Pos syntax.Position
+	Msg string
+}
+
+func (e Error) Error() string { return e.Pos.String() + ": " + e.Msg }
+
+func newResolver(isGlobal, isPredeclared, isUniversal func(name string) bool) *resolver {
+	file := new(block)
+	return &resolver{
+		file:          file,
+		env:           file,
+		isGlobal:      isGlobal,
+		isPredeclared: isPredeclared,
+		isUniversal:   isUniversal,
+		globals:       make(map[string]*Binding),
+		predeclared:   make(map[string]*Binding),
+	}
+}
+
+type resolver struct {
+	// env is the current local environment:
+	// a linked list of blocks, innermost first.
+	// The tail of the list is the file block.
+	env  *block
+	file *block // file block (contains load bindings)
+
+	// moduleLocals contains the local variables of the module
+	// (due to load statements and comprehensions outside any function).
+	// moduleGlobals contains the global variables of the module.
+	moduleLocals  []*Binding
+	moduleGlobals []*Binding
+
+	// globals maps each global name in the module to its binding.
+	// predeclared does the same for predeclared and universal names.
+	globals     map[string]*Binding
+	predeclared map[string]*Binding
+
+	// These predicates report whether a name is
+	// pre-declared, either in this module or universally,
+	// or already declared in the module globals (as in a REPL).
+	// isGlobal may be nil.
+	isGlobal, isPredeclared, isUniversal func(name string) bool
+
+	loops   int // number of enclosing for/while loops
+	ifstmts int // number of enclosing if statements
+
+	errors ErrorList
+}
+
+// container returns the innermost enclosing "container" block:
+// a function (function != nil) or file (function == nil).
+// Container blocks accumulate local variable bindings.
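+// Comprehension blocks are not containers: a variable bound inside a
+// comprehension is allotted a slot in the enclosing function's (or
+// file's) locals array, which is why this walk skips them.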
+func (r *resolver) container() *block { + for b := r.env; ; b = b.parent { + if b.function != nil || b == r.file { + return b + } + } +} + +func (r *resolver) push(b *block) { + r.env.children = append(r.env.children, b) + b.parent = r.env + r.env = b +} + +func (r *resolver) pop() { r.env = r.env.parent } + +type block struct { + parent *block // nil for file block + + // In the file (root) block, both these fields are nil. + function *Function // only for function blocks + comp *syntax.Comprehension // only for comprehension blocks + + // bindings maps a name to its binding. + // A local binding has an index into its innermost enclosing container's locals array. + // A free binding has an index into its innermost enclosing function's freevars array. + bindings map[string]*Binding + + // children records the child blocks of the current one. + children []*block + + // uses records all identifiers seen in this container (function or file), + // and a reference to the environment in which they appear. + // As we leave each container block, we resolve them, + // so that only free and global ones remain. + // At the end of each top-level function we compute closures. + uses []use +} + +func (b *block) bind(name string, bind *Binding) { + if b.bindings == nil { + b.bindings = make(map[string]*Binding) + } + b.bindings[name] = bind +} + +func (b *block) String() string { + if b.function != nil { + return "function block at " + fmt.Sprint(b.function.Pos) + } + if b.comp != nil { + return "comprehension block at " + fmt.Sprint(b.comp.Span()) + } + return "file block" +} + +func (r *resolver) errorf(posn syntax.Position, format string, args ...interface{}) { + r.errors = append(r.errors, Error{posn, fmt.Sprintf(format, args...)}) +} + +// A use records an identifier and the environment in which it appears. +type use struct { + id *syntax.Ident + env *block +} + +// bind creates a binding for id: a global (not file-local) +// binding at top-level, a local binding otherwise. +// At top-level, it reports an error if a global or file-local +// binding already exists, unless AllowGlobalReassign. +// It sets id.Binding to the binding (whether old or new), +// and returns whether a binding already existed. +func (r *resolver) bind(id *syntax.Ident) bool { + // Binding outside any local (comprehension/function) block? + if r.env == r.file { + bind, ok := r.file.bindings[id.Name] + if !ok { + bind, ok = r.globals[id.Name] + if !ok { + // first global binding of this name + bind = &Binding{ + First: id, + Scope: Global, + Index: len(r.moduleGlobals), + } + r.globals[id.Name] = bind + r.moduleGlobals = append(r.moduleGlobals, bind) + } + } + if ok && !AllowGlobalReassign { + r.errorf(id.NamePos, "cannot reassign %s %s declared at %s", + bind.Scope, id.Name, bind.First.NamePos) + } + id.Binding = bind + return ok + } + + return r.bindLocal(id) +} + +func (r *resolver) bindLocal(id *syntax.Ident) bool { + // Mark this name as local to current block. + // Assign it a new local (positive) index in the current container. 
+ _, ok := r.env.bindings[id.Name] + if !ok { + var locals *[]*Binding + if fn := r.container().function; fn != nil { + locals = &fn.Locals + } else { + locals = &r.moduleLocals + } + bind := &Binding{ + First: id, + Scope: Local, + Index: len(*locals), + } + r.env.bind(id.Name, bind) + *locals = append(*locals, bind) + } + + r.use(id) + return ok +} + +func (r *resolver) use(id *syntax.Ident) { + use := use{id, r.env} + + // The spec says that if there is a global binding of a name + // then all references to that name in that block refer to the + // global, even if the use precedes the def---just as for locals. + // For example, in this code, + // + // print(len); len=1; print(len) + // + // both occurrences of len refer to the len=1 binding, which + // completely shadows the predeclared len function. + // + // The rationale for these semantics, which differ from Python, + // is that the static meaning of len (a reference to a global) + // does not change depending on where it appears in the file. + // Of course, its dynamic meaning does change, from an error + // into a valid reference, so it's not clear these semantics + // have any practical advantage. + // + // In any case, the Bazel implementation lags behind the spec + // and follows Python behavior, so the first use of len refers + // to the predeclared function. This typically used in a BUILD + // file that redefines a predeclared name half way through, + // for example: + // + // proto_library(...) # built-in rule + // load("myproto.bzl", "proto_library") + // proto_library(...) # user-defined rule + // + // We will piggyback support for the legacy semantics on the + // AllowGlobalReassign flag, which is loosely related and also + // required for Bazel. + if AllowGlobalReassign && r.env == r.file { + r.useToplevel(use) + return + } + + b := r.container() + b.uses = append(b.uses, use) +} + +// useToplevel resolves use.id as a reference to a name visible at top-level. +// The use.env field captures the original environment for error reporting. +func (r *resolver) useToplevel(use use) (bind *Binding) { + id := use.id + + if prev, ok := r.file.bindings[id.Name]; ok { + // use of load-defined name in file block + bind = prev + } else if prev, ok := r.globals[id.Name]; ok { + // use of global declared by module + bind = prev + } else if r.isGlobal != nil && r.isGlobal(id.Name) { + // use of global defined in a previous REPL chunk + bind = &Binding{ + First: id, // wrong: this is not even a binding use + Scope: Global, + Index: len(r.moduleGlobals), + } + r.globals[id.Name] = bind + r.moduleGlobals = append(r.moduleGlobals, bind) + } else if prev, ok := r.predeclared[id.Name]; ok { + // repeated use of predeclared or universal + bind = prev + } else if r.isPredeclared(id.Name) { + // use of pre-declared name + bind = &Binding{Scope: Predeclared} + r.predeclared[id.Name] = bind // save it + } else if r.isUniversal(id.Name) { + // use of universal name + if !AllowSet && id.Name == "set" { + r.errorf(id.NamePos, doesnt+"support sets") + } + bind = &Binding{Scope: Universal} + r.predeclared[id.Name] = bind // save it + } else { + bind = &Binding{Scope: Undefined} + var hint string + if n := r.spellcheck(use); n != "" { + hint = fmt.Sprintf(" (did you mean %s?)", n) + } + r.errorf(id.NamePos, "undefined: %s%s", id.Name, hint) + } + id.Binding = bind + return bind +} + +// spellcheck returns the most likely misspelling of +// the name use.id in the environment use.env. 
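+// Its result feeds the hint in useToplevel's "undefined" error; e.g. a
+// module global config misspelled as confg produces:
+//
+//	undefined: confg (did you mean config?)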
+func (r *resolver) spellcheck(use use) string { + var names []string + + // locals + for b := use.env; b != nil; b = b.parent { + for name := range b.bindings { + names = append(names, name) + } + } + + // globals + // + // We have no way to enumerate the sets whose membership + // tests are isPredeclared, isUniverse, and isGlobal, + // which includes prior names in the REPL session. + for _, bind := range r.moduleGlobals { + names = append(names, bind.First.Name) + } + + sort.Strings(names) + return spell.Nearest(use.id.Name, names) +} + +// resolveLocalUses is called when leaving a container (function/module) +// block. It resolves all uses of locals/cells within that block. +func (b *block) resolveLocalUses() { + unresolved := b.uses[:0] + for _, use := range b.uses { + if bind := lookupLocal(use); bind != nil && (bind.Scope == Local || bind.Scope == Cell) { + use.id.Binding = bind + } else { + unresolved = append(unresolved, use) + } + } + b.uses = unresolved +} + +func (r *resolver) stmts(stmts []syntax.Stmt) { + for _, stmt := range stmts { + r.stmt(stmt) + } +} + +func (r *resolver) stmt(stmt syntax.Stmt) { + switch stmt := stmt.(type) { + case *syntax.ExprStmt: + r.expr(stmt.X) + + case *syntax.BranchStmt: + if r.loops == 0 && (stmt.Token == syntax.BREAK || stmt.Token == syntax.CONTINUE) { + r.errorf(stmt.TokenPos, "%s not in a loop", stmt.Token) + } + + case *syntax.IfStmt: + if !AllowGlobalReassign && r.container().function == nil { + r.errorf(stmt.If, "if statement not within a function") + } + r.expr(stmt.Cond) + r.ifstmts++ + r.stmts(stmt.True) + r.stmts(stmt.False) + r.ifstmts-- + + case *syntax.AssignStmt: + r.expr(stmt.RHS) + isAugmented := stmt.Op != syntax.EQ + r.assign(stmt.LHS, isAugmented) + + case *syntax.DefStmt: + r.bind(stmt.Name) + fn := &Function{ + Name: stmt.Name.Name, + Pos: stmt.Def, + Params: stmt.Params, + Body: stmt.Body, + } + stmt.Function = fn + r.function(fn, stmt.Def) + + case *syntax.ForStmt: + if !AllowGlobalReassign && r.container().function == nil { + r.errorf(stmt.For, "for loop not within a function") + } + r.expr(stmt.X) + const isAugmented = false + r.assign(stmt.Vars, isAugmented) + r.loops++ + r.stmts(stmt.Body) + r.loops-- + + case *syntax.WhileStmt: + if !AllowRecursion { + r.errorf(stmt.While, doesnt+"support while loops") + } + if !AllowGlobalReassign && r.container().function == nil { + r.errorf(stmt.While, "while loop not within a function") + } + r.expr(stmt.Cond) + r.loops++ + r.stmts(stmt.Body) + r.loops-- + + case *syntax.ReturnStmt: + if r.container().function == nil { + r.errorf(stmt.Return, "return statement not within a function") + } + if stmt.Result != nil { + r.expr(stmt.Result) + } + + case *syntax.LoadStmt: + // A load statement may not be nested in any other statement. + if r.container().function != nil { + r.errorf(stmt.Load, "load statement within a function") + } else if r.loops > 0 { + r.errorf(stmt.Load, "load statement within a loop") + } else if r.ifstmts > 0 { + r.errorf(stmt.Load, "load statement within a conditional") + } + + for i, from := range stmt.From { + if from.Name == "" { + r.errorf(from.NamePos, "load: empty identifier") + continue + } + if from.Name[0] == '_' { + r.errorf(from.NamePos, "load: names with leading underscores are not exported: %s", from.Name) + } + + id := stmt.To[i] + if LoadBindsGlobally { + r.bind(id) + } else if r.bindLocal(id) && !AllowGlobalReassign { + // "Global" in AllowGlobalReassign is a misnomer for "toplevel". 
+ // Sadly we can't report the previous declaration + // as id.Binding may not be set yet. + r.errorf(id.NamePos, "cannot reassign top-level %s", id.Name) + } + } + + default: + log.Panicf("unexpected stmt %T", stmt) + } +} + +func (r *resolver) assign(lhs syntax.Expr, isAugmented bool) { + switch lhs := lhs.(type) { + case *syntax.Ident: + // x = ... + r.bind(lhs) + + case *syntax.IndexExpr: + // x[i] = ... + r.expr(lhs.X) + r.expr(lhs.Y) + + case *syntax.DotExpr: + // x.f = ... + r.expr(lhs.X) + + case *syntax.TupleExpr: + // (x, y) = ... + if isAugmented { + r.errorf(syntax.Start(lhs), "can't use tuple expression in augmented assignment") + } + for _, elem := range lhs.List { + r.assign(elem, isAugmented) + } + + case *syntax.ListExpr: + // [x, y, z] = ... + if isAugmented { + r.errorf(syntax.Start(lhs), "can't use list expression in augmented assignment") + } + for _, elem := range lhs.List { + r.assign(elem, isAugmented) + } + + case *syntax.ParenExpr: + r.assign(lhs.X, isAugmented) + + default: + name := strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", lhs), "*syntax.")) + r.errorf(syntax.Start(lhs), "can't assign to %s", name) + } +} + +func (r *resolver) expr(e syntax.Expr) { + switch e := e.(type) { + case *syntax.Ident: + r.use(e) + + case *syntax.Literal: + + case *syntax.ListExpr: + for _, x := range e.List { + r.expr(x) + } + + case *syntax.CondExpr: + r.expr(e.Cond) + r.expr(e.True) + r.expr(e.False) + + case *syntax.IndexExpr: + r.expr(e.X) + r.expr(e.Y) + + case *syntax.DictEntry: + r.expr(e.Key) + r.expr(e.Value) + + case *syntax.SliceExpr: + r.expr(e.X) + if e.Lo != nil { + r.expr(e.Lo) + } + if e.Hi != nil { + r.expr(e.Hi) + } + if e.Step != nil { + r.expr(e.Step) + } + + case *syntax.Comprehension: + // The 'in' operand of the first clause (always a ForClause) + // is resolved in the outer block; consider: [x for x in x]. + clause := e.Clauses[0].(*syntax.ForClause) + r.expr(clause.X) + + // A list/dict comprehension defines a new lexical block. + // Locals defined within the block will be allotted + // distinct slots in the locals array of the innermost + // enclosing container (function/module) block. 
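+		// For example, in
+		//	def f(): return [x*x for x in range(3)]
+		// x occupies a slot in f's locals array rather than in a
+		// frame of its own.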
+ r.push(&block{comp: e}) + + const isAugmented = false + r.assign(clause.Vars, isAugmented) + + for _, clause := range e.Clauses[1:] { + switch clause := clause.(type) { + case *syntax.IfClause: + r.expr(clause.Cond) + case *syntax.ForClause: + r.assign(clause.Vars, isAugmented) + r.expr(clause.X) + } + } + r.expr(e.Body) // body may be *DictEntry + r.pop() + + case *syntax.TupleExpr: + for _, x := range e.List { + r.expr(x) + } + + case *syntax.DictExpr: + for _, entry := range e.List { + entry := entry.(*syntax.DictEntry) + r.expr(entry.Key) + r.expr(entry.Value) + } + + case *syntax.UnaryExpr: + r.expr(e.X) + + case *syntax.BinaryExpr: + r.expr(e.X) + r.expr(e.Y) + + case *syntax.DotExpr: + r.expr(e.X) + // ignore e.Name + + case *syntax.CallExpr: + r.expr(e.Fn) + var seenVarargs, seenKwargs bool + var seenName map[string]bool + var n, p int + for _, arg := range e.Args { + pos, _ := arg.Span() + if unop, ok := arg.(*syntax.UnaryExpr); ok && unop.Op == syntax.STARSTAR { + // **kwargs + if seenKwargs { + r.errorf(pos, "multiple **kwargs not allowed") + } + seenKwargs = true + r.expr(arg) + } else if ok && unop.Op == syntax.STAR { + // *args + if seenKwargs { + r.errorf(pos, "*args may not follow **kwargs") + } else if seenVarargs { + r.errorf(pos, "multiple *args not allowed") + } + seenVarargs = true + r.expr(arg) + } else if binop, ok := arg.(*syntax.BinaryExpr); ok && binop.Op == syntax.EQ { + // k=v + n++ + if seenKwargs { + r.errorf(pos, "keyword argument may not follow **kwargs") + } else if seenVarargs { + r.errorf(pos, "keyword argument may not follow *args") + } + x := binop.X.(*syntax.Ident) + if seenName[x.Name] { + r.errorf(x.NamePos, "keyword argument %q is repeated", x.Name) + } else { + if seenName == nil { + seenName = make(map[string]bool) + } + seenName[x.Name] = true + } + r.expr(binop.Y) + } else { + // positional argument + p++ + if seenVarargs { + r.errorf(pos, "positional argument may not follow *args") + } else if seenKwargs { + r.errorf(pos, "positional argument may not follow **kwargs") + } else if len(seenName) > 0 { + r.errorf(pos, "positional argument may not follow named") + } + r.expr(arg) + } + } + + // Fail gracefully if compiler-imposed limit is exceeded. + if p >= 256 { + pos, _ := e.Span() + r.errorf(pos, "%v positional arguments in call, limit is 255", p) + } + if n >= 256 { + pos, _ := e.Span() + r.errorf(pos, "%v keyword arguments in call, limit is 255", n) + } + + case *syntax.LambdaExpr: + fn := &Function{ + Name: "lambda", + Pos: e.Lambda, + Params: e.Params, + Body: []syntax.Stmt{&syntax.ReturnStmt{Result: e.Body}}, + } + e.Function = fn + r.function(fn, e.Lambda) + + case *syntax.ParenExpr: + r.expr(e.X) + + default: + log.Panicf("unexpected expr %T", e) + } +} + +func (r *resolver) function(function *Function, pos syntax.Position) { + // Resolve defaults in enclosing environment. + for _, param := range function.Params { + if binary, ok := param.(*syntax.BinaryExpr); ok { + r.expr(binary.Y) + } + } + + // Enter function block. + b := &block{function: function} + r.push(b) + + var seenOptional bool + var star *syntax.UnaryExpr // * or *args param + var starStar *syntax.Ident // **kwargs ident + var numKwonlyParams int + for _, param := range function.Params { + switch param := param.(type) { + case *syntax.Ident: + // e.g. 
x + if starStar != nil { + r.errorf(param.NamePos, "required parameter may not follow **%s", starStar.Name) + } else if star != nil { + numKwonlyParams++ + } else if seenOptional { + r.errorf(param.NamePos, "required parameter may not follow optional") + } + if r.bind(param) { + r.errorf(param.NamePos, "duplicate parameter: %s", param.Name) + } + + case *syntax.BinaryExpr: + // e.g. y=dflt + if starStar != nil { + r.errorf(param.OpPos, "optional parameter may not follow **%s", starStar.Name) + } else if star != nil { + numKwonlyParams++ + } + if id := param.X.(*syntax.Ident); r.bind(id) { + r.errorf(param.OpPos, "duplicate parameter: %s", id.Name) + } + seenOptional = true + + case *syntax.UnaryExpr: + // * or *args or **kwargs + if param.Op == syntax.STAR { + if starStar != nil { + r.errorf(param.OpPos, "* parameter may not follow **%s", starStar.Name) + } else if star != nil { + r.errorf(param.OpPos, "multiple * parameters not allowed") + } else { + star = param + } + } else { + if starStar != nil { + r.errorf(param.OpPos, "multiple ** parameters not allowed") + } + starStar = param.X.(*syntax.Ident) + } + } + } + + // Bind the *args and **kwargs parameters at the end, + // so that regular parameters a/b/c are contiguous and + // there is no hole for the "*": + // def f(a, b, *args, c=0, **kwargs) + // def f(a, b, *, c=0, **kwargs) + if star != nil { + if id, _ := star.X.(*syntax.Ident); id != nil { + // *args + if r.bind(id) { + r.errorf(id.NamePos, "duplicate parameter: %s", id.Name) + } + function.HasVarargs = true + } else if numKwonlyParams == 0 { + r.errorf(star.OpPos, "bare * must be followed by keyword-only parameters") + } + } + if starStar != nil { + if r.bind(starStar) { + r.errorf(starStar.NamePos, "duplicate parameter: %s", starStar.Name) + } + function.HasKwargs = true + } + + function.NumKwonlyParams = numKwonlyParams + r.stmts(function.Body) + + // Resolve all uses of this function's local vars, + // and keep just the remaining uses of free/global vars. + b.resolveLocalUses() + + // Leave function block. + r.pop() + + // References within the function body to globals are not + // resolved until the end of the module. +} + +func (r *resolver) resolveNonLocalUses(b *block) { + // First resolve inner blocks. + for _, child := range b.children { + r.resolveNonLocalUses(child) + } + for _, use := range b.uses { + use.id.Binding = r.lookupLexical(use, use.env) + } +} + +// lookupLocal looks up an identifier within its immediately enclosing function. +func lookupLocal(use use) *Binding { + for env := use.env; env != nil; env = env.parent { + if bind, ok := env.bindings[use.id.Name]; ok { + if bind.Scope == Free { + // shouldn't exist till later + log.Panicf("%s: internal error: %s, %v", use.id.NamePos, use.id.Name, bind) + } + return bind // found + } + if env.function != nil { + break + } + } + return nil // not found in this function +} + +// lookupLexical looks up an identifier use.id within its lexically enclosing environment. +// The use.env field captures the original environment for error reporting. +func (r *resolver) lookupLexical(use use, env *block) (bind *Binding) { + if debug { + fmt.Printf("lookupLexical %s in %s = ...\n", use.id.Name, env) + defer func() { fmt.Printf("= %v\n", bind) }() + } + + // Is this the file block? + if env == r.file { + return r.useToplevel(use) // file-local, global, predeclared, or not found + } + + // Defined in this block? + bind, ok := env.bindings[use.id.Name] + if !ok { + // Defined in parent block? 
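+		// (Recursing here may thread a capture through several
+		// intervening functions: each level gains a Free binding
+		// below, and the original Local is converted to a Cell.)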
+ bind = r.lookupLexical(use, env.parent) + if env.function != nil && (bind.Scope == Local || bind.Scope == Free || bind.Scope == Cell) { + // Found in parent block, which belongs to enclosing function. + // Add the parent's binding to the function's freevars, + // and add a new 'free' binding to the inner function's block, + // and turn the parent's local into cell. + if bind.Scope == Local { + bind.Scope = Cell + } + index := len(env.function.FreeVars) + env.function.FreeVars = append(env.function.FreeVars, bind) + bind = &Binding{ + First: bind.First, + Scope: Free, + Index: index, + } + if debug { + fmt.Printf("creating freevar %v in function at %s: %s\n", + len(env.function.FreeVars), env.function.Pos, use.id.Name) + } + } + + // Memoize, to avoid duplicate free vars + // and redundant global (failing) lookups. + env.bind(use.id.Name, bind) + } + return bind +} diff --git a/vendor/go.starlark.net/starlark/debug.go b/vendor/go.starlark.net/starlark/debug.go new file mode 100644 index 000000000..22a21240f --- /dev/null +++ b/vendor/go.starlark.net/starlark/debug.go @@ -0,0 +1,42 @@ +package starlark + +import "go.starlark.net/syntax" + +// This file defines an experimental API for the debugging tools. +// Some of these declarations expose details of internal packages. +// (The debugger makes liberal use of exported fields of unexported types.) +// Breaking changes may occur without notice. + +// Local returns the value of the i'th local variable. +// It may be nil if not yet assigned. +// +// Local may be called only for frames whose Callable is a *Function (a +// function defined by Starlark source code), and only while the frame +// is active; it will panic otherwise. +// +// This function is provided only for debugging tools. +// +// THIS API IS EXPERIMENTAL AND MAY CHANGE WITHOUT NOTICE. +func (fr *frame) Local(i int) Value { return fr.locals[i] } + +// DebugFrame is the debugger API for a frame of the interpreter's call stack. +// +// Most applications have no need for this API; use CallFrame instead. +// +// Clients must not retain a DebugFrame nor call any of its methods once +// the current built-in call has returned or execution has resumed +// after a breakpoint as this may have unpredictable effects, including +// but not limited to retention of object that would otherwise be garbage. +type DebugFrame interface { + Callable() Callable // returns the frame's function + Local(i int) Value // returns the value of the (Starlark) frame's ith local variable + Position() syntax.Position // returns the current position of execution in this frame +} + +// DebugFrame returns the debugger interface for +// the specified frame of the interpreter's call stack. +// Frame numbering is as for Thread.CallFrame. +// +// This function is intended for use in debugging tools. +// Most applications should have no need for it; use CallFrame instead. +func (thread *Thread) DebugFrame(depth int) DebugFrame { return thread.frameAt(depth) } diff --git a/vendor/go.starlark.net/starlark/empty.s b/vendor/go.starlark.net/starlark/empty.s new file mode 100644 index 000000000..3b8216999 --- /dev/null +++ b/vendor/go.starlark.net/starlark/empty.s @@ -0,0 +1,3 @@ +// The presence of this file allows the package to use the +// "go:linkname" hack to call non-exported functions in the +// Go runtime, such as hardware-accelerated string hashing. 
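+//
+// (Without at least one assembly file, the Go compiler would build the
+// package in "complete" mode and reject the body-less declarations that
+// the linkname trick relies on; this empty file disables that check.)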
diff --git a/vendor/go.starlark.net/starlark/eval.go b/vendor/go.starlark.net/starlark/eval.go new file mode 100644 index 000000000..949cb934d --- /dev/null +++ b/vendor/go.starlark.net/starlark/eval.go @@ -0,0 +1,1648 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package starlark + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "math/big" + "sort" + "strings" + "sync/atomic" + "time" + "unicode" + "unicode/utf8" + "unsafe" + + "go.starlark.net/internal/compile" + "go.starlark.net/internal/spell" + "go.starlark.net/resolve" + "go.starlark.net/syntax" +) + +// A Thread contains the state of a Starlark thread, +// such as its call stack and thread-local storage. +// The Thread is threaded throughout the evaluator. +type Thread struct { + // Name is an optional name that describes the thread, for debugging. + Name string + + // stack is the stack of (internal) call frames. + stack []*frame + + // Print is the client-supplied implementation of the Starlark + // 'print' function. If nil, fmt.Fprintln(os.Stderr, msg) is + // used instead. + Print func(thread *Thread, msg string) + + // Load is the client-supplied implementation of module loading. + // Repeated calls with the same module name must return the same + // module environment or error. + // The error message need not include the module name. + // + // See example_test.go for some example implementations of Load. + Load func(thread *Thread, module string) (StringDict, error) + + // OnMaxSteps is called when the thread reaches the limit set by SetMaxExecutionSteps. + // The default behavior is to call thread.Cancel("too many steps"). + OnMaxSteps func(thread *Thread) + + // Steps a count of abstract computation steps executed + // by this thread. It is incremented by the interpreter. It may be used + // as a measure of the approximate cost of Starlark execution, by + // computing the difference in its value before and after a computation. + // + // The precise meaning of "step" is not specified and may change. + Steps, maxSteps uint64 + + // cancelReason records the reason from the first call to Cancel. + cancelReason *string + + // locals holds arbitrary "thread-local" Go values belonging to the client. + // They are accessible to the client but not to any Starlark program. + locals map[string]interface{} + + // proftime holds the accumulated execution time since the last profile event. + proftime time.Duration +} + +// ExecutionSteps returns the current value of Steps. +func (thread *Thread) ExecutionSteps() uint64 { + return thread.Steps +} + +// SetMaxExecutionSteps sets a limit on the number of Starlark +// computation steps that may be executed by this thread. If the +// thread's step counter exceeds this limit, the interpreter calls +// the optional OnMaxSteps function or the default behavior +// of calling thread.Cancel("too many steps"). +func (thread *Thread) SetMaxExecutionSteps(max uint64) { + thread.maxSteps = max +} + +// Uncancel resets the cancellation state. +// +// Unlike most methods of Thread, it is safe to call Uncancel from any +// goroutine, even if the thread is actively executing. +func (thread *Thread) Uncancel() { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&thread.cancelReason)), nil) +} + +// Cancel causes execution of Starlark code in the specified thread to +// promptly fail with an EvalError that includes the specified reason. 
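+// For example, a host can impose a wall-clock timeout from another
+// goroutine, since Cancel is safe to call concurrently:
+//
+//	go func() {
+//		time.Sleep(timeout)
+//		thread.Cancel("timeout")
+//	}()
+//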
+// There may be a delay before the interpreter observes the cancellation +// if the thread is currently in a call to a built-in function. +// +// Call [Uncancel] to reset the cancellation state. +// +// Unlike most methods of Thread, it is safe to call Cancel from any +// goroutine, even if the thread is actively executing. +func (thread *Thread) Cancel(reason string) { + // Atomically set cancelReason, preserving earlier reason if any. + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&thread.cancelReason)), nil, unsafe.Pointer(&reason)) +} + +// SetLocal sets the thread-local value associated with the specified key. +// It must not be called after execution begins. +func (thread *Thread) SetLocal(key string, value interface{}) { + if thread.locals == nil { + thread.locals = make(map[string]interface{}) + } + thread.locals[key] = value +} + +// Local returns the thread-local value associated with the specified key. +func (thread *Thread) Local(key string) interface{} { + return thread.locals[key] +} + +// CallFrame returns a copy of the specified frame of the callstack. +// It should only be used in built-ins called from Starlark code. +// Depth 0 means the frame of the built-in itself, 1 is its caller, and so on. +// +// It is equivalent to CallStack().At(depth), but more efficient. +func (thread *Thread) CallFrame(depth int) CallFrame { + return thread.frameAt(depth).asCallFrame() +} + +func (thread *Thread) frameAt(depth int) *frame { + return thread.stack[len(thread.stack)-1-depth] +} + +// CallStack returns a new slice containing the thread's stack of call frames. +func (thread *Thread) CallStack() CallStack { + frames := make([]CallFrame, len(thread.stack)) + for i, fr := range thread.stack { + frames[i] = fr.asCallFrame() + } + return frames +} + +// CallStackDepth returns the number of frames in the current call stack. +func (thread *Thread) CallStackDepth() int { return len(thread.stack) } + +// A StringDict is a mapping from names to values, and represents +// an environment such as the global variables of a module. +// It is not a true starlark.Value. +type StringDict map[string]Value + +// Keys returns a new sorted slice of d's keys. +func (d StringDict) Keys() []string { + names := make([]string, 0, len(d)) + for name := range d { + names = append(names, name) + } + sort.Strings(names) + return names +} + +func (d StringDict) String() string { + buf := new(strings.Builder) + buf.WriteByte('{') + sep := "" + for _, name := range d.Keys() { + buf.WriteString(sep) + buf.WriteString(name) + buf.WriteString(": ") + writeValue(buf, d[name], nil) + sep = ", " + } + buf.WriteByte('}') + return buf.String() +} + +func (d StringDict) Freeze() { + for _, v := range d { + v.Freeze() + } +} + +// Has reports whether the dictionary contains the specified key. +func (d StringDict) Has(key string) bool { _, ok := d[key]; return ok } + +// A frame records a call to a Starlark function (including module toplevel) +// or a built-in function or method. +type frame struct { + callable Callable // current function (or toplevel) or built-in + pc uint32 // program counter (Starlark frames only) + locals []Value // local variables (Starlark frames only) + spanStart int64 // start time of current profiler span +} + +// Position returns the source position of the current point of execution in this frame. 
+func (fr *frame) Position() syntax.Position { + switch c := fr.callable.(type) { + case *Function: + // Starlark function + return c.funcode.Position(fr.pc) + case callableWithPosition: + // If a built-in Callable defines + // a Position method, use it. + return c.Position() + } + return syntax.MakePosition(&builtinFilename, 0, 0) +} + +var builtinFilename = "" + +// Function returns the frame's function or built-in. +func (fr *frame) Callable() Callable { return fr.callable } + +// A CallStack is a stack of call frames, outermost first. +type CallStack []CallFrame + +// At returns a copy of the frame at depth i. +// At(0) returns the topmost frame. +func (stack CallStack) At(i int) CallFrame { return stack[len(stack)-1-i] } + +// Pop removes and returns the topmost frame. +func (stack *CallStack) Pop() CallFrame { + last := len(*stack) - 1 + top := (*stack)[last] + *stack = (*stack)[:last] + return top +} + +// String returns a user-friendly description of the stack. +func (stack CallStack) String() string { + out := new(strings.Builder) + if len(stack) > 0 { + fmt.Fprintf(out, "Traceback (most recent call last):\n") + } + for _, fr := range stack { + fmt.Fprintf(out, " %s: in %s\n", fr.Pos, fr.Name) + } + return out.String() +} + +// An EvalError is a Starlark evaluation error and +// a copy of the thread's stack at the moment of the error. +type EvalError struct { + Msg string + CallStack CallStack + cause error +} + +// A CallFrame represents the function name and current +// position of execution of an enclosing call frame. +type CallFrame struct { + Name string + Pos syntax.Position +} + +func (fr *frame) asCallFrame() CallFrame { + return CallFrame{ + Name: fr.Callable().Name(), + Pos: fr.Position(), + } +} + +func (thread *Thread) evalError(err error) *EvalError { + return &EvalError{ + Msg: err.Error(), + CallStack: thread.CallStack(), + cause: err, + } +} + +func (e *EvalError) Error() string { return e.Msg } + +// Backtrace returns a user-friendly error message describing the stack +// of calls that led to this error. +func (e *EvalError) Backtrace() string { + // If the topmost stack frame is a built-in function, + // remove it from the stack and add print "Error in fn:". + stack := e.CallStack + suffix := "" + if last := len(stack) - 1; last >= 0 && stack[last].Pos.Filename() == builtinFilename { + suffix = " in " + stack[last].Name + stack = stack[:last] + } + return fmt.Sprintf("%sError%s: %s", stack, suffix, e.Msg) +} + +func (e *EvalError) Unwrap() error { return e.cause } + +// A Program is a compiled Starlark program. +// +// Programs are immutable, and contain no Values. +// A Program may be created by parsing a source file (see SourceProgram) +// or by loading a previously saved compiled program (see CompiledProgram). +type Program struct { + compiled *compile.Program +} + +// CompilerVersion is the version number of the protocol for compiled +// files. Applications must not run programs compiled by one version +// with an interpreter at another version, and should thus incorporate +// the compiler version into the cache key when reusing compiled code. +const CompilerVersion = compile.Version + +// Filename returns the name of the file from which this program was loaded. +func (prog *Program) Filename() string { return prog.compiled.Toplevel.Pos.Filename() } + +func (prog *Program) String() string { return prog.Filename() } + +// NumLoads returns the number of load statements in the compiled program. 
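Callers typically type-assert the error from ExecFile or Call to *EvalError and show Backtrace() rather than the bare message. A sketch:

    package main

    import (
        "fmt"

        "go.starlark.net/starlark"
    )

    func main() {
        thread := &starlark.Thread{Name: "bt"}
        src := "def f():\n  return 1 // 0\nf()\n"
        _, err := starlark.ExecFile(thread, "crash.star", src, nil)
        if evalErr, ok := err.(*starlark.EvalError); ok {
            // Backtrace yields the full "Traceback (most recent call last)"
            // form; Error() would yield only the final message.
            fmt.Println(evalErr.Backtrace())
        }
    }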
+func (prog *Program) NumLoads() int { return len(prog.compiled.Loads) } + +// Load(i) returns the name and position of the i'th module directly +// loaded by this one, where 0 <= i < NumLoads(). +// The name is unresolved---exactly as it appears in the source. +func (prog *Program) Load(i int) (string, syntax.Position) { + id := prog.compiled.Loads[i] + return id.Name, id.Pos +} + +// WriteTo writes the compiled module to the specified output stream. +func (prog *Program) Write(out io.Writer) error { + data := prog.compiled.Encode() + _, err := out.Write(data) + return err +} + +// ExecFile parses, resolves, and executes a Starlark file in the +// specified global environment, which may be modified during execution. +// +// Thread is the state associated with the Starlark thread. +// +// The filename and src parameters are as for syntax.Parse: +// filename is the name of the file to execute, +// and the name that appears in error messages; +// src is an optional source of bytes to use +// instead of filename. +// +// predeclared defines the predeclared names specific to this module. +// Execution does not modify this dictionary, though it may mutate +// its values. +// +// If ExecFile fails during evaluation, it returns an *EvalError +// containing a backtrace. +func ExecFile(thread *Thread, filename string, src interface{}, predeclared StringDict) (StringDict, error) { + // Parse, resolve, and compile a Starlark source file. + _, mod, err := SourceProgram(filename, src, predeclared.Has) + if err != nil { + return nil, err + } + + g, err := mod.Init(thread, predeclared) + g.Freeze() + return g, err +} + +// SourceProgram produces a new program by parsing, resolving, +// and compiling a Starlark source file. +// On success, it returns the parsed file and the compiled program. +// The filename and src parameters are as for syntax.Parse. +// +// The isPredeclared predicate reports whether a name is +// a pre-declared identifier of the current module. +// Its typical value is predeclared.Has, +// where predeclared is a StringDict of pre-declared values. +func SourceProgram(filename string, src interface{}, isPredeclared func(string) bool) (*syntax.File, *Program, error) { + f, err := syntax.Parse(filename, src, 0) + if err != nil { + return nil, nil, err + } + prog, err := FileProgram(f, isPredeclared) + return f, prog, err +} + +// FileProgram produces a new program by resolving, +// and compiling the Starlark source file syntax tree. +// On success, it returns the compiled program. +// +// Resolving a syntax tree mutates it. +// Do not call FileProgram more than once on the same file. +// +// The isPredeclared predicate reports whether a name is +// a pre-declared identifier of the current module. +// Its typical value is predeclared.Has, +// where predeclared is a StringDict of pre-declared values. +func FileProgram(f *syntax.File, isPredeclared func(string) bool) (*Program, error) { + if err := resolve.File(f, isPredeclared, Universe.Has); err != nil { + return nil, err + } + + var pos syntax.Position + if len(f.Stmts) > 0 { + pos = syntax.Start(f.Stmts[0]) + } else { + pos = syntax.MakePosition(&f.Path, 1, 1) + } + + module := f.Module.(*resolve.Module) + compiled := compile.File(f.Stmts, pos, "", module.Locals, module.Globals) + + return &Program{compiled}, nil +} + +// CompiledProgram produces a new program from the representation +// of a compiled program previously saved by Program.Write. 
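SourceProgram, Program.Write, and CompiledProgram (together with Program.Init, defined just below) support a compile-once, run-many workflow; per the CompilerVersion note above, any cache key for the encoded bytes should include that constant. A sketch:

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "go.starlark.net/starlark"
    )

    func main() {
        predeclared := starlark.StringDict{"greeting": starlark.String("hello")}

        // Parse, resolve, and compile once.
        src := `msg = greeting + ", world"`
        _, prog, err := starlark.SourceProgram("lib.star", src, predeclared.Has)
        if err != nil {
            log.Fatal(err)
        }

        // Serialize the compiled form...
        var buf bytes.Buffer
        if err := prog.Write(&buf); err != nil {
            log.Fatal(err)
        }

        // ...then reload and execute it later.
        prog2, err := starlark.CompiledProgram(&buf)
        if err != nil {
            log.Fatal(err)
        }
        globals, err := prog2.Init(&starlark.Thread{Name: "run"}, predeclared)
        if err != nil {
            log.Fatal(err)
        }
        globals.Freeze()
        fmt.Println(globals["msg"]) // "hello, world"
    }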
+func CompiledProgram(in io.Reader) (*Program, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, err + } + compiled, err := compile.DecodeProgram(data) + if err != nil { + return nil, err + } + return &Program{compiled}, nil +} + +// Init creates a set of global variables for the program, +// executes the toplevel code of the specified program, +// and returns a new, unfrozen dictionary of the globals. +func (prog *Program) Init(thread *Thread, predeclared StringDict) (StringDict, error) { + toplevel := makeToplevelFunction(prog.compiled, predeclared) + + _, err := Call(thread, toplevel, nil, nil) + + // Convert the global environment to a map. + // We return a (partial) map even in case of error. + return toplevel.Globals(), err +} + +// ExecREPLChunk compiles and executes file f in the specified thread +// and global environment. This is a variant of ExecFile specialized to +// the needs of a REPL, in which a sequence of input chunks, each +// syntactically a File, manipulates the same set of module globals, +// which are not frozen after execution. +// +// This function is intended to support only go.starlark.net/repl. +// Its API stability is not guaranteed. +func ExecREPLChunk(f *syntax.File, thread *Thread, globals StringDict) error { + var predeclared StringDict + + // -- variant of FileProgram -- + + if err := resolve.REPLChunk(f, globals.Has, predeclared.Has, Universe.Has); err != nil { + return err + } + + var pos syntax.Position + if len(f.Stmts) > 0 { + pos = syntax.Start(f.Stmts[0]) + } else { + pos = syntax.MakePosition(&f.Path, 1, 1) + } + + module := f.Module.(*resolve.Module) + compiled := compile.File(f.Stmts, pos, "", module.Locals, module.Globals) + prog := &Program{compiled} + + // -- variant of Program.Init -- + + toplevel := makeToplevelFunction(prog.compiled, predeclared) + + // Initialize module globals from parameter. + for i, id := range prog.compiled.Globals { + if v := globals[id.Name]; v != nil { + toplevel.module.globals[i] = v + } + } + + _, err := Call(thread, toplevel, nil, nil) + + // Reflect changes to globals back to parameter, even after an error. + for i, id := range prog.compiled.Globals { + if v := toplevel.module.globals[i]; v != nil { + globals[id.Name] = v + } + } + + return err +} + +func makeToplevelFunction(prog *compile.Program, predeclared StringDict) *Function { + // Create the Starlark value denoted by each program constant c. + constants := make([]Value, len(prog.Constants)) + for i, c := range prog.Constants { + var v Value + switch c := c.(type) { + case int64: + v = MakeInt64(c) + case *big.Int: + v = MakeBigInt(c) + case string: + v = String(c) + case compile.Bytes: + v = Bytes(c) + case float64: + v = Float(c) + default: + log.Panicf("unexpected constant %T: %v", c, c) + } + constants[i] = v + } + + return &Function{ + funcode: prog.Toplevel, + module: &module{ + program: prog, + predeclared: predeclared, + globals: make([]Value, len(prog.Globals)), + constants: constants, + }, + } +} + +// Eval parses, resolves, and evaluates an expression within the +// specified (predeclared) environment. +// +// Evaluation cannot mutate the environment dictionary itself, +// though it may modify variables reachable from the dictionary. +// +// The filename and src parameters are as for syntax.Parse. +// +// If Eval fails during evaluation, it returns an *EvalError +// containing a backtrace. 
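Eval (defined next) is the one-shot form for expressions. A sketch:

    package main

    import (
        "fmt"
        "log"

        "go.starlark.net/starlark"
    )

    func main() {
        thread := &starlark.Thread{Name: "eval"}
        env := starlark.StringDict{
            "x": starlark.MakeInt(6),
            "y": starlark.MakeInt(7),
        }
        v, err := starlark.Eval(thread, "<expr>", "x * y", env)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v) // 42
    }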
+func Eval(thread *Thread, filename string, src interface{}, env StringDict) (Value, error) { + expr, err := syntax.ParseExpr(filename, src, 0) + if err != nil { + return nil, err + } + f, err := makeExprFunc(expr, env) + if err != nil { + return nil, err + } + return Call(thread, f, nil, nil) +} + +// EvalExpr resolves and evaluates an expression within the +// specified (predeclared) environment. +// Evaluating a comma-separated list of expressions yields a tuple value. +// +// Resolving an expression mutates it. +// Do not call EvalExpr more than once for the same expression. +// +// Evaluation cannot mutate the environment dictionary itself, +// though it may modify variables reachable from the dictionary. +// +// If Eval fails during evaluation, it returns an *EvalError +// containing a backtrace. +func EvalExpr(thread *Thread, expr syntax.Expr, env StringDict) (Value, error) { + fn, err := makeExprFunc(expr, env) + if err != nil { + return nil, err + } + return Call(thread, fn, nil, nil) +} + +// ExprFunc returns a no-argument function +// that evaluates the expression whose source is src. +func ExprFunc(filename string, src interface{}, env StringDict) (*Function, error) { + expr, err := syntax.ParseExpr(filename, src, 0) + if err != nil { + return nil, err + } + return makeExprFunc(expr, env) +} + +// makeExprFunc returns a no-argument function whose body is expr. +func makeExprFunc(expr syntax.Expr, env StringDict) (*Function, error) { + locals, err := resolve.Expr(expr, env.Has, Universe.Has) + if err != nil { + return nil, err + } + + return makeToplevelFunction(compile.Expr(expr, "", locals), env), nil +} + +// The following functions are primitive operations of the byte code interpreter. + +// list += iterable +func listExtend(x *List, y Iterable) { + if ylist, ok := y.(*List); ok { + // fast path: list += list + x.elems = append(x.elems, ylist.elems...) + } else { + iter := y.Iterate() + defer iter.Done() + var z Value + for iter.Next(&z) { + x.elems = append(x.elems, z) + } + } +} + +// getAttr implements x.dot. +func getAttr(x Value, name string) (Value, error) { + hasAttr, ok := x.(HasAttrs) + if !ok { + return nil, fmt.Errorf("%s has no .%s field or method", x.Type(), name) + } + + var errmsg string + v, err := hasAttr.Attr(name) + if err == nil { + if v != nil { + return v, nil // success + } + // (nil, nil) => generic error + errmsg = fmt.Sprintf("%s has no .%s field or method", x.Type(), name) + } else if nsa, ok := err.(NoSuchAttrError); ok { + errmsg = string(nsa) + } else { + return nil, err // return error as is + } + + // add spelling hint + if n := spell.Nearest(name, hasAttr.AttrNames()); n != "" { + errmsg = fmt.Sprintf("%s (did you mean .%s?)", errmsg, n) + } + + return nil, fmt.Errorf("%s", errmsg) +} + +// setField implements x.name = y. +func setField(x Value, name string, y Value) error { + if x, ok := x.(HasSetField); ok { + err := x.SetField(name, y) + if _, ok := err.(NoSuchAttrError); ok { + // No such field: check spelling. + if n := spell.Nearest(name, x.AttrNames()); n != "" { + err = fmt.Errorf("%s (did you mean .%s?)", err, n) + } + } + return err + } + + return fmt.Errorf("can't assign to .%s field of %s", name, x.Type()) +} + +// getIndex implements x[y]. 
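When the same expression runs many times, ExprFunc avoids reparsing and re-resolving: compile once, then Call repeatedly. As documented above, the environment dictionary itself cannot be mutated by evaluation, but values reachable from it can change between calls. A sketch:

    package main

    import (
        "fmt"
        "log"

        "go.starlark.net/starlark"
    )

    func main() {
        items := starlark.NewList(nil)
        env := starlark.StringDict{"items": items}

        fn, err := starlark.ExprFunc("<expr>", "len(items) * 2", env)
        if err != nil {
            log.Fatal(err)
        }

        thread := &starlark.Thread{Name: "expr"}
        for i := 0; i < 3; i++ {
            if err := items.Append(starlark.MakeInt(i)); err != nil {
                log.Fatal(err)
            }
            v, err := starlark.Call(thread, fn, nil, nil)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(v) // 2, then 4, then 6
        }
    }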
+func getIndex(x, y Value) (Value, error) { + switch x := x.(type) { + case Mapping: // dict + z, found, err := x.Get(y) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("key %v not in %s", y, x.Type()) + } + return z, nil + + case Indexable: // string, list, tuple + n := x.Len() + i, err := AsInt32(y) + if err != nil { + return nil, fmt.Errorf("%s index: %s", x.Type(), err) + } + origI := i + if i < 0 { + i += n + } + if i < 0 || i >= n { + return nil, outOfRange(origI, n, x) + } + return x.Index(i), nil + } + return nil, fmt.Errorf("unhandled index operation %s[%s]", x.Type(), y.Type()) +} + +func outOfRange(i, n int, x Value) error { + if n == 0 { + return fmt.Errorf("index %d out of range: empty %s", i, x.Type()) + } else { + return fmt.Errorf("%s index %d out of range [%d:%d]", x.Type(), i, -n, n-1) + } +} + +// setIndex implements x[y] = z. +func setIndex(x, y, z Value) error { + switch x := x.(type) { + case HasSetKey: + if err := x.SetKey(y, z); err != nil { + return err + } + + case HasSetIndex: + n := x.Len() + i, err := AsInt32(y) + if err != nil { + return err + } + origI := i + if i < 0 { + i += n + } + if i < 0 || i >= n { + return outOfRange(origI, n, x) + } + return x.SetIndex(i, z) + + default: + return fmt.Errorf("%s value does not support item assignment", x.Type()) + } + return nil +} + +// Unary applies a unary operator (+, -, ~, not) to its operand. +func Unary(op syntax.Token, x Value) (Value, error) { + // The NOT operator is not customizable. + if op == syntax.NOT { + return !x.Truth(), nil + } + + // Int, Float, and user-defined types + if x, ok := x.(HasUnary); ok { + // (nil, nil) => unhandled + y, err := x.Unary(op) + if y != nil || err != nil { + return y, err + } + } + + return nil, fmt.Errorf("unknown unary op: %s %s", op, x.Type()) +} + +// Binary applies a strict binary operator (not AND or OR) to its operands. +// For equality tests or ordered comparisons, use Compare instead. +func Binary(op syntax.Token, x, y Value) (Value, error) { + switch op { + case syntax.PLUS: + switch x := x.(type) { + case String: + if y, ok := y.(String); ok { + return x + y, nil + } + case Int: + switch y := y.(type) { + case Int: + return x.Add(y), nil + case Float: + xf, err := x.finiteFloat() + if err != nil { + return nil, err + } + return xf + y, nil + } + case Float: + switch y := y.(type) { + case Float: + return x + y, nil + case Int: + yf, err := y.finiteFloat() + if err != nil { + return nil, err + } + return x + yf, nil + } + case *List: + if y, ok := y.(*List); ok { + z := make([]Value, 0, x.Len()+y.Len()) + z = append(z, x.elems...) + z = append(z, y.elems...) + return NewList(z), nil + } + case Tuple: + if y, ok := y.(Tuple); ok { + z := make(Tuple, 0, len(x)+len(y)) + z = append(z, x...) + z = append(z, y...) 
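Binary and Unary are exported, so hosts can apply Starlark operator semantics to values directly, including the Int/Float mixing handled above. A sketch:

    package main

    import (
        "fmt"
        "log"

        "go.starlark.net/starlark"
        "go.starlark.net/syntax"
    )

    func main() {
        sum, err := starlark.Binary(syntax.PLUS, starlark.MakeInt(2), starlark.Float(0.5))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(sum) // 2.5

        neg, err := starlark.Unary(syntax.MINUS, starlark.MakeInt(42))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(neg) // -42
    }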
+ return z, nil + } + } + + case syntax.MINUS: + switch x := x.(type) { + case Int: + switch y := y.(type) { + case Int: + return x.Sub(y), nil + case Float: + xf, err := x.finiteFloat() + if err != nil { + return nil, err + } + return xf - y, nil + } + case Float: + switch y := y.(type) { + case Float: + return x - y, nil + case Int: + yf, err := y.finiteFloat() + if err != nil { + return nil, err + } + return x - yf, nil + } + } + + case syntax.STAR: + switch x := x.(type) { + case Int: + switch y := y.(type) { + case Int: + return x.Mul(y), nil + case Float: + xf, err := x.finiteFloat() + if err != nil { + return nil, err + } + return xf * y, nil + case String: + return stringRepeat(y, x) + case Bytes: + return bytesRepeat(y, x) + case *List: + elems, err := tupleRepeat(Tuple(y.elems), x) + if err != nil { + return nil, err + } + return NewList(elems), nil + case Tuple: + return tupleRepeat(y, x) + } + case Float: + switch y := y.(type) { + case Float: + return x * y, nil + case Int: + yf, err := y.finiteFloat() + if err != nil { + return nil, err + } + return x * yf, nil + } + case String: + if y, ok := y.(Int); ok { + return stringRepeat(x, y) + } + case Bytes: + if y, ok := y.(Int); ok { + return bytesRepeat(x, y) + } + case *List: + if y, ok := y.(Int); ok { + elems, err := tupleRepeat(Tuple(x.elems), y) + if err != nil { + return nil, err + } + return NewList(elems), nil + } + case Tuple: + if y, ok := y.(Int); ok { + return tupleRepeat(x, y) + } + + } + + case syntax.SLASH: + switch x := x.(type) { + case Int: + xf, err := x.finiteFloat() + if err != nil { + return nil, err + } + switch y := y.(type) { + case Int: + yf, err := y.finiteFloat() + if err != nil { + return nil, err + } + if yf == 0.0 { + return nil, fmt.Errorf("floating-point division by zero") + } + return xf / yf, nil + case Float: + if y == 0.0 { + return nil, fmt.Errorf("floating-point division by zero") + } + return xf / y, nil + } + case Float: + switch y := y.(type) { + case Float: + if y == 0.0 { + return nil, fmt.Errorf("floating-point division by zero") + } + return x / y, nil + case Int: + yf, err := y.finiteFloat() + if err != nil { + return nil, err + } + if yf == 0.0 { + return nil, fmt.Errorf("floating-point division by zero") + } + return x / yf, nil + } + } + + case syntax.SLASHSLASH: + switch x := x.(type) { + case Int: + switch y := y.(type) { + case Int: + if y.Sign() == 0 { + return nil, fmt.Errorf("floored division by zero") + } + return x.Div(y), nil + case Float: + xf, err := x.finiteFloat() + if err != nil { + return nil, err + } + if y == 0.0 { + return nil, fmt.Errorf("floored division by zero") + } + return floor(xf / y), nil + } + case Float: + switch y := y.(type) { + case Float: + if y == 0.0 { + return nil, fmt.Errorf("floored division by zero") + } + return floor(x / y), nil + case Int: + yf, err := y.finiteFloat() + if err != nil { + return nil, err + } + if yf == 0.0 { + return nil, fmt.Errorf("floored division by zero") + } + return floor(x / yf), nil + } + } + + case syntax.PERCENT: + switch x := x.(type) { + case Int: + switch y := y.(type) { + case Int: + if y.Sign() == 0 { + return nil, fmt.Errorf("integer modulo by zero") + } + return x.Mod(y), nil + case Float: + xf, err := x.finiteFloat() + if err != nil { + return nil, err + } + if y == 0 { + return nil, fmt.Errorf("floating-point modulo by zero") + } + return xf.Mod(y), nil + } + case Float: + switch y := y.(type) { + case Float: + if y == 0.0 { + return nil, fmt.Errorf("floating-point modulo by zero") + } + return 
x.Mod(y), nil + case Int: + if y.Sign() == 0 { + return nil, fmt.Errorf("floating-point modulo by zero") + } + yf, err := y.finiteFloat() + if err != nil { + return nil, err + } + return x.Mod(yf), nil + } + case String: + return interpolate(string(x), y) + } + + case syntax.NOT_IN: + z, err := Binary(syntax.IN, x, y) + if err != nil { + return nil, err + } + return !z.Truth(), nil + + case syntax.IN: + switch y := y.(type) { + case *List: + for _, elem := range y.elems { + if eq, err := Equal(elem, x); err != nil { + return nil, err + } else if eq { + return True, nil + } + } + return False, nil + case Tuple: + for _, elem := range y { + if eq, err := Equal(elem, x); err != nil { + return nil, err + } else if eq { + return True, nil + } + } + return False, nil + case Mapping: // e.g. dict + // Ignore error from Get as we cannot distinguish true + // errors (value cycle, type error) from "key not found". + _, found, _ := y.Get(x) + return Bool(found), nil + case *Set: + ok, err := y.Has(x) + return Bool(ok), err + case String: + needle, ok := x.(String) + if !ok { + return nil, fmt.Errorf("'in ' requires string as left operand, not %s", x.Type()) + } + return Bool(strings.Contains(string(y), string(needle))), nil + case Bytes: + switch needle := x.(type) { + case Bytes: + return Bool(strings.Contains(string(y), string(needle))), nil + case Int: + var b byte + if err := AsInt(needle, &b); err != nil { + return nil, fmt.Errorf("int in bytes: %s", err) + } + return Bool(strings.IndexByte(string(y), b) >= 0), nil + default: + return nil, fmt.Errorf("'in bytes' requires bytes or int as left operand, not %s", x.Type()) + } + case rangeValue: + i, err := NumberToInt(x) + if err != nil { + return nil, fmt.Errorf("'in ' requires integer as left operand, not %s", x.Type()) + } + return Bool(y.contains(i)), nil + } + + case syntax.PIPE: + switch x := x.(type) { + case Int: + if y, ok := y.(Int); ok { + return x.Or(y), nil + } + + case *Dict: // union + if y, ok := y.(*Dict); ok { + return x.Union(y), nil + } + + case *Set: // union + if y, ok := y.(*Set); ok { + iter := Iterate(y) + defer iter.Done() + return x.Union(iter) + } + } + + case syntax.AMP: + switch x := x.(type) { + case Int: + if y, ok := y.(Int); ok { + return x.And(y), nil + } + case *Set: // intersection + if y, ok := y.(*Set); ok { + set := new(Set) + if x.Len() > y.Len() { + x, y = y, x // opt: range over smaller set + } + for xe := x.ht.head; xe != nil; xe = xe.next { + // Has, Insert cannot fail here. 
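The same entry point covers membership tests and the set operators. A sketch of the PIPE (union) and IN cases above, built with the public Set API:

    package main

    import (
        "fmt"
        "log"

        "go.starlark.net/starlark"
        "go.starlark.net/syntax"
    )

    func main() {
        xs, ys := starlark.NewSet(2), starlark.NewSet(2)
        for _, v := range []int{1, 2} {
            if err := xs.Insert(starlark.MakeInt(v)); err != nil {
                log.Fatal(err)
            }
        }
        for _, v := range []int{2, 3} {
            if err := ys.Insert(starlark.MakeInt(v)); err != nil {
                log.Fatal(err)
            }
        }

        union, err := starlark.Binary(syntax.PIPE, xs, ys)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(union) // set([1, 2, 3])

        found, err := starlark.Binary(syntax.IN, starlark.MakeInt(3), xs)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(found) // False
    }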
+ if found, _ := y.Has(xe.key); found { + set.Insert(xe.key) + } + } + return set, nil + } + } + + case syntax.CIRCUMFLEX: + switch x := x.(type) { + case Int: + if y, ok := y.(Int); ok { + return x.Xor(y), nil + } + case *Set: // symmetric difference + if y, ok := y.(*Set); ok { + set := new(Set) + for xe := x.ht.head; xe != nil; xe = xe.next { + if found, _ := y.Has(xe.key); !found { + set.Insert(xe.key) + } + } + for ye := y.ht.head; ye != nil; ye = ye.next { + if found, _ := x.Has(ye.key); !found { + set.Insert(ye.key) + } + } + return set, nil + } + } + + case syntax.LTLT, syntax.GTGT: + if x, ok := x.(Int); ok { + y, err := AsInt32(y) + if err != nil { + return nil, err + } + if y < 0 { + return nil, fmt.Errorf("negative shift count: %v", y) + } + if op == syntax.LTLT { + if y >= 512 { + return nil, fmt.Errorf("shift count too large: %v", y) + } + return x.Lsh(uint(y)), nil + } else { + return x.Rsh(uint(y)), nil + } + } + + default: + // unknown operator + goto unknown + } + + // user-defined types + // (nil, nil) => unhandled + if x, ok := x.(HasBinary); ok { + z, err := x.Binary(op, y, Left) + if z != nil || err != nil { + return z, err + } + } + if y, ok := y.(HasBinary); ok { + z, err := y.Binary(op, x, Right) + if z != nil || err != nil { + return z, err + } + } + + // unsupported operand types +unknown: + return nil, fmt.Errorf("unknown binary op: %s %s %s", x.Type(), op, y.Type()) +} + +// It's always possible to overeat in small bites but we'll +// try to stop someone swallowing the world in one gulp. +const maxAlloc = 1 << 30 + +func tupleRepeat(elems Tuple, n Int) (Tuple, error) { + if len(elems) == 0 { + return nil, nil + } + i, err := AsInt32(n) + if err != nil { + return nil, fmt.Errorf("repeat count %s too large", n) + } + if i < 1 { + return nil, nil + } + // Inv: i > 0, len > 0 + sz := len(elems) * i + if sz < 0 || sz >= maxAlloc { // sz < 0 => overflow + // Don't print sz. + return nil, fmt.Errorf("excessive repeat (%d * %d elements)", len(elems), i) + } + res := make([]Value, sz) + // copy elems into res, doubling each time + x := copy(res, elems) + for x < len(res) { + copy(res[x:], res[:x]) + x *= 2 + } + return res, nil +} + +func bytesRepeat(b Bytes, n Int) (Bytes, error) { + res, err := stringRepeat(String(b), n) + return Bytes(res), err +} + +func stringRepeat(s String, n Int) (String, error) { + if s == "" { + return "", nil + } + i, err := AsInt32(n) + if err != nil { + return "", fmt.Errorf("repeat count %s too large", n) + } + if i < 1 { + return "", nil + } + // Inv: i > 0, len > 0 + sz := len(s) * i + if sz < 0 || sz >= maxAlloc { // sz < 0 => overflow + // Don't print sz. + return "", fmt.Errorf("excessive repeat (%d * %d elements)", len(s), i) + } + return String(strings.Repeat(string(s), i)), nil +} + +// Call calls the function fn with the specified positional and keyword arguments. +func Call(thread *Thread, fn Value, args Tuple, kwargs []Tuple) (Value, error) { + c, ok := fn.(Callable) + if !ok { + return nil, fmt.Errorf("invalid call of non-function (%s)", fn.Type()) + } + + // Allocate and push a new frame. + var fr *frame + // Optimization: use slack portion of thread.stack + // slice as a freelist of empty frames. 
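Call also accepts Go-defined callables. A sketch pairing it with NewBuiltin and UnpackArgs (the greet function and its parameters are invented); note the kwargs shape, a slice of name/value pairs:

    package main

    import (
        "fmt"
        "log"
        "strings"

        "go.starlark.net/starlark"
    )

    func main() {
        greet := starlark.NewBuiltin("greet",
            func(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
                var name string
                var shout bool
                if err := starlark.UnpackArgs(b.Name(), args, kwargs, "name", &name, "shout?", &shout); err != nil {
                    return nil, err
                }
                msg := "hello, " + name
                if shout {
                    msg = strings.ToUpper(msg)
                }
                return starlark.String(msg), nil
            })

        thread := &starlark.Thread{Name: "call"}
        v, err := starlark.Call(thread, greet,
            starlark.Tuple{starlark.String("world")},
            []starlark.Tuple{{starlark.String("shout"), starlark.Bool(true)}})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v) // "HELLO, WORLD"
    }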
+ if n := len(thread.stack); n < cap(thread.stack) { + fr = thread.stack[n : n+1][0] + } + if fr == nil { + fr = new(frame) + } + + if thread.stack == nil { + // one-time initialization of thread + if thread.maxSteps == 0 { + thread.maxSteps-- // (MaxUint64) + } + } + + thread.stack = append(thread.stack, fr) // push + + fr.callable = c + + thread.beginProfSpan() + + // Use defer to ensure that panics from built-ins + // pass through the interpreter without leaving + // it in a bad state. + defer func() { + thread.endProfSpan() + + // clear out any references + // TODO(adonovan): opt: zero fr.Locals and + // reuse it if it is large enough. + *fr = frame{} + + thread.stack = thread.stack[:len(thread.stack)-1] // pop + }() + + result, err := c.CallInternal(thread, args, kwargs) + + // Sanity check: nil is not a valid Starlark value. + if result == nil && err == nil { + err = fmt.Errorf("internal error: nil (not None) returned from %s", fn) + } + + // Always return an EvalError with an accurate frame. + if err != nil { + if _, ok := err.(*EvalError); !ok { + err = thread.evalError(err) + } + } + + return result, err +} + +func slice(x, lo, hi, step_ Value) (Value, error) { + sliceable, ok := x.(Sliceable) + if !ok { + return nil, fmt.Errorf("invalid slice operand %s", x.Type()) + } + + n := sliceable.Len() + step := 1 + if step_ != None { + var err error + step, err = AsInt32(step_) + if err != nil { + return nil, fmt.Errorf("invalid slice step: %s", err) + } + if step == 0 { + return nil, fmt.Errorf("zero is not a valid slice step") + } + } + + // TODO(adonovan): opt: preallocate result array. + + var start, end int + if step > 0 { + // positive stride + // default indices are [0:n]. + var err error + start, end, err = indices(lo, hi, n) + if err != nil { + return nil, err + } + + if end < start { + end = start // => empty result + } + } else { + // negative stride + // default indices are effectively [n-1:-1], though to + // get this effect using explicit indices requires + // [n-1:-1-n:-1] because of the treatment of -ve values. + start = n - 1 + if err := asIndex(lo, n, &start); err != nil { + return nil, fmt.Errorf("invalid start index: %s", err) + } + if start >= n { + start = n - 1 + } + + end = -1 + if err := asIndex(hi, n, &end); err != nil { + return nil, fmt.Errorf("invalid end index: %s", err) + } + if end < -1 { + end = -1 + } + + if start < end { + start = end // => empty result + } + } + + return sliceable.Slice(start, end, step), nil +} + +// From Hacker's Delight, section 2.8. +func signum64(x int64) int { return int(uint64(x>>63) | uint64(-x)>>63) } +func signum(x int) int { return signum64(int64(x)) } + +// indices converts start_ and end_ to indices in the range [0:len]. +// The start index defaults to 0 and the end index defaults to len. +// An index -len < i < 0 is treated like i+len. +// All other indices outside the range are clamped to the nearest value in the range. +// Beware: start may be greater than end. +// This function is suitable only for slices with positive strides. +func indices(start_, end_ Value, len int) (start, end int, err error) { + start = 0 + if err := asIndex(start_, len, &start); err != nil { + return 0, 0, fmt.Errorf("invalid start index: %s", err) + } + // Clamp to [0:len]. + if start < 0 { + start = 0 + } else if start > len { + start = len + } + + end = len + if err := asIndex(end_, len, &end); err != nil { + return 0, 0, fmt.Errorf("invalid end index: %s", err) + } + // Clamp to [0:len]. 
+ if end < 0 { + end = 0 + } else if end > len { + end = len + } + + return start, end, nil +} + +// asIndex sets *result to the integer value of v, adding len to it +// if it is negative. If v is nil or None, *result is unchanged. +func asIndex(v Value, len int, result *int) error { + if v != nil && v != None { + var err error + *result, err = AsInt32(v) + if err != nil { + return err + } + if *result < 0 { + *result += len + } + } + return nil +} + +// setArgs sets the values of the formal parameters of function fn in +// based on the actual parameter values in args and kwargs. +func setArgs(locals []Value, fn *Function, args Tuple, kwargs []Tuple) error { + + // This is the general schema of a function: + // + // def f(p1, p2=dp2, p3=dp3, *args, k1, k2=dk2, k3, **kwargs) + // + // The p parameters are non-kwonly, and may be specified positionally. + // The k parameters are kwonly, and must be specified by name. + // The defaults tuple is (dp2, dp3, mandatory, dk2, mandatory). + // + // Arguments are processed as follows: + // - positional arguments are bound to a prefix of [p1, p2, p3]. + // - surplus positional arguments are bound to *args. + // - keyword arguments are bound to any of {p1, p2, p3, k1, k2, k3}; + // duplicate bindings are rejected. + // - surplus keyword arguments are bound to **kwargs. + // - defaults are bound to each parameter from p2 to k3 if no value was set. + // default values come from the tuple above. + // It is an error if the tuple entry for an unset parameter is 'mandatory'. + + // Nullary function? + if fn.NumParams() == 0 { + if nactual := len(args) + len(kwargs); nactual > 0 { + return fmt.Errorf("function %s accepts no arguments (%d given)", fn.Name(), nactual) + } + return nil + } + + cond := func(x bool, y, z interface{}) interface{} { + if x { + return y + } + return z + } + + // nparams is the number of ordinary parameters (sans *args and **kwargs). + nparams := fn.NumParams() + var kwdict *Dict + if fn.HasKwargs() { + nparams-- + kwdict = new(Dict) + locals[nparams] = kwdict + } + if fn.HasVarargs() { + nparams-- + } + + // nonkwonly is the number of non-kwonly parameters. + nonkwonly := nparams - fn.NumKwonlyParams() + + // Too many positional args? + n := len(args) + if len(args) > nonkwonly { + if !fn.HasVarargs() { + return fmt.Errorf("function %s accepts %s%d positional argument%s (%d given)", + fn.Name(), + cond(len(fn.defaults) > fn.NumKwonlyParams(), "at most ", ""), + nonkwonly, + cond(nonkwonly == 1, "", "s"), + len(args)) + } + n = nonkwonly + } + + // Bind positional arguments to non-kwonly parameters. + for i := 0; i < n; i++ { + locals[i] = args[i] + } + + // Bind surplus positional arguments to *args parameter. + if fn.HasVarargs() { + tuple := make(Tuple, len(args)-n) + for i := n; i < len(args); i++ { + tuple[i-n] = args[i] + } + locals[nparams] = tuple + } + + // Bind keyword arguments to parameters. + paramIdents := fn.funcode.Locals[:nparams] + for _, pair := range kwargs { + k, v := pair[0].(String), pair[1] + if i := findParam(paramIdents, string(k)); i >= 0 { + if locals[i] != nil { + return fmt.Errorf("function %s got multiple values for parameter %s", fn.Name(), k) + } + locals[i] = v + continue + } + if kwdict == nil { + return fmt.Errorf("function %s got an unexpected keyword argument %s", fn.Name(), k) + } + oldlen := kwdict.Len() + kwdict.SetKey(k, v) + if kwdict.Len() == oldlen { + return fmt.Errorf("function %s got multiple values for parameter %s", fn.Name(), k) + } + } + + // Are defaults required? 
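The binding rules implemented here are easiest to see from a Starlark definition exercised via Call. A sketch following the schema in the comment above (the function f is invented):

    package main

    import (
        "fmt"
        "log"

        "go.starlark.net/starlark"
    )

    func main() {
        thread := &starlark.Thread{Name: "args"}
        src := "def f(a, b=2, *rest, k=3):\n  return (a, b, rest, k)\n"
        globals, err := starlark.ExecFile(thread, "args.star", src, nil)
        if err != nil {
            log.Fatal(err)
        }

        // a and b bind positionally, the surplus goes to *rest,
        // and k, being keyword-only, must be named.
        v, err := starlark.Call(thread, globals["f"],
            starlark.Tuple{starlark.MakeInt(1), starlark.MakeInt(9), starlark.MakeInt(10)},
            []starlark.Tuple{{starlark.String("k"), starlark.MakeInt(7)}})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v) // (1, 9, (10,), 7)
    }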
+ if n < nparams || fn.NumKwonlyParams() > 0 { + m := nparams - len(fn.defaults) // first default + + // Report errors for missing required arguments. + var missing []string + var i int + for i = n; i < m; i++ { + if locals[i] == nil { + missing = append(missing, paramIdents[i].Name) + } + } + + // Bind default values to parameters. + for ; i < nparams; i++ { + if locals[i] == nil { + dflt := fn.defaults[i-m] + if _, ok := dflt.(mandatory); ok { + missing = append(missing, paramIdents[i].Name) + continue + } + locals[i] = dflt + } + } + + if missing != nil { + return fmt.Errorf("function %s missing %d argument%s (%s)", + fn.Name(), len(missing), cond(len(missing) > 1, "s", ""), strings.Join(missing, ", ")) + } + } + return nil +} + +func findParam(params []compile.Binding, name string) int { + for i, param := range params { + if param.Name == name { + return i + } + } + return -1 +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string-interpolation +func interpolate(format string, x Value) (Value, error) { + buf := new(strings.Builder) + index := 0 + nargs := 1 + if tuple, ok := x.(Tuple); ok { + nargs = len(tuple) + } + for { + i := strings.IndexByte(format, '%') + if i < 0 { + buf.WriteString(format) + break + } + buf.WriteString(format[:i]) + format = format[i+1:] + + if format != "" && format[0] == '%' { + buf.WriteByte('%') + format = format[1:] + continue + } + + var arg Value + if format != "" && format[0] == '(' { + // keyword argument: %(name)s. + format = format[1:] + j := strings.IndexByte(format, ')') + if j < 0 { + return nil, fmt.Errorf("incomplete format key") + } + key := format[:j] + if dict, ok := x.(Mapping); !ok { + return nil, fmt.Errorf("format requires a mapping") + } else if v, found, _ := dict.Get(String(key)); found { + arg = v + } else { + return nil, fmt.Errorf("key not found: %s", key) + } + format = format[j+1:] + } else { + // positional argument: %s. + if index >= nargs { + return nil, fmt.Errorf("not enough arguments for format string") + } + if tuple, ok := x.(Tuple); ok { + arg = tuple[index] + } else { + arg = x + } + } + + // NOTE: Starlark does not support any of these optional Python features: + // - optional conversion flags: [#0- +], etc. + // - optional minimum field width (number or *). 
+ // - optional precision (.123 or *) + // - optional length modifier + + // conversion type + if format == "" { + return nil, fmt.Errorf("incomplete format") + } + switch c := format[0]; c { + case 's', 'r': + if str, ok := AsString(arg); ok && c == 's' { + buf.WriteString(str) + } else { + writeValue(buf, arg, nil) + } + case 'd', 'i', 'o', 'x', 'X': + i, err := NumberToInt(arg) + if err != nil { + return nil, fmt.Errorf("%%%c format requires integer: %v", c, err) + } + switch c { + case 'd', 'i': + fmt.Fprintf(buf, "%d", i) + case 'o': + fmt.Fprintf(buf, "%o", i) + case 'x': + fmt.Fprintf(buf, "%x", i) + case 'X': + fmt.Fprintf(buf, "%X", i) + } + case 'e', 'f', 'g', 'E', 'F', 'G': + f, ok := AsFloat(arg) + if !ok { + return nil, fmt.Errorf("%%%c format requires float, not %s", c, arg.Type()) + } + Float(f).format(buf, c) + case 'c': + switch arg := arg.(type) { + case Int: + // chr(int) + r, err := AsInt32(arg) + if err != nil || r < 0 || r > unicode.MaxRune { + return nil, fmt.Errorf("%%c format requires a valid Unicode code point, got %s", arg) + } + buf.WriteRune(rune(r)) + case String: + r, size := utf8.DecodeRuneInString(string(arg)) + if size != len(arg) || len(arg) == 0 { + return nil, fmt.Errorf("%%c format requires a single-character string") + } + buf.WriteRune(r) + default: + return nil, fmt.Errorf("%%c format requires int or single-character string, not %s", arg.Type()) + } + case '%': + buf.WriteByte('%') + default: + return nil, fmt.Errorf("unknown conversion %%%c", c) + } + format = format[1:] + index++ + } + + if index < nargs { + return nil, fmt.Errorf("too many arguments for format string") + } + + return String(buf.String()), nil +} diff --git a/vendor/go.starlark.net/starlark/hashtable.go b/vendor/go.starlark.net/starlark/hashtable.go new file mode 100644 index 000000000..252d21d12 --- /dev/null +++ b/vendor/go.starlark.net/starlark/hashtable.go @@ -0,0 +1,390 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package starlark + +import ( + "fmt" + _ "unsafe" // for go:linkname hack +) + +// hashtable is used to represent Starlark dict and set values. +// It is a hash table whose key/value entries form a doubly-linked list +// in the order the entries were inserted. +// +// Initialized instances of hashtable must not be copied. +type hashtable struct { + table []bucket // len is zero or a power of two + bucket0 [1]bucket // inline allocation for small maps. + len uint32 + itercount uint32 // number of active iterators (ignored if frozen) + head *entry // insertion order doubly-linked list; may be nil + tailLink **entry // address of nil link at end of list (perhaps &head) + frozen bool + + _ noCopy // triggers vet copylock check on this type. +} + +// noCopy is zero-sized type that triggers vet's copylock check. +// See https://github.com/golang/go/issues/8005#issuecomment-190753527. 
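interpolate is what the Starlark % operator dispatches to (see the PERCENT case of Binary above), so its keyword form can be exercised from Go. A sketch:

    package main

    import (
        "fmt"
        "log"

        "go.starlark.net/starlark"
        "go.starlark.net/syntax"
    )

    func main() {
        d := starlark.NewDict(2)
        if err := d.SetKey(starlark.String("user"), starlark.String("ada")); err != nil {
            log.Fatal(err)
        }
        if err := d.SetKey(starlark.String("points"), starlark.MakeInt(99)); err != nil {
            log.Fatal(err)
        }

        v, err := starlark.Binary(syntax.PERCENT,
            starlark.String("%(user)s scored %(points)d"), d)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v) // "ada scored 99"
    }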
+type noCopy struct{} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + +const bucketSize = 8 + +type bucket struct { + entries [bucketSize]entry + next *bucket // linked list of buckets +} + +type entry struct { + hash uint32 // nonzero => in use + key, value Value + next *entry // insertion order doubly-linked list; may be nil + prevLink **entry // address of link to this entry (perhaps &head) +} + +func (ht *hashtable) init(size int) { + if size < 0 { + panic("size < 0") + } + nb := 1 + for overloaded(size, nb) { + nb = nb << 1 + } + if nb < 2 { + ht.table = ht.bucket0[:1] + } else { + ht.table = make([]bucket, nb) + } + ht.tailLink = &ht.head +} + +func (ht *hashtable) freeze() { + if !ht.frozen { + ht.frozen = true + for e := ht.head; e != nil; e = e.next { + e.key.Freeze() + e.value.Freeze() + } + } +} + +func (ht *hashtable) insert(k, v Value) error { + if err := ht.checkMutable("insert into"); err != nil { + return err + } + if ht.table == nil { + ht.init(1) + } + h, err := k.Hash() + if err != nil { + return err + } + if h == 0 { + h = 1 // zero is reserved + } + +retry: + var insert *entry + + // Inspect each bucket in the bucket list. + p := &ht.table[h&(uint32(len(ht.table)-1))] + for { + for i := range p.entries { + e := &p.entries[i] + if e.hash != h { + if e.hash == 0 { + // Found empty entry; make a note. + insert = e + } + continue + } + if eq, err := Equal(k, e.key); err != nil { + return err // e.g. excessively recursive tuple + } else if !eq { + continue + } + // Key already present; update value. + e.value = v + return nil + } + if p.next == nil { + break + } + p = p.next + } + + // Key not found. p points to the last bucket. + + // Does the number of elements exceed the buckets' load factor? + if overloaded(int(ht.len), len(ht.table)) { + ht.grow() + goto retry + } + + if insert == nil { + // No space in existing buckets. Add a new one to the bucket list. + b := new(bucket) + p.next = b + insert = &b.entries[0] + } + + // Insert key/value pair. + insert.hash = h + insert.key = k + insert.value = v + + // Append entry to doubly-linked list. + insert.prevLink = ht.tailLink + *ht.tailLink = insert + ht.tailLink = &insert.next + + ht.len++ + + return nil +} + +func overloaded(elems, buckets int) bool { + const loadFactor = 6.5 // just a guess + return elems >= bucketSize && float64(elems) >= loadFactor*float64(buckets) +} + +func (ht *hashtable) grow() { + // Double the number of buckets and rehash. + // + // Even though this makes reentrant calls to ht.insert, + // calls Equals unnecessarily (since there can't be duplicate keys), + // and recomputes the hash unnecessarily, the gains from + // avoiding these steps were found to be too small to justify + // the extra logic: -2% on hashtable benchmark. + ht.table = make([]bucket, len(ht.table)<<1) + oldhead := ht.head + ht.head = nil + ht.tailLink = &ht.head + ht.len = 0 + for e := oldhead; e != nil; e = e.next { + ht.insert(e.key, e.value) + } + ht.bucket0[0] = bucket{} // clear out unused initial bucket +} + +func (ht *hashtable) lookup(k Value) (v Value, found bool, err error) { + h, err := k.Hash() + if err != nil { + return nil, false, err // unhashable + } + if h == 0 { + h = 1 // zero is reserved + } + if ht.table == nil { + return None, false, nil // empty + } + + // Inspect each bucket in the bucket list. 
+ for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next { + for i := range p.entries { + e := &p.entries[i] + if e.hash == h { + if eq, err := Equal(k, e.key); err != nil { + return nil, false, err // e.g. excessively recursive tuple + } else if eq { + return e.value, true, nil // found + } + } + } + } + return None, false, nil // not found +} + +// Items returns all the items in the map (as key/value pairs) in insertion order. +func (ht *hashtable) items() []Tuple { + items := make([]Tuple, 0, ht.len) + array := make([]Value, ht.len*2) // allocate a single backing array + for e := ht.head; e != nil; e = e.next { + pair := Tuple(array[:2:2]) + array = array[2:] + pair[0] = e.key + pair[1] = e.value + items = append(items, pair) + } + return items +} + +func (ht *hashtable) first() (Value, bool) { + if ht.head != nil { + return ht.head.key, true + } + return None, false +} + +func (ht *hashtable) keys() []Value { + keys := make([]Value, 0, ht.len) + for e := ht.head; e != nil; e = e.next { + keys = append(keys, e.key) + } + return keys +} + +func (ht *hashtable) delete(k Value) (v Value, found bool, err error) { + if err := ht.checkMutable("delete from"); err != nil { + return nil, false, err + } + if ht.table == nil { + return None, false, nil // empty + } + h, err := k.Hash() + if err != nil { + return nil, false, err // unhashable + } + if h == 0 { + h = 1 // zero is reserved + } + + // Inspect each bucket in the bucket list. + for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next { + for i := range p.entries { + e := &p.entries[i] + if e.hash == h { + if eq, err := Equal(k, e.key); err != nil { + return nil, false, err + } else if eq { + // Remove e from doubly-linked list. + *e.prevLink = e.next + if e.next == nil { + ht.tailLink = e.prevLink // deletion of last entry + } else { + e.next.prevLink = e.prevLink + } + + v := e.value + *e = entry{} + ht.len-- + return v, true, nil // found + } + } + } + } + + // TODO(adonovan): opt: remove completely empty bucket from bucket list. + + return None, false, nil // not found +} + +// checkMutable reports an error if the hash table should not be mutated. +// verb+" dict" should describe the operation. +func (ht *hashtable) checkMutable(verb string) error { + if ht.frozen { + return fmt.Errorf("cannot %s frozen hash table", verb) + } + if ht.itercount > 0 { + return fmt.Errorf("cannot %s hash table during iteration", verb) + } + return nil +} + +func (ht *hashtable) clear() error { + if err := ht.checkMutable("clear"); err != nil { + return err + } + if ht.table != nil { + for i := range ht.table { + ht.table[i] = bucket{} + } + } + ht.head = nil + ht.tailLink = &ht.head + ht.len = 0 + return nil +} + +func (ht *hashtable) addAll(other *hashtable) error { + for e := other.head; e != nil; e = e.next { + if err := ht.insert(e.key, e.value); err != nil { + return err + } + } + return nil +} + +// dump is provided as an aid to debugging. 
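The insertion-order list and the freeze guard are visible through the public Dict wrapper around this hashtable. A sketch:

    package main

    import (
        "fmt"
        "log"

        "go.starlark.net/starlark"
    )

    func main() {
        d := starlark.NewDict(0)
        for _, k := range []string{"b", "a", "c"} {
            if err := d.SetKey(starlark.String(k), starlark.MakeInt(1)); err != nil {
                log.Fatal(err)
            }
        }
        // Keys come back in insertion order, not sorted order.
        fmt.Println(d) // {"b": 1, "a": 1, "c": 1}

        d.Freeze()
        err := d.SetKey(starlark.String("d"), starlark.MakeInt(1))
        fmt.Println(err) // cannot insert into frozen hash table
    }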
+func (ht *hashtable) dump() { + fmt.Printf("hashtable %p len=%d head=%p tailLink=%p", + ht, ht.len, ht.head, ht.tailLink) + if ht.tailLink != nil { + fmt.Printf(" *tailLink=%p", *ht.tailLink) + } + fmt.Println() + for j := range ht.table { + fmt.Printf("bucket chain %d\n", j) + for p := &ht.table[j]; p != nil; p = p.next { + fmt.Printf("bucket %p\n", p) + for i := range p.entries { + e := &p.entries[i] + fmt.Printf("\tentry %d @ %p hash=%d key=%v value=%v\n", + i, e, e.hash, e.key, e.value) + fmt.Printf("\t\tnext=%p &next=%p prev=%p", + e.next, &e.next, e.prevLink) + if e.prevLink != nil { + fmt.Printf(" *prev=%p", *e.prevLink) + } + fmt.Println() + } + } + } +} + +func (ht *hashtable) iterate() *keyIterator { + if !ht.frozen { + ht.itercount++ + } + return &keyIterator{ht: ht, e: ht.head} +} + +type keyIterator struct { + ht *hashtable + e *entry +} + +func (it *keyIterator) Next(k *Value) bool { + if it.e != nil { + *k = it.e.key + it.e = it.e.next + return true + } + return false +} + +func (it *keyIterator) Done() { + if !it.ht.frozen { + it.ht.itercount-- + } +} + +// TODO(adonovan): use go1.19's maphash.String. + +// hashString computes the hash of s. +func hashString(s string) uint32 { + if len(s) >= 12 { + // Call the Go runtime's optimized hash implementation, + // which uses the AESENC instruction on amd64 machines. + return uint32(goStringHash(s, 0)) + } + return softHashString(s) +} + +//go:linkname goStringHash runtime.stringHash +func goStringHash(s string, seed uintptr) uintptr + +// softHashString computes the 32-bit FNV-1a hash of s in software. +func softHashString(s string) uint32 { + var h uint32 = 2166136261 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} diff --git a/vendor/go.starlark.net/starlark/int.go b/vendor/go.starlark.net/starlark/int.go new file mode 100644 index 000000000..a264e9d22 --- /dev/null +++ b/vendor/go.starlark.net/starlark/int.go @@ -0,0 +1,452 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package starlark + +import ( + "fmt" + "math" + "math/big" + "reflect" + "strconv" + + "go.starlark.net/syntax" +) + +// Int is the type of a Starlark int. +// +// The zero value is not a legal value; use MakeInt(0). +type Int struct{ impl intImpl } + +// --- high-level accessors --- + +// MakeInt returns a Starlark int for the specified signed integer. +func MakeInt(x int) Int { return MakeInt64(int64(x)) } + +// MakeInt64 returns a Starlark int for the specified int64. +func MakeInt64(x int64) Int { + if math.MinInt32 <= x && x <= math.MaxInt32 { + return makeSmallInt(x) + } + return makeBigInt(big.NewInt(x)) +} + +// MakeUint returns a Starlark int for the specified unsigned integer. +func MakeUint(x uint) Int { return MakeUint64(uint64(x)) } + +// MakeUint64 returns a Starlark int for the specified uint64. +func MakeUint64(x uint64) Int { + if x <= math.MaxInt32 { + return makeSmallInt(int64(x)) + } + return makeBigInt(new(big.Int).SetUint64(x)) +} + +// MakeBigInt returns a Starlark int for the specified big.Int. +// The new Int value will contain a copy of x. The caller is safe to modify x. 
+func MakeBigInt(x *big.Int) Int { + if isSmall(x) { + return makeSmallInt(x.Int64()) + } + z := new(big.Int).Set(x) + return makeBigInt(z) +} + +func isSmall(x *big.Int) bool { + n := x.BitLen() + return n < 32 || n == 32 && x.Int64() == math.MinInt32 +} + +var ( + zero, one = makeSmallInt(0), makeSmallInt(1) + oneBig = big.NewInt(1) + + _ HasUnary = Int{} +) + +// Unary implements the operations +int, -int, and ~int. +func (i Int) Unary(op syntax.Token) (Value, error) { + switch op { + case syntax.MINUS: + return zero.Sub(i), nil + case syntax.PLUS: + return i, nil + case syntax.TILDE: + return i.Not(), nil + } + return nil, nil +} + +// Int64 returns the value as an int64. +// If it is not exactly representable the result is undefined and ok is false. +func (i Int) Int64() (_ int64, ok bool) { + iSmall, iBig := i.get() + if iBig != nil { + x, acc := bigintToInt64(iBig) + if acc != big.Exact { + return // inexact + } + return x, true + } + return iSmall, true +} + +// BigInt returns a new big.Int with the same value as the Int. +func (i Int) BigInt() *big.Int { + iSmall, iBig := i.get() + if iBig != nil { + return new(big.Int).Set(iBig) + } + return big.NewInt(iSmall) +} + +// bigInt returns the value as a big.Int. +// It differs from BigInt in that this method returns the actual +// reference and any modification will change the state of i. +func (i Int) bigInt() *big.Int { + iSmall, iBig := i.get() + if iBig != nil { + return iBig + } + return big.NewInt(iSmall) +} + +// Uint64 returns the value as a uint64. +// If it is not exactly representable the result is undefined and ok is false. +func (i Int) Uint64() (_ uint64, ok bool) { + iSmall, iBig := i.get() + if iBig != nil { + x, acc := bigintToUint64(iBig) + if acc != big.Exact { + return // inexact + } + return x, true + } + if iSmall < 0 { + return // inexact + } + return uint64(iSmall), true +} + +// The math/big API should provide this function. +func bigintToInt64(i *big.Int) (int64, big.Accuracy) { + sign := i.Sign() + if sign > 0 { + if i.Cmp(maxint64) > 0 { + return math.MaxInt64, big.Below + } + } else if sign < 0 { + if i.Cmp(minint64) < 0 { + return math.MinInt64, big.Above + } + } + return i.Int64(), big.Exact +} + +// The math/big API should provide this function. 
+func bigintToUint64(i *big.Int) (uint64, big.Accuracy) { + sign := i.Sign() + if sign > 0 { + if i.BitLen() > 64 { + return math.MaxUint64, big.Below + } + } else if sign < 0 { + return 0, big.Above + } + return i.Uint64(), big.Exact +} + +var ( + minint64 = new(big.Int).SetInt64(math.MinInt64) + maxint64 = new(big.Int).SetInt64(math.MaxInt64) +) + +func (i Int) Format(s fmt.State, ch rune) { + iSmall, iBig := i.get() + if iBig != nil { + iBig.Format(s, ch) + return + } + big.NewInt(iSmall).Format(s, ch) +} +func (i Int) String() string { + iSmall, iBig := i.get() + if iBig != nil { + return iBig.Text(10) + } + return strconv.FormatInt(iSmall, 10) +} +func (i Int) Type() string { return "int" } +func (i Int) Freeze() {} // immutable +func (i Int) Truth() Bool { return i.Sign() != 0 } +func (i Int) Hash() (uint32, error) { + iSmall, iBig := i.get() + var lo big.Word + if iBig != nil { + lo = iBig.Bits()[0] + } else { + lo = big.Word(iSmall) + } + return 12582917 * uint32(lo+3), nil +} + +// Required by the TotallyOrdered interface +func (x Int) Cmp(v Value, depth int) (int, error) { + y := v.(Int) + xSmall, xBig := x.get() + ySmall, yBig := y.get() + if xBig != nil || yBig != nil { + return x.bigInt().Cmp(y.bigInt()), nil + } + return signum64(xSmall - ySmall), nil // safe: int32 operands +} + +// Float returns the float value nearest i. +func (i Int) Float() Float { + iSmall, iBig := i.get() + if iBig != nil { + // Fast path for hardware int-to-float conversions. + if iBig.IsUint64() { + return Float(iBig.Uint64()) + } else if iBig.IsInt64() { + return Float(iBig.Int64()) + } + + f, _ := new(big.Float).SetInt(iBig).Float64() + return Float(f) + } + return Float(iSmall) +} + +// finiteFloat returns the finite float value nearest i, +// or an error if the magnitude is too large. 
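The small/big split is invisible to callers; what matters is that the conversions report inexactness instead of silently truncating. A sketch:

    package main

    import (
        "fmt"
        "math/big"

        "go.starlark.net/starlark"
    )

    func main() {
        small := starlark.MakeInt64(1 << 20)
        huge := starlark.MakeBigInt(new(big.Int).Lsh(big.NewInt(1), 100)) // copies its argument

        if x, ok := small.Int64(); ok {
            fmt.Println(x) // 1048576
        }
        if _, ok := huge.Int64(); !ok {
            fmt.Println("2**100 does not fit in int64")
        }
        // BigInt returns a fresh copy, so mutating it cannot corrupt huge.
        fmt.Println(huge.BigInt().BitLen()) // 101
    }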
+func (i Int) finiteFloat() (Float, error) { + f := i.Float() + if math.IsInf(float64(f), 0) { + return 0, fmt.Errorf("int too large to convert to float") + } + return f, nil +} + +func (x Int) Sign() int { + xSmall, xBig := x.get() + if xBig != nil { + return xBig.Sign() + } + return signum64(xSmall) +} + +func (x Int) Add(y Int) Int { + xSmall, xBig := x.get() + ySmall, yBig := y.get() + if xBig != nil || yBig != nil { + return MakeBigInt(new(big.Int).Add(x.bigInt(), y.bigInt())) + } + return MakeInt64(xSmall + ySmall) +} +func (x Int) Sub(y Int) Int { + xSmall, xBig := x.get() + ySmall, yBig := y.get() + if xBig != nil || yBig != nil { + return MakeBigInt(new(big.Int).Sub(x.bigInt(), y.bigInt())) + } + return MakeInt64(xSmall - ySmall) +} +func (x Int) Mul(y Int) Int { + xSmall, xBig := x.get() + ySmall, yBig := y.get() + if xBig != nil || yBig != nil { + return MakeBigInt(new(big.Int).Mul(x.bigInt(), y.bigInt())) + } + return MakeInt64(xSmall * ySmall) +} +func (x Int) Or(y Int) Int { + xSmall, xBig := x.get() + ySmall, yBig := y.get() + if xBig != nil || yBig != nil { + return MakeBigInt(new(big.Int).Or(x.bigInt(), y.bigInt())) + } + return makeSmallInt(xSmall | ySmall) +} +func (x Int) And(y Int) Int { + xSmall, xBig := x.get() + ySmall, yBig := y.get() + if xBig != nil || yBig != nil { + return MakeBigInt(new(big.Int).And(x.bigInt(), y.bigInt())) + } + return makeSmallInt(xSmall & ySmall) +} +func (x Int) Xor(y Int) Int { + xSmall, xBig := x.get() + ySmall, yBig := y.get() + if xBig != nil || yBig != nil { + return MakeBigInt(new(big.Int).Xor(x.bigInt(), y.bigInt())) + } + return makeSmallInt(xSmall ^ ySmall) +} +func (x Int) Not() Int { + xSmall, xBig := x.get() + if xBig != nil { + return MakeBigInt(new(big.Int).Not(xBig)) + } + return makeSmallInt(^xSmall) +} +func (x Int) Lsh(y uint) Int { return MakeBigInt(new(big.Int).Lsh(x.bigInt(), y)) } +func (x Int) Rsh(y uint) Int { return MakeBigInt(new(big.Int).Rsh(x.bigInt(), y)) } + +// Precondition: y is nonzero. +func (x Int) Div(y Int) Int { + xSmall, xBig := x.get() + ySmall, yBig := y.get() + // http://python-history.blogspot.com/2010/08/why-pythons-integer-division-floors.html + if xBig != nil || yBig != nil { + xb, yb := x.bigInt(), y.bigInt() + + var quo, rem big.Int + quo.QuoRem(xb, yb, &rem) + if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 { + quo.Sub(&quo, oneBig) + } + return MakeBigInt(&quo) + } + quo := xSmall / ySmall + rem := xSmall % ySmall + if (xSmall < 0) != (ySmall < 0) && rem != 0 { + quo -= 1 + } + return MakeInt64(quo) +} + +// Precondition: y is nonzero. +func (x Int) Mod(y Int) Int { + xSmall, xBig := x.get() + ySmall, yBig := y.get() + if xBig != nil || yBig != nil { + xb, yb := x.bigInt(), y.bigInt() + + var quo, rem big.Int + quo.QuoRem(xb, yb, &rem) + if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 { + rem.Add(&rem, yb) + } + return MakeBigInt(&rem) + } + rem := xSmall % ySmall + if (xSmall < 0) != (ySmall < 0) && rem != 0 { + rem += ySmall + } + return makeSmallInt(rem) +} + +func (i Int) rational() *big.Rat { + iSmall, iBig := i.get() + if iBig != nil { + return new(big.Rat).SetInt(iBig) + } + return new(big.Rat).SetInt64(iSmall) +} + +// AsInt32 returns the value of x if is representable as an int32. 
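Div and Mod floor toward negative infinity (Python-style), unlike Go's native operators, and AsInt refuses values that do not fit the target type. A sketch:

    package main

    import (
        "fmt"

        "go.starlark.net/starlark"
    )

    func main() {
        a, b := starlark.MakeInt(-7), starlark.MakeInt(2)
        fmt.Println(a.Div(b)) // -4 (Go's -7/2 would be -3)
        fmt.Println(a.Mod(b)) // 1  (the result takes the divisor's sign)

        var v uint8
        if err := starlark.AsInt(starlark.MakeInt(300), &v); err != nil {
            fmt.Println(err) // reports 300 as out of the unsigned 8-bit range
        }
    }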
+func AsInt32(x Value) (int, error) { + i, ok := x.(Int) + if !ok { + return 0, fmt.Errorf("got %s, want int", x.Type()) + } + iSmall, iBig := i.get() + if iBig != nil { + return 0, fmt.Errorf("%s out of range", i) + } + return int(iSmall), nil +} + +// AsInt sets *ptr to the value of Starlark int x, if it is exactly representable, +// otherwise it returns an error. +// The type of ptr must be one of the pointer types *int, *int8, *int16, *int32, or *int64, +// or one of their unsigned counterparts including *uintptr. +func AsInt(x Value, ptr interface{}) error { + xint, ok := x.(Int) + if !ok { + return fmt.Errorf("got %s, want int", x.Type()) + } + + bits := reflect.TypeOf(ptr).Elem().Size() * 8 + switch ptr.(type) { + case *int, *int8, *int16, *int32, *int64: + i, ok := xint.Int64() + if !ok || bits < 64 && !(-1<<(bits-1) <= i && i < 1<<(bits-1)) { + return fmt.Errorf("%s out of range (want value in signed %d-bit range)", xint, bits) + } + switch ptr := ptr.(type) { + case *int: + *ptr = int(i) + case *int8: + *ptr = int8(i) + case *int16: + *ptr = int16(i) + case *int32: + *ptr = int32(i) + case *int64: + *ptr = int64(i) + } + + case *uint, *uint8, *uint16, *uint32, *uint64, *uintptr: + i, ok := xint.Uint64() + if !ok || bits < 64 && i >= 1< value is not representable as int32 +} + +// --- low-level accessors --- + +// get returns the small and big components of the Int. +// small is defined only if big is nil. +// small is sign-extended to 64 bits for ease of subsequent arithmetic. +func (i Int) get() (small int64, big *big.Int) { + return i.impl.small_, i.impl.big_ +} + +// Precondition: math.MinInt32 <= x && x <= math.MaxInt32 +func makeSmallInt(x int64) Int { + return Int{intImpl{small_: x}} +} + +// Precondition: x cannot be represented as int32. +func makeBigInt(x *big.Int) Int { + return Int{intImpl{big_: x}} +} diff --git a/vendor/go.starlark.net/starlark/int_posix64.go b/vendor/go.starlark.net/starlark/int_posix64.go new file mode 100644 index 000000000..2ab0beda3 --- /dev/null +++ b/vendor/go.starlark.net/starlark/int_posix64.go @@ -0,0 +1,91 @@ +//go:build (linux || darwin || dragonfly || freebsd || netbsd || solaris) && (amd64 || arm64 || mips64x || ppc64x || loong64) +// +build linux darwin dragonfly freebsd netbsd solaris +// +build amd64 arm64 mips64x ppc64x loong64 + +package starlark + +// This file defines an optimized Int implementation for 64-bit machines +// running POSIX. It reserves a 4GB portion of the address space using +// mmap and represents int32 values as addresses within that range. This +// disambiguates int32 values from *big.Int pointers, letting all Int +// values be represented as an unsafe.Pointer, so that Int-to-Value +// interface conversion need not allocate. + +// Although iOS (which, like macOS, appears as darwin/arm64) is +// POSIX-compliant, it limits each process to about 700MB of virtual +// address space, which defeats the optimization. Similarly, +// OpenBSD's default ulimit for virtual memory is a measly GB or so. +// On both those platforms the attempted optimization will fail and +// fall back to the slow implementation. + +// An alternative approach to this optimization would be to embed the +// int32 values in pointers using odd values, which can be distinguished +// from (even) *big.Int pointers. 
However, the Go runtime does not allow +// user programs to manufacture pointers to arbitrary locations such as +// within the zero page, or non-span, non-mmap, non-stack locations, +// and it may panic if it encounters them; see Issue #382. + +import ( + "log" + "math" + "math/big" + "unsafe" + + "golang.org/x/sys/unix" +) + +// intImpl represents a union of (int32, *big.Int) in a single pointer, +// so that Int-to-Value conversions need not allocate. +// +// The pointer is either a *big.Int, if the value is big, or a pointer into a +// reserved portion of the address space (smallints), if the value is small +// and the address space allocation succeeded. +// +// See int_generic.go for the basic representation concepts. +type intImpl unsafe.Pointer + +// get returns the (small, big) arms of the union. +func (i Int) get() (int64, *big.Int) { + if smallints == 0 { + // optimization disabled + if x := (*big.Int)(i.impl); isSmall(x) { + return x.Int64(), nil + } else { + return 0, x + } + } + + if ptr := uintptr(i.impl); ptr >= smallints && ptr < smallints+1<<32 { + return math.MinInt32 + int64(ptr-smallints), nil + } + return 0, (*big.Int)(i.impl) +} + +// Precondition: math.MinInt32 <= x && x <= math.MaxInt32 +func makeSmallInt(x int64) Int { + if smallints == 0 { + // optimization disabled + return Int{intImpl(big.NewInt(x))} + } + + return Int{intImpl(uintptr(x-math.MinInt32) + smallints)} +} + +// Precondition: x cannot be represented as int32. +func makeBigInt(x *big.Int) Int { return Int{intImpl(x)} } + +// smallints is the base address of a 2^32 byte memory region. +// Pointers to addresses in this region represent int32 values. +// We assume smallints is not at the very top of the address space. +// +// Zero means the optimization is disabled and all Ints allocate a big.Int. +var smallints = reserveAddresses(1 << 32) + +func reserveAddresses(len int) uintptr { + b, err := unix.Mmap(-1, 0, len, unix.PROT_READ, unix.MAP_PRIVATE|unix.MAP_ANON) + if err != nil { + log.Printf("Starlark failed to allocate 4GB address space: %v. Integer performance may suffer.", err) + return 0 // optimization disabled + } + return uintptr(unsafe.Pointer(&b[0])) +} diff --git a/vendor/go.starlark.net/starlark/interp.go b/vendor/go.starlark.net/starlark/interp.go new file mode 100644 index 000000000..b41905a0b --- /dev/null +++ b/vendor/go.starlark.net/starlark/interp.go @@ -0,0 +1,705 @@ +package starlark + +// This file defines the bytecode interpreter. + +import ( + "fmt" + "os" + "sync/atomic" + "unsafe" + + "go.starlark.net/internal/compile" + "go.starlark.net/internal/spell" + "go.starlark.net/resolve" + "go.starlark.net/syntax" +) + +const vmdebug = false // TODO(adonovan): use a bitfield of specific kinds of error. + +// TODO(adonovan): +// - optimize position table. +// - opt: record MaxIterStack during compilation and preallocate the stack. + +func (fn *Function) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) { + // Postcondition: args is not mutated. This is stricter than required by Callable, + // but allows CALL to avoid a copy. + + if !resolve.AllowRecursion { + // detect recursion + for _, fr := range thread.stack[:len(thread.stack)-1] { + // We look for the same function code, + // not function value, otherwise the user could + // defeat the check by writing the Y combinator. 
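+			// (With nested functions, two closures returned by the same
+			// def are distinct Function values that share one funcode:
+			//
+			//	def adder(n):
+			//	    def add(x): return x + n
+			//	    return add
+			//
+			// so comparing funcode catches recursion through either.)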
+ if frfn, ok := fr.Callable().(*Function); ok && frfn.funcode == fn.funcode { + return nil, fmt.Errorf("function %s called recursively", fn.Name()) + } + } + } + + f := fn.funcode + fr := thread.frameAt(0) + + // Allocate space for stack and locals. + // Logically these do not escape from this frame + // (See https://github.com/golang/go/issues/20533.) + // + // This heap allocation looks expensive, but I was unable to get + // more than 1% real time improvement in a large alloc-heavy + // benchmark (in which this alloc was 8% of alloc-bytes) + // by allocating space for 8 Values in each frame, or + // by allocating stack by slicing an array held by the Thread + // that is expanded in chunks of min(k, nspace), for k=256 or 1024. + nlocals := len(f.Locals) + nspace := nlocals + f.MaxStack + space := make([]Value, nspace) + locals := space[:nlocals:nlocals] // local variables, starting with parameters + stack := space[nlocals:] // operand stack + + // Digest arguments and set parameters. + err := setArgs(locals, fn, args, kwargs) + if err != nil { + return nil, thread.evalError(err) + } + + fr.locals = locals + + if vmdebug { + fmt.Printf("Entering %s @ %s\n", f.Name, f.Position(0)) + fmt.Printf("%d stack, %d locals\n", len(stack), len(locals)) + defer fmt.Println("Leaving ", f.Name) + } + + // Spill indicated locals to cells. + // Each cell is a separate alloc to avoid spurious liveness. + for _, index := range f.Cells { + locals[index] = &cell{locals[index]} + } + + // TODO(adonovan): add static check that beneath this point + // - there is exactly one return statement + // - there is no redefinition of 'err'. + + var iterstack []Iterator // stack of active iterators + + // Use defer so that application panics can pass through + // interpreter without leaving thread in a bad state. + defer func() { + // ITERPOP the rest of the iterator stack. + for _, iter := range iterstack { + iter.Done() + } + + fr.locals = nil + }() + + sp := 0 + var pc uint32 + var result Value + code := f.Code +loop: + for { + thread.Steps++ + if thread.Steps >= thread.maxSteps { + if thread.OnMaxSteps != nil { + thread.OnMaxSteps(thread) + } else { + thread.Cancel("too many steps") + } + } + if reason := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&thread.cancelReason))); reason != nil { + err = fmt.Errorf("Starlark computation cancelled: %s", *(*string)(reason)) + break loop + } + + fr.pc = pc + + op := compile.Opcode(code[pc]) + pc++ + var arg uint32 + if op >= compile.OpcodeArgMin { + // TODO(adonovan): opt: profile this. + // Perhaps compiling big endian would be less work to decode? + for s := uint(0); ; s += 7 { + b := code[pc] + pc++ + arg |= uint32(b&0x7f) << s + if b < 0x80 { + break + } + } + } + if vmdebug { + fmt.Fprintln(os.Stderr, stack[:sp]) // very verbose! 
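+			// (stack[:sp] is the live portion of the operand stack)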
+ compile.PrintOp(f, fr.pc, op, arg) + } + + switch op { + case compile.NOP: + // nop + + case compile.DUP: + stack[sp] = stack[sp-1] + sp++ + + case compile.DUP2: + stack[sp] = stack[sp-2] + stack[sp+1] = stack[sp-1] + sp += 2 + + case compile.POP: + sp-- + + case compile.EXCH: + stack[sp-2], stack[sp-1] = stack[sp-1], stack[sp-2] + + case compile.EQL, compile.NEQ, compile.GT, compile.LT, compile.LE, compile.GE: + op := syntax.Token(op-compile.EQL) + syntax.EQL + y := stack[sp-1] + x := stack[sp-2] + sp -= 2 + ok, err2 := Compare(op, x, y) + if err2 != nil { + err = err2 + break loop + } + stack[sp] = Bool(ok) + sp++ + + case compile.PLUS, + compile.MINUS, + compile.STAR, + compile.SLASH, + compile.SLASHSLASH, + compile.PERCENT, + compile.AMP, + compile.PIPE, + compile.CIRCUMFLEX, + compile.LTLT, + compile.GTGT, + compile.IN: + binop := syntax.Token(op-compile.PLUS) + syntax.PLUS + if op == compile.IN { + binop = syntax.IN // IN token is out of order + } + y := stack[sp-1] + x := stack[sp-2] + sp -= 2 + z, err2 := Binary(binop, x, y) + if err2 != nil { + err = err2 + break loop + } + stack[sp] = z + sp++ + + case compile.UPLUS, compile.UMINUS, compile.TILDE: + var unop syntax.Token + if op == compile.TILDE { + unop = syntax.TILDE + } else { + unop = syntax.Token(op-compile.UPLUS) + syntax.PLUS + } + x := stack[sp-1] + y, err2 := Unary(unop, x) + if err2 != nil { + err = err2 + break loop + } + stack[sp-1] = y + + case compile.INPLACE_ADD: + y := stack[sp-1] + x := stack[sp-2] + sp -= 2 + + // It's possible that y is not Iterable but + // nonetheless defines x+y, in which case we + // should fall back to the general case. + var z Value + if xlist, ok := x.(*List); ok { + if yiter, ok := y.(Iterable); ok { + if err = xlist.checkMutable("apply += to"); err != nil { + break loop + } + listExtend(xlist, yiter) + z = xlist + } + } + if z == nil { + z, err = Binary(syntax.PLUS, x, y) + if err != nil { + break loop + } + } + + stack[sp] = z + sp++ + + case compile.INPLACE_PIPE: + y := stack[sp-1] + x := stack[sp-2] + sp -= 2 + + // It's possible that y is not Dict but + // nonetheless defines x|y, in which case we + // should fall back to the general case. 
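+			// (Binary dispatches to operands that implement HasBinary.)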
+ var z Value + if xdict, ok := x.(*Dict); ok { + if ydict, ok := y.(*Dict); ok { + if err = xdict.ht.checkMutable("apply |= to"); err != nil { + break loop + } + xdict.ht.addAll(&ydict.ht) // can't fail + z = xdict + } + } + if z == nil { + z, err = Binary(syntax.PIPE, x, y) + if err != nil { + break loop + } + } + + stack[sp] = z + sp++ + + case compile.NONE: + stack[sp] = None + sp++ + + case compile.TRUE: + stack[sp] = True + sp++ + + case compile.FALSE: + stack[sp] = False + sp++ + + case compile.MANDATORY: + stack[sp] = mandatory{} + sp++ + + case compile.JMP: + pc = arg + + case compile.CALL, compile.CALL_VAR, compile.CALL_KW, compile.CALL_VAR_KW: + var kwargs Value + if op == compile.CALL_KW || op == compile.CALL_VAR_KW { + kwargs = stack[sp-1] + sp-- + } + + var args Value + if op == compile.CALL_VAR || op == compile.CALL_VAR_KW { + args = stack[sp-1] + sp-- + } + + // named args (pairs) + var kvpairs []Tuple + if nkvpairs := int(arg & 0xff); nkvpairs > 0 { + kvpairs = make([]Tuple, 0, nkvpairs) + kvpairsAlloc := make(Tuple, 2*nkvpairs) // allocate a single backing array + sp -= 2 * nkvpairs + for i := 0; i < nkvpairs; i++ { + pair := kvpairsAlloc[:2:2] + kvpairsAlloc = kvpairsAlloc[2:] + pair[0] = stack[sp+2*i] // name + pair[1] = stack[sp+2*i+1] // value + kvpairs = append(kvpairs, pair) + } + } + if kwargs != nil { + // Add key/value items from **kwargs dictionary. + dict, ok := kwargs.(IterableMapping) + if !ok { + err = fmt.Errorf("argument after ** must be a mapping, not %s", kwargs.Type()) + break loop + } + items := dict.Items() + for _, item := range items { + if _, ok := item[0].(String); !ok { + err = fmt.Errorf("keywords must be strings, not %s", item[0].Type()) + break loop + } + } + if len(kvpairs) == 0 { + kvpairs = items + } else { + kvpairs = append(kvpairs, items...) + } + } + + // positional args + var positional Tuple + if npos := int(arg >> 8); npos > 0 { + positional = stack[sp-npos : sp] + sp -= npos + + // Copy positional arguments into a new array, + // unless the callee is another Starlark function, + // in which case it can be trusted not to mutate them. + if _, ok := stack[sp-1].(*Function); !ok || args != nil { + positional = append(Tuple(nil), positional...) + } + } + if args != nil { + // Add elements from *args sequence. 
+ iter := Iterate(args) + if iter == nil { + err = fmt.Errorf("argument after * must be iterable, not %s", args.Type()) + break loop + } + var elem Value + for iter.Next(&elem) { + positional = append(positional, elem) + } + iter.Done() + } + + function := stack[sp-1] + + if vmdebug { + fmt.Printf("VM call %s args=%s kwargs=%s @%s\n", + function, positional, kvpairs, f.Position(fr.pc)) + } + + thread.endProfSpan() + z, err2 := Call(thread, function, positional, kvpairs) + thread.beginProfSpan() + if err2 != nil { + err = err2 + break loop + } + if vmdebug { + fmt.Printf("Resuming %s @ %s\n", f.Name, f.Position(0)) + } + stack[sp-1] = z + + case compile.ITERPUSH: + x := stack[sp-1] + sp-- + iter := Iterate(x) + if iter == nil { + err = fmt.Errorf("%s value is not iterable", x.Type()) + break loop + } + iterstack = append(iterstack, iter) + + case compile.ITERJMP: + iter := iterstack[len(iterstack)-1] + if iter.Next(&stack[sp]) { + sp++ + } else { + pc = arg + } + + case compile.ITERPOP: + n := len(iterstack) - 1 + iterstack[n].Done() + iterstack = iterstack[:n] + + case compile.NOT: + stack[sp-1] = !stack[sp-1].Truth() + + case compile.RETURN: + result = stack[sp-1] + break loop + + case compile.SETINDEX: + z := stack[sp-1] + y := stack[sp-2] + x := stack[sp-3] + sp -= 3 + err = setIndex(x, y, z) + if err != nil { + break loop + } + + case compile.INDEX: + y := stack[sp-1] + x := stack[sp-2] + sp -= 2 + z, err2 := getIndex(x, y) + if err2 != nil { + err = err2 + break loop + } + stack[sp] = z + sp++ + + case compile.ATTR: + x := stack[sp-1] + name := f.Prog.Names[arg] + y, err2 := getAttr(x, name) + if err2 != nil { + err = err2 + break loop + } + stack[sp-1] = y + + case compile.SETFIELD: + y := stack[sp-1] + x := stack[sp-2] + sp -= 2 + name := f.Prog.Names[arg] + if err2 := setField(x, name, y); err2 != nil { + err = err2 + break loop + } + + case compile.MAKEDICT: + stack[sp] = new(Dict) + sp++ + + case compile.SETDICT, compile.SETDICTUNIQ: + dict := stack[sp-3].(*Dict) + k := stack[sp-2] + v := stack[sp-1] + sp -= 3 + oldlen := dict.Len() + if err2 := dict.SetKey(k, v); err2 != nil { + err = err2 + break loop + } + if op == compile.SETDICTUNIQ && dict.Len() == oldlen { + err = fmt.Errorf("duplicate key: %v", k) + break loop + } + + case compile.APPEND: + elem := stack[sp-1] + list := stack[sp-2].(*List) + sp -= 2 + list.elems = append(list.elems, elem) + + case compile.SLICE: + x := stack[sp-4] + lo := stack[sp-3] + hi := stack[sp-2] + step := stack[sp-1] + sp -= 4 + res, err2 := slice(x, lo, hi, step) + if err2 != nil { + err = err2 + break loop + } + stack[sp] = res + sp++ + + case compile.UNPACK: + n := int(arg) + iterable := stack[sp-1] + sp-- + iter := Iterate(iterable) + if iter == nil { + err = fmt.Errorf("got %s in sequence assignment", iterable.Type()) + break loop + } + i := 0 + sp += n + for i < n && iter.Next(&stack[sp-1-i]) { + i++ + } + var dummy Value + if iter.Next(&dummy) { + // NB: Len may return -1 here in obscure cases. 
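+				// (A value may be iterable yet have no known length,
+				// e.g. a user-defined Iterable that is not a Sequence.)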
+ err = fmt.Errorf("too many values to unpack (got %d, want %d)", Len(iterable), n) + break loop + } + iter.Done() + if i < n { + err = fmt.Errorf("too few values to unpack (got %d, want %d)", i, n) + break loop + } + + case compile.CJMP: + if stack[sp-1].Truth() { + pc = arg + } + sp-- + + case compile.CONSTANT: + stack[sp] = fn.module.constants[arg] + sp++ + + case compile.MAKETUPLE: + n := int(arg) + tuple := make(Tuple, n) + sp -= n + copy(tuple, stack[sp:]) + stack[sp] = tuple + sp++ + + case compile.MAKELIST: + n := int(arg) + elems := make([]Value, n) + sp -= n + copy(elems, stack[sp:]) + stack[sp] = NewList(elems) + sp++ + + case compile.MAKEFUNC: + funcode := f.Prog.Functions[arg] + tuple := stack[sp-1].(Tuple) + n := len(tuple) - len(funcode.Freevars) + defaults := tuple[:n:n] + freevars := tuple[n:] + stack[sp-1] = &Function{ + funcode: funcode, + module: fn.module, + defaults: defaults, + freevars: freevars, + } + + case compile.LOAD: + n := int(arg) + module := string(stack[sp-1].(String)) + sp-- + + if thread.Load == nil { + err = fmt.Errorf("load not implemented by this application") + break loop + } + + thread.endProfSpan() + dict, err2 := thread.Load(thread, module) + thread.beginProfSpan() + if err2 != nil { + err = wrappedError{ + msg: fmt.Sprintf("cannot load %s: %v", module, err2), + cause: err2, + } + break loop + } + + for i := 0; i < n; i++ { + from := string(stack[sp-1-i].(String)) + v, ok := dict[from] + if !ok { + err = fmt.Errorf("load: name %s not found in module %s", from, module) + if n := spell.Nearest(from, dict.Keys()); n != "" { + err = fmt.Errorf("%s (did you mean %s?)", err, n) + } + break loop + } + stack[sp-1-i] = v + } + + case compile.SETLOCAL: + locals[arg] = stack[sp-1] + sp-- + + case compile.SETLOCALCELL: + locals[arg].(*cell).v = stack[sp-1] + sp-- + + case compile.SETGLOBAL: + fn.module.globals[arg] = stack[sp-1] + sp-- + + case compile.LOCAL: + x := locals[arg] + if x == nil { + err = fmt.Errorf("local variable %s referenced before assignment", f.Locals[arg].Name) + break loop + } + stack[sp] = x + sp++ + + case compile.FREE: + stack[sp] = fn.freevars[arg] + sp++ + + case compile.LOCALCELL: + v := locals[arg].(*cell).v + if v == nil { + err = fmt.Errorf("local variable %s referenced before assignment", f.Locals[arg].Name) + break loop + } + stack[sp] = v + sp++ + + case compile.FREECELL: + v := fn.freevars[arg].(*cell).v + if v == nil { + err = fmt.Errorf("local variable %s referenced before assignment", f.Freevars[arg].Name) + break loop + } + stack[sp] = v + sp++ + + case compile.GLOBAL: + x := fn.module.globals[arg] + if x == nil { + err = fmt.Errorf("global variable %s referenced before assignment", f.Prog.Globals[arg].Name) + break loop + } + stack[sp] = x + sp++ + + case compile.PREDECLARED: + name := f.Prog.Names[arg] + x := fn.module.predeclared[name] + if x == nil { + err = fmt.Errorf("internal error: predeclared variable %s is uninitialized", name) + break loop + } + stack[sp] = x + sp++ + + case compile.UNIVERSAL: + stack[sp] = Universe[f.Prog.Names[arg]] + sp++ + + default: + err = fmt.Errorf("unimplemented: %s", op) + break loop + } + } + // (deferred cleanup runs here) + return result, err +} + +type wrappedError struct { + msg string + cause error +} + +func (e wrappedError) Error() string { + return e.msg +} + +// Implements the xerrors.Wrapper interface +// https://godoc.org/golang.org/x/xerrors#Wrapper +func (e wrappedError) Unwrap() error { + return e.cause +} + +// mandatory is a sentinel value used in a function's 
defaults tuple +// to indicate that a (keyword-only) parameter is mandatory. +type mandatory struct{} + +func (mandatory) String() string { return "mandatory" } +func (mandatory) Type() string { return "mandatory" } +func (mandatory) Freeze() {} // immutable +func (mandatory) Truth() Bool { return False } +func (mandatory) Hash() (uint32, error) { return 0, nil } + +// A cell is a box containing a Value. +// Local variables marked as cells hold their value indirectly +// so that they may be shared by outer and inner nested functions. +// Cells are always accessed using indirect {FREE,LOCAL,SETLOCAL}CELL instructions. +// The FreeVars tuple contains only cells. +// The FREE instruction always yields a cell. +type cell struct{ v Value } + +func (c *cell) String() string { return "cell" } +func (c *cell) Type() string { return "cell" } +func (c *cell) Freeze() { + if c.v != nil { + c.v.Freeze() + } +} +func (c *cell) Truth() Bool { panic("unreachable") } +func (c *cell) Hash() (uint32, error) { panic("unreachable") } diff --git a/vendor/go.starlark.net/starlark/library.go b/vendor/go.starlark.net/starlark/library.go new file mode 100644 index 000000000..1c801be64 --- /dev/null +++ b/vendor/go.starlark.net/starlark/library.go @@ -0,0 +1,2289 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package starlark + +// This file defines the library of built-ins. +// +// Built-ins must explicitly check the "frozen" flag before updating +// mutable types such as lists and dicts. + +import ( + "errors" + "fmt" + "math" + "math/big" + "os" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "go.starlark.net/syntax" +) + +// Universe defines the set of universal built-ins, such as None, True, and len. +// +// The Go application may add or remove items from the +// universe dictionary before Starlark evaluation begins. +// All values in the dictionary must be immutable. +// Starlark programs cannot modify the dictionary. 
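+//
+// For example, an application could install an extra built-in before
+// evaluation begins (a sketch; the "greet" built-in and its behavior
+// are illustrative, not part of this package):
+//
+//	starlark.Universe["greet"] = starlark.NewBuiltin("greet",
+//		func(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+//			return starlark.String("hello"), nil
+//		})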
+var Universe StringDict + +func init() { + // https://github.com/google/starlark-go/blob/master/doc/spec.md#built-in-constants-and-functions + Universe = StringDict{ + "None": None, + "True": True, + "False": False, + "abs": NewBuiltin("abs", abs), + "any": NewBuiltin("any", any), + "all": NewBuiltin("all", all), + "bool": NewBuiltin("bool", bool_), + "bytes": NewBuiltin("bytes", bytes_), + "chr": NewBuiltin("chr", chr), + "dict": NewBuiltin("dict", dict), + "dir": NewBuiltin("dir", dir), + "enumerate": NewBuiltin("enumerate", enumerate), + "fail": NewBuiltin("fail", fail), + "float": NewBuiltin("float", float), + "getattr": NewBuiltin("getattr", getattr), + "hasattr": NewBuiltin("hasattr", hasattr), + "hash": NewBuiltin("hash", hash), + "int": NewBuiltin("int", int_), + "len": NewBuiltin("len", len_), + "list": NewBuiltin("list", list), + "max": NewBuiltin("max", minmax), + "min": NewBuiltin("min", minmax), + "ord": NewBuiltin("ord", ord), + "print": NewBuiltin("print", print), + "range": NewBuiltin("range", range_), + "repr": NewBuiltin("repr", repr), + "reversed": NewBuiltin("reversed", reversed), + "set": NewBuiltin("set", set), // requires resolve.AllowSet + "sorted": NewBuiltin("sorted", sorted), + "str": NewBuiltin("str", str), + "tuple": NewBuiltin("tuple", tuple), + "type": NewBuiltin("type", type_), + "zip": NewBuiltin("zip", zip), + } +} + +// methods of built-in types +// https://github.com/google/starlark-go/blob/master/doc/spec.md#built-in-methods +var ( + bytesMethods = map[string]*Builtin{ + "elems": NewBuiltin("elems", bytes_elems), + } + + dictMethods = map[string]*Builtin{ + "clear": NewBuiltin("clear", dict_clear), + "get": NewBuiltin("get", dict_get), + "items": NewBuiltin("items", dict_items), + "keys": NewBuiltin("keys", dict_keys), + "pop": NewBuiltin("pop", dict_pop), + "popitem": NewBuiltin("popitem", dict_popitem), + "setdefault": NewBuiltin("setdefault", dict_setdefault), + "update": NewBuiltin("update", dict_update), + "values": NewBuiltin("values", dict_values), + } + + listMethods = map[string]*Builtin{ + "append": NewBuiltin("append", list_append), + "clear": NewBuiltin("clear", list_clear), + "extend": NewBuiltin("extend", list_extend), + "index": NewBuiltin("index", list_index), + "insert": NewBuiltin("insert", list_insert), + "pop": NewBuiltin("pop", list_pop), + "remove": NewBuiltin("remove", list_remove), + } + + stringMethods = map[string]*Builtin{ + "capitalize": NewBuiltin("capitalize", string_capitalize), + "codepoint_ords": NewBuiltin("codepoint_ords", string_iterable), + "codepoints": NewBuiltin("codepoints", string_iterable), // sic + "count": NewBuiltin("count", string_count), + "elem_ords": NewBuiltin("elem_ords", string_iterable), + "elems": NewBuiltin("elems", string_iterable), // sic + "endswith": NewBuiltin("endswith", string_startswith), // sic + "find": NewBuiltin("find", string_find), + "format": NewBuiltin("format", string_format), + "index": NewBuiltin("index", string_index), + "isalnum": NewBuiltin("isalnum", string_isalnum), + "isalpha": NewBuiltin("isalpha", string_isalpha), + "isdigit": NewBuiltin("isdigit", string_isdigit), + "islower": NewBuiltin("islower", string_islower), + "isspace": NewBuiltin("isspace", string_isspace), + "istitle": NewBuiltin("istitle", string_istitle), + "isupper": NewBuiltin("isupper", string_isupper), + "join": NewBuiltin("join", string_join), + "lower": NewBuiltin("lower", string_lower), + "lstrip": NewBuiltin("lstrip", string_strip), // sic + "partition": NewBuiltin("partition", string_partition), + 
"removeprefix": NewBuiltin("removeprefix", string_removefix), + "removesuffix": NewBuiltin("removesuffix", string_removefix), + "replace": NewBuiltin("replace", string_replace), + "rfind": NewBuiltin("rfind", string_rfind), + "rindex": NewBuiltin("rindex", string_rindex), + "rpartition": NewBuiltin("rpartition", string_partition), // sic + "rsplit": NewBuiltin("rsplit", string_split), // sic + "rstrip": NewBuiltin("rstrip", string_strip), // sic + "split": NewBuiltin("split", string_split), + "splitlines": NewBuiltin("splitlines", string_splitlines), + "startswith": NewBuiltin("startswith", string_startswith), + "strip": NewBuiltin("strip", string_strip), + "title": NewBuiltin("title", string_title), + "upper": NewBuiltin("upper", string_upper), + } + + setMethods = map[string]*Builtin{ + "union": NewBuiltin("union", set_union), + } +) + +func builtinAttr(recv Value, name string, methods map[string]*Builtin) (Value, error) { + b := methods[name] + if b == nil { + return nil, nil // no such method + } + return b.BindReceiver(recv), nil +} + +func builtinAttrNames(methods map[string]*Builtin) []string { + names := make([]string, 0, len(methods)) + for name := range methods { + names = append(names, name) + } + sort.Strings(names) + return names +} + +// ---- built-in functions ---- + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#abs +func abs(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var x Value + if err := UnpackPositionalArgs("abs", args, kwargs, 1, &x); err != nil { + return nil, err + } + switch x := x.(type) { + case Float: + return Float(math.Abs(float64(x))), nil + case Int: + if x.Sign() >= 0 { + return x, nil + } + return zero.Sub(x), nil + default: + return nil, fmt.Errorf("got %s, want int or float", x.Type()) + } +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#all +func all(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var iterable Iterable + if err := UnpackPositionalArgs("all", args, kwargs, 1, &iterable); err != nil { + return nil, err + } + iter := iterable.Iterate() + defer iter.Done() + var x Value + for iter.Next(&x) { + if !x.Truth() { + return False, nil + } + } + return True, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#any +func any(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var iterable Iterable + if err := UnpackPositionalArgs("any", args, kwargs, 1, &iterable); err != nil { + return nil, err + } + iter := iterable.Iterate() + defer iter.Done() + var x Value + for iter.Next(&x) { + if x.Truth() { + return True, nil + } + } + return False, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#bool +func bool_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var x Value = False + if err := UnpackPositionalArgs("bool", args, kwargs, 0, &x); err != nil { + return nil, err + } + return x.Truth(), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#bytes +func bytes_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(kwargs) > 0 { + return nil, fmt.Errorf("bytes does not accept keyword arguments") + } + if len(args) != 1 { + return nil, fmt.Errorf("bytes: got %d arguments, want exactly 1", len(args)) + } + switch x := args[0].(type) { + case Bytes: + return x, nil + case String: + // Invalid encodings are replaced by that of U+FFFD. 
+ return Bytes(utf8Transcode(string(x))), nil + case Iterable: + // iterable of numeric byte values + var buf strings.Builder + if n := Len(x); n >= 0 { + // common case: known length + buf.Grow(n) + } + iter := x.Iterate() + defer iter.Done() + var elem Value + var b byte + for i := 0; iter.Next(&elem); i++ { + if err := AsInt(elem, &b); err != nil { + return nil, fmt.Errorf("bytes: at index %d, %s", i, err) + } + buf.WriteByte(b) + } + return Bytes(buf.String()), nil + + default: + // Unlike string(foo), which stringifies it, bytes(foo) is an error. + return nil, fmt.Errorf("bytes: got %s, want string, bytes, or iterable of ints", x.Type()) + } +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#chr +func chr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(kwargs) > 0 { + return nil, fmt.Errorf("chr does not accept keyword arguments") + } + if len(args) != 1 { + return nil, fmt.Errorf("chr: got %d arguments, want 1", len(args)) + } + i, err := AsInt32(args[0]) + if err != nil { + return nil, fmt.Errorf("chr: %s", err) + } + if i < 0 { + return nil, fmt.Errorf("chr: Unicode code point %d out of range (<0)", i) + } + if i > unicode.MaxRune { + return nil, fmt.Errorf("chr: Unicode code point U+%X out of range (>0x10FFFF)", i) + } + return String(string(rune(i))), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict +func dict(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(args) > 1 { + return nil, fmt.Errorf("dict: got %d arguments, want at most 1", len(args)) + } + dict := new(Dict) + if err := updateDict(dict, args, kwargs); err != nil { + return nil, fmt.Errorf("dict: %v", err) + } + return dict, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#dir +func dir(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(kwargs) > 0 { + return nil, fmt.Errorf("dir does not accept keyword arguments") + } + if len(args) != 1 { + return nil, fmt.Errorf("dir: got %d arguments, want 1", len(args)) + } + + var names []string + if x, ok := args[0].(HasAttrs); ok { + names = x.AttrNames() + } + sort.Strings(names) + elems := make([]Value, len(names)) + for i, name := range names { + elems[i] = String(name) + } + return NewList(elems), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#enumerate +func enumerate(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var iterable Iterable + var start int + if err := UnpackPositionalArgs("enumerate", args, kwargs, 1, &iterable, &start); err != nil { + return nil, err + } + + iter := iterable.Iterate() + defer iter.Done() + + var pairs []Value + var x Value + + if n := Len(iterable); n >= 0 { + // common case: known length + pairs = make([]Value, 0, n) + array := make(Tuple, 2*n) // allocate a single backing array + for i := 0; iter.Next(&x); i++ { + pair := array[:2:2] + array = array[2:] + pair[0] = MakeInt(start + i) + pair[1] = x + pairs = append(pairs, pair) + } + } else { + // non-sequence (unknown length) + for i := 0; iter.Next(&x); i++ { + pair := Tuple{MakeInt(start + i), x} + pairs = append(pairs, pair) + } + } + + return NewList(pairs), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#fail +func fail(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + sep := " " + if err := UnpackArgs("fail", nil, kwargs, "sep?", &sep); err != nil { + return nil, err + } + buf := new(strings.Builder) + 
buf.WriteString("fail: ") + for i, v := range args { + if i > 0 { + buf.WriteString(sep) + } + if s, ok := AsString(v); ok { + buf.WriteString(s) + } else { + writeValue(buf, v, nil) + } + } + + return nil, errors.New(buf.String()) +} + +func float(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(kwargs) > 0 { + return nil, fmt.Errorf("float does not accept keyword arguments") + } + if len(args) == 0 { + return Float(0.0), nil + } + if len(args) != 1 { + return nil, fmt.Errorf("float got %d arguments, wants 1", len(args)) + } + switch x := args[0].(type) { + case Bool: + if x { + return Float(1.0), nil + } else { + return Float(0.0), nil + } + case Int: + return x.finiteFloat() + case Float: + return x, nil + case String: + if x == "" { + return nil, fmt.Errorf("float: empty string") + } + // +/- NaN or Inf or Infinity (case insensitive)? + s := string(x) + switch x[len(x)-1] { + case 'y', 'Y': + if strings.EqualFold(s, "infinity") || strings.EqualFold(s, "+infinity") { + return inf, nil + } else if strings.EqualFold(s, "-infinity") { + return neginf, nil + } + case 'f', 'F': + if strings.EqualFold(s, "inf") || strings.EqualFold(s, "+inf") { + return inf, nil + } else if strings.EqualFold(s, "-inf") { + return neginf, nil + } + case 'n', 'N': + if strings.EqualFold(s, "nan") || strings.EqualFold(s, "+nan") || strings.EqualFold(s, "-nan") { + return nan, nil + } + } + f, err := strconv.ParseFloat(s, 64) + if math.IsInf(f, 0) { + return nil, fmt.Errorf("floating-point number too large") + } + if err != nil { + return nil, fmt.Errorf("invalid float literal: %s", s) + } + return Float(f), nil + default: + return nil, fmt.Errorf("float got %s, want number or string", x.Type()) + } +} + +var ( + inf = Float(math.Inf(+1)) + neginf = Float(math.Inf(-1)) + nan = Float(math.NaN()) +) + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#getattr +func getattr(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var object, dflt Value + var name string + if err := UnpackPositionalArgs("getattr", args, kwargs, 2, &object, &name, &dflt); err != nil { + return nil, err + } + if object, ok := object.(HasAttrs); ok { + v, err := object.Attr(name) + if err != nil { + // An error could mean the field doesn't exist, + // or it exists but could not be computed. + if dflt != nil { + return dflt, nil + } + return nil, nameErr(b, err) + } + if v != nil { + return v, nil + } + // (nil, nil) => no such field + } + if dflt != nil { + return dflt, nil + } + return nil, fmt.Errorf("getattr: %s has no .%s field or method", object.Type(), name) +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#hasattr +func hasattr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var object Value + var name string + if err := UnpackPositionalArgs("hasattr", args, kwargs, 2, &object, &name); err != nil { + return nil, err + } + if object, ok := object.(HasAttrs); ok { + v, err := object.Attr(name) + if err == nil { + return Bool(v != nil), nil + } + + // An error does not conclusively indicate presence or + // absence of a field: it could occur while computing + // the value of a present attribute, or it could be a + // "no such attribute" error with details. 
+		for _, x := range object.AttrNames() {
+			if x == name {
+				return True, nil
+			}
+		}
+	}
+	return False, nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#hash
+func hash(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	var x Value
+	if err := UnpackPositionalArgs("hash", args, kwargs, 1, &x); err != nil {
+		return nil, err
+	}
+
+	var h int64
+	switch x := x.(type) {
+	case String:
+		// The Starlark spec requires that the hash function be
+		// deterministic across all runs, motivated by the need
+		// for reproducibility of builds. Thus we cannot call
+		// String.Hash, which uses the fastest implementation
+		// available, because it varies across process restarts,
+		// and may evolve with the implementation.
+		h = int64(javaStringHash(string(x)))
+	case Bytes:
+		h = int64(softHashString(string(x))) // FNV32
+	default:
+		return nil, fmt.Errorf("hash: got %s, want string or bytes", x.Type())
+	}
+	return MakeInt64(h), nil
+}
+
+// javaStringHash returns the same hash as would be produced by
+// java.lang.String.hashCode. This requires transcoding the string to
+// UTF-16; transcoding may introduce Unicode replacement characters
+// U+FFFD if s does not contain valid UTF-8.
+func javaStringHash(s string) (h int32) {
+	for _, r := range s {
+		if utf16.IsSurrogate(r) {
+			c1, c2 := utf16.EncodeRune(r)
+			h = 31*h + c1
+			h = 31*h + c2
+		} else {
+			h = 31*h + r // r may be U+FFFD
+		}
+	}
+	return h
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#int
+func int_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	var x Value = zero
+	var base Value
+	if err := UnpackArgs("int", args, kwargs, "x", &x, "base?", &base); err != nil {
+		return nil, err
+	}
+
+	if s, ok := AsString(x); ok {
+		b := 10
+		if base != nil {
+			var err error
+			b, err = AsInt32(base)
+			if err != nil {
+				return nil, fmt.Errorf("int: for base, got %s, want int", base.Type())
+			}
+			if b != 0 && (b < 2 || b > 36) {
+				return nil, fmt.Errorf("int: base must be an integer >= 2 && <= 36")
+			}
+		}
+		res := parseInt(s, b)
+		if res == nil {
+			return nil, fmt.Errorf("int: invalid literal with base %d: %s", b, s)
+		}
+		return res, nil
+	}
+
+	if base != nil {
+		return nil, fmt.Errorf("int: can't convert non-string with explicit base")
+	}
+
+	if b, ok := x.(Bool); ok {
+		if b {
+			return one, nil
+		} else {
+			return zero, nil
+		}
+	}
+
+	i, err := NumberToInt(x)
+	if err != nil {
+		return nil, fmt.Errorf("int: %s", err)
+	}
+	return i, nil
+}
+
+// parseInt defines the behavior of int(string, base=int). It returns nil on error.
+func parseInt(s string, base int) Value {
+	// remove sign
+	var neg bool
+	if s != "" {
+		if s[0] == '+' {
+			s = s[1:]
+		} else if s[0] == '-' {
+			neg = true
+			s = s[1:]
+		}
+	}
+
+	// remove optional base prefix
+	baseprefix := 0
+	if len(s) > 1 && s[0] == '0' {
+		if len(s) > 2 {
+			switch s[1] {
+			case 'o', 'O':
+				baseprefix = 8
+			case 'x', 'X':
+				baseprefix = 16
+			case 'b', 'B':
+				baseprefix = 2
+			}
+		}
+		if baseprefix != 0 {
+			// Remove the base prefix if it matches
+			// the explicit base, or if base=0.
+			if base == 0 || baseprefix == base {
+				base = baseprefix
+				s = s[2:]
+			}
+		} else {
+			// For automatic base detection,
+			// a string starting with zero
+			// must be all zeros.
+			// Thus we reject int("0755", 0).
+			if base == 0 {
+				for i := 1; i < len(s); i++ {
+					if s[i] != '0' {
+						return nil
+					}
+				}
+				return zero
+			}
+		}
+	}
+	if base == 0 {
+		base = 10
+	}
+
+	// we explicitly handled sign above.
+ // if a sign remains, it is invalid. + if s != "" && (s[0] == '-' || s[0] == '+') { + return nil + } + + // s has no sign or base prefix. + if i, ok := new(big.Int).SetString(s, base); ok { + res := MakeBigInt(i) + if neg { + res = zero.Sub(res) + } + return res + } + + return nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#len +func len_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var x Value + if err := UnpackPositionalArgs("len", args, kwargs, 1, &x); err != nil { + return nil, err + } + len := Len(x) + if len < 0 { + return nil, fmt.Errorf("len: value of type %s has no len", x.Type()) + } + return MakeInt(len), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#list +func list(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var iterable Iterable + if err := UnpackPositionalArgs("list", args, kwargs, 0, &iterable); err != nil { + return nil, err + } + var elems []Value + if iterable != nil { + iter := iterable.Iterate() + defer iter.Done() + if n := Len(iterable); n > 0 { + elems = make([]Value, 0, n) // preallocate if length known + } + var x Value + for iter.Next(&x) { + elems = append(elems, x) + } + } + return NewList(elems), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#min +func minmax(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(args) == 0 { + return nil, fmt.Errorf("%s requires at least one positional argument", b.Name()) + } + var keyFunc Callable + if err := UnpackArgs(b.Name(), nil, kwargs, "key?", &keyFunc); err != nil { + return nil, err + } + var op syntax.Token + if b.Name() == "max" { + op = syntax.GT + } else { + op = syntax.LT + } + var iterable Value + if len(args) == 1 { + iterable = args[0] + } else { + iterable = args + } + iter := Iterate(iterable) + if iter == nil { + return nil, fmt.Errorf("%s: %s value is not iterable", b.Name(), iterable.Type()) + } + defer iter.Done() + var extremum Value + if !iter.Next(&extremum) { + return nil, nameErr(b, "argument is an empty sequence") + } + + var extremeKey Value + var keyargs Tuple + if keyFunc == nil { + extremeKey = extremum + } else { + keyargs = Tuple{extremum} + res, err := Call(thread, keyFunc, keyargs, nil) + if err != nil { + return nil, err // to preserve backtrace, don't modify error + } + extremeKey = res + } + + var x Value + for iter.Next(&x) { + var key Value + if keyFunc == nil { + key = x + } else { + keyargs[0] = x + res, err := Call(thread, keyFunc, keyargs, nil) + if err != nil { + return nil, err // to preserve backtrace, don't modify error + } + key = res + } + + if ok, err := Compare(op, key, extremeKey); err != nil { + return nil, nameErr(b, err) + } else if ok { + extremum = x + extremeKey = key + } + } + return extremum, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#ord +func ord(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(kwargs) > 0 { + return nil, fmt.Errorf("ord does not accept keyword arguments") + } + if len(args) != 1 { + return nil, fmt.Errorf("ord: got %d arguments, want 1", len(args)) + } + switch x := args[0].(type) { + case String: + // ord(string) returns int value of sole rune. 
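+		// For example, ord("A") == 65 and ord("世") == 19990.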
+ s := string(x) + r, sz := utf8.DecodeRuneInString(s) + if sz == 0 || sz != len(s) { + n := utf8.RuneCountInString(s) + return nil, fmt.Errorf("ord: string encodes %d Unicode code points, want 1", n) + } + return MakeInt(int(r)), nil + + case Bytes: + // ord(bytes) returns int value of sole byte. + if len(x) != 1 { + return nil, fmt.Errorf("ord: bytes has length %d, want 1", len(x)) + } + return MakeInt(int(x[0])), nil + default: + return nil, fmt.Errorf("ord: got %s, want string or bytes", x.Type()) + } +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#print +func print(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + sep := " " + if err := UnpackArgs("print", nil, kwargs, "sep?", &sep); err != nil { + return nil, err + } + buf := new(strings.Builder) + for i, v := range args { + if i > 0 { + buf.WriteString(sep) + } + if s, ok := AsString(v); ok { + buf.WriteString(s) + } else if b, ok := v.(Bytes); ok { + buf.WriteString(string(b)) + } else { + writeValue(buf, v, nil) + } + } + + s := buf.String() + if thread.Print != nil { + thread.Print(thread, s) + } else { + fmt.Fprintln(os.Stderr, s) + } + return None, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#range +func range_(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var start, stop, step int + step = 1 + if err := UnpackPositionalArgs("range", args, kwargs, 1, &start, &stop, &step); err != nil { + return nil, err + } + + if len(args) == 1 { + // range(stop) + start, stop = 0, start + } + if step == 0 { + // we were given range(start, stop, 0) + return nil, nameErr(b, "step argument must not be zero") + } + + return rangeValue{start: start, stop: stop, step: step, len: rangeLen(start, stop, step)}, nil +} + +// A rangeValue is a comparable, immutable, indexable sequence of integers +// defined by the three parameters to a range(...) call. +// Invariant: step != 0. +type rangeValue struct{ start, stop, step, len int } + +var ( + _ Indexable = rangeValue{} + _ Sequence = rangeValue{} + _ Comparable = rangeValue{} + _ Sliceable = rangeValue{} +) + +func (r rangeValue) Len() int { return r.len } +func (r rangeValue) Index(i int) Value { return MakeInt(r.start + i*r.step) } +func (r rangeValue) Iterate() Iterator { return &rangeIterator{r, 0} } + +// rangeLen calculates the length of a range with the provided start, stop, and step. +// caller must ensure that step is non-zero. 
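+// For example, rangeLen(0, 10, 3) == 4 (0, 3, 6, 9) and
+// rangeLen(10, 0, -2) == 5 (10, 8, 6, 4, 2).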
+func rangeLen(start, stop, step int) int { + switch { + case step > 0: + if stop > start { + return (stop-1-start)/step + 1 + } + case step < 0: + if start > stop { + return (start-1-stop)/-step + 1 + } + default: + panic("rangeLen: zero step") + } + return 0 +} + +func (r rangeValue) Slice(start, end, step int) Value { + newStart := r.start + r.step*start + newStop := r.start + r.step*end + newStep := r.step * step + return rangeValue{ + start: newStart, + stop: newStop, + step: newStep, + len: rangeLen(newStart, newStop, newStep), + } +} + +func (r rangeValue) Freeze() {} // immutable +func (r rangeValue) String() string { + if r.step != 1 { + return fmt.Sprintf("range(%d, %d, %d)", r.start, r.stop, r.step) + } else if r.start != 0 { + return fmt.Sprintf("range(%d, %d)", r.start, r.stop) + } else { + return fmt.Sprintf("range(%d)", r.stop) + } +} +func (r rangeValue) Type() string { return "range" } +func (r rangeValue) Truth() Bool { return r.len > 0 } +func (r rangeValue) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: range") } + +func (x rangeValue) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { + y := y_.(rangeValue) + switch op { + case syntax.EQL: + return rangeEqual(x, y), nil + case syntax.NEQ: + return !rangeEqual(x, y), nil + default: + return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) + } +} + +func rangeEqual(x, y rangeValue) bool { + // Two ranges compare equal if they denote the same sequence. + if x.len != y.len { + return false // sequences differ in length + } + if x.len == 0 { + return true // both sequences are empty + } + if x.start != y.start { + return false // first element differs + } + return x.len == 1 || x.step == y.step +} + +func (r rangeValue) contains(x Int) bool { + x32, err := AsInt32(x) + if err != nil { + return false // out of range + } + delta := x32 - r.start + quo, rem := delta/r.step, delta%r.step + return rem == 0 && 0 <= quo && quo < r.len +} + +type rangeIterator struct { + r rangeValue + i int +} + +func (it *rangeIterator) Next(p *Value) bool { + if it.i < it.r.len { + *p = it.r.Index(it.i) + it.i++ + return true + } + return false +} +func (*rangeIterator) Done() {} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#repr +func repr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var x Value + if err := UnpackPositionalArgs("repr", args, kwargs, 1, &x); err != nil { + return nil, err + } + return String(x.String()), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#reversed +func reversed(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var iterable Iterable + if err := UnpackPositionalArgs("reversed", args, kwargs, 1, &iterable); err != nil { + return nil, err + } + iter := iterable.Iterate() + defer iter.Done() + var elems []Value + if n := Len(args[0]); n >= 0 { + elems = make([]Value, 0, n) // preallocate if length known + } + var x Value + for iter.Next(&x) { + elems = append(elems, x) + } + n := len(elems) + for i := 0; i < n>>1; i++ { + elems[i], elems[n-1-i] = elems[n-1-i], elems[i] + } + return NewList(elems), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#set +func set(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var iterable Iterable + if err := UnpackPositionalArgs("set", args, kwargs, 0, &iterable); err != nil { + return nil, err + } + set := new(Set) + if iterable != nil { + iter := iterable.Iterate() + defer 
iter.Done() + var x Value + for iter.Next(&x) { + if err := set.Insert(x); err != nil { + return nil, nameErr(b, err) + } + } + } + return set, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#sorted +func sorted(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + // Oddly, Python's sorted permits all arguments to be positional, thus so do we. + var iterable Iterable + var key Callable + var reverse bool + if err := UnpackArgs("sorted", args, kwargs, + "iterable", &iterable, + "key?", &key, + "reverse?", &reverse, + ); err != nil { + return nil, err + } + + iter := iterable.Iterate() + defer iter.Done() + var values []Value + if n := Len(iterable); n > 0 { + values = make(Tuple, 0, n) // preallocate if length is known + } + var x Value + for iter.Next(&x) { + values = append(values, x) + } + + // Derive keys from values by applying key function. + var keys []Value + if key != nil { + keys = make([]Value, len(values)) + for i, v := range values { + k, err := Call(thread, key, Tuple{v}, nil) + if err != nil { + return nil, err // to preserve backtrace, don't modify error + } + keys[i] = k + } + } + + slice := &sortSlice{keys: keys, values: values} + if reverse { + sort.Stable(sort.Reverse(slice)) + } else { + sort.Stable(slice) + } + return NewList(slice.values), slice.err +} + +type sortSlice struct { + keys []Value // nil => values[i] is key + values []Value + err error +} + +func (s *sortSlice) Len() int { return len(s.values) } +func (s *sortSlice) Less(i, j int) bool { + keys := s.keys + if s.keys == nil { + keys = s.values + } + ok, err := Compare(syntax.LT, keys[i], keys[j]) + if err != nil { + s.err = err + } + return ok +} +func (s *sortSlice) Swap(i, j int) { + if s.keys != nil { + s.keys[i], s.keys[j] = s.keys[j], s.keys[i] + } + s.values[i], s.values[j] = s.values[j], s.values[i] +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#str +func str(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(kwargs) > 0 { + return nil, fmt.Errorf("str does not accept keyword arguments") + } + if len(args) != 1 { + return nil, fmt.Errorf("str: got %d arguments, want exactly 1", len(args)) + } + switch x := args[0].(type) { + case String: + return x, nil + case Bytes: + // Invalid encodings are replaced by that of U+FFFD. + return String(utf8Transcode(string(x))), nil + default: + return String(x.String()), nil + } +} + +// utf8Transcode returns the UTF-8-to-UTF-8 transcoding of s. +// The effect is that each code unit that is part of an +// invalid sequence is replaced by U+FFFD. 
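+// For example, utf8Transcode("a\xffb") == "a\uFFFDb".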
+func utf8Transcode(s string) string { + if utf8.ValidString(s) { + return s + } + var out strings.Builder + for _, r := range s { + out.WriteRune(r) + } + return out.String() +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#tuple +func tuple(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var iterable Iterable + if err := UnpackPositionalArgs("tuple", args, kwargs, 0, &iterable); err != nil { + return nil, err + } + if len(args) == 0 { + return Tuple(nil), nil + } + iter := iterable.Iterate() + defer iter.Done() + var elems Tuple + if n := Len(iterable); n > 0 { + elems = make(Tuple, 0, n) // preallocate if length is known + } + var x Value + for iter.Next(&x) { + elems = append(elems, x) + } + return elems, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#type +func type_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(kwargs) > 0 { + return nil, fmt.Errorf("type does not accept keyword arguments") + } + if len(args) != 1 { + return nil, fmt.Errorf("type: got %d arguments, want exactly 1", len(args)) + } + return String(args[0].Type()), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#zip +func zip(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if len(kwargs) > 0 { + return nil, fmt.Errorf("zip does not accept keyword arguments") + } + rows, cols := 0, len(args) + iters := make([]Iterator, cols) + defer func() { + for _, iter := range iters { + if iter != nil { + iter.Done() + } + } + }() + for i, seq := range args { + it := Iterate(seq) + if it == nil { + return nil, fmt.Errorf("zip: argument #%d is not iterable: %s", i+1, seq.Type()) + } + iters[i] = it + n := Len(seq) + if i == 0 || n < rows { + rows = n // possibly -1 + } + } + var result []Value + if rows >= 0 { + // length known + result = make([]Value, rows) + array := make(Tuple, cols*rows) // allocate a single backing array + for i := 0; i < rows; i++ { + tuple := array[:cols:cols] + array = array[cols:] + for j, iter := range iters { + iter.Next(&tuple[j]) + } + result[i] = tuple + } + } else { + // length not known + outer: + for { + tuple := make(Tuple, cols) + for i, iter := range iters { + if !iter.Next(&tuple[i]) { + break outer + } + } + result = append(result, tuple) + } + } + return NewList(result), nil +} + +// ---- methods of built-in types --- + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get +func dict_get(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var key, dflt Value + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { + return nil, err + } + if v, ok, err := b.Receiver().(*Dict).Get(key); err != nil { + return nil, nameErr(b, err) + } else if ok { + return v, nil + } else if dflt != nil { + return dflt, nil + } + return None, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear +func dict_clear(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + return None, b.Receiver().(*Dict).Clear() +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items +func dict_items(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + items := b.Receiver().(*Dict).Items() + res := make([]Value, len(items)) + for 
i, item := range items {
+		res[i] = item // convert [2]Value to Value
+	}
+	return NewList(res), nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys
+func dict_keys(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {
+		return nil, err
+	}
+	return NewList(b.Receiver().(*Dict).Keys()), nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop
+func dict_pop(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	var k, d Value
+	if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil {
+		return nil, err
+	}
+	if v, found, err := b.Receiver().(*Dict).Delete(k); err != nil {
+		return nil, nameErr(b, err) // dict is frozen or key is unhashable
+	} else if found {
+		return v, nil
+	} else if d != nil {
+		return d, nil
+	}
+	return nil, nameErr(b, "missing key")
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·popitem
+func dict_popitem(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {
+		return nil, err
+	}
+	recv := b.Receiver().(*Dict)
+	k, ok := recv.ht.first()
+	if !ok {
+		return nil, nameErr(b, "empty dict")
+	}
+	v, _, err := recv.Delete(k)
+	if err != nil {
+		return nil, nameErr(b, err) // dict is frozen
+	}
+	return Tuple{k, v}, nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault
+func dict_setdefault(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	var key, dflt Value = nil, None
+	if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil {
+		return nil, err
+	}
+	dict := b.Receiver().(*Dict)
+	if v, ok, err := dict.Get(key); err != nil {
+		return nil, nameErr(b, err)
+	} else if ok {
+		return v, nil
+	} else if err := dict.SetKey(key, dflt); err != nil {
+		return nil, nameErr(b, err)
+	} else {
+		return dflt, nil
+	}
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update
+func dict_update(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	if len(args) > 1 {
+		return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args))
+	}
+	if err := updateDict(b.Receiver().(*Dict), args, kwargs); err != nil {
+		return nil, fmt.Errorf("update: %v", err)
+	}
+	return None, nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·values
+func dict_values(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil {
+		return nil, err
+	}
+	items := b.Receiver().(*Dict).Items()
+	res := make([]Value, len(items))
+	for i, item := range items {
+		res[i] = item[1]
+	}
+	return NewList(res), nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·append
+func list_append(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	var object Value
+	if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &object); err != nil {
+		return nil, err
+	}
+	recv := b.Receiver().(*List)
+	if err := recv.checkMutable("append to"); err != nil {
+		return nil, nameErr(b, err)
+	}
+	recv.elems = append(recv.elems, object)
+	return None, nil
+}
+
+// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·clear
+func list_clear(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) {
+	if err :=
UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + if err := b.Receiver().(*List).Clear(); err != nil { + return nil, nameErr(b, err) + } + return None, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·extend +func list_extend(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := b.Receiver().(*List) + var iterable Iterable + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &iterable); err != nil { + return nil, err + } + if err := recv.checkMutable("extend"); err != nil { + return nil, nameErr(b, err) + } + listExtend(recv, iterable) + return None, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·index +func list_index(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var value, start_, end_ Value + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &value, &start_, &end_); err != nil { + return nil, err + } + + recv := b.Receiver().(*List) + start, end, err := indices(start_, end_, recv.Len()) + if err != nil { + return nil, nameErr(b, err) + } + + for i := start; i < end; i++ { + if eq, err := Equal(recv.elems[i], value); err != nil { + return nil, nameErr(b, err) + } else if eq { + return MakeInt(i), nil + } + } + return nil, nameErr(b, "value not in list") +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·insert +func list_insert(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := b.Receiver().(*List) + var index int + var object Value + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 2, &index, &object); err != nil { + return nil, err + } + if err := recv.checkMutable("insert into"); err != nil { + return nil, nameErr(b, err) + } + + if index < 0 { + index += recv.Len() + } + + if index >= recv.Len() { + // end + recv.elems = append(recv.elems, object) + } else { + if index < 0 { + index = 0 // start + } + recv.elems = append(recv.elems, nil) + copy(recv.elems[index+1:], recv.elems[index:]) // slide up one + recv.elems[index] = object + } + return None, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·remove +func list_remove(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := b.Receiver().(*List) + var value Value + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &value); err != nil { + return nil, err + } + if err := recv.checkMutable("remove from"); err != nil { + return nil, nameErr(b, err) + } + for i, elem := range recv.elems { + if eq, err := Equal(elem, value); err != nil { + return nil, fmt.Errorf("remove: %v", err) + } else if eq { + recv.elems = append(recv.elems[:i], recv.elems[i+1:]...) + return None, nil + } + } + return nil, fmt.Errorf("remove: element not found") +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·pop +func list_pop(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := b.Receiver() + list := recv.(*List) + n := list.Len() + i := n - 1 + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &i); err != nil { + return nil, err + } + origI := i + if i < 0 { + i += n + } + if i < 0 || i >= n { + return nil, nameErr(b, outOfRange(origI, n, list)) + } + if err := list.checkMutable("pop from"); err != nil { + return nil, nameErr(b, err) + } + res := list.elems[i] + list.elems = append(list.elems[:i], list.elems[i+1:]...) 
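+	// (the append above splices element i out of the slice in place)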
+ return res, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·capitalize +func string_capitalize(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + s := string(b.Receiver().(String)) + res := new(strings.Builder) + res.Grow(len(s)) + for i, r := range s { + if i == 0 { + r = unicode.ToTitle(r) + } else { + r = unicode.ToLower(r) + } + res.WriteRune(r) + } + return String(res.String()), nil +} + +// string_iterable returns an unspecified iterable value whose iterator yields: +// - elems: successive 1-byte substrings +// - codepoints: successive substrings that encode a single Unicode code point. +// - elem_ords: numeric values of successive bytes +// - codepoint_ords: numeric values of successive Unicode code points +func string_iterable(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + s := b.Receiver().(String) + ords := b.Name()[len(b.Name())-2] == 'd' + codepoints := b.Name()[0] == 'c' + if codepoints { + return stringCodepoints{s, ords}, nil + } else { + return stringElems{s, ords}, nil + } +} + +// bytes_elems returns an unspecified iterable value whose +// iterator yields the int values of successive elements. +func bytes_elems(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + return bytesIterable{b.Receiver().(Bytes)}, nil +} + +// A bytesIterable is an iterable returned by bytes.elems(), +// whose iterator yields a sequence of numeric bytes values. +type bytesIterable struct{ bytes Bytes } + +var _ Iterable = (*bytesIterable)(nil) + +func (bi bytesIterable) String() string { return bi.bytes.String() + ".elems()" } +func (bi bytesIterable) Type() string { return "bytes.elems" } +func (bi bytesIterable) Freeze() {} // immutable +func (bi bytesIterable) Truth() Bool { return True } +func (bi bytesIterable) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", bi.Type()) } +func (bi bytesIterable) Iterate() Iterator { return &bytesIterator{bi.bytes} } + +type bytesIterator struct{ bytes Bytes } + +func (it *bytesIterator) Next(p *Value) bool { + if it.bytes == "" { + return false + } + *p = MakeInt(int(it.bytes[0])) + it.bytes = it.bytes[1:] + return true +} + +func (*bytesIterator) Done() {} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·count +func string_count(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var sub string + var start_, end_ Value + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sub, &start_, &end_); err != nil { + return nil, err + } + + recv := string(b.Receiver().(String)) + start, end, err := indices(start_, end_, len(recv)) + if err != nil { + return nil, nameErr(b, err) + } + + var slice string + if start < end { + slice = recv[start:end] + } + return MakeInt(strings.Count(slice, sub)), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isalnum +func string_isalnum(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + recv := string(b.Receiver().(String)) + for _, r := range recv { + if !unicode.IsLetter(r) && !unicode.IsDigit(r) { + return False, nil + } + } + return 
Bool(recv != ""), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isalpha +func string_isalpha(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + recv := string(b.Receiver().(String)) + for _, r := range recv { + if !unicode.IsLetter(r) { + return False, nil + } + } + return Bool(recv != ""), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isdigit +func string_isdigit(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + recv := string(b.Receiver().(String)) + for _, r := range recv { + if !unicode.IsDigit(r) { + return False, nil + } + } + return Bool(recv != ""), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·islower +func string_islower(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + recv := string(b.Receiver().(String)) + return Bool(isCasedString(recv) && recv == strings.ToLower(recv)), nil +} + +// isCasedString reports whether its argument contains any cased code points. +func isCasedString(s string) bool { + for _, r := range s { + if isCasedRune(r) { + return true + } + } + return false +} + +func isCasedRune(r rune) bool { + // It's unclear what the correct behavior is for a rune such as 'ffi', + // a lowercase letter with no upper or title case and no SimpleFold. + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || unicode.SimpleFold(r) != r +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isspace +func string_isspace(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + recv := string(b.Receiver().(String)) + for _, r := range recv { + if !unicode.IsSpace(r) { + return False, nil + } + } + return Bool(recv != ""), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·istitle +func string_istitle(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + recv := string(b.Receiver().(String)) + + // Python semantics differ from x==strings.{To,}Title(x) in Go: + // "uppercase characters may only follow uncased characters and + // lowercase characters only cased ones." + var cased, prevCased bool + for _, r := range recv { + if 'A' <= r && r <= 'Z' || unicode.IsTitle(r) { // e.g. 
"Ç…" + if prevCased { + return False, nil + } + prevCased = true + cased = true + } else if unicode.IsLower(r) { + if !prevCased { + return False, nil + } + prevCased = true + cased = true + } else if unicode.IsUpper(r) { + return False, nil + } else { + prevCased = false + } + } + return Bool(cased), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isupper +func string_isupper(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + recv := string(b.Receiver().(String)) + return Bool(isCasedString(recv) && recv == strings.ToUpper(recv)), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·find +func string_find(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + return string_find_impl(b, args, kwargs, true, false) +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·format +func string_format(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + format := string(b.Receiver().(String)) + var auto, manual bool // kinds of positional indexing used + buf := new(strings.Builder) + index := 0 + for { + literal := format + i := strings.IndexByte(format, '{') + if i >= 0 { + literal = format[:i] + } + + // Replace "}}" with "}" in non-field portion, rejecting a lone '}'. + for { + j := strings.IndexByte(literal, '}') + if j < 0 { + buf.WriteString(literal) + break + } + if len(literal) == j+1 || literal[j+1] != '}' { + return nil, fmt.Errorf("format: single '}' in format") + } + buf.WriteString(literal[:j+1]) + literal = literal[j+2:] + } + + if i < 0 { + break // end of format string + } + + if i+1 < len(format) && format[i+1] == '{' { + // "{{" means a literal '{' + buf.WriteByte('{') + format = format[i+2:] + continue + } + + format = format[i+1:] + i = strings.IndexByte(format, '}') + if i < 0 { + return nil, fmt.Errorf("format: unmatched '{' in format") + } + + var arg Value + conv := "s" + var spec string + + field := format[:i] + format = format[i+1:] + + var name string + if i := strings.IndexByte(field, '!'); i < 0 { + // "name" or "name:spec" + if i := strings.IndexByte(field, ':'); i < 0 { + name = field + } else { + name = field[:i] + spec = field[i+1:] + } + } else { + // "name!conv" or "name!conv:spec" + name = field[:i] + field = field[i+1:] + // "conv" or "conv:spec" + if i := strings.IndexByte(field, ':'); i < 0 { + conv = field + } else { + conv = field[:i] + spec = field[i+1:] + } + } + + if name == "" { + // "{}": automatic indexing + if manual { + return nil, fmt.Errorf("format: cannot switch from manual field specification to automatic field numbering") + } + auto = true + if index >= len(args) { + return nil, fmt.Errorf("format: tuple index out of range") + } + arg = args[index] + index++ + } else if num, ok := decimal(name); ok { + // positional argument + if auto { + return nil, fmt.Errorf("format: cannot switch from automatic field numbering to manual field specification") + } + manual = true + if num >= len(args) { + return nil, fmt.Errorf("format: tuple index out of range") + } else { + arg = args[num] + } + } else { + // keyword argument + for _, kv := range kwargs { + if string(kv[0].(String)) == name { + arg = kv[1] + break + } + } + if arg == nil { + // Starlark does not support Python's x.y or a[i] syntaxes, + // or nested use of {...}. 
+ if strings.Contains(name, ".") { + return nil, fmt.Errorf("format: attribute syntax x.y is not supported in replacement fields: %s", name) + } + if strings.Contains(name, "[") { + return nil, fmt.Errorf("format: element syntax a[i] is not supported in replacement fields: %s", name) + } + if strings.Contains(name, "{") { + return nil, fmt.Errorf("format: nested replacement fields not supported") + } + return nil, fmt.Errorf("format: keyword %s not found", name) + } + } + + if spec != "" { + // Starlark does not support Python's format_spec features. + return nil, fmt.Errorf("format spec features not supported in replacement fields: %s", spec) + } + + switch conv { + case "s": + if str, ok := AsString(arg); ok { + buf.WriteString(str) + } else { + writeValue(buf, arg, nil) + } + case "r": + writeValue(buf, arg, nil) + default: + return nil, fmt.Errorf("format: unknown conversion %q", conv) + } + } + return String(buf.String()), nil +} + +// decimal interprets s as a sequence of decimal digits. +func decimal(s string) (x int, ok bool) { + n := len(s) + for i := 0; i < n; i++ { + digit := s[i] - '0' + if digit > 9 { + return 0, false + } + x = x*10 + int(digit) + if x < 0 { + return 0, false // overflow + } + } + return x, true +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·index +func string_index(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + return string_find_impl(b, args, kwargs, false, false) +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·join +func string_join(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := string(b.Receiver().(String)) + var iterable Iterable + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &iterable); err != nil { + return nil, err + } + iter := iterable.Iterate() + defer iter.Done() + buf := new(strings.Builder) + var x Value + for i := 0; iter.Next(&x); i++ { + if i > 0 { + buf.WriteString(recv) + } + s, ok := AsString(x) + if !ok { + return nil, fmt.Errorf("join: in list, want string, got %s", x.Type()) + } + buf.WriteString(s) + } + return String(buf.String()), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·lower +func string_lower(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + return String(strings.ToLower(string(b.Receiver().(String)))), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·partition +func string_partition(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := string(b.Receiver().(String)) + var sep string + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sep); err != nil { + return nil, err + } + if sep == "" { + return nil, nameErr(b, "empty separator") + } + var i int + if b.Name()[0] == 'p' { + i = strings.Index(recv, sep) // partition + } else { + i = strings.LastIndex(recv, sep) // rpartition + } + tuple := make(Tuple, 0, 3) + if i < 0 { + if b.Name()[0] == 'p' { + tuple = append(tuple, String(recv), String(""), String("")) + } else { + tuple = append(tuple, String(""), String(""), String(recv)) + } + } else { + tuple = append(tuple, String(recv[:i]), String(sep), String(recv[i+len(sep):])) + } + return tuple, nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·removeprefix +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·removesuffix +func string_removefix(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := string(b.Receiver().(String)) + var fix string + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &fix); err != nil { + return nil, err + } + if b.name[len("remove")] == 'p' { + recv = strings.TrimPrefix(recv, fix) + } else { + recv = strings.TrimSuffix(recv, fix) + } + return String(recv), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·replace +func string_replace(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := string(b.Receiver().(String)) + var old, new string + count := -1 + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 2, &old, &new, &count); err != nil { + return nil, err + } + return String(strings.Replace(recv, old, new, count)), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rfind +func string_rfind(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + return string_find_impl(b, args, kwargs, true, true) +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rindex +func string_rindex(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + return string_find_impl(b, args, kwargs, false, true) +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·startswith +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·endswith +func string_startswith(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var x Value + var start, end Value = None, None + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &x, &start, &end); err != nil { + return nil, err + } + + // compute effective substring. + s := string(b.Receiver().(String)) + if start, end, err := indices(start, end, len(s)); err != nil { + return nil, nameErr(b, err) + } else { + if end < start { + end = start // => empty result + } + s = s[start:end] + } + + f := strings.HasPrefix + if b.Name()[0] == 'e' { // endswith + f = strings.HasSuffix + } + + switch x := x.(type) { + case Tuple: + for i, x := range x { + prefix, ok := AsString(x) + if !ok { + return nil, fmt.Errorf("%s: want string, got %s, for element %d", + b.Name(), x.Type(), i) + } + if f(s, prefix) { + return True, nil + } + } + return False, nil + case String: + return Bool(f(s, string(x))), nil + } + return nil, fmt.Errorf("%s: got %s, want string or tuple of string", b.Name(), x.Type()) +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·strip +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·lstrip +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rstrip +func string_strip(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var chars string + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &chars); err != nil { + return nil, err + } + recv := string(b.Receiver().(String)) + var s string + switch b.Name()[0] { + case 's': // strip + if chars != "" { + s = strings.Trim(recv, chars) + } else { + s = strings.TrimSpace(recv) + } + case 'l': // lstrip + if chars != "" { + s = strings.TrimLeft(recv, chars) + } else { + s = strings.TrimLeftFunc(recv, unicode.IsSpace) + } + case 'r': // rstrip + if chars != "" { + s = strings.TrimRight(recv, chars) + } else { + s = strings.TrimRightFunc(recv, unicode.IsSpace) + } + } + return String(s), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·title +func
string_title(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + + s := string(b.Receiver().(String)) + + // Python semantics differ from x==strings.{To,}Title(x) in Go: + // "uppercase characters may only follow uncased characters and + // lowercase characters only cased ones." + buf := new(strings.Builder) + buf.Grow(len(s)) + var prevCased bool + for _, r := range s { + if prevCased { + r = unicode.ToLower(r) + } else { + r = unicode.ToTitle(r) + } + prevCased = isCasedRune(r) + buf.WriteRune(r) + } + return String(buf.String()), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·upper +func string_upper(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { + return nil, err + } + return String(strings.ToUpper(string(b.Receiver().(String)))), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·split +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rsplit +func string_split(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + recv := string(b.Receiver().(String)) + var sep_ Value + maxsplit := -1 + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &sep_, &maxsplit); err != nil { + return nil, err + } + + var res []string + + if sep_ == nil || sep_ == None { + // special case: split on whitespace + if maxsplit < 0 { + res = strings.Fields(recv) + } else if b.Name() == "split" { + res = splitspace(recv, maxsplit) + } else { // rsplit + res = rsplitspace(recv, maxsplit) + } + + } else if sep, ok := AsString(sep_); ok { + if sep == "" { + return nil, fmt.Errorf("split: empty separator") + } + // usual case: split on non-empty separator + if maxsplit < 0 { + res = strings.Split(recv, sep) + } else if b.Name() == "split" { + res = strings.SplitN(recv, sep, maxsplit+1) + } else { // rsplit + res = strings.Split(recv, sep) + if excess := len(res) - maxsplit; excess > 0 { + res[0] = strings.Join(res[:excess], sep) + res = append(res[:1], res[excess:]...) + } + } + + } else { + return nil, fmt.Errorf("split: got %s for separator, want string", sep_.Type()) + } + + list := make([]Value, len(res)) + for i, x := range res { + list[i] = String(x) + } + return NewList(list), nil +} + +// Precondition: max >= 0. +func rsplitspace(s string, max int) []string { + res := make([]string, 0, max+1) + end := -1 // index of field end, or -1 in a region of spaces. + for i := len(s); i > 0; { + r, sz := utf8.DecodeLastRuneInString(s[:i]) + if unicode.IsSpace(r) { + if end >= 0 { + if len(res) == max { + break // let this field run to the start + } + res = append(res, s[i:end]) + end = -1 + } + } else if end < 0 { + end = i + } + i -= sz + } + if end >= 0 { + res = append(res, s[:end]) + } + + resLen := len(res) + for i := 0; i < resLen/2; i++ { + res[i], res[resLen-1-i] = res[resLen-1-i], res[i] + } + + return res +} + +// Precondition: max >= 0. 
+func splitspace(s string, max int) []string { + var res []string + start := -1 // index of field start, or -1 in a region of spaces + for i, r := range s { + if unicode.IsSpace(r) { + if start >= 0 { + if len(res) == max { + break // let this field run to the end + } + res = append(res, s[start:i]) + start = -1 + } + } else if start == -1 { + start = i + } + } + if start >= 0 { + res = append(res, s[start:]) + } + return res +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·splitlines +func string_splitlines(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var keepends bool + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &keepends); err != nil { + return nil, err + } + var lines []string + if s := string(b.Receiver().(String)); s != "" { + // TODO(adonovan): handle CRLF correctly. + if keepends { + lines = strings.SplitAfter(s, "\n") + } else { + lines = strings.Split(s, "\n") + } + if strings.HasSuffix(s, "\n") { + lines = lines[:len(lines)-1] + } + } + list := make([]Value, len(lines)) + for i, x := range lines { + list[i] = String(x) + } + return NewList(list), nil +} + +// https://github.com/google/starlark-go/blob/master/doc/spec.md#set·union. +func set_union(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { + var iterable Iterable + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &iterable); err != nil { + return nil, err + } + iter := iterable.Iterate() + defer iter.Done() + union, err := b.Receiver().(*Set).Union(iter) + if err != nil { + return nil, nameErr(b, err) + } + return union, nil +} + +// Common implementation of string_{r}{find,index}. +func string_find_impl(b *Builtin, args Tuple, kwargs []Tuple, allowError, last bool) (Value, error) { + var sub string + var start_, end_ Value + if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sub, &start_, &end_); err != nil { + return nil, err + } + + s := string(b.Receiver().(String)) + start, end, err := indices(start_, end_, len(s)) + if err != nil { + return nil, nameErr(b, err) + } + var slice string + if start < end { + slice = s[start:end] + } + + var i int + if last { + i = strings.LastIndex(slice, sub) + } else { + i = strings.Index(slice, sub) + } + if i < 0 { + if !allowError { + return nil, nameErr(b, "substring not found") + } + return MakeInt(-1), nil + } + return MakeInt(i + start), nil +} + +// Common implementation of builtin dict function and dict.update method. +// Precondition: len(updates) == 0 or 1. +func updateDict(dict *Dict, updates Tuple, kwargs []Tuple) error { + if len(updates) == 1 { + switch updates := updates[0].(type) { + case IterableMapping: + // Iterate over dict's key/value pairs, not just keys. 
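For illustration (not part of the patch), the argument forms updateDict accepts, exercised from Starlark through the public API; a minimal sketch with arbitrary names:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	src := `
d = dict([("a", 1)], b = 2)   # iterable of pairs, plus keyword args
d.update({"c": 3})            # an IterableMapping contributes its items
d.update([("d", 4)], e = 5)   # a sequence of pairs, plus more kwargs
n = len(d)
`
	thread := &starlark.Thread{Name: "dictdemo"}
	globals, err := starlark.ExecFile(thread, "dictdemo.star", src, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(globals["n"]) // 5
}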
+ for _, item := range updates.Items() { + if err := dict.SetKey(item[0], item[1]); err != nil { + return err // dict is frozen + } + } + default: + // all other sequences + iter := Iterate(updates) + if iter == nil { + return fmt.Errorf("got %s, want iterable", updates.Type()) + } + defer iter.Done() + var pair Value + for i := 0; iter.Next(&pair); i++ { + iter2 := Iterate(pair) + if iter2 == nil { + return fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) + + } + defer iter2.Done() + len := Len(pair) + if len < 0 { + return fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type()) + } else if len != 2 { + return fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, len) + } + var k, v Value + iter2.Next(&k) + iter2.Next(&v) + if err := dict.SetKey(k, v); err != nil { + return err + } + } + } + } + + // Then add the kwargs. + before := dict.Len() + for _, pair := range kwargs { + if err := dict.SetKey(pair[0], pair[1]); err != nil { + return err // dict is frozen + } + } + // In the common case, each kwarg will add another dict entry. + // If that's not so, check whether it is because there was a duplicate kwarg. + if dict.Len() < before+len(kwargs) { + keys := make(map[String]bool, len(kwargs)) + for _, kv := range kwargs { + k := kv[0].(String) + if keys[k] { + return fmt.Errorf("duplicate keyword arg: %v", k) + } + keys[k] = true + } + } + + return nil +} + +// nameErr returns an error message of the form "name: msg" +// where name is b.Name() and msg is a string or error. +func nameErr(b *Builtin, msg interface{}) error { + return fmt.Errorf("%s: %v", b.Name(), msg) +} diff --git a/vendor/go.starlark.net/starlark/profile.go b/vendor/go.starlark.net/starlark/profile.go new file mode 100644 index 000000000..38da2b2e9 --- /dev/null +++ b/vendor/go.starlark.net/starlark/profile.go @@ -0,0 +1,449 @@ +// Copyright 2019 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package starlark + +// This file defines a simple execution-time profiler for Starlark. +// It measures the wall time spent executing Starlark code, and emits a +// gzipped protocol message in pprof format (github.com/google/pprof). +// +// When profiling is enabled, the interpreter calls the profiler to +// indicate the start and end of each "span" or time interval. A leaf +// function (whether Go or Starlark) has a single span. A function that +// calls another function has spans for each interval in which it is the +// top of the stack. (A LOAD instruction also ends a span.) +// +// At the start of a span, the interpreter records the current time in +// the thread's topmost frame. At the end of the span, it obtains the +// time again and subtracts the span start time. The difference is added +// to an accumulator variable in the thread. If the accumulator exceeds +// some fixed quantum (10ms, say), the profiler records the current call +// stack and sends it to the profiler goroutine, along with the number +// of quanta, which are subtracted. For example, if the accumulator +// holds 3ms and then a completed span adds 25ms to it, its value is 28ms, +// which exceeds 10ms. The profiler records a stack with the value 20ms +// (2 quanta), and the accumulator is left with 8ms. +// +// The profiler goroutine converts the stacks into the pprof format and +// emits a gzip-compressed protocol message to the designated output +// file. We use a hand-written streaming proto encoder to avoid +// dependencies on pprof and proto, and to avoid the need to +// materialize the profile data structure in memory. +// +// A limitation of this profiler is that it measures wall time, which +// does not necessarily correspond to CPU time. A CPU profiler requires +// that only running (not runnable) threads are sampled; this is +// commonly achieved by having the kernel deliver a (PROF) signal to an +// arbitrary running thread, through setitimer(2). The CPU profiler in the +// Go runtime uses this mechanism, but it is not possible for a Go +// application to register a SIGPROF handler, nor is it possible for a +// Go handler for some other signal to read the stack pointer of +// the interrupted thread. +// +// Two caveats: +// (1) it is tempting to send the leaf Frame directly to the profiler +// goroutine instead of making a copy of the stack, since a Frame is a +// spaghetti stack--a linked list. However, as soon as execution +// resumes, the stack's Frame.pc values may be mutated, so Frames are +// not safe to share with the asynchronous profiler goroutine. +// (2) it is tempting to use Callables as keys in a map when tabulating +// the pprof protocol's Function entities. However, we cannot assume +// that Callables are valid map keys, and furthermore we must not +// pin function values in memory indefinitely as this may cause lambda +// values to keep their free variables live much longer than necessary. + +// TODO(adonovan): +// - make Start/Stop fully thread-safe. +// - fix the pc hack. +// - experiment with other values of quantum. + +import ( + "bufio" + "bytes" + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "log" + "reflect" + "sync/atomic" + "time" + "unsafe" + + "go.starlark.net/syntax" +) + +// StartProfile enables time profiling of all Starlark threads, +// and writes a profile in pprof format to w. +// It must be followed by a call to StopProfiler to stop +// the profiler and finalize the profile. +// +// StartProfile returns an error if profiling was already enabled. +// +// StartProfile must not be called concurrently with Starlark execution. +func StartProfile(w io.Writer) error { + if !atomic.CompareAndSwapUint32(&profiler.on, 0, 1) { + return fmt.Errorf("profiler already running") + } + + // TODO(adonovan): make the API fully concurrency-safe. + // The main challenge is racy reads/writes of profiler.events, + // and of send/close races on the channel it refers to. + // It's easy to solve them with a mutex but harder to do + // it efficiently. + + profiler.events = make(chan *profEvent, 1) + profiler.done = make(chan error) + + go profile(w) + + return nil +} + +// StopProfiler stops the profiler started by a prior call to +// StartProfile and finalizes the profile. It returns an error if the +// profile could not be completed. +// +// StopProfiler must not be called concurrently with Starlark execution. +func StopProfile() error { + // Terminate the profiler goroutine and get its result.
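As a usage sketch (not part of the patch), this is how an embedding program would drive the StartProfile/StopProfile pair defined above; the file names are arbitrary:

package main

import (
	"log"
	"os"

	"go.starlark.net/starlark"
)

func main() {
	f, err := os.Create("starlark.pprof")
	if err != nil {
		log.Fatal(err)
	}
	if err := starlark.StartProfile(f); err != nil {
		log.Fatal(err)
	}
	thread := &starlark.Thread{Name: "main"}
	_, execErr := starlark.ExecFile(thread, "main.star", `x = [i*i for i in range(1000)]`, nil)
	if err := starlark.StopProfile(); err != nil { // flushes and finalizes the profile
		log.Fatal(err)
	}
	if execErr != nil {
		log.Fatal(execErr)
	}
	f.Close() // then inspect with: go tool pprof starlark.pprof
}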
+ close(profiler.events) + err := <-profiler.done + + profiler.done = nil + profiler.events = nil + atomic.StoreUint32(&profiler.on, 0) + + return err +} + +// globals +var profiler struct { + on uint32 // nonzero => profiler running + events chan *profEvent // profile events from interpreter threads + done chan error // indicates profiler goroutine is ready +} + +func (thread *Thread) beginProfSpan() { + if profiler.events == nil { + return // profiling not enabled + } + + thread.frameAt(0).spanStart = nanotime() +} + +// TODO(adonovan): experiment with smaller values, +// which trade space and time for greater precision. +const quantum = 10 * time.Millisecond + +func (thread *Thread) endProfSpan() { + if profiler.events == nil { + return // profiling not enabled + } + + // Add the span to the thread's accumulator. + thread.proftime += time.Duration(nanotime() - thread.frameAt(0).spanStart) + if thread.proftime < quantum { + return + } + + // Only record complete quanta. + n := thread.proftime / quantum + thread.proftime -= n * quantum + + // Copy the stack. + // (We can't save thread.frame because its pc will change.) + ev := &profEvent{ + thread: thread, + time: n * quantum, + } + ev.stack = ev.stackSpace[:0] + for i := range thread.stack { + fr := thread.frameAt(i) + ev.stack = append(ev.stack, profFrame{ + pos: fr.Position(), + fn: fr.Callable(), + pc: fr.pc, + }) + } + + profiler.events <- ev +} + +type profEvent struct { + thread *Thread // currently unused + time time.Duration + stack []profFrame + stackSpace [8]profFrame // initial space for stack +} + +type profFrame struct { + fn Callable // don't hold this live for too long (prevents GC of lambdas) + pc uint32 // program counter (Starlark frames only) + pos syntax.Position // position of pc within this frame +} + +// profile is the profiler goroutine. +// It runs until StopProfiler is called. +func profile(w io.Writer) { + // Field numbers from pprof protocol. 
+ // See https://github.com/google/pprof/blob/master/proto/profile.proto + const ( + Profile_sample_type = 1 // repeated ValueType + Profile_sample = 2 // repeated Sample + Profile_mapping = 3 // repeated Mapping + Profile_location = 4 // repeated Location + Profile_function = 5 // repeated Function + Profile_string_table = 6 // repeated string + Profile_time_nanos = 9 // int64 + Profile_duration_nanos = 10 // int64 + Profile_period_type = 11 // ValueType + Profile_period = 12 // int64 + + ValueType_type = 1 // int64 + ValueType_unit = 2 // int64 + + Sample_location_id = 1 // repeated uint64 + Sample_value = 2 // repeated int64 + Sample_label = 3 // repeated Label + + Label_key = 1 // int64 + Label_str = 2 // int64 + Label_num = 3 // int64 + Label_num_unit = 4 // int64 + + Location_id = 1 // uint64 + Location_mapping_id = 2 // uint64 + Location_address = 3 // uint64 + Location_line = 4 // repeated Line + + Line_function_id = 1 // uint64 + Line_line = 2 // int64 + + Function_id = 1 // uint64 + Function_name = 2 // int64 + Function_system_name = 3 // int64 + Function_filename = 4 // int64 + Function_start_line = 5 // int64 + ) + + bufw := bufio.NewWriter(w) // write file in 4KB (not 240B flate-sized) chunks + gz := gzip.NewWriter(bufw) + enc := protoEncoder{w: gz} + + // strings + stringIndex := make(map[string]int64) + str := func(s string) int64 { + i, ok := stringIndex[s] + if !ok { + i = int64(len(stringIndex)) + enc.string(Profile_string_table, s) + stringIndex[s] = i + } + return i + } + str("") // entry 0 + + // functions + // + // function returns the ID of a Callable for use in Line.FunctionId. + // The ID is the same as the function's logical address, + // which is supplied by the caller to avoid the need to recompute it. + functionId := make(map[uintptr]uint64) + function := func(fn Callable, addr uintptr) uint64 { + id, ok := functionId[addr] + if !ok { + id = uint64(addr) + + var pos syntax.Position + if fn, ok := fn.(callableWithPosition); ok { + pos = fn.Position() + } + + name := fn.Name() + if name == "" { + name = pos.Filename() + } + + nameIndex := str(name) + + fun := new(bytes.Buffer) + funenc := protoEncoder{w: fun} + funenc.uint(Function_id, id) + funenc.int(Function_name, nameIndex) + funenc.int(Function_system_name, nameIndex) + funenc.int(Function_filename, str(pos.Filename())) + funenc.int(Function_start_line, int64(pos.Line)) + enc.bytes(Profile_function, fun.Bytes()) + + functionId[addr] = id + } + return id + } + + // locations + // + // location returns the ID of the location denoted by fr. + // For Starlark frames, this is the Frame pc. + locationId := make(map[uintptr]uint64) + location := func(fr profFrame) uint64 { + fnAddr := profFuncAddr(fr.fn) + + // For Starlark functions, the frame position + // represents the current PC value. + // Mix it into the low bits of the address. + // This is super hacky and may result in collisions + // in large functions or if functions are numerous. + // TODO(adonovan): fix: try making this cleaner by treating + // each bytecode segment as a Profile.Mapping. 
+ pcAddr := fnAddr + if _, ok := fr.fn.(*Function); ok { + pcAddr = (pcAddr << 16) ^ uintptr(fr.pc) + } + + id, ok := locationId[pcAddr] + if !ok { + id = uint64(pcAddr) + + line := new(bytes.Buffer) + lineenc := protoEncoder{w: line} + lineenc.uint(Line_function_id, function(fr.fn, fnAddr)) + lineenc.int(Line_line, int64(fr.pos.Line)) + loc := new(bytes.Buffer) + locenc := protoEncoder{w: loc} + locenc.uint(Location_id, id) + locenc.uint(Location_address, uint64(pcAddr)) + locenc.bytes(Location_line, line.Bytes()) + enc.bytes(Profile_location, loc.Bytes()) + + locationId[pcAddr] = id + } + return id + } + + wallNanos := new(bytes.Buffer) + wnenc := protoEncoder{w: wallNanos} + wnenc.int(ValueType_type, str("wall")) + wnenc.int(ValueType_unit, str("nanoseconds")) + + // informational fields of Profile + enc.bytes(Profile_sample_type, wallNanos.Bytes()) + enc.int(Profile_period, quantum.Nanoseconds()) // magnitude of sampling period + enc.bytes(Profile_period_type, wallNanos.Bytes()) // dimension and unit of period + enc.int(Profile_time_nanos, time.Now().UnixNano()) // start (real) time of profile + + startNano := nanotime() + + // Read profile events from the channel + // until it is closed by StopProfiler. + for e := range profiler.events { + sample := new(bytes.Buffer) + sampleenc := protoEncoder{w: sample} + sampleenc.int(Sample_value, e.time.Nanoseconds()) // wall nanoseconds + for _, fr := range e.stack { + sampleenc.uint(Sample_location_id, location(fr)) + } + enc.bytes(Profile_sample, sample.Bytes()) + } + + endNano := nanotime() + enc.int(Profile_duration_nanos, endNano-startNano) + + err := gz.Close() // Close reports any prior write error + if flushErr := bufw.Flush(); err == nil { + err = flushErr + } + profiler.done <- err +} + +// nanotime returns the time in nanoseconds since epoch. +// It is implemented by runtime.nanotime using the linkname hack; +// runtime.nanotime is defined for all OSs/ARCHS and uses the +// monotonic system clock, which there is no portable way to access. +// Should that function ever go away, these alternatives exist: +// +// // POSIX only. REALTIME not MONOTONIC. 17ns. +// var tv syscall.Timeval +// syscall.Gettimeofday(&tv) // can't fail +// return tv.Nano() +// +// // Portable. REALTIME not MONOTONIC. 46ns. +// return time.Now().UnixNano() +// +// // POSIX only. Adds a dependency. +// import "golang.org/x/sys/unix" +// var ts unix.Timespec +// unix.ClockGettime(CLOCK_MONOTONIC, &ts) // can't fail +// return unix.TimespecToNsec(ts) +// +//go:linkname nanotime runtime.nanotime +func nanotime() int64 + +// profFuncAddr returns the canonical "address" +// of a Callable for use by the profiler. +func profFuncAddr(fn Callable) uintptr { + switch fn := fn.(type) { + case *Builtin: + return reflect.ValueOf(fn.fn).Pointer() + case *Function: + return uintptr(unsafe.Pointer(fn.funcode)) + } + + // User-defined callable types are typically + // of kind pointer-to-struct. Handle them specially. + if v := reflect.ValueOf(fn); v.Type().Kind() == reflect.Ptr { + return v.Pointer() + } + + // Address zero is reserved by the protocol. + // Use 1 for callables we don't recognize. + log.Printf("Starlark profiler: no address for Callable %T", fn) + return 1 +} + +// We encode the protocol message by hand to avoid making +// the interpreter depend on both github.com/google/pprof +// and github.com/golang/protobuf. +// +// This also avoids the need to materialize a protocol message object +// tree of unbounded size and serialize it all at the end.
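Before the encoder itself, a standalone sketch (not part of the patch) of the wire format it emits: each record is a varint tag (field<<3 | wiretype) followed by a varint or length-delimited payload. Field 12 (Profile_period) carrying this file's 10ms quantum encodes as follows:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	var buf bytes.Buffer
	tmp := make([]byte, binary.MaxVarintLen64)

	// tag: field 12 (Profile_period), wire type 0 (varint)
	n := binary.PutUvarint(tmp, uint64(12<<3|0))
	buf.Write(tmp[:n])

	// payload: 10ms expressed in nanoseconds, as a varint
	n = binary.PutUvarint(tmp, uint64((10 * time.Millisecond).Nanoseconds()))
	buf.Write(tmp[:n])

	fmt.Printf("% x\n", buf.Bytes()) // 60 80 ad e2 04
}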
+// The pprof format appears to have been designed to +// permit streaming implementations such as this one. +// +// See https://developers.google.com/protocol-buffers/docs/encoding. +type protoEncoder struct { + w io.Writer // *bytes.Buffer or *gzip.Writer + tmp [binary.MaxVarintLen64]byte +} + +func (e *protoEncoder) uvarint(x uint64) { + n := binary.PutUvarint(e.tmp[:], x) + e.w.Write(e.tmp[:n]) +} + +func (e *protoEncoder) tag(field, wire uint) { + e.uvarint(uint64(field<<3 | wire)) +} + +func (e *protoEncoder) string(field uint, s string) { + e.tag(field, 2) // length-delimited + e.uvarint(uint64(len(s))) + io.WriteString(e.w, s) +} + +func (e *protoEncoder) bytes(field uint, b []byte) { + e.tag(field, 2) // length-delimited + e.uvarint(uint64(len(b))) + e.w.Write(b) +} + +func (e *protoEncoder) uint(field uint, x uint64) { + e.tag(field, 0) // varint + e.uvarint(x) +} + +func (e *protoEncoder) int(field uint, x int64) { + e.tag(field, 0) // varint + e.uvarint(uint64(x)) +} diff --git a/vendor/go.starlark.net/starlark/unpack.go b/vendor/go.starlark.net/starlark/unpack.go new file mode 100644 index 000000000..316858992 --- /dev/null +++ b/vendor/go.starlark.net/starlark/unpack.go @@ -0,0 +1,355 @@ +package starlark + +// This file defines the Unpack helper functions used by +// built-in functions to interpret their call arguments. + +import ( + "fmt" + "log" + "reflect" + "strings" + + "go.starlark.net/internal/spell" +) + +// An Unpacker defines custom argument unpacking behavior. +// See UnpackArgs. +type Unpacker interface { + Unpack(v Value) error +} + +// UnpackArgs unpacks the positional and keyword arguments into the +// supplied parameter variables. pairs is an alternating list of names +// and pointers to variables. +// +// If the variable is a bool, integer, string, *List, *Dict, Callable, +// Iterable, or user-defined implementation of Value, +// UnpackArgs performs the appropriate type check. +// Predeclared Go integer types use the AsInt check. +// +// If the parameter name ends with "?", it is optional. +// +// If the parameter name ends with "??", it is optional and treats the None value +// as if the argument was absent. +// +// If a parameter is marked optional, then all following parameters are +// implicitly optional whether or not they are marked. +// +// If the variable implements Unpacker, its Unpack method +// is called with the argument value, allowing an application +// to define its own argument validation and conversion. +// +// If the variable implements Value, UnpackArgs may call +// its Type() method while constructing the error message. +// +// Examples: +// +// var ( +// a Value +// b = MakeInt(42) +// c Value = starlark.None +// ) +// +// // 1. mixed parameters, like def f(a, b=42, c=None). +// err := UnpackArgs("f", args, kwargs, "a", &a, "b?", &b, "c?", &c) +// +// // 2. keyword parameters only, like def f(*, a, b, c=None). +// if len(args) > 0 { +// return fmt.Errorf("f: unexpected positional arguments") +// } +// err := UnpackArgs("f", args, kwargs, "a", &a, "b?", &b, "c?", &c) +// +// // 3. positional parameters only, like def f(a, b=42, c=None, /) in Python 3.8. +// err := UnpackPositionalArgs("f", args, kwargs, 1, &a, &b, &c) +// +// More complex forms such as def f(a, b=42, *args, c, d=123, **kwargs) +// require additional logic, but their need in built-ins is exceedingly rare. +// +// In the examples above, the declaration of b with type Int causes UnpackArgs +// to require that b's argument value, if provided, is also an int. +// To allow arguments of any type, while retaining the default value of 42, +// declare b as a Value: +// +// var b Value = MakeInt(42) +// +// The zero value of a variable of type Value, such as 'a' in the +// examples above, is not a valid Starlark value, so if the parameter is +// optional, the caller must explicitly handle the default case by +// interpreting nil as None or some computed default. The same is true +// for the zero values of variables of type *List, *Dict, Callable, or +// Iterable. For example: +// +// // def myfunc(d=None, e=[], f={}) +// var ( +// d Value +// e *List +// f *Dict +// ) +// err := UnpackArgs("myfunc", args, kwargs, "d?", &d, "e?", &e, "f?", &f) +// if d == nil { d = None; } +// if e == nil { e = new(List); } +// if f == nil { f = new(Dict); } +// +func UnpackArgs(fnname string, args Tuple, kwargs []Tuple, pairs ...interface{}) error { + nparams := len(pairs) / 2 + var defined intset + defined.init(nparams) + + paramName := func(x interface{}) (name string, skipNone bool) { // (no free variables) + name = x.(string) + if strings.HasSuffix(name, "??") { + name = strings.TrimSuffix(name, "??") + skipNone = true + } else if name[len(name)-1] == '?' { + name = name[:len(name)-1] + } + + return name, skipNone + } + + // positional arguments + if len(args) > nparams { + return fmt.Errorf("%s: got %d arguments, want at most %d", + fnname, len(args), nparams) + } + for i, arg := range args { + defined.set(i) + name, skipNone := paramName(pairs[2*i]) + if skipNone { + if _, isNone := arg.(NoneType); isNone { + continue + } + } + if err := unpackOneArg(arg, pairs[2*i+1]); err != nil { + return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err) + } + } + + // keyword arguments +kwloop: + for _, item := range kwargs { + name, arg := item[0].(String), item[1] + for i := 0; i < nparams; i++ { + pName, skipNone := paramName(pairs[2*i]) + if pName == string(name) { + // found it + if defined.set(i) { + return fmt.Errorf("%s: got multiple values for keyword argument %s", + fnname, name) + } + + if skipNone { + if _, isNone := arg.(NoneType); isNone { + continue kwloop + } + } + + ptr := pairs[2*i+1] + if err := unpackOneArg(arg, ptr); err != nil { + return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err) + } + continue kwloop + } + } + err := fmt.Errorf("%s: unexpected keyword argument %s", fnname, name) + names := make([]string, 0, nparams) + for i := 0; i < len(pairs); i += 2 { + param, _ := paramName(pairs[i]) + names = append(names, param) + } + if n := spell.Nearest(string(name), names); n != "" { + err = fmt.Errorf("%s (did you mean %s?)", err.Error(), n) + } + return err + } + + // Check that all non-optional parameters are defined. + // (We needn't check the first len(args).) + for i := len(args); i < nparams; i++ { + name := pairs[2*i].(string) + if strings.HasSuffix(name, "?") { + break // optional + } + if !defined.get(i) { + return fmt.Errorf("%s: missing argument for %s", fnname, name) + } + } + + return nil +} + +// UnpackPositionalArgs unpacks the positional arguments into +// corresponding variables. Each element of vars is a pointer; see +// UnpackArgs for allowed types and conversions. +// +// UnpackPositionalArgs reports an error if the number of arguments is +// less than min or greater than len(vars), if kwargs is nonempty, or if +// any conversion fails. +// +// See UnpackArgs for general comments.
+func UnpackPositionalArgs(fnname string, args Tuple, kwargs []Tuple, min int, vars ...interface{}) error { + if len(kwargs) > 0 { + return fmt.Errorf("%s: unexpected keyword arguments", fnname) + } + max := len(vars) + if len(args) < min { + var atleast string + if min < max { + atleast = "at least " + } + return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atleast, min) + } + if len(args) > max { + var atmost string + if max > min { + atmost = "at most " + } + return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atmost, max) + } + for i, arg := range args { + if err := unpackOneArg(arg, vars[i]); err != nil { + return fmt.Errorf("%s: for parameter %d: %s", fnname, i+1, err) + } + } + return nil +} + +func unpackOneArg(v Value, ptr interface{}) error { + // On failure, don't clobber *ptr. + switch ptr := ptr.(type) { + case Unpacker: + return ptr.Unpack(v) + case *Value: + *ptr = v + case *string: + s, ok := AsString(v) + if !ok { + return fmt.Errorf("got %s, want string", v.Type()) + } + *ptr = s + case *bool: + b, ok := v.(Bool) + if !ok { + return fmt.Errorf("got %s, want bool", v.Type()) + } + *ptr = bool(b) + case *int, *int8, *int16, *int32, *int64, + *uint, *uint8, *uint16, *uint32, *uint64, *uintptr: + return AsInt(v, ptr) + case *float64: + f, ok := v.(Float) + if !ok { + return fmt.Errorf("got %s, want float", v.Type()) + } + *ptr = float64(f) + case **List: + list, ok := v.(*List) + if !ok { + return fmt.Errorf("got %s, want list", v.Type()) + } + *ptr = list + case **Dict: + dict, ok := v.(*Dict) + if !ok { + return fmt.Errorf("got %s, want dict", v.Type()) + } + *ptr = dict + case *Callable: + f, ok := v.(Callable) + if !ok { + return fmt.Errorf("got %s, want callable", v.Type()) + } + *ptr = f + case *Iterable: + it, ok := v.(Iterable) + if !ok { + return fmt.Errorf("got %s, want iterable", v.Type()) + } + *ptr = it + default: + // v must have type *V, where V is some subtype of starlark.Value. + ptrv := reflect.ValueOf(ptr) + if ptrv.Kind() != reflect.Ptr { + log.Panicf("internal error: not a pointer: %T", ptr) + } + paramVar := ptrv.Elem() + if !reflect.TypeOf(v).AssignableTo(paramVar.Type()) { + // The value is not assignable to the variable. + + // Detect a possible bug in the Go program that called Unpack: + // If the variable *ptr is not a subtype of Value, + // no value of v can possibly work. + if !paramVar.Type().AssignableTo(reflect.TypeOf(new(Value)).Elem()) { + log.Panicf("pointer element type does not implement Value: %T", ptr) + } + + // Report Starlark dynamic type error. + // + // We prefer the Starlark Value.Type name over + // its Go reflect.Type name, but calling the + // Value.Type method on the variable is not safe + // in general. If the variable is an interface, + // the call will fail. Even if the variable has + // a concrete type, it might not be safe to call + // Type() on a zero instance. Thus we must use + // recover. + + // Default to Go reflect.Type name + paramType := paramVar.Type().String() + + // Attempt to call Value.Type method. 
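For illustration (not part of the patch), a user-defined type that the Unpacker case above would dispatch to; the stringOrInt type is hypothetical:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

// stringOrInt accepts either a Starlark string or a Starlark int argument.
type stringOrInt struct{ s string }

func (p *stringOrInt) Unpack(v starlark.Value) error {
	switch v := v.(type) {
	case starlark.String:
		p.s = string(v)
		return nil
	case starlark.Int:
		p.s = v.String()
		return nil
	}
	return fmt.Errorf("got %s, want string or int", v.Type())
}

func main() {
	var x stringOrInt
	args := starlark.Tuple{starlark.MakeInt(7)}
	if err := starlark.UnpackArgs("f", args, nil, "x", &x); err != nil {
		panic(err)
	}
	fmt.Println(x.s) // 7
}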
+ func() { + defer func() { recover() }() + if typer, _ := paramVar.Interface().(interface{ Type() string }); typer != nil { + paramType = typer.Type() + } + }() + return fmt.Errorf("got %s, want %s", v.Type(), paramType) + } + paramVar.Set(reflect.ValueOf(v)) + } + return nil +} + +type intset struct { + small uint64 // bitset, used if n < 64 + large map[int]bool // set, used if n >= 64 +} + +func (is *intset) init(n int) { + if n >= 64 { + is.large = make(map[int]bool) + } +} + +func (is *intset) set(i int) (prev bool) { + if is.large == nil { + prev = is.small&(1<<uint(i)) != 0 + is.small |= 1 << uint(i) + } else { + prev = is.large[i] + is.large[i] = true + } + return +} + +func (is *intset) get(i int) bool { + if is.large == nil { + return is.small&(1<<uint(i)) != 0 + } + return is.large[i] +}

[...]

// x == y => Hash(x) == Hash(y). + // Hash may fail if the value's type is not hashable, or if the value + // contains a non-hashable value. The hash is used only by dictionaries and + // is not exposed to the Starlark program. + Hash() (uint32, error) +} + +// A Comparable is a value that defines its own equivalence relation and +// perhaps ordered comparisons. +type Comparable interface { + Value + // CompareSameType compares one value to another of the same Type(). + // The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE. + // CompareSameType returns an error if an ordered comparison was + // requested for a type that does not support it. + // + // Implementations that recursively compare subcomponents of + // the value should use the CompareDepth function, not Compare, to + // avoid infinite recursion on cyclic structures. + // + // The depth parameter is used to bound comparisons of cyclic + // data structures. Implementations should decrement depth + // before calling CompareDepth and should return an error if depth + // < 1. + // + // Client code should not call this method. Instead, use the + // standalone Compare or Equals functions, which are defined for + // all pairs of operands. + CompareSameType(op syntax.Token, y Value, depth int) (bool, error) +} + +// A TotallyOrdered is a type whose values form a total order: +// if x and y are of the same TotallyOrdered type, then x must be less than y, +// greater than y, or equal to y. +// +// It is simpler than Comparable and should be preferred in new code, +// but if a type implements both interfaces, Comparable takes precedence. +type TotallyOrdered interface { + Value + // Cmp compares two values x and y of the same totally ordered type. + // It returns negative if x < y, positive if x > y, and zero if the values are equal. + // + // Implementations that recursively compare subcomponents of + // the value should use the CompareDepth function, not Cmp, to + // avoid infinite recursion on cyclic structures. + // + // The depth parameter is used to bound comparisons of cyclic + // data structures. Implementations should decrement depth + // before calling CompareDepth and should return an error if depth + // < 1. + // + // Client code should not call this method. Instead, use the + // standalone Compare or Equals functions, which are defined for + // all pairs of operands. + Cmp(y Value, depth int) (int, error) +} + +var ( + _ TotallyOrdered = Int{} + _ TotallyOrdered = Float(0) + _ Comparable = False + _ Comparable = String("") + _ Comparable = (*Dict)(nil) + _ Comparable = (*List)(nil) + _ Comparable = Tuple(nil) + _ Comparable = (*Set)(nil) +) + +// A Callable value f may be the operand of a function call, f(x). +// +// Clients should use the Call function, never the CallInternal method.
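A sketch (not part of the patch) of the intended client pattern: define a Callable with the package's NewBuiltin helper, then invoke it through the standalone Call function rather than CallInternal:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	double := starlark.NewBuiltin("double", func(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
		var x int
		if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &x); err != nil {
			return nil, err
		}
		return starlark.MakeInt(2 * x), nil
	})

	thread := &starlark.Thread{Name: "calldemo"}
	v, err := starlark.Call(thread, double, starlark.Tuple{starlark.MakeInt(21)}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 42
}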
+type Callable interface { + Value + Name() string + CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) +} + +type callableWithPosition interface { + Callable + Position() syntax.Position +} + +var ( + _ Callable = (*Builtin)(nil) + _ Callable = (*Function)(nil) + _ callableWithPosition = (*Function)(nil) +) + +// An Iterable abstracts a sequence of values. +// An iterable value may be iterated over by a 'for' loop or used where +// any other Starlark iterable is allowed. Unlike a Sequence, the length +// of an Iterable is not necessarily known in advance of iteration. +type Iterable interface { + Value + Iterate() Iterator // must be followed by call to Iterator.Done +} + +// A Sequence is a sequence of values of known length. +type Sequence interface { + Iterable + Len() int +} + +var ( + _ Sequence = (*Dict)(nil) + _ Sequence = (*Set)(nil) +) + +// An Indexable is a sequence of known length that supports efficient random access. +// It is not necessarily iterable. +type Indexable interface { + Value + Index(i int) Value // requires 0 <= i < Len() + Len() int +} + +// A Sliceable is a sequence that can be cut into pieces with the slice operator (x[i:j:step]). +// +// All native indexable objects are sliceable. +// This is a separate interface for backwards-compatibility. +type Sliceable interface { + Indexable + // For positive strides (step > 0), 0 <= start <= end <= n. + // For negative strides (step < 0), -1 <= end <= start < n. + // The caller must ensure that the start and end indices are valid + // and that step is non-zero. + Slice(start, end, step int) Value +} + +// A HasSetIndex is an Indexable value whose elements may be assigned (x[i] = y). +// +// The implementation should not add Len to a negative index as the +// evaluator does this before the call. +type HasSetIndex interface { + Indexable + SetIndex(index int, v Value) error +} + +var ( + _ HasSetIndex = (*List)(nil) + _ Indexable = Tuple(nil) + _ Indexable = String("") + _ Sliceable = Tuple(nil) + _ Sliceable = String("") + _ Sliceable = (*List)(nil) +) + +// An Iterator provides a sequence of values to the caller. +// +// The caller must call Done when the iterator is no longer needed. +// Operations that modify a sequence will fail if it has active iterators. +// +// Example usage: +// +// iter := iterable.Iterator() +// defer iter.Done() +// var x Value +// for iter.Next(&x) { +// ... +// } +type Iterator interface { + // If the iterator is exhausted, Next returns false. + // Otherwise it sets *p to the current element of the sequence, + // advances the iterator, and returns true. + Next(p *Value) bool + Done() +} + +// A Mapping is a mapping from keys to values, such as a dictionary. +// +// If a type satisfies both Mapping and Iterable, the iterator yields +// the keys of the mapping. +type Mapping interface { + Value + // Get returns the value corresponding to the specified key, + // or !found if the mapping does not contain the key. + // + // Get also defines the behavior of "v in mapping". + // The 'in' operator reports the 'found' component, ignoring errors. + Get(Value) (v Value, found bool, err error) +} + +// An IterableMapping is a mapping that supports key enumeration. +type IterableMapping interface { + Mapping + Iterate() Iterator // see Iterable interface + Items() []Tuple // a new slice containing all key/value pairs +} + +var _ IterableMapping = (*Dict)(nil) + +// A HasSetKey supports map update using x[k]=v syntax, like a dictionary. 
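For illustration (not part of the patch), a minimal user-defined Iterable in the same style as the bytesIterable shown earlier in this file; the rangeN names are hypothetical:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

// rangeN is an immutable iterable yielding the ints 0..n-1.
type rangeN struct{ n int }

var _ starlark.Iterable = rangeN{}

func (r rangeN) String() string             { return fmt.Sprintf("rangeN(%d)", r.n) }
func (r rangeN) Type() string               { return "rangeN" }
func (r rangeN) Freeze()                    {} // immutable
func (r rangeN) Truth() starlark.Bool       { return starlark.Bool(r.n > 0) }
func (r rangeN) Hash() (uint32, error)      { return 0, fmt.Errorf("unhashable: rangeN") }
func (r rangeN) Iterate() starlark.Iterator { return &rangeNIterator{n: r.n} }

type rangeNIterator struct{ i, n int }

func (it *rangeNIterator) Next(p *starlark.Value) bool {
	if it.i >= it.n {
		return false
	}
	*p = starlark.MakeInt(it.i)
	it.i++
	return true
}

func (it *rangeNIterator) Done() {}

func main() {
	iter := rangeN{3}.Iterate()
	defer iter.Done()
	var x starlark.Value
	for iter.Next(&x) {
		fmt.Println(x) // 0, 1, 2
	}
}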
+type HasSetKey interface { + Mapping + SetKey(k, v Value) error +} + +var _ HasSetKey = (*Dict)(nil) + +// A HasBinary value may be used as either operand of these binary operators: +// + - * / // % in not in | & ^ << >> +// +// The Side argument indicates whether the receiver is the left or right operand. +// +// An implementation may decline to handle an operation by returning (nil, nil). +// For this reason, clients should always call the standalone Binary(op, x, y) +// function rather than calling the method directly. +type HasBinary interface { + Value + Binary(op syntax.Token, y Value, side Side) (Value, error) +} + +type Side bool + +const ( + Left Side = false + Right Side = true +) + +// A HasUnary value may be used as the operand of these unary operators: +// + - ~ +// +// An implementation may decline to handle an operation by returning (nil, nil). +// For this reason, clients should always call the standalone Unary(op, x) +// function rather than calling the method directly. +type HasUnary interface { + Value + Unary(op syntax.Token) (Value, error) +} + +// A HasAttrs value has fields or methods that may be read by a dot expression (y = x.f). +// Attribute names may be listed using the built-in 'dir' function. +// +// For implementation convenience, a result of (nil, nil) from Attr is +// interpreted as a "no such field or method" error. Implementations are +// free to return a more precise error. +type HasAttrs interface { + Value + Attr(name string) (Value, error) // returns (nil, nil) if attribute not present + AttrNames() []string // callers must not modify the result. +} + +var ( + _ HasAttrs = String("") + _ HasAttrs = new(List) + _ HasAttrs = new(Dict) + _ HasAttrs = new(Set) +) + +// A HasSetField value has fields that may be written by a dot expression (x.f = y). +// +// An implementation of SetField may return a NoSuchAttrError, +// in which case the runtime may augment the error message to +// warn of possible misspelling. +type HasSetField interface { + HasAttrs + SetField(name string, val Value) error +} + +// A NoSuchAttrError may be returned by an implementation of +// HasAttrs.Attr or HasSetField.SetField to indicate that no such field +// exists. In that case the runtime may augment the error message to +// warn of possible misspelling. +type NoSuchAttrError string + +func (e NoSuchAttrError) Error() string { return string(e) } + +// NoneType is the type of None. Its only legal value is None. +// (We represent it as a number, not struct{}, so that None may be constant.) +type NoneType byte + +const None = NoneType(0) + +func (NoneType) String() string { return "None" } +func (NoneType) Type() string { return "NoneType" } +func (NoneType) Freeze() {} // immutable +func (NoneType) Truth() Bool { return False } +func (NoneType) Hash() (uint32, error) { return 0, nil } + +// Bool is the type of a Starlark bool. +type Bool bool + +const ( + False Bool = false + True Bool = true +) + +func (b Bool) String() string { + if b { + return "True" + } else { + return "False" + } +} +func (b Bool) Type() string { return "bool" } +func (b Bool) Freeze() {} // immutable +func (b Bool) Truth() Bool { return b } +func (b Bool) Hash() (uint32, error) { return uint32(b2i(bool(b))), nil } +func (x Bool) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { + y := y_.(Bool) + return threeway(op, b2i(bool(x))-b2i(bool(y))), nil +} + +// Float is the type of a Starlark float. 
+type Float float64
+
+func (f Float) String() string {
+	var buf strings.Builder
+	f.format(&buf, 'g')
+	return buf.String()
+}
+
+func (f Float) format(buf *strings.Builder, conv byte) {
+	ff := float64(f)
+	if !isFinite(ff) {
+		if math.IsInf(ff, +1) {
+			buf.WriteString("+inf")
+		} else if math.IsInf(ff, -1) {
+			buf.WriteString("-inf")
+		} else {
+			buf.WriteString("nan")
+		}
+		return
+	}
+
+	// %g is the default format used by str.
+	// It uses the minimum precision to avoid ambiguity,
+	// and always includes a '.' or an 'e' so that the value
+	// is self-evidently a float, not an int.
+	if conv == 'g' || conv == 'G' {
+		s := strconv.FormatFloat(ff, conv, -1, 64)
+		buf.WriteString(s)
+		// Ensure result always has a decimal point if no exponent.
+		// "123" -> "123.0"
+		if strings.IndexByte(s, conv-'g'+'e') < 0 && strings.IndexByte(s, '.') < 0 {
+			buf.WriteString(".0")
+		}
+		return
+	}
+
+	// %[eEfF] use 6-digit precision
+	buf.WriteString(strconv.FormatFloat(ff, conv, 6, 64))
+}
+
+func (f Float) Type() string { return "float" }
+func (f Float) Freeze() {} // immutable
+func (f Float) Truth() Bool { return f != 0.0 }
+func (f Float) Hash() (uint32, error) {
+	// Equal float and int values must yield the same hash.
+	// TODO(adonovan): opt: if f is non-integral, and thus not equal
+	// to any Int, we can avoid the Int conversion and use a cheaper hash.
+	if isFinite(float64(f)) {
+		return finiteFloatToInt(f).Hash()
+	}
+	return 1618033, nil // NaN, +/-Inf
+}
+
+func floor(f Float) Float { return Float(math.Floor(float64(f))) }
+
+// isFinite reports whether f represents a finite rational value.
+// It is equivalent to !math.IsNaN(f) && !math.IsInf(f, 0).
+func isFinite(f float64) bool {
+	return math.Abs(f) <= math.MaxFloat64
+}
+
+func (x Float) Cmp(y_ Value, depth int) (int, error) {
+	y := y_.(Float)
+	return floatCmp(x, y), nil
+}
+
+// floatCmp performs a three-valued comparison on floats,
+// which are totally ordered with NaN > +Inf.
+func floatCmp(x, y Float) int {
+	if x > y {
+		return +1
+	} else if x < y {
+		return -1
+	} else if x == y {
+		return 0
+	}
+
+	// At least one operand is NaN.
+	if x == x {
+		return -1 // y is NaN
+	} else if y == y {
+		return +1 // x is NaN
+	}
+	return 0 // both NaN
+}
+
+func (f Float) rational() *big.Rat { return new(big.Rat).SetFloat64(float64(f)) }
+
+// AsFloat returns the float64 value closest to x.
+// The f result is undefined if x is not a float or Int.
+// The result may be infinite if x is a very large Int.
+func AsFloat(x Value) (f float64, ok bool) {
+	switch x := x.(type) {
+	case Float:
+		return float64(x), true
+	case Int:
+		return float64(x.Float()), true
+	}
+	return 0, false
+}
+
+func (x Float) Mod(y Float) Float {
+	z := Float(math.Mod(float64(x), float64(y)))
+	if (x < 0) != (y < 0) && z != 0 {
+		z += y
+	}
+	return z
+}
+
+// Unary implements the operations +float and -float.
+func (f Float) Unary(op syntax.Token) (Value, error) {
+	switch op {
+	case syntax.MINUS:
+		return -f, nil
+	case syntax.PLUS:
+		return +f, nil
+	}
+	return nil, nil
+}
+
+// String is the type of a Starlark text string.
+//
+// A String encapsulates an immutable sequence of bytes,
+// but strings are not directly iterable. Instead, iterate
+// over the result of calling one of these four methods:
+// codepoints, codepoint_ords, elems, elem_ords.
+//
+// Strings typically contain text; use Bytes for binary strings.
+// The Starlark spec defines text strings as sequences of UTF-k
+// codes that encode Unicode code points.
In this Go implementation, +// k=8, whereas in a Java implementation, k=16. For portability, +// operations on strings should aim to avoid assumptions about +// the value of k. +// +// Warning: the contract of the Value interface's String method is that +// it returns the value printed in Starlark notation, +// so s.String() or fmt.Sprintf("%s", s) returns a quoted string. +// Use string(s) or s.GoString() or fmt.Sprintf("%#v", s) to obtain the raw contents +// of a Starlark string as a Go string. +type String string + +func (s String) String() string { return syntax.Quote(string(s), false) } +func (s String) GoString() string { return string(s) } +func (s String) Type() string { return "string" } +func (s String) Freeze() {} // immutable +func (s String) Truth() Bool { return len(s) > 0 } +func (s String) Hash() (uint32, error) { return hashString(string(s)), nil } +func (s String) Len() int { return len(s) } // bytes +func (s String) Index(i int) Value { return s[i : i+1] } + +func (s String) Slice(start, end, step int) Value { + if step == 1 { + return s[start:end] + } + + sign := signum(step) + var str []byte + for i := start; signum(end-i) == sign; i += step { + str = append(str, s[i]) + } + return String(str) +} + +func (s String) Attr(name string) (Value, error) { return builtinAttr(s, name, stringMethods) } +func (s String) AttrNames() []string { return builtinAttrNames(stringMethods) } + +func (x String) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { + y := y_.(String) + return threeway(op, strings.Compare(string(x), string(y))), nil +} + +func AsString(x Value) (string, bool) { v, ok := x.(String); return string(v), ok } + +// A stringElems is an iterable whose iterator yields a sequence of +// elements (bytes), either numerically or as successive substrings. +// It is an indexable sequence. +type stringElems struct { + s String + ords bool +} + +var ( + _ Iterable = (*stringElems)(nil) + _ Indexable = (*stringElems)(nil) +) + +func (si stringElems) String() string { + if si.ords { + return si.s.String() + ".elem_ords()" + } else { + return si.s.String() + ".elems()" + } +} +func (si stringElems) Type() string { return "string.elems" } +func (si stringElems) Freeze() {} // immutable +func (si stringElems) Truth() Bool { return True } +func (si stringElems) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", si.Type()) } +func (si stringElems) Iterate() Iterator { return &stringElemsIterator{si, 0} } +func (si stringElems) Len() int { return len(si.s) } +func (si stringElems) Index(i int) Value { + if si.ords { + return MakeInt(int(si.s[i])) + } else { + // TODO(adonovan): opt: preallocate canonical 1-byte strings + // to avoid interface allocation. + return si.s[i : i+1] + } +} + +type stringElemsIterator struct { + si stringElems + i int +} + +func (it *stringElemsIterator) Next(p *Value) bool { + if it.i == len(it.si.s) { + return false + } + *p = it.si.Index(it.i) + it.i++ + return true +} + +func (*stringElemsIterator) Done() {} + +// A stringCodepoints is an iterable whose iterator yields a sequence of +// Unicode code points, either numerically or as successive substrings. +// It is not indexable. 
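+//
+// For example (an illustrative sketch; "é" occupies two bytes in UTF-8):
+//
+//	"héllo".codepoints() // yields "h", "é", "l", "l", "o"
+//	"héllo".elems()      // yields the six underlying bytes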
+type stringCodepoints struct { + s String + ords bool +} + +var _ Iterable = (*stringCodepoints)(nil) + +func (si stringCodepoints) String() string { + if si.ords { + return si.s.String() + ".codepoint_ords()" + } else { + return si.s.String() + ".codepoints()" + } +} +func (si stringCodepoints) Type() string { return "string.codepoints" } +func (si stringCodepoints) Freeze() {} // immutable +func (si stringCodepoints) Truth() Bool { return True } +func (si stringCodepoints) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", si.Type()) } +func (si stringCodepoints) Iterate() Iterator { return &stringCodepointsIterator{si, 0} } + +type stringCodepointsIterator struct { + si stringCodepoints + i int +} + +func (it *stringCodepointsIterator) Next(p *Value) bool { + s := it.si.s[it.i:] + if s == "" { + return false + } + r, sz := utf8.DecodeRuneInString(string(s)) + if !it.si.ords { + if r == utf8.RuneError { + *p = String(r) + } else { + *p = s[:sz] + } + } else { + *p = MakeInt(int(r)) + } + it.i += sz + return true +} + +func (*stringCodepointsIterator) Done() {} + +// A Function is a function defined by a Starlark def statement or lambda expression. +// The initialization behavior of a Starlark module is also represented by a Function. +type Function struct { + funcode *compile.Funcode + module *module + defaults Tuple + freevars Tuple +} + +// A module is the dynamic counterpart to a Program. +// All functions in the same program share a module. +type module struct { + program *compile.Program + predeclared StringDict + globals []Value + constants []Value +} + +// makeGlobalDict returns a new, unfrozen StringDict containing all global +// variables so far defined in the module. +func (m *module) makeGlobalDict() StringDict { + r := make(StringDict, len(m.program.Globals)) + for i, id := range m.program.Globals { + if v := m.globals[i]; v != nil { + r[id.Name] = v + } + } + return r +} + +func (fn *Function) Name() string { return fn.funcode.Name } // "lambda" for anonymous functions +func (fn *Function) Doc() string { return fn.funcode.Doc } +func (fn *Function) Hash() (uint32, error) { return hashString(fn.funcode.Name), nil } +func (fn *Function) Freeze() { fn.defaults.Freeze(); fn.freevars.Freeze() } +func (fn *Function) String() string { return toString(fn) } +func (fn *Function) Type() string { return "function" } +func (fn *Function) Truth() Bool { return true } + +// Globals returns a new, unfrozen StringDict containing all global +// variables so far defined in the function's module. +func (fn *Function) Globals() StringDict { return fn.module.makeGlobalDict() } + +func (fn *Function) Position() syntax.Position { return fn.funcode.Pos } +func (fn *Function) NumParams() int { return fn.funcode.NumParams } +func (fn *Function) NumKwonlyParams() int { return fn.funcode.NumKwonlyParams } + +// Param returns the name and position of the ith parameter, +// where 0 <= i < NumParams(). +// The *args and **kwargs parameters are at the end +// even if there were optional parameters after *args. +func (fn *Function) Param(i int) (string, syntax.Position) { + if i >= fn.NumParams() { + panic(i) + } + id := fn.funcode.Locals[i] + return id.Name, id.Pos +} + +// ParamDefault returns the default value of the specified parameter +// (0 <= i < NumParams()), or nil if the parameter is not optional. +func (fn *Function) ParamDefault(i int) Value { + if i < 0 || i >= fn.NumParams() { + panic(i) + } + + // fn.defaults omits all required params up to the first optional param. 
It
+	// also does not include *args or **kwargs at the end.
+	firstOptIdx := fn.NumParams() - len(fn.defaults)
+	if fn.HasVarargs() {
+		firstOptIdx--
+	}
+	if fn.HasKwargs() {
+		firstOptIdx--
+	}
+	if i < firstOptIdx || i >= firstOptIdx+len(fn.defaults) {
+		return nil
+	}
+
+	dflt := fn.defaults[i-firstOptIdx]
+	if _, ok := dflt.(mandatory); ok {
+		return nil
+	}
+	return dflt
+}
+
+func (fn *Function) HasVarargs() bool { return fn.funcode.HasVarargs }
+func (fn *Function) HasKwargs() bool { return fn.funcode.HasKwargs }
+
+// A Builtin is a function implemented in Go.
+type Builtin struct {
+	name string
+	fn   func(thread *Thread, fn *Builtin, args Tuple, kwargs []Tuple) (Value, error)
+	recv Value // for bound methods (e.g. "".startswith)
+}
+
+func (b *Builtin) Name() string { return b.name }
+func (b *Builtin) Freeze() {
+	if b.recv != nil {
+		b.recv.Freeze()
+	}
+}
+func (b *Builtin) Hash() (uint32, error) {
+	h := hashString(b.name)
+	if b.recv != nil {
+		h ^= 5521
+	}
+	return h, nil
+}
+func (b *Builtin) Receiver() Value { return b.recv }
+func (b *Builtin) String() string { return toString(b) }
+func (b *Builtin) Type() string { return "builtin_function_or_method" }
+func (b *Builtin) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) {
+	return b.fn(thread, b, args, kwargs)
+}
+func (b *Builtin) Truth() Bool { return true }
+
+// NewBuiltin returns a new 'builtin_function_or_method' value with the specified name
+// and implementation. It compares unequal with all other values.
+func NewBuiltin(name string, fn func(thread *Thread, fn *Builtin, args Tuple, kwargs []Tuple) (Value, error)) *Builtin {
+	return &Builtin{name: name, fn: fn}
+}
+
+// BindReceiver returns a new Builtin value representing a method
+// closure, that is, a built-in function bound to a receiver value.
+//
+// In the example below, the value of f is the string.index
+// built-in method bound to the receiver value "abc":
+//
+//	f = "abc".index; f("a"); f("b")
+//
+// In the common case, the receiver is bound only during the call,
+// but this still results in the creation of a temporary method closure:
+//
+//	"abc".index("a")
+func (b *Builtin) BindReceiver(recv Value) *Builtin {
+	return &Builtin{name: b.name, fn: b.fn, recv: recv}
+}
+
+// A *Dict represents a Starlark dictionary.
+// The zero value of Dict is a valid empty dictionary.
+// If you know the exact final number of entries,
+// it is more efficient to call NewDict.
+type Dict struct {
+	ht hashtable
+}
+
+// NewDict returns a dictionary with initial space for
+// at least size insertions before rehashing.
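+//
+// Example (added for illustration):
+//
+//	d := NewDict(2)
+//	_ = d.SetKey(String("a"), MakeInt(1))
+//	_ = d.SetKey(String("b"), MakeInt(2))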
+func NewDict(size int) *Dict { + dict := new(Dict) + dict.ht.init(size) + return dict +} + +func (d *Dict) Clear() error { return d.ht.clear() } +func (d *Dict) Delete(k Value) (v Value, found bool, err error) { return d.ht.delete(k) } +func (d *Dict) Get(k Value) (v Value, found bool, err error) { return d.ht.lookup(k) } +func (d *Dict) Items() []Tuple { return d.ht.items() } +func (d *Dict) Keys() []Value { return d.ht.keys() } +func (d *Dict) Len() int { return int(d.ht.len) } +func (d *Dict) Iterate() Iterator { return d.ht.iterate() } +func (d *Dict) SetKey(k, v Value) error { return d.ht.insert(k, v) } +func (d *Dict) String() string { return toString(d) } +func (d *Dict) Type() string { return "dict" } +func (d *Dict) Freeze() { d.ht.freeze() } +func (d *Dict) Truth() Bool { return d.Len() > 0 } +func (d *Dict) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: dict") } + +func (x *Dict) Union(y *Dict) *Dict { + z := new(Dict) + z.ht.init(x.Len()) // a lower bound + z.ht.addAll(&x.ht) // can't fail + z.ht.addAll(&y.ht) // can't fail + return z +} + +func (d *Dict) Attr(name string) (Value, error) { return builtinAttr(d, name, dictMethods) } +func (d *Dict) AttrNames() []string { return builtinAttrNames(dictMethods) } + +func (x *Dict) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { + y := y_.(*Dict) + switch op { + case syntax.EQL: + ok, err := dictsEqual(x, y, depth) + return ok, err + case syntax.NEQ: + ok, err := dictsEqual(x, y, depth) + return !ok, err + default: + return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) + } +} + +func dictsEqual(x, y *Dict, depth int) (bool, error) { + if x.Len() != y.Len() { + return false, nil + } + for e := x.ht.head; e != nil; e = e.next { + key, xval := e.key, e.value + + if yval, found, _ := y.Get(key); !found { + return false, nil + } else if eq, err := EqualDepth(xval, yval, depth-1); err != nil { + return false, err + } else if !eq { + return false, nil + } + } + return true, nil +} + +// A *List represents a Starlark list value. +type List struct { + elems []Value + frozen bool + itercount uint32 // number of active iterators (ignored if frozen) +} + +// NewList returns a list containing the specified elements. +// Callers should not subsequently modify elems. +func NewList(elems []Value) *List { return &List{elems: elems} } + +func (l *List) Freeze() { + if !l.frozen { + l.frozen = true + for _, elem := range l.elems { + elem.Freeze() + } + } +} + +// checkMutable reports an error if the list should not be mutated. +// verb+" list" should describe the operation. +func (l *List) checkMutable(verb string) error { + if l.frozen { + return fmt.Errorf("cannot %s frozen list", verb) + } + if l.itercount > 0 { + return fmt.Errorf("cannot %s list during iteration", verb) + } + return nil +} + +func (l *List) String() string { return toString(l) } +func (l *List) Type() string { return "list" } +func (l *List) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: list") } +func (l *List) Truth() Bool { return l.Len() > 0 } +func (l *List) Len() int { return len(l.elems) } +func (l *List) Index(i int) Value { return l.elems[i] } + +func (l *List) Slice(start, end, step int) Value { + if step == 1 { + elems := append([]Value{}, l.elems[start:end]...) 
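+		// (the append above copies the elements, so the new list
+		// does not alias the receiver's backing array)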
+ return NewList(elems) + } + + sign := signum(step) + var list []Value + for i := start; signum(end-i) == sign; i += step { + list = append(list, l.elems[i]) + } + return NewList(list) +} + +func (l *List) Attr(name string) (Value, error) { return builtinAttr(l, name, listMethods) } +func (l *List) AttrNames() []string { return builtinAttrNames(listMethods) } + +func (l *List) Iterate() Iterator { + if !l.frozen { + l.itercount++ + } + return &listIterator{l: l} +} + +func (x *List) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { + y := y_.(*List) + // It's tempting to check x == y as an optimization here, + // but wrong because a list containing NaN is not equal to itself. + return sliceCompare(op, x.elems, y.elems, depth) +} + +func sliceCompare(op syntax.Token, x, y []Value, depth int) (bool, error) { + // Fast path: check length. + if len(x) != len(y) && (op == syntax.EQL || op == syntax.NEQ) { + return op == syntax.NEQ, nil + } + + // Find first element that is not equal in both lists. + for i := 0; i < len(x) && i < len(y); i++ { + if eq, err := EqualDepth(x[i], y[i], depth-1); err != nil { + return false, err + } else if !eq { + switch op { + case syntax.EQL: + return false, nil + case syntax.NEQ: + return true, nil + default: + return CompareDepth(op, x[i], y[i], depth-1) + } + } + } + + return threeway(op, len(x)-len(y)), nil +} + +type listIterator struct { + l *List + i int +} + +func (it *listIterator) Next(p *Value) bool { + if it.i < it.l.Len() { + *p = it.l.elems[it.i] + it.i++ + return true + } + return false +} + +func (it *listIterator) Done() { + if !it.l.frozen { + it.l.itercount-- + } +} + +func (l *List) SetIndex(i int, v Value) error { + if err := l.checkMutable("assign to element of"); err != nil { + return err + } + l.elems[i] = v + return nil +} + +func (l *List) Append(v Value) error { + if err := l.checkMutable("append to"); err != nil { + return err + } + l.elems = append(l.elems, v) + return nil +} + +func (l *List) Clear() error { + if err := l.checkMutable("clear"); err != nil { + return err + } + for i := range l.elems { + l.elems[i] = nil // aid GC + } + l.elems = l.elems[:0] + return nil +} + +// A Tuple represents a Starlark tuple value. +type Tuple []Value + +func (t Tuple) Len() int { return len(t) } +func (t Tuple) Index(i int) Value { return t[i] } + +func (t Tuple) Slice(start, end, step int) Value { + if step == 1 { + return t[start:end] + } + + sign := signum(step) + var tuple Tuple + for i := start; signum(end-i) == sign; i += step { + tuple = append(tuple, t[i]) + } + return tuple +} + +func (t Tuple) Iterate() Iterator { return &tupleIterator{elems: t} } +func (t Tuple) Freeze() { + for _, elem := range t { + elem.Freeze() + } +} +func (t Tuple) String() string { return toString(t) } +func (t Tuple) Type() string { return "tuple" } +func (t Tuple) Truth() Bool { return len(t) > 0 } + +func (x Tuple) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { + y := y_.(Tuple) + return sliceCompare(op, x, y, depth) +} + +func (t Tuple) Hash() (uint32, error) { + // Use same algorithm as Python. 
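+	// (CPython's tuple hash: each element's hash is folded in with a
+	// position-dependent multiplier so that permutations hash differently.)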
+	var x, mult uint32 = 0x345678, 1000003
+	for _, elem := range t {
+		y, err := elem.Hash()
+		if err != nil {
+			return 0, err
+		}
+		x = x ^ y*mult
+		mult += 82520 + uint32(len(t)+len(t))
+	}
+	return x, nil
+}
+
+type tupleIterator struct{ elems Tuple }
+
+func (it *tupleIterator) Next(p *Value) bool {
+	if len(it.elems) > 0 {
+		*p = it.elems[0]
+		it.elems = it.elems[1:]
+		return true
+	}
+	return false
+}
+
+func (it *tupleIterator) Done() {}
+
+// A Set represents a Starlark set value.
+// The zero value of Set is a valid empty set.
+// If you know the exact final number of elements,
+// it is more efficient to call NewSet.
+type Set struct {
+	ht hashtable // values are all None
+}
+
+// NewSet returns a set with initial space for
+// at least size insertions before rehashing.
+func NewSet(size int) *Set {
+	set := new(Set)
+	set.ht.init(size)
+	return set
+}
+
+func (s *Set) Delete(k Value) (found bool, err error) { _, found, err = s.ht.delete(k); return }
+func (s *Set) Clear() error { return s.ht.clear() }
+func (s *Set) Has(k Value) (found bool, err error) { _, found, err = s.ht.lookup(k); return }
+func (s *Set) Insert(k Value) error { return s.ht.insert(k, None) }
+func (s *Set) Len() int { return int(s.ht.len) }
+func (s *Set) Iterate() Iterator { return s.ht.iterate() }
+func (s *Set) String() string { return toString(s) }
+func (s *Set) Type() string { return "set" }
+func (s *Set) Freeze() { s.ht.freeze() }
+func (s *Set) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: set") }
+func (s *Set) Truth() Bool { return s.Len() > 0 }
+
+func (s *Set) Attr(name string) (Value, error) { return builtinAttr(s, name, setMethods) }
+func (s *Set) AttrNames() []string { return builtinAttrNames(setMethods) }
+
+func (x *Set) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) {
+	y := y_.(*Set)
+	switch op {
+	case syntax.EQL:
+		ok, err := setsEqual(x, y, depth)
+		return ok, err
+	case syntax.NEQ:
+		ok, err := setsEqual(x, y, depth)
+		return !ok, err
+	default:
+		return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type())
+	}
+}
+
+func setsEqual(x, y *Set, depth int) (bool, error) {
+	if x.Len() != y.Len() {
+		return false, nil
+	}
+	for e := x.ht.head; e != nil; e = e.next {
+		if found, _ := y.Has(e.key); !found {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+func (s *Set) Union(iter Iterator) (Value, error) {
+	set := new(Set)
+	for e := s.ht.head; e != nil; e = e.next {
+		set.Insert(e.key) // can't fail
+	}
+	var x Value
+	for iter.Next(&x) {
+		if err := set.Insert(x); err != nil {
+			return nil, err
+		}
+	}
+	return set, nil
+}
+
+// toString returns the string form of value v.
+// It may be more efficient than v.String() for larger values.
+func toString(v Value) string {
+	buf := new(strings.Builder)
+	writeValue(buf, v, nil)
+	return buf.String()
+}
+
+// writeValue writes x to out.
+//
+// path is used to detect cycles.
+// It contains the list of *List and *Dict values we're currently printing.
+// (These are the only potentially cyclic structures.)
+// Callers should generally pass nil for path.
+// It is safe to re-use the same path slice for multiple calls.
+func writeValue(out *strings.Builder, x Value, path []Value) {
+	switch x := x.(type) {
+	case nil:
+		out.WriteString("<nil>") // indicates a bug
+
+	// These four cases are duplicates of T.String(), for efficiency.
+	case NoneType:
+		out.WriteString("None")
+
+	case Int:
+		out.WriteString(x.String())
+
+	case Bool:
+		if x {
+			out.WriteString("True")
+		} else {
+			out.WriteString("False")
+		}
+
+	case String:
+		out.WriteString(syntax.Quote(string(x), false))
+
+	case *List:
+		out.WriteByte('[')
+		if pathContains(path, x) {
+			out.WriteString("...") // list contains itself
+		} else {
+			for i, elem := range x.elems {
+				if i > 0 {
+					out.WriteString(", ")
+				}
+				writeValue(out, elem, append(path, x))
+			}
+		}
+		out.WriteByte(']')
+
+	case Tuple:
+		out.WriteByte('(')
+		for i, elem := range x {
+			if i > 0 {
+				out.WriteString(", ")
+			}
+			writeValue(out, elem, path)
+		}
+		if len(x) == 1 {
+			out.WriteByte(',')
+		}
+		out.WriteByte(')')
+
+	case *Function:
+		fmt.Fprintf(out, "<function %s>", x.Name())
+
+	case *Builtin:
+		if x.recv != nil {
+			fmt.Fprintf(out, "<built-in method %s of %s value>", x.Name(), x.recv.Type())
+		} else {
+			fmt.Fprintf(out, "<built-in function %s>", x.Name())
+		}
+
+	case *Dict:
+		out.WriteByte('{')
+		if pathContains(path, x) {
+			out.WriteString("...") // dict contains itself
+		} else {
+			sep := ""
+			for e := x.ht.head; e != nil; e = e.next {
+				k, v := e.key, e.value
+				out.WriteString(sep)
+				writeValue(out, k, path)
+				out.WriteString(": ")
+				writeValue(out, v, append(path, x)) // cycle check
+				sep = ", "
+			}
+		}
+		out.WriteByte('}')
+
+	case *Set:
+		out.WriteString("set([")
+		for e := x.ht.head; e != nil; e = e.next {
+			if e != x.ht.head {
+				out.WriteString(", ")
+			}
+			writeValue(out, e.key, path)
+		}
+		out.WriteString("])")
+
+	default:
+		out.WriteString(x.String())
+	}
+}
+
+func pathContains(path []Value, x Value) bool {
+	for _, y := range path {
+		if x == y {
+			return true
+		}
+	}
+	return false
+}
+
+// CompareLimit is the depth limit on recursive comparison operations such as == and <.
+// Comparison of data structures deeper than this limit may fail.
+var CompareLimit = 10
+
+// Equal reports whether two Starlark values are equal.
+func Equal(x, y Value) (bool, error) {
+	if x, ok := x.(String); ok {
+		return x == y, nil // fast path for an important special case
+	}
+	return EqualDepth(x, y, CompareLimit)
+}
+
+// EqualDepth reports whether two Starlark values are equal.
+//
+// Recursive comparisons by implementations of Value.CompareSameType
+// should use EqualDepth to prevent infinite recursion.
+func EqualDepth(x, y Value, depth int) (bool, error) {
+	return CompareDepth(syntax.EQL, x, y, depth)
+}
+
+// Compare compares two Starlark values.
+// The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE.
+// Compare returns an error if an ordered comparison was
+// requested for a type that does not support it.
+//
+// Recursive comparisons by implementations of Value.CompareSameType
+// should use CompareDepth to prevent infinite recursion.
+func Compare(op syntax.Token, x, y Value) (bool, error) {
+	return CompareDepth(op, x, y, CompareLimit)
+}
+
+// CompareDepth compares two Starlark values.
+// The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE.
+// CompareDepth returns an error if an ordered comparison was
+// requested for a pair of values that do not support it.
+//
+// The depth parameter limits the maximum depth of recursion
+// in cyclic data structures.
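+//
+// For example (an illustrative sketch):
+//
+//	lt, err := CompareDepth(syntax.LT, MakeInt(1), Float(2.5), CompareLimit)
+//	// lt == true, err == nil: int/float comparisons work across types.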
+func CompareDepth(op syntax.Token, x, y Value, depth int) (bool, error) {
+	if depth < 1 {
+		return false, fmt.Errorf("comparison exceeded maximum recursion depth")
+	}
+	if sameType(x, y) {
+		if xcomp, ok := x.(Comparable); ok {
+			return xcomp.CompareSameType(op, y, depth)
+		}
+
+		if xcomp, ok := x.(TotallyOrdered); ok {
+			t, err := xcomp.Cmp(y, depth)
+			if err != nil {
+				return false, err
+			}
+			return threeway(op, t), nil
+		}
+
+		// use identity comparison
+		switch op {
+		case syntax.EQL:
+			return x == y, nil
+		case syntax.NEQ:
+			return x != y, nil
+		}
+		return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type())
+	}
+
+	// different types
+
+	// int/float ordered comparisons
+	switch x := x.(type) {
+	case Int:
+		if y, ok := y.(Float); ok {
+			var cmp int
+			if y != y {
+				cmp = -1 // y is NaN
+			} else if !math.IsInf(float64(y), 0) {
+				cmp = x.rational().Cmp(y.rational()) // y is finite
+			} else if y > 0 {
+				cmp = -1 // y is +Inf
+			} else {
+				cmp = +1 // y is -Inf
+			}
+			return threeway(op, cmp), nil
+		}
+	case Float:
+		if y, ok := y.(Int); ok {
+			var cmp int
+			if x != x {
+				cmp = +1 // x is NaN
+			} else if !math.IsInf(float64(x), 0) {
+				cmp = x.rational().Cmp(y.rational()) // x is finite
+			} else if x > 0 {
+				cmp = +1 // x is +Inf
+			} else {
+				cmp = -1 // x is -Inf
+			}
+			return threeway(op, cmp), nil
+		}
+	}
+
+	// All other values of different types compare unequal.
+	switch op {
+	case syntax.EQL:
+		return false, nil
+	case syntax.NEQ:
+		return true, nil
+	}
+	return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type())
+}
+
+func sameType(x, y Value) bool {
+	return reflect.TypeOf(x) == reflect.TypeOf(y) || x.Type() == y.Type()
+}
+
+// threeway interprets a three-way comparison value cmp (-1, 0, +1)
+// as a boolean comparison (e.g. x < y).
+func threeway(op syntax.Token, cmp int) bool {
+	switch op {
+	case syntax.EQL:
+		return cmp == 0
+	case syntax.NEQ:
+		return cmp != 0
+	case syntax.LE:
+		return cmp <= 0
+	case syntax.LT:
+		return cmp < 0
+	case syntax.GE:
+		return cmp >= 0
+	case syntax.GT:
+		return cmp > 0
+	}
+	panic(op)
+}
+
+func b2i(b bool) int {
+	if b {
+		return 1
+	} else {
+		return 0
+	}
+}
+
+// Len returns the length of a string or sequence value,
+// and -1 for all others.
+//
+// Warning: Len(x) >= 0 does not imply Iterate(x) != nil.
+// A string has a known length but is not directly iterable.
+func Len(x Value) int {
+	switch x := x.(type) {
+	case String:
+		return x.Len()
+	case Indexable:
+		return x.Len()
+	case Sequence:
+		return x.Len()
+	}
+	return -1
+}
+
+// Iterate returns a new iterator for the value if iterable, nil otherwise.
+// If the result is non-nil, the caller must call Done when finished with it.
+//
+// Warning: Iterate(x) != nil does not imply Len(x) >= 0.
+// Some iterables may have unknown length.
+func Iterate(x Value) Iterator {
+	if x, ok := x.(Iterable); ok {
+		return x.Iterate()
+	}
+	return nil
+}
+
+// Bytes is the type of a Starlark binary string.
+//
+// A Bytes encapsulates an immutable sequence of bytes.
+// It is comparable, indexable, and sliceable, but not directly iterable;
+// use bytes.elems() for an iterable view.
+//
+// In this Go implementation, the elements of 'string' and 'bytes' are
+// both bytes, but in other implementations, notably Java, the elements
+// of a 'string' are UTF-16 codes (Java chars).
The spec abstracts text
+// strings as sequences of UTF-k codes that encode Unicode code points,
+// and operations that convert from text to binary incur UTF-k-to-UTF-8
+// transcoding; conversely, conversion from binary to text incurs
+// UTF-8-to-UTF-k transcoding. Because k=8 for Go, these operations
+// are the identity function, at least for valid encodings of text.
+type Bytes string
+
+var (
+	_ Comparable = Bytes("")
+	_ Sliceable  = Bytes("")
+	_ Indexable  = Bytes("")
+)
+
+func (b Bytes) String() string { return syntax.Quote(string(b), true) }
+func (b Bytes) Type() string { return "bytes" }
+func (b Bytes) Freeze() {} // immutable
+func (b Bytes) Truth() Bool { return len(b) > 0 }
+func (b Bytes) Hash() (uint32, error) { return String(b).Hash() }
+func (b Bytes) Len() int { return len(b) }
+func (b Bytes) Index(i int) Value { return b[i : i+1] }
+
+func (b Bytes) Attr(name string) (Value, error) { return builtinAttr(b, name, bytesMethods) }
+func (b Bytes) AttrNames() []string { return builtinAttrNames(bytesMethods) }
+
+func (b Bytes) Slice(start, end, step int) Value {
+	if step == 1 {
+		return b[start:end]
+	}
+
+	sign := signum(step)
+	var str []byte
+	for i := start; signum(end-i) == sign; i += step {
+		str = append(str, b[i])
+	}
+	return Bytes(str)
+}
+
+func (x Bytes) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) {
+	y := y_.(Bytes)
+	return threeway(op, strings.Compare(string(x), string(y))), nil
+}
diff --git a/vendor/go.starlark.net/starlarkstruct/module.go b/vendor/go.starlark.net/starlarkstruct/module.go
new file mode 100644
index 000000000..735c98ae3
--- /dev/null
+++ b/vendor/go.starlark.net/starlarkstruct/module.go
@@ -0,0 +1,43 @@
+package starlarkstruct
+
+import (
+	"fmt"
+
+	"go.starlark.net/starlark"
+)
+
+// A Module is a named collection of values,
+// typically a suite of functions imported by a load statement.
+//
+// It differs from Struct primarily in that its string representation
+// does not enumerate its fields.
+type Module struct {
+	Name    string
+	Members starlark.StringDict
+}
+
+var _ starlark.HasAttrs = (*Module)(nil)
+
+func (m *Module) Attr(name string) (starlark.Value, error) { return m.Members[name], nil }
+func (m *Module) AttrNames() []string { return m.Members.Keys() }
+func (m *Module) Freeze() { m.Members.Freeze() }
+func (m *Module) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", m.Type()) }
+func (m *Module) String() string { return fmt.Sprintf("<module %q>", m.Name) }
+func (m *Module) Truth() starlark.Bool { return true }
+func (m *Module) Type() string { return "module" }
+
+// MakeModule may be used as the implementation of a Starlark built-in
+// function, module(name, **kwargs). It returns a new module with the
+// specified name and members.
+func MakeModule(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
+	var name string
+	if err := starlark.UnpackPositionalArgs(b.Name(), args, nil, 1, &name); err != nil {
+		return nil, err
+	}
+	members := make(starlark.StringDict, len(kwargs))
+	for _, kwarg := range kwargs {
+		k := string(kwarg[0].(starlark.String))
+		members[k] = kwarg[1]
+	}
+	return &Module{name, members}, nil
+}
diff --git a/vendor/go.starlark.net/starlarkstruct/struct.go b/vendor/go.starlark.net/starlarkstruct/struct.go
new file mode 100644
index 000000000..ea2b1f639
--- /dev/null
+++ b/vendor/go.starlark.net/starlarkstruct/struct.go
@@ -0,0 +1,282 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package starlarkstruct defines the Starlark types 'struct' and +// 'module', both optional language extensions. +// +package starlarkstruct // import "go.starlark.net/starlarkstruct" + +// It is tempting to introduce a variant of Struct that is a wrapper +// around a Go struct value, for stronger typing guarantees and more +// efficient and convenient field lookup. However: +// 1) all fields of Starlark structs are optional, so we cannot represent +// them using more specific types such as String, Int, *Depset, and +// *File, as such types give no way to represent missing fields. +// 2) the efficiency gain of direct struct field access is rather +// marginal: finding the index of a field by binary searching on the +// sorted list of field names is quite fast compared to the other +// overheads. +// 3) the gains in compactness and spatial locality are also rather +// marginal: the array behind the []entry slice is (due to field name +// strings) only a factor of 2 larger than the corresponding Go struct +// would be, and, like the Go struct, requires only a single allocation. + +import ( + "fmt" + "sort" + "strings" + + "go.starlark.net/starlark" + "go.starlark.net/syntax" +) + +// Make is the implementation of a built-in function that instantiates +// an immutable struct from the specified keyword arguments. +// +// An application can add 'struct' to the Starlark environment like so: +// +// globals := starlark.StringDict{ +// "struct": starlark.NewBuiltin("struct", starlarkstruct.Make), +// } +// +func Make(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + if len(args) > 0 { + return nil, fmt.Errorf("struct: unexpected positional arguments") + } + return FromKeywords(Default, kwargs), nil +} + +// FromKeywords returns a new struct instance whose fields are specified by the +// key/value pairs in kwargs. (Each kwargs[i][0] must be a starlark.String.) +func FromKeywords(constructor starlark.Value, kwargs []starlark.Tuple) *Struct { + if constructor == nil { + panic("nil constructor") + } + s := &Struct{ + constructor: constructor, + entries: make(entries, 0, len(kwargs)), + } + for _, kwarg := range kwargs { + k := string(kwarg[0].(starlark.String)) + v := kwarg[1] + s.entries = append(s.entries, entry{k, v}) + } + sort.Sort(s.entries) + return s +} + +// FromStringDict returns a new struct instance whose elements are those of d. +// The constructor parameter specifies the constructor; use Default for an ordinary struct. +func FromStringDict(constructor starlark.Value, d starlark.StringDict) *Struct { + if constructor == nil { + panic("nil constructor") + } + s := &Struct{ + constructor: constructor, + entries: make(entries, 0, len(d)), + } + for k, v := range d { + s.entries = append(s.entries, entry{k, v}) + } + sort.Sort(s.entries) + return s +} + +// Struct is an immutable Starlark type that maps field names to values. +// It is not iterable and does not support len. +// +// A struct has a constructor, a distinct value that identifies a class +// of structs, and which appears in the struct's string representation. +// +// Operations such as x+y fail if the constructors of the two operands +// are not equal. +// +// The default constructor, Default, is the string "struct", but +// clients may wish to 'brand' structs for their own purposes. 
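+//
+// For example (an illustrative sketch; "point" is an arbitrary brand):
+//
+//	pt := FromStringDict(starlark.String("point"), starlark.StringDict{
+//		"x": starlark.MakeInt(1),
+//		"y": starlark.MakeInt(2),
+//	})
+//	// pt.String() == "point(x = 1, y = 2)"
+//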
+// The constructor value appears in the printed form of the value, +// and is accessible using the Constructor method. +// +// Use Attr to access its fields and AttrNames to enumerate them. +type Struct struct { + constructor starlark.Value + entries entries // sorted by name +} + +// Default is the default constructor for structs. +// It is merely the string "struct". +const Default = starlark.String("struct") + +type entries []entry + +func (a entries) Len() int { return len(a) } +func (a entries) Less(i, j int) bool { return a[i].name < a[j].name } +func (a entries) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type entry struct { + name string + value starlark.Value +} + +var ( + _ starlark.HasAttrs = (*Struct)(nil) + _ starlark.HasBinary = (*Struct)(nil) +) + +// ToStringDict adds a name/value entry to d for each field of the struct. +func (s *Struct) ToStringDict(d starlark.StringDict) { + for _, e := range s.entries { + d[e.name] = e.value + } +} + +func (s *Struct) String() string { + buf := new(strings.Builder) + switch constructor := s.constructor.(type) { + case starlark.String: + // NB: The Java implementation always prints struct + // even for Bazel provider instances. + buf.WriteString(constructor.GoString()) // avoid String()'s quotation + default: + buf.WriteString(s.constructor.String()) + } + buf.WriteByte('(') + for i, e := range s.entries { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(e.name) + buf.WriteString(" = ") + buf.WriteString(e.value.String()) + } + buf.WriteByte(')') + return buf.String() +} + +// Constructor returns the constructor used to create this struct. +func (s *Struct) Constructor() starlark.Value { return s.constructor } + +func (s *Struct) Type() string { return "struct" } +func (s *Struct) Truth() starlark.Bool { return true } // even when empty +func (s *Struct) Hash() (uint32, error) { + // Same algorithm as Tuple.hash, but with different primes. + var x, m uint32 = 8731, 9839 + for _, e := range s.entries { + namehash, _ := starlark.String(e.name).Hash() + x = x ^ 3*namehash + y, err := e.value.Hash() + if err != nil { + return 0, err + } + x = x ^ y*m + m += 7349 + } + return x, nil +} +func (s *Struct) Freeze() { + for _, e := range s.entries { + e.value.Freeze() + } +} + +func (x *Struct) Binary(op syntax.Token, y starlark.Value, side starlark.Side) (starlark.Value, error) { + if y, ok := y.(*Struct); ok && op == syntax.PLUS { + if side == starlark.Right { + x, y = y, x + } + + if eq, err := starlark.Equal(x.constructor, y.constructor); err != nil { + return nil, fmt.Errorf("in %s + %s: error comparing constructors: %v", + x.constructor, y.constructor, err) + } else if !eq { + return nil, fmt.Errorf("cannot add structs of different constructors: %s + %s", + x.constructor, y.constructor) + } + + z := make(starlark.StringDict, x.len()+y.len()) + for _, e := range x.entries { + z[e.name] = e.value + } + for _, e := range y.entries { + z[e.name] = e.value + } + + return FromStringDict(x.constructor, z), nil + } + return nil, nil // unhandled +} + +// Attr returns the value of the specified field. +func (s *Struct) Attr(name string) (starlark.Value, error) { + // Binary search the entries. + // This implementation is a specialization of + // sort.Search that avoids dynamic dispatch. 
+ n := len(s.entries) + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) + if s.entries[h].name < name { + i = h + 1 + } else { + j = h + } + } + if i < n && s.entries[i].name == name { + return s.entries[i].value, nil + } + + var ctor string + if s.constructor != Default { + ctor = s.constructor.String() + " " + } + return nil, starlark.NoSuchAttrError( + fmt.Sprintf("%sstruct has no .%s attribute", ctor, name)) +} + +func (s *Struct) len() int { return len(s.entries) } + +// AttrNames returns a new sorted list of the struct fields. +func (s *Struct) AttrNames() []string { + names := make([]string, len(s.entries)) + for i, e := range s.entries { + names[i] = e.name + } + return names +} + +func (x *Struct) CompareSameType(op syntax.Token, y_ starlark.Value, depth int) (bool, error) { + y := y_.(*Struct) + switch op { + case syntax.EQL: + return structsEqual(x, y, depth) + case syntax.NEQ: + eq, err := structsEqual(x, y, depth) + return !eq, err + default: + return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) + } +} + +func structsEqual(x, y *Struct, depth int) (bool, error) { + if x.len() != y.len() { + return false, nil + } + + if eq, err := starlark.Equal(x.constructor, y.constructor); err != nil { + return false, fmt.Errorf("error comparing struct constructors %v and %v: %v", + x.constructor, y.constructor, err) + } else if !eq { + return false, nil + } + + for i, n := 0, x.len(); i < n; i++ { + if x.entries[i].name != y.entries[i].name { + return false, nil + } else if eq, err := starlark.EqualDepth(x.entries[i].value, y.entries[i].value, depth-1); err != nil { + return false, err + } else if !eq { + return false, nil + } + } + return true, nil +} diff --git a/vendor/go.starlark.net/syntax/grammar.txt b/vendor/go.starlark.net/syntax/grammar.txt new file mode 100644 index 000000000..7f5dfc811 --- /dev/null +++ b/vendor/go.starlark.net/syntax/grammar.txt @@ -0,0 +1,129 @@ + +Grammar of Starlark +================== + +File = {Statement | newline} eof . + +Statement = DefStmt | IfStmt | ForStmt | WhileStmt | SimpleStmt . + +DefStmt = 'def' identifier '(' [Parameters [',']] ')' ':' Suite . + +Parameters = Parameter {',' Parameter}. + +Parameter = identifier | identifier '=' Test | '*' | '*' identifier | '**' identifier . + +IfStmt = 'if' Test ':' Suite {'elif' Test ':' Suite} ['else' ':' Suite] . + +ForStmt = 'for' LoopVariables 'in' Expression ':' Suite . + +WhileStmt = 'while' Test ':' Suite . + +Suite = [newline indent {Statement} outdent] | SimpleStmt . + +SimpleStmt = SmallStmt {';' SmallStmt} [';'] '\n' . +# NOTE: '\n' optional at EOF + +SmallStmt = ReturnStmt + | BreakStmt | ContinueStmt | PassStmt + | AssignStmt + | ExprStmt + | LoadStmt + . + +ReturnStmt = 'return' [Expression] . +BreakStmt = 'break' . +ContinueStmt = 'continue' . +PassStmt = 'pass' . +AssignStmt = Expression ('=' | '+=' | '-=' | '*=' | '/=' | '//=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=') Expression . +ExprStmt = Expression . + +LoadStmt = 'load' '(' string {',' [identifier '='] string} [','] ')' . + +Test = LambdaExpr + | IfExpr + | PrimaryExpr + | UnaryExpr + | BinaryExpr + . + +LambdaExpr = 'lambda' [Parameters] ':' Test . + +IfExpr = Test 'if' Test 'else' Test . + +PrimaryExpr = Operand + | PrimaryExpr DotSuffix + | PrimaryExpr CallSuffix + | PrimaryExpr SliceSuffix + . + +Operand = identifier + | int | float | string + | ListExpr | ListComp + | DictExpr | DictComp + | '(' [Expression [',']] ')' + | ('-' | '+') PrimaryExpr + . + +DotSuffix = '.' identifier . 
+CallSuffix = '(' [Arguments [',']] ')' .
+SliceSuffix = '[' [Expression] [':' Test [':' Test]] ']' .
+
+Arguments = Argument {',' Argument} .
+Argument = Test | identifier '=' Test | '*' Test | '**' Test .
+
+ListExpr = '[' [Expression [',']] ']' .
+ListComp = '[' Test {CompClause} ']'.
+
+DictExpr = '{' [Entries [',']] '}' .
+DictComp = '{' Entry {CompClause} '}' .
+Entries = Entry {',' Entry} .
+Entry = Test ':' Test .
+
+CompClause = 'for' LoopVariables 'in' Test | 'if' Test .
+
+UnaryExpr = 'not' Test .
+
+BinaryExpr = Test {Binop Test} .
+
+Binop = 'or'
+      | 'and'
+      | '==' | '!=' | '<' | '>' | '<=' | '>=' | 'in' | 'not' 'in'
+      | '|'
+      | '^'
+      | '&'
+      | '-' | '+'
+      | '*' | '%' | '/' | '//'
+      .
+
+Expression = Test {',' Test} .
+# NOTE: trailing comma permitted only when within [...] or (...).
+
+LoopVariables = PrimaryExpr {',' PrimaryExpr} .
+
+
+# Notation (similar to Go spec):
+- lowercase and 'quoted' items are lexical tokens.
+- Capitalized names denote grammar productions.
+- (...) implies grouping
+- x | y means either x or y.
+- [x] means x is optional
+- {x} means x is repeated zero or more times
+- The end of each declaration is marked with a period.
+
+# Tokens
+- spaces: newline, eof, indent, outdent.
+- identifier.
+- literals: string, int, float.
+- plus all quoted tokens such as '+=', 'return'.
+
+# Notes:
+- Ambiguity is resolved using operator precedence.
+- The grammar does not enforce the legal order of params and args,
+  nor that the first compclause must be a 'for'.
+
+TODO:
+- explain how the lexer generates indent, outdent, and newline tokens.
+- why is unary NOT separated from unary - and +?
+- the grammar is (mostly) in LL(1) style so, for example,
+  dot expressions are formed as suffixes, not complete expressions,
+  which makes the spec harder to read. Reorganize into non-LL(1) form?
diff --git a/vendor/go.starlark.net/syntax/parse.go b/vendor/go.starlark.net/syntax/parse.go
new file mode 100644
index 000000000..f4c8fff4d
--- /dev/null
+++ b/vendor/go.starlark.net/syntax/parse.go
@@ -0,0 +1,1028 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+// This file defines a recursive-descent parser for Starlark.
+// The LL(1) grammar of Starlark and the names of many productions follow Python 2.7.
+//
+// TODO(adonovan): use syntax.Error more systematically throughout the
+// package. Verify that error positions are correct using the
+// chunkedfile mechanism.
+
+import "log"
+
+// Enable this flag to print the token stream and log.Fatal on the first error.
+const debug = false
+
+// A Mode value is a set of flags (or 0) that controls optional parser functionality.
+type Mode uint
+
+const (
+	RetainComments Mode = 1 << iota // retain comments in AST; see Node.Comments
+)
+
+// Parse parses the input data and returns the corresponding parse tree.
+//
+// If src != nil, Parse parses the source from src and the filename
+// is only used when recording position information.
+// The type of the argument for the src parameter must be string,
+// []byte, io.Reader, or FilePortion.
+// If src == nil, Parse parses the file specified by filename.
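+//
+// A typical call (illustrative sketch):
+//
+//	f, err := Parse("example.star", "x = 1 + 2\n", 0)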
+func Parse(filename string, src interface{}, mode Mode) (f *File, err error) { + in, err := newScanner(filename, src, mode&RetainComments != 0) + if err != nil { + return nil, err + } + p := parser{in: in} + defer p.in.recover(&err) + + p.nextToken() // read first lookahead token + f = p.parseFile() + if f != nil { + f.Path = filename + } + p.assignComments(f) + return f, nil +} + +// ParseCompoundStmt parses a single compound statement: +// a blank line, a def, for, while, or if statement, or a +// semicolon-separated list of simple statements followed +// by a newline. These are the units on which the REPL operates. +// ParseCompoundStmt does not consume any following input. +// The parser calls the readline function each +// time it needs a new line of input. +func ParseCompoundStmt(filename string, readline func() ([]byte, error)) (f *File, err error) { + in, err := newScanner(filename, readline, false) + if err != nil { + return nil, err + } + + p := parser{in: in} + defer p.in.recover(&err) + + p.nextToken() // read first lookahead token + + var stmts []Stmt + switch p.tok { + case DEF, IF, FOR, WHILE: + stmts = p.parseStmt(stmts) + case NEWLINE: + // blank line + default: + stmts = p.parseSimpleStmt(stmts, false) + // Require but don't consume newline, to avoid blocking again. + if p.tok != NEWLINE { + p.in.errorf(p.in.pos, "invalid syntax") + } + } + + return &File{Path: filename, Stmts: stmts}, nil +} + +// ParseExpr parses a Starlark expression. +// A comma-separated list of expressions is parsed as a tuple. +// See Parse for explanation of parameters. +func ParseExpr(filename string, src interface{}, mode Mode) (expr Expr, err error) { + in, err := newScanner(filename, src, mode&RetainComments != 0) + if err != nil { + return nil, err + } + p := parser{in: in} + defer p.in.recover(&err) + + p.nextToken() // read first lookahead token + + // Use parseExpr, not parseTest, to permit an unparenthesized tuple. + expr = p.parseExpr(false) + + // A following newline (e.g. "f()\n") appears outside any brackets, + // on a non-blank line, and thus results in a NEWLINE token. + if p.tok == NEWLINE { + p.nextToken() + } + + if p.tok != EOF { + p.in.errorf(p.in.pos, "got %#v after expression, want EOF", p.tok) + } + p.assignComments(expr) + return expr, nil +} + +type parser struct { + in *scanner + tok Token + tokval tokenValue +} + +// nextToken advances the scanner and returns the position of the +// previous token. 
+func (p *parser) nextToken() Position { + oldpos := p.tokval.pos + p.tok = p.in.nextToken(&p.tokval) + // enable to see the token stream + if debug { + log.Printf("nextToken: %-20s%+v\n", p.tok, p.tokval.pos) + } + return oldpos +} + +// file_input = (NEWLINE | stmt)* EOF +func (p *parser) parseFile() *File { + var stmts []Stmt + for p.tok != EOF { + if p.tok == NEWLINE { + p.nextToken() + continue + } + stmts = p.parseStmt(stmts) + } + return &File{Stmts: stmts} +} + +func (p *parser) parseStmt(stmts []Stmt) []Stmt { + if p.tok == DEF { + return append(stmts, p.parseDefStmt()) + } else if p.tok == IF { + return append(stmts, p.parseIfStmt()) + } else if p.tok == FOR { + return append(stmts, p.parseForStmt()) + } else if p.tok == WHILE { + return append(stmts, p.parseWhileStmt()) + } + return p.parseSimpleStmt(stmts, true) +} + +func (p *parser) parseDefStmt() Stmt { + defpos := p.nextToken() // consume DEF + id := p.parseIdent() + p.consume(LPAREN) + params := p.parseParams() + p.consume(RPAREN) + p.consume(COLON) + body := p.parseSuite() + return &DefStmt{ + Def: defpos, + Name: id, + Params: params, + Body: body, + } +} + +func (p *parser) parseIfStmt() Stmt { + ifpos := p.nextToken() // consume IF + cond := p.parseTest() + p.consume(COLON) + body := p.parseSuite() + ifStmt := &IfStmt{ + If: ifpos, + Cond: cond, + True: body, + } + tail := ifStmt + for p.tok == ELIF { + elifpos := p.nextToken() // consume ELIF + cond := p.parseTest() + p.consume(COLON) + body := p.parseSuite() + elif := &IfStmt{ + If: elifpos, + Cond: cond, + True: body, + } + tail.ElsePos = elifpos + tail.False = []Stmt{elif} + tail = elif + } + if p.tok == ELSE { + tail.ElsePos = p.nextToken() // consume ELSE + p.consume(COLON) + tail.False = p.parseSuite() + } + return ifStmt +} + +func (p *parser) parseForStmt() Stmt { + forpos := p.nextToken() // consume FOR + vars := p.parseForLoopVariables() + p.consume(IN) + x := p.parseExpr(false) + p.consume(COLON) + body := p.parseSuite() + return &ForStmt{ + For: forpos, + Vars: vars, + X: x, + Body: body, + } +} + +func (p *parser) parseWhileStmt() Stmt { + whilepos := p.nextToken() // consume WHILE + cond := p.parseTest() + p.consume(COLON) + body := p.parseSuite() + return &WhileStmt{ + While: whilepos, + Cond: cond, + Body: body, + } +} + +// Equivalent to 'exprlist' production in Python grammar. +// +// loop_variables = primary_with_suffix (COMMA primary_with_suffix)* COMMA? +func (p *parser) parseForLoopVariables() Expr { + // Avoid parseExpr because it would consume the IN token + // following x in "for x in y: ...". + v := p.parsePrimaryWithSuffix() + if p.tok != COMMA { + return v + } + + list := []Expr{v} + for p.tok == COMMA { + p.nextToken() + if terminatesExprList(p.tok) { + break + } + list = append(list, p.parsePrimaryWithSuffix()) + } + return &TupleExpr{List: list} +} + +// simple_stmt = small_stmt (SEMI small_stmt)* SEMI? NEWLINE +// In REPL mode, it does not consume the NEWLINE. +func (p *parser) parseSimpleStmt(stmts []Stmt, consumeNL bool) []Stmt { + for { + stmts = append(stmts, p.parseSmallStmt()) + if p.tok != SEMI { + break + } + p.nextToken() // consume SEMI + if p.tok == NEWLINE || p.tok == EOF { + break + } + } + // EOF without NEWLINE occurs in `if x: pass`, for example. + if p.tok != EOF && consumeNL { + p.consume(NEWLINE) + } + + return stmts +} + +// small_stmt = RETURN expr? +// | PASS | BREAK | CONTINUE +// | LOAD ... 
+// | expr ('=' | '+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=') expr // assign +// | expr +func (p *parser) parseSmallStmt() Stmt { + switch p.tok { + case RETURN: + pos := p.nextToken() // consume RETURN + var result Expr + if p.tok != EOF && p.tok != NEWLINE && p.tok != SEMI { + result = p.parseExpr(false) + } + return &ReturnStmt{Return: pos, Result: result} + + case BREAK, CONTINUE, PASS: + tok := p.tok + pos := p.nextToken() // consume it + return &BranchStmt{Token: tok, TokenPos: pos} + + case LOAD: + return p.parseLoadStmt() + } + + // Assignment + x := p.parseExpr(false) + switch p.tok { + case EQ, PLUS_EQ, MINUS_EQ, STAR_EQ, SLASH_EQ, SLASHSLASH_EQ, PERCENT_EQ, AMP_EQ, PIPE_EQ, CIRCUMFLEX_EQ, LTLT_EQ, GTGT_EQ: + op := p.tok + pos := p.nextToken() // consume op + rhs := p.parseExpr(false) + return &AssignStmt{OpPos: pos, Op: op, LHS: x, RHS: rhs} + } + + // Expression statement (e.g. function call, doc string). + return &ExprStmt{X: x} +} + +// stmt = LOAD '(' STRING {',' (IDENT '=')? STRING} [','] ')' +func (p *parser) parseLoadStmt() *LoadStmt { + loadPos := p.nextToken() // consume LOAD + lparen := p.consume(LPAREN) + + if p.tok != STRING { + p.in.errorf(p.in.pos, "first operand of load statement must be a string literal") + } + module := p.parsePrimary().(*Literal) + + var from, to []*Ident + for p.tok != RPAREN && p.tok != EOF { + p.consume(COMMA) + if p.tok == RPAREN { + break // allow trailing comma + } + switch p.tok { + case STRING: + // load("module", "id") + // To name is same as original. + lit := p.parsePrimary().(*Literal) + id := &Ident{ + NamePos: lit.TokenPos.add(`"`), + Name: lit.Value.(string), + } + to = append(to, id) + from = append(from, id) + + case IDENT: + // load("module", to="from") + id := p.parseIdent() + to = append(to, id) + if p.tok != EQ { + p.in.errorf(p.in.pos, `load operand must be "%[1]s" or %[1]s="originalname" (want '=' after %[1]s)`, id.Name) + } + p.consume(EQ) + if p.tok != STRING { + p.in.errorf(p.in.pos, `original name of loaded symbol must be quoted: %s="originalname"`, id.Name) + } + lit := p.parsePrimary().(*Literal) + from = append(from, &Ident{ + NamePos: lit.TokenPos.add(`"`), + Name: lit.Value.(string), + }) + + case RPAREN: + p.in.errorf(p.in.pos, "trailing comma in load statement") + + default: + p.in.errorf(p.in.pos, `load operand must be "name" or localname="name" (got %#v)`, p.tok) + } + } + rparen := p.consume(RPAREN) + + if len(to) == 0 { + p.in.errorf(lparen, "load statement must import at least 1 symbol") + } + return &LoadStmt{ + Load: loadPos, + Module: module, + To: to, + From: from, + Rparen: rparen, + } +} + +// suite is typically what follows a COLON (e.g. after DEF or FOR). +// suite = simple_stmt | NEWLINE INDENT stmt+ OUTDENT +func (p *parser) parseSuite() []Stmt { + if p.tok == NEWLINE { + p.nextToken() // consume NEWLINE + p.consume(INDENT) + var stmts []Stmt + for p.tok != OUTDENT && p.tok != EOF { + stmts = p.parseStmt(stmts) + } + p.consume(OUTDENT) + return stmts + } + + return p.parseSimpleStmt(nil, true) +} + +func (p *parser) parseIdent() *Ident { + if p.tok != IDENT { + p.in.error(p.in.pos, "not an identifier") + } + id := &Ident{ + NamePos: p.tokval.pos, + Name: p.tokval.raw, + } + p.nextToken() + return id +} + +func (p *parser) consume(t Token) Position { + if p.tok != t { + p.in.errorf(p.in.pos, "got %#v, want %#v", p.tok, t) + } + return p.nextToken() +} + +// params = (param COMMA)* param COMMA? 
+//        |
+//
+// param = IDENT
+//       | IDENT EQ test
+//       | STAR
+//       | STAR IDENT
+//       | STARSTAR IDENT
+//
+// parseParams parses a parameter list. The resulting expressions are of the form:
+//
+//	*Ident                              x
+//	*Binary{Op: EQ, X: *Ident, Y: Expr} x=y
+//	*Unary{Op: STAR}                    *
+//	*Unary{Op: STAR, X: *Ident}         *args
+//	*Unary{Op: STARSTAR, X: *Ident}     **kwargs
+func (p *parser) parseParams() []Expr {
+	var params []Expr
+	for p.tok != RPAREN && p.tok != COLON && p.tok != EOF {
+		if len(params) > 0 {
+			p.consume(COMMA)
+		}
+		if p.tok == RPAREN {
+			break
+		}
+
+		// * or *args or **kwargs
+		if p.tok == STAR || p.tok == STARSTAR {
+			op := p.tok
+			pos := p.nextToken()
+			var x Expr
+			if op == STARSTAR || p.tok == IDENT {
+				x = p.parseIdent()
+			}
+			params = append(params, &UnaryExpr{
+				OpPos: pos,
+				Op:    op,
+				X:     x,
+			})
+			continue
+		}
+
+		// IDENT
+		// IDENT = test
+		id := p.parseIdent()
+		if p.tok == EQ { // default value
+			eq := p.nextToken()
+			dflt := p.parseTest()
+			params = append(params, &BinaryExpr{
+				X:     id,
+				OpPos: eq,
+				Op:    EQ,
+				Y:     dflt,
+			})
+			continue
+		}
+
+		params = append(params, id)
+	}
+	return params
+}
+
+// parseExpr parses an expression, possibly consisting of a
+// comma-separated list of 'test' expressions.
+//
+// In many cases we must use parseTest to avoid ambiguity such as
+// f(x, y) vs. f((x, y)).
+func (p *parser) parseExpr(inParens bool) Expr {
+	x := p.parseTest()
+	if p.tok != COMMA {
+		return x
+	}
+
+	// tuple
+	exprs := p.parseExprs([]Expr{x}, inParens)
+	return &TupleExpr{List: exprs}
+}
+
+// parseExprs parses a comma-separated list of expressions, starting with the comma.
+// It is used to parse tuples and list elements.
+// expr_list = (',' expr)* ','?
+func (p *parser) parseExprs(exprs []Expr, allowTrailingComma bool) []Expr {
+	for p.tok == COMMA {
+		pos := p.nextToken()
+		if terminatesExprList(p.tok) {
+			if !allowTrailingComma {
+				p.in.error(pos, "unparenthesized tuple with trailing comma")
+			}
+			break
+		}
+		exprs = append(exprs, p.parseTest())
+	}
+	return exprs
+}
+
+// parseTest parses a 'test', a single-component expression.
+func (p *parser) parseTest() Expr {
+	if p.tok == LAMBDA {
+		return p.parseLambda(true)
+	}
+
+	x := p.parseTestPrec(0)
+
+	// conditional expression (t IF cond ELSE f)
+	if p.tok == IF {
+		ifpos := p.nextToken()
+		cond := p.parseTestPrec(0)
+		if p.tok != ELSE {
+			p.in.error(ifpos, "conditional expression without else clause")
+		}
+		elsepos := p.nextToken()
+		else_ := p.parseTest()
+		return &CondExpr{If: ifpos, Cond: cond, True: x, ElsePos: elsepos, False: else_}
+	}
+
+	return x
+}
+
+// parseTestNoCond parses a single-component expression without
+// consuming a trailing 'if expr else expr'.
+func (p *parser) parseTestNoCond() Expr {
+	if p.tok == LAMBDA {
+		return p.parseLambda(false)
+	}
+	return p.parseTestPrec(0)
+}
+
+// parseLambda parses a lambda expression.
+// The allowCond flag allows the body to be an 'a if b else c' conditional.
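+// For example (illustrative), parseTest parses
+//
+//	lambda x: x if x else 0
+//
+// with allowCond=true, while parseTestNoCond (above) passes
+// allowCond=false so that a trailing 'if' is left for the caller.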
+func (p *parser) parseLambda(allowCond bool) Expr { + lambda := p.nextToken() + var params []Expr + if p.tok != COLON { + params = p.parseParams() + } + p.consume(COLON) + + var body Expr + if allowCond { + body = p.parseTest() + } else { + body = p.parseTestNoCond() + } + + return &LambdaExpr{ + Lambda: lambda, + Params: params, + Body: body, + } +} + +func (p *parser) parseTestPrec(prec int) Expr { + if prec >= len(preclevels) { + return p.parsePrimaryWithSuffix() + } + + // expr = NOT expr + if p.tok == NOT && prec == int(precedence[NOT]) { + pos := p.nextToken() + x := p.parseTestPrec(prec) + return &UnaryExpr{ + OpPos: pos, + Op: NOT, + X: x, + } + } + + return p.parseBinopExpr(prec) +} + +// expr = test (OP test)* +// Uses precedence climbing; see http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm#climbing. +func (p *parser) parseBinopExpr(prec int) Expr { + x := p.parseTestPrec(prec + 1) + for first := true; ; first = false { + if p.tok == NOT { + p.nextToken() // consume NOT + // In this context, NOT must be followed by IN. + // Replace NOT IN by a single NOT_IN token. + if p.tok != IN { + p.in.errorf(p.in.pos, "got %#v, want in", p.tok) + } + p.tok = NOT_IN + } + + // Binary operator of specified precedence? + opprec := int(precedence[p.tok]) + if opprec < prec { + return x + } + + // Comparisons are non-associative. + if !first && opprec == int(precedence[EQL]) { + p.in.errorf(p.in.pos, "%s does not associate with %s (use parens)", + x.(*BinaryExpr).Op, p.tok) + } + + op := p.tok + pos := p.nextToken() + y := p.parseTestPrec(opprec + 1) + x = &BinaryExpr{OpPos: pos, Op: op, X: x, Y: y} + } +} + +// precedence maps each operator to its precedence (0-7), or -1 for other tokens. +var precedence [maxToken]int8 + +// preclevels groups operators of equal precedence. +// Comparisons are nonassociative; other binary operators associate to the left. +// Unary MINUS, unary PLUS, and TILDE have higher precedence so are handled in parsePrimary. +// See https://github.com/google/starlark-go/blob/master/doc/spec.md#binary-operators +var preclevels = [...][]Token{ + {OR}, // or + {AND}, // and + {NOT}, // not (unary) + {EQL, NEQ, LT, GT, LE, GE, IN, NOT_IN}, // == != < > <= >= in not in + {PIPE}, // | + {CIRCUMFLEX}, // ^ + {AMP}, // & + {LTLT, GTGT}, // << >> + {MINUS, PLUS}, // - + {STAR, PERCENT, SLASH, SLASHSLASH}, // * % / // +} + +func init() { + // populate precedence table + for i := range precedence { + precedence[i] = -1 + } + for level, tokens := range preclevels { + for _, tok := range tokens { + precedence[tok] = int8(level) + } + } +} + +// primary_with_suffix = primary +// | primary '.' IDENT +// | primary slice_suffix +// | primary call_suffix +func (p *parser) parsePrimaryWithSuffix() Expr { + x := p.parsePrimary() + for { + switch p.tok { + case DOT: + dot := p.nextToken() + id := p.parseIdent() + x = &DotExpr{Dot: dot, X: x, Name: id} + case LBRACK: + x = p.parseSliceSuffix(x) + case LPAREN: + x = p.parseCallSuffix(x) + default: + return x + } + } +} + +// slice_suffix = '[' expr? ':' expr? ':' expr? 
']' +func (p *parser) parseSliceSuffix(x Expr) Expr { + lbrack := p.nextToken() + var lo, hi, step Expr + if p.tok != COLON { + y := p.parseExpr(false) + + // index x[y] + if p.tok == RBRACK { + rbrack := p.nextToken() + return &IndexExpr{X: x, Lbrack: lbrack, Y: y, Rbrack: rbrack} + } + + lo = y + } + + // slice or substring x[lo:hi:step] + if p.tok == COLON { + p.nextToken() + if p.tok != COLON && p.tok != RBRACK { + hi = p.parseTest() + } + } + if p.tok == COLON { + p.nextToken() + if p.tok != RBRACK { + step = p.parseTest() + } + } + rbrack := p.consume(RBRACK) + return &SliceExpr{X: x, Lbrack: lbrack, Lo: lo, Hi: hi, Step: step, Rbrack: rbrack} +} + +// call_suffix = '(' arg_list? ')' +func (p *parser) parseCallSuffix(fn Expr) Expr { + lparen := p.consume(LPAREN) + var rparen Position + var args []Expr + if p.tok == RPAREN { + rparen = p.nextToken() + } else { + args = p.parseArgs() + rparen = p.consume(RPAREN) + } + return &CallExpr{Fn: fn, Lparen: lparen, Args: args, Rparen: rparen} +} + +// parseArgs parses a list of actual parameter values (arguments). +// It mirrors the structure of parseParams. +// arg_list = ((arg COMMA)* arg COMMA?)? +func (p *parser) parseArgs() []Expr { + var args []Expr + for p.tok != RPAREN && p.tok != EOF { + if len(args) > 0 { + p.consume(COMMA) + } + if p.tok == RPAREN { + break + } + + // *args or **kwargs + if p.tok == STAR || p.tok == STARSTAR { + op := p.tok + pos := p.nextToken() + x := p.parseTest() + args = append(args, &UnaryExpr{ + OpPos: pos, + Op: op, + X: x, + }) + continue + } + + // We use a different strategy from Bazel here to stay within LL(1). + // Instead of looking ahead two tokens (IDENT, EQ) we parse + // 'test = test' then check that the first was an IDENT. + x := p.parseTest() + + if p.tok == EQ { + // name = value + if _, ok := x.(*Ident); !ok { + p.in.errorf(p.in.pos, "keyword argument must have form name=expr") + } + eq := p.nextToken() + y := p.parseTest() + x = &BinaryExpr{ + X: x, + OpPos: eq, + Op: EQ, + Y: y, + } + } + + args = append(args, x) + } + return args +} + +// primary = IDENT +// | INT | FLOAT | STRING | BYTES +// | '[' ... // list literal or comprehension +// | '{' ... // dict literal or comprehension +// | '(' ... 
// tuple or parenthesized expression +// | ('-'|'+'|'~') primary_with_suffix +func (p *parser) parsePrimary() Expr { + switch p.tok { + case IDENT: + return p.parseIdent() + + case INT, FLOAT, STRING, BYTES: + var val interface{} + tok := p.tok + switch tok { + case INT: + if p.tokval.bigInt != nil { + val = p.tokval.bigInt + } else { + val = p.tokval.int + } + case FLOAT: + val = p.tokval.float + case STRING, BYTES: + val = p.tokval.string + } + raw := p.tokval.raw + pos := p.nextToken() + return &Literal{Token: tok, TokenPos: pos, Raw: raw, Value: val} + + case LBRACK: + return p.parseList() + + case LBRACE: + return p.parseDict() + + case LPAREN: + lparen := p.nextToken() + if p.tok == RPAREN { + // empty tuple + rparen := p.nextToken() + return &TupleExpr{Lparen: lparen, Rparen: rparen} + } + e := p.parseExpr(true) // allow trailing comma + rparen := p.consume(RPAREN) + return &ParenExpr{ + Lparen: lparen, + X: e, + Rparen: rparen, + } + + case MINUS, PLUS, TILDE: // unary + tok := p.tok + pos := p.nextToken() + x := p.parsePrimaryWithSuffix() + return &UnaryExpr{ + OpPos: pos, + Op: tok, + X: x, + } + } + p.in.errorf(p.in.pos, "got %#v, want primary expression", p.tok) + panic("unreachable") +} + +// list = '[' ']' +// | '[' expr ']' +// | '[' expr expr_list ']' +// | '[' expr (FOR loop_variables IN expr)+ ']' +func (p *parser) parseList() Expr { + lbrack := p.nextToken() + if p.tok == RBRACK { + // empty List + rbrack := p.nextToken() + return &ListExpr{Lbrack: lbrack, Rbrack: rbrack} + } + + x := p.parseTest() + + if p.tok == FOR { + // list comprehension + return p.parseComprehensionSuffix(lbrack, x, RBRACK) + } + + exprs := []Expr{x} + if p.tok == COMMA { + // multi-item list literal + exprs = p.parseExprs(exprs, true) // allow trailing comma + } + + rbrack := p.consume(RBRACK) + return &ListExpr{Lbrack: lbrack, List: exprs, Rbrack: rbrack} +} + +// dict = '{' '}' +// | '{' dict_entry_list '}' +// | '{' dict_entry FOR loop_variables IN expr '}' +func (p *parser) parseDict() Expr { + lbrace := p.nextToken() + if p.tok == RBRACE { + // empty dict + rbrace := p.nextToken() + return &DictExpr{Lbrace: lbrace, Rbrace: rbrace} + } + + x := p.parseDictEntry() + + if p.tok == FOR { + // dict comprehension + return p.parseComprehensionSuffix(lbrace, x, RBRACE) + } + + entries := []Expr{x} + for p.tok == COMMA { + p.nextToken() + if p.tok == RBRACE { + break + } + entries = append(entries, p.parseDictEntry()) + } + + rbrace := p.consume(RBRACE) + return &DictExpr{Lbrace: lbrace, List: entries, Rbrace: rbrace} +} + +// dict_entry = test ':' test +func (p *parser) parseDictEntry() *DictEntry { + k := p.parseTest() + colon := p.consume(COLON) + v := p.parseTest() + return &DictEntry{Key: k, Colon: colon, Value: v} +} + +// comp_suffix = FOR loopvars IN expr comp_suffix +// | IF expr comp_suffix +// | ']' or ')' (end) +// +// There can be multiple FOR/IF clauses; the first is always a FOR. +func (p *parser) parseComprehensionSuffix(lbrace Position, body Expr, endBrace Token) Expr { + var clauses []Node + for p.tok != endBrace { + if p.tok == FOR { + pos := p.nextToken() + vars := p.parseForLoopVariables() + in := p.consume(IN) + // Following Python 3, the operand of IN cannot be: + // - a conditional expression ('x if y else z'), + // due to conflicts in Python grammar + // ('if' is used by the comprehension); + // - a lambda expression + // - an unparenthesized tuple. 
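+ // Using parseTestPrec(0) below enforces this: in [x for x in a if b],
+ // for example, the 'if' starts an IfClause rather than being consumed
+ // as the conditional 'a if b else ...'.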
+ x := p.parseTestPrec(0) + clauses = append(clauses, &ForClause{For: pos, Vars: vars, In: in, X: x}) + } else if p.tok == IF { + pos := p.nextToken() + cond := p.parseTestNoCond() + clauses = append(clauses, &IfClause{If: pos, Cond: cond}) + } else { + p.in.errorf(p.in.pos, "got %#v, want '%s', for, or if", p.tok, endBrace) + } + } + rbrace := p.nextToken() + + return &Comprehension{ + Curly: endBrace == RBRACE, + Lbrack: lbrace, + Body: body, + Clauses: clauses, + Rbrack: rbrace, + } +} + +func terminatesExprList(tok Token) bool { + switch tok { + case EOF, NEWLINE, EQ, RBRACE, RBRACK, RPAREN, SEMI: + return true + } + return false +} + +// Comment assignment. +// We build two lists of all subnodes, preorder and postorder. +// The preorder list is ordered by start location, with outer nodes first. +// The postorder list is ordered by end location, with outer nodes last. +// We use the preorder list to assign each whole-line comment to the syntax +// immediately following it, and we use the postorder list to assign each +// end-of-line comment to the syntax immediately preceding it. + +// flattenAST returns the list of AST nodes, both in prefix order and in postfix +// order. +func flattenAST(root Node) (pre, post []Node) { + stack := []Node{} + Walk(root, func(n Node) bool { + if n != nil { + pre = append(pre, n) + stack = append(stack, n) + } else { + post = append(post, stack[len(stack)-1]) + stack = stack[:len(stack)-1] + } + return true + }) + return pre, post +} + +// assignComments attaches comments to nearby syntax. +func (p *parser) assignComments(n Node) { + // Leave early if there are no comments + if len(p.in.lineComments)+len(p.in.suffixComments) == 0 { + return + } + + pre, post := flattenAST(n) + + // Assign line comments to syntax immediately following. + line := p.in.lineComments + for _, x := range pre { + start, _ := x.Span() + + switch x.(type) { + case *File: + continue + } + + for len(line) > 0 && !start.isBefore(line[0].Start) { + x.AllocComments() + x.Comments().Before = append(x.Comments().Before, line[0]) + line = line[1:] + } + } + + // Remaining line comments go at end of file. + if len(line) > 0 { + n.AllocComments() + n.Comments().After = append(n.Comments().After, line...) + } + + // Assign suffix comments to syntax immediately before. + suffix := p.in.suffixComments + for i := len(post) - 1; i >= 0; i-- { + x := post[i] + + // Do not assign suffix comments to file + switch x.(type) { + case *File: + continue + } + + _, end := x.Span() + if len(suffix) > 0 && end.isBefore(suffix[len(suffix)-1].Start) { + x.AllocComments() + x.Comments().Suffix = append(x.Comments().Suffix, suffix[len(suffix)-1]) + suffix = suffix[:len(suffix)-1] + } + } +} diff --git a/vendor/go.starlark.net/syntax/quote.go b/vendor/go.starlark.net/syntax/quote.go new file mode 100644 index 000000000..741e106ad --- /dev/null +++ b/vendor/go.starlark.net/syntax/quote.go @@ -0,0 +1,309 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +// Starlark quoted string utilities. + +import ( + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// unesc maps single-letter chars following \ to their actual values. +var unesc = [256]byte{ + 'a': '\a', + 'b': '\b', + 'f': '\f', + 'n': '\n', + 'r': '\r', + 't': '\t', + 'v': '\v', + '\\': '\\', + '\'': '\'', + '"': '"', +} + +// esc maps escape-worthy bytes to the char that should follow \. 
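+// The two tables are mutual inverses on these bytes; for example,
+// esc['\n'] == 'n' and unesc['n'] == '\n', so simple escapes
+// round-trip through Quote and unquote.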
+var esc = [256]byte{ + '\a': 'a', + '\b': 'b', + '\f': 'f', + '\n': 'n', + '\r': 'r', + '\t': 't', + '\v': 'v', + '\\': '\\', + '\'': '\'', + '"': '"', +} + +// unquote unquotes the quoted string, returning the actual +// string value, whether the original was triple-quoted, +// whether it was a byte string, and an error describing invalid input. +func unquote(quoted string) (s string, triple, isByte bool, err error) { + // Check for raw prefix: means don't interpret the inner \. + raw := false + if strings.HasPrefix(quoted, "r") { + raw = true + quoted = quoted[1:] + } + // Check for bytes prefix. + if strings.HasPrefix(quoted, "b") { + isByte = true + quoted = quoted[1:] + } + + if len(quoted) < 2 { + err = fmt.Errorf("string literal too short") + return + } + + if quoted[0] != '"' && quoted[0] != '\'' || quoted[0] != quoted[len(quoted)-1] { + err = fmt.Errorf("string literal has invalid quotes") + return + } + + // Check for triple quoted string. + quote := quoted[0] + if len(quoted) >= 6 && quoted[1] == quote && quoted[2] == quote && quoted[:3] == quoted[len(quoted)-3:] { + triple = true + quoted = quoted[3 : len(quoted)-3] + } else { + quoted = quoted[1 : len(quoted)-1] + } + + // Now quoted is the quoted data, but no quotes. + // If we're in raw mode or there are no escapes or + // carriage returns, we're done. + var unquoteChars string + if raw { + unquoteChars = "\r" + } else { + unquoteChars = "\\\r" + } + if !strings.ContainsAny(quoted, unquoteChars) { + s = quoted + return + } + + // Otherwise process quoted string. + // Each iteration processes one escape sequence along with the + // plain text leading up to it. + buf := new(strings.Builder) + for { + // Remove prefix before escape sequence. + i := strings.IndexAny(quoted, unquoteChars) + if i < 0 { + i = len(quoted) + } + buf.WriteString(quoted[:i]) + quoted = quoted[i:] + + if len(quoted) == 0 { + break + } + + // Process carriage return. + if quoted[0] == '\r' { + buf.WriteByte('\n') + if len(quoted) > 1 && quoted[1] == '\n' { + quoted = quoted[2:] + } else { + quoted = quoted[1:] + } + continue + } + + // Process escape sequence. + if len(quoted) == 1 { + err = fmt.Errorf(`truncated escape sequence \`) + return + } + + switch quoted[1] { + default: + // In Starlark, like Go, a backslash must escape something. + // (Python still treats unnecessary backslashes literally, + // but since 3.6 has emitted a deprecation warning.) + err = fmt.Errorf("invalid escape sequence \\%c", quoted[1]) + return + + case '\n': + // Ignore the escape and the line break. + quoted = quoted[2:] + + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '\'', '"': + // One-char escape. + // Escapes are allowed for both kinds of quotation + // mark, not just the kind in use. + buf.WriteByte(unesc[quoted[1]]) + quoted = quoted[2:] + + case '0', '1', '2', '3', '4', '5', '6', '7': + // Octal escape, up to 3 digits, \OOO. + n := int(quoted[1] - '0') + quoted = quoted[2:] + for i := 1; i < 3; i++ { + if len(quoted) == 0 || quoted[0] < '0' || '7' < quoted[0] { + break + } + n = n*8 + int(quoted[0]-'0') + quoted = quoted[1:] + } + if !isByte && n > 127 { + err = fmt.Errorf(`non-ASCII octal escape \%o (use \u%04X for the UTF-8 encoding of U+%04X)`, n, n, n) + return + } + if n >= 256 { + // NOTE: Python silently discards the high bit, + // so that '\541' == '\141' == 'a'. + // Let's see if we can avoid doing that in BUILD files. 
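+ // For example, in a bytes literal "\101" decodes to 'A'
+ // (0o101 == 65), while "\541" (0o541 == 353) falls through
+ // to the error below.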
+ err = fmt.Errorf(`invalid escape sequence \%03o`, n) + return + } + buf.WriteByte(byte(n)) + + case 'x': + // Hexadecimal escape, exactly 2 digits, \xXX. [0-127] + if len(quoted) < 4 { + err = fmt.Errorf(`truncated escape sequence %s`, quoted) + return + } + n, err1 := strconv.ParseUint(quoted[2:4], 16, 0) + if err1 != nil { + err = fmt.Errorf(`invalid escape sequence %s`, quoted[:4]) + return + } + if !isByte && n > 127 { + err = fmt.Errorf(`non-ASCII hex escape %s (use \u%04X for the UTF-8 encoding of U+%04X)`, + quoted[:4], n, n) + return + } + buf.WriteByte(byte(n)) + quoted = quoted[4:] + + case 'u', 'U': + // Unicode code point, 4 (\uXXXX) or 8 (\UXXXXXXXX) hex digits. + sz := 6 + if quoted[1] == 'U' { + sz = 10 + } + if len(quoted) < sz { + err = fmt.Errorf(`truncated escape sequence %s`, quoted) + return + } + n, err1 := strconv.ParseUint(quoted[2:sz], 16, 0) + if err1 != nil { + err = fmt.Errorf(`invalid escape sequence %s`, quoted[:sz]) + return + } + if n > unicode.MaxRune { + err = fmt.Errorf(`code point out of range: %s (max \U%08x)`, + quoted[:sz], n) + return + } + // As in Go, surrogates are disallowed. + if 0xD800 <= n && n < 0xE000 { + err = fmt.Errorf(`invalid Unicode code point U+%04X`, n) + return + } + buf.WriteRune(rune(n)) + quoted = quoted[sz:] + } + } + + s = buf.String() + return +} + +// indexByte returns the index of the first instance of b in s, or else -1. +func indexByte(s string, b byte) int { + for i := 0; i < len(s); i++ { + if s[i] == b { + return i + } + } + return -1 +} + +// Quote returns a Starlark literal that denotes s. +// If b, it returns a bytes literal. +func Quote(s string, b bool) string { + const hex = "0123456789abcdef" + var runeTmp [utf8.UTFMax]byte + + buf := make([]byte, 0, 3*len(s)/2) + if b { + buf = append(buf, 'b') + } + buf = append(buf, '"') + for width := 0; len(s) > 0; s = s[width:] { + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeRuneInString(s) + } + if width == 1 && r == utf8.RuneError { + // String (!b) literals accept \xXX escapes only for ASCII, + // but we must use them here to represent invalid bytes. + // The result is not a legal literal. + buf = append(buf, `\x`...) + buf = append(buf, hex[s[0]>>4]) + buf = append(buf, hex[s[0]&0xF]) + continue + } + if r == '"' || r == '\\' { // always backslashed + buf = append(buf, '\\') + buf = append(buf, byte(r)) + continue + } + if strconv.IsPrint(r) { + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + continue + } + switch r { + case '\a': + buf = append(buf, `\a`...) + case '\b': + buf = append(buf, `\b`...) + case '\f': + buf = append(buf, `\f`...) + case '\n': + buf = append(buf, `\n`...) + case '\r': + buf = append(buf, `\r`...) + case '\t': + buf = append(buf, `\t`...) + case '\v': + buf = append(buf, `\v`...) + default: + switch { + case r < ' ' || r == 0x7f: + buf = append(buf, `\x`...) + buf = append(buf, hex[byte(r)>>4]) + buf = append(buf, hex[byte(r)&0xF]) + case r > utf8.MaxRune: + r = 0xFFFD + fallthrough + case r < 0x10000: + buf = append(buf, `\u`...) + for s := 12; s >= 0; s -= 4 { + buf = append(buf, hex[r>>uint(s)&0xF]) + } + default: + buf = append(buf, `\U`...) 
+ for s := 28; s >= 0; s -= 4 {
+ buf = append(buf, hex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ }
+ buf = append(buf, '"')
+ return string(buf)
+}
diff --git a/vendor/go.starlark.net/syntax/scan.go b/vendor/go.starlark.net/syntax/scan.go
new file mode 100644
index 000000000..bb4165e9d
--- /dev/null
+++ b/vendor/go.starlark.net/syntax/scan.go
@@ -0,0 +1,1123 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+// A lexical scanner for Starlark.
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math/big"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Token represents a Starlark lexical token.
+type Token int8
+
+const (
+ ILLEGAL Token = iota
+ EOF
+
+ NEWLINE
+ INDENT
+ OUTDENT
+
+ // Tokens with values
+ IDENT // x
+ INT // 123
+ FLOAT // 1.23e45
+ STRING // "foo" or 'foo' or '''foo''' or r'foo' or r"foo"
+ BYTES // b"foo", etc
+
+ // Punctuation
+ PLUS // +
+ MINUS // -
+ STAR // *
+ SLASH // /
+ SLASHSLASH // //
+ PERCENT // %
+ AMP // &
+ PIPE // |
+ CIRCUMFLEX // ^
+ LTLT // <<
+ GTGT // >>
+ TILDE // ~
+ DOT // .
+ COMMA // ,
+ EQ // =
+ SEMI // ;
+ COLON // :
+ LPAREN // (
+ RPAREN // )
+ LBRACK // [
+ RBRACK // ]
+ LBRACE // {
+ RBRACE // }
+ LT // <
+ GT // >
+ GE // >=
+ LE // <=
+ EQL // ==
+ NEQ // !=
+ PLUS_EQ // += (keep order consistent with PLUS..GTGT)
+ MINUS_EQ // -=
+ STAR_EQ // *=
+ SLASH_EQ // /=
+ SLASHSLASH_EQ // //=
+ PERCENT_EQ // %=
+ AMP_EQ // &=
+ PIPE_EQ // |=
+ CIRCUMFLEX_EQ // ^=
+ LTLT_EQ // <<=
+ GTGT_EQ // >>=
+ STARSTAR // **
+
+ // Keywords
+ AND
+ BREAK
+ CONTINUE
+ DEF
+ ELIF
+ ELSE
+ FOR
+ IF
+ IN
+ LAMBDA
+ LOAD
+ NOT
+ NOT_IN // synthesized by parser from NOT IN
+ OR
+ PASS
+ RETURN
+ WHILE
+
+ maxToken
+)
+
+func (tok Token) String() string { return tokenNames[tok] }
+
+// GoString is like String but quotes punctuation tokens.
+// Use Sprintf("%#v", tok) when constructing error messages.
+func (tok Token) GoString() string {
+ if tok >= PLUS && tok <= STARSTAR {
+ return "'" + tokenNames[tok] + "'"
+ }
+ return tokenNames[tok]
+}
+
+var tokenNames = [...]string{
+ ILLEGAL: "illegal token",
+ EOF: "end of file",
+ NEWLINE: "newline",
+ INDENT: "indent",
+ OUTDENT: "outdent",
+ IDENT: "identifier",
+ INT: "int literal",
+ FLOAT: "float literal",
+ STRING: "string literal",
+ BYTES: "bytes literal",
+ PLUS: "+",
+ MINUS: "-",
+ STAR: "*",
+ SLASH: "/",
+ SLASHSLASH: "//",
+ PERCENT: "%",
+ AMP: "&",
+ PIPE: "|",
+ CIRCUMFLEX: "^",
+ LTLT: "<<",
+ GTGT: ">>",
+ TILDE: "~",
+ DOT: ".",
+ COMMA: ",",
+ EQ: "=",
+ SEMI: ";",
+ COLON: ":",
+ LPAREN: "(",
+ RPAREN: ")",
+ LBRACK: "[",
+ RBRACK: "]",
+ LBRACE: "{",
+ RBRACE: "}",
+ LT: "<",
+ GT: ">",
+ GE: ">=",
+ LE: "<=",
+ EQL: "==",
+ NEQ: "!=",
+ PLUS_EQ: "+=",
+ MINUS_EQ: "-=",
+ STAR_EQ: "*=",
+ SLASH_EQ: "/=",
+ SLASHSLASH_EQ: "//=",
+ PERCENT_EQ: "%=",
+ AMP_EQ: "&=",
+ PIPE_EQ: "|=",
+ CIRCUMFLEX_EQ: "^=",
+ LTLT_EQ: "<<=",
+ GTGT_EQ: ">>=",
+ STARSTAR: "**",
+ AND: "and",
+ BREAK: "break",
+ CONTINUE: "continue",
+ DEF: "def",
+ ELIF: "elif",
+ ELSE: "else",
+ FOR: "for",
+ IF: "if",
+ IN: "in",
+ LAMBDA: "lambda",
+ LOAD: "load",
+ NOT: "not",
+ NOT_IN: "not in",
+ OR: "or",
+ PASS: "pass",
+ RETURN: "return",
+ WHILE: "while",
+}
+
+// A FilePortion describes the content of a portion of a file.
+// Callers may provide a FilePortion for the src argument of Parse
+// when the desired initial line and column numbers are not (1, 1),
+// such as when an expression is parsed from within a larger file.
+type FilePortion struct {
+ Content []byte
+ FirstLine, FirstCol int32
+}
+
+// A Position describes the location of a rune of input.
+type Position struct {
+ file *string // filename (indirect for compactness)
+ Line int32 // 1-based line number; 0 if line unknown
+ Col int32 // 1-based column (rune) number; 0 if column unknown
+}
+
+// IsValid reports whether the position is valid.
+func (p Position) IsValid() bool { return p.file != nil }
+
+// Filename returns the name of the file containing this position.
+func (p Position) Filename() string {
+ if p.file != nil {
+ return *p.file
+ }
+ return ""
+}
+
+// MakePosition returns position with the specified components.
+func MakePosition(file *string, line, col int32) Position { return Position{file, line, col} }
+
+// add returns the position at the end of s, assuming it starts at p.
+func (p Position) add(s string) Position {
+ if n := strings.Count(s, "\n"); n > 0 {
+ p.Line += int32(n)
+ s = s[strings.LastIndex(s, "\n")+1:]
+ p.Col = 1
+ }
+ p.Col += int32(utf8.RuneCountInString(s))
+ return p
+}
+
+func (p Position) String() string {
+ file := p.Filename()
+ if p.Line > 0 {
+ if p.Col > 0 {
+ return fmt.Sprintf("%s:%d:%d", file, p.Line, p.Col)
+ }
+ return fmt.Sprintf("%s:%d", file, p.Line)
+ }
+ return file
+}
+
+func (p Position) isBefore(q Position) bool {
+ if p.Line != q.Line {
+ return p.Line < q.Line
+ }
+ return p.Col < q.Col
+}
+
+// A scanner represents a single input file being parsed.
+type scanner struct {
+ rest []byte // rest of input (in REPL, a line of input)
+ token []byte // token being scanned
+ pos Position // current input position
+ depth int // nesting of [ ] { } ( )
+ indentstk []int // stack of indentation levels
+ dents int // number of saved INDENT (>0) or OUTDENT (<0) tokens to return
+ lineStart bool // after NEWLINE; convert spaces to indentation tokens
+ keepComments bool // accumulate comments in slice
+ lineComments []Comment // list of full line comments (if keepComments)
+ suffixComments []Comment // list of suffix comments (if keepComments)
+
+ readline func() ([]byte, error) // read next line of input (REPL only)
+}
+
+func newScanner(filename string, src interface{}, keepComments bool) (*scanner, error) {
+ var firstLine, firstCol int32 = 1, 1
+ if portion, ok := src.(FilePortion); ok {
+ firstLine, firstCol = portion.FirstLine, portion.FirstCol
+ }
+ sc := &scanner{
+ pos: MakePosition(&filename, firstLine, firstCol),
+ indentstk: make([]int, 1, 10), // []int{0} + spare capacity
+ lineStart: true,
+ keepComments: keepComments,
+ }
+ sc.readline, _ = src.(func() ([]byte, error)) // ParseCompoundStmt (REPL) only
+ if sc.readline == nil {
+ data, err := readSource(filename, src)
+ if err != nil {
+ return nil, err
+ }
+ sc.rest = data
+ }
+ return sc, nil
+}
+
+func readSource(filename string, src interface{}) ([]byte, error) {
+ switch src := src.(type) {
+ case string:
+ return []byte(src), nil
+ case []byte:
+ return src, nil
+ case io.Reader:
+ data, err := ioutil.ReadAll(src)
+ if err != nil {
+ err = &os.PathError{Op: "read", Path: filename, Err: err}
+ return nil, err
+ }
+ return data, nil
+ case FilePortion:
+ return src.Content, nil
+ case nil:
+ return ioutil.ReadFile(filename)
+ default:
+ return nil, fmt.Errorf("invalid source: %T", src)
+ }
+}
+
+// An Error describes the nature
and position of a scanner or parser error.
+type Error struct {
+ Pos Position
+ Msg string
+}
+
+func (e Error) Error() string { return e.Pos.String() + ": " + e.Msg }
+
+// error reports an error at the given position; errorf (below) is its
+// Sprintf-style variant. Neither returns: both panic.
+func (sc *scanner) error(pos Position, s string) {
+ panic(Error{pos, s})
+}
+
+func (sc *scanner) errorf(pos Position, format string, args ...interface{}) {
+ sc.error(pos, fmt.Sprintf(format, args...))
+}
+
+func (sc *scanner) recover(err *error) {
+ // The scanner and parser panic both for routine errors like
+ // syntax errors and for programmer bugs like array index
+ // errors. Turn both into error returns. Catching bug panics
+ // is especially important when processing many files.
+ switch e := recover().(type) {
+ case nil:
+ // no panic
+ case Error:
+ *err = e
+ default:
+ *err = Error{sc.pos, fmt.Sprintf("internal error: %v", e)}
+ if debug {
+ log.Fatal(*err)
+ }
+ }
+}
+
+// eof reports whether the input has reached end of file.
+func (sc *scanner) eof() bool {
+ return len(sc.rest) == 0 && !sc.readLine()
+}
+
+// readLine attempts to read another line of input.
+// Precondition: len(sc.rest)==0.
+func (sc *scanner) readLine() bool {
+ if sc.readline != nil {
+ var err error
+ sc.rest, err = sc.readline()
+ if err != nil {
+ sc.errorf(sc.pos, "%v", err) // EOF or ErrInterrupt
+ }
+ return len(sc.rest) > 0
+ }
+ return false
+}
+
+// peekRune returns the next rune in the input without consuming it.
+// Newlines in Unix, DOS, or Mac format are treated as one rune, '\n'.
+func (sc *scanner) peekRune() rune {
+ // TODO(adonovan): opt: measure and perhaps inline eof.
+ if sc.eof() {
+ return 0
+ }
+
+ // fast path: ASCII
+ if b := sc.rest[0]; b < utf8.RuneSelf {
+ if b == '\r' {
+ return '\n'
+ }
+ return rune(b)
+ }
+
+ r, _ := utf8.DecodeRune(sc.rest)
+ return r
+}
+
+// readRune consumes and returns the next rune in the input.
+// Newlines in Unix, DOS, or Mac format are treated as one rune, '\n'.
+func (sc *scanner) readRune() rune {
+ // eof() has been inlined here, both to avoid a call
+ // and to establish len(rest)>0 to avoid a bounds check.
+ if len(sc.rest) == 0 {
+ if !sc.readLine() {
+ sc.error(sc.pos, "internal scanner error: readRune at EOF")
+ }
+ // Redundant, but eliminates the bounds-check below.
+ if len(sc.rest) == 0 {
+ return 0
+ }
+ }
+
+ // fast path: ASCII
+ if b := sc.rest[0]; b < utf8.RuneSelf {
+ r := rune(b)
+ sc.rest = sc.rest[1:]
+ if r == '\r' {
+ if len(sc.rest) > 0 && sc.rest[0] == '\n' {
+ sc.rest = sc.rest[1:]
+ }
+ r = '\n'
+ }
+ if r == '\n' {
+ sc.pos.Line++
+ sc.pos.Col = 1
+ } else {
+ sc.pos.Col++
+ }
+ return r
+ }
+
+ r, size := utf8.DecodeRune(sc.rest)
+ sc.rest = sc.rest[size:]
+ sc.pos.Col++
+ return r
+}
+
+// tokenValue records the position and value associated with each token.
+type tokenValue struct {
+ raw string // raw text of token
+ int int64 // decoded int
+ bigInt *big.Int // decoded integers > int64
+ float float64 // decoded float
+ string string // decoded string or bytes
+ pos Position // start position of token
+}
+
+// startToken marks the beginning of the next input token.
+// It must be followed by a call to endToken once the token has
+// been consumed using readRune.
+func (sc *scanner) startToken(val *tokenValue) {
+ sc.token = sc.rest
+ val.raw = ""
+ val.pos = sc.pos
+}
+
+// endToken marks the end of an input token.
+// It records the actual token string in val.raw if the caller
+// has not done that already.
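+//
+// A typical sequence, sketched from the identifier case of nextToken below:
+//
+//	sc.startToken(val)   // val.pos = sc.pos, sc.token = sc.rest
+//	for isIdent(sc.peekRune()) {
+//		sc.readRune()
+//	}
+//	sc.endToken(val)     // val.raw = the runes just consumed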
+func (sc *scanner) endToken(val *tokenValue) { + if val.raw == "" { + val.raw = string(sc.token[:len(sc.token)-len(sc.rest)]) + } +} + +// nextToken is called by the parser to obtain the next input token. +// It returns the token value and sets val to the data associated with +// the token. +// +// For all our input tokens, the associated data is val.pos (the +// position where the token begins), val.raw (the input string +// corresponding to the token). For string and int tokens, the string +// and int fields additionally contain the token's interpreted value. +func (sc *scanner) nextToken(val *tokenValue) Token { + + // The following distribution of tokens guides case ordering: + // + // COMMA 27 % + // STRING 23 % + // IDENT 15 % + // EQL 11 % + // LBRACK 5.5 % + // RBRACK 5.5 % + // NEWLINE 3 % + // LPAREN 2.9 % + // RPAREN 2.9 % + // INT 2 % + // others < 1 % + // + // Although NEWLINE tokens are infrequent, and lineStart is + // usually (~97%) false on entry, skipped newlines account for + // about 50% of all iterations of the 'start' loop. + +start: + var c rune + + // Deal with leading spaces and indentation. + blank := false + savedLineStart := sc.lineStart + if sc.lineStart { + sc.lineStart = false + col := 0 + for { + c = sc.peekRune() + if c == ' ' { + col++ + sc.readRune() + } else if c == '\t' { + const tab = 8 + col += int(tab - (sc.pos.Col-1)%tab) + sc.readRune() + } else { + break + } + } + + // The third clause matches EOF. + if c == '#' || c == '\n' || c == 0 { + blank = true + } + + // Compute indentation level for non-blank lines not + // inside an expression. This is not the common case. + if !blank && sc.depth == 0 { + cur := sc.indentstk[len(sc.indentstk)-1] + if col > cur { + // indent + sc.dents++ + sc.indentstk = append(sc.indentstk, col) + } else if col < cur { + // outdent(s) + for len(sc.indentstk) > 0 && col < sc.indentstk[len(sc.indentstk)-1] { + sc.dents-- + sc.indentstk = sc.indentstk[:len(sc.indentstk)-1] // pop + } + if col != sc.indentstk[len(sc.indentstk)-1] { + sc.error(sc.pos, "unindent does not match any outer indentation level") + } + } + } + } + + // Return saved indentation tokens. + if sc.dents != 0 { + sc.startToken(val) + sc.endToken(val) + if sc.dents < 0 { + sc.dents++ + return OUTDENT + } else { + sc.dents-- + return INDENT + } + } + + // start of line proper + c = sc.peekRune() + + // Skip spaces. + for c == ' ' || c == '\t' { + sc.readRune() + c = sc.peekRune() + } + + // comment + if c == '#' { + if sc.keepComments { + sc.startToken(val) + } + // Consume up to newline (included). + for c != 0 && c != '\n' { + sc.readRune() + c = sc.peekRune() + } + if sc.keepComments { + sc.endToken(val) + if blank { + sc.lineComments = append(sc.lineComments, Comment{val.pos, val.raw}) + } else { + sc.suffixComments = append(sc.suffixComments, Comment{val.pos, val.raw}) + } + } + } + + // newline + if c == '\n' { + sc.lineStart = true + + // Ignore newlines within expressions (common case). + if sc.depth > 0 { + sc.readRune() + goto start + } + + // Ignore blank lines, except in the REPL, + // where they emit OUTDENTs and NEWLINE. + if blank { + if sc.readline == nil { + sc.readRune() + goto start + } else if len(sc.indentstk) > 1 { + sc.dents = 1 - len(sc.indentstk) + sc.indentstk = sc.indentstk[:1] + goto start + } + } + + // At top-level (not in an expression). 
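+ // For example, the two-line input
+ //
+ //	def f():
+ //	    pass
+ //
+ // yields DEF IDENT LPAREN RPAREN COLON NEWLINE INDENT PASS
+ // NEWLINE OUTDENT EOF.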
+ sc.startToken(val) + sc.readRune() + val.raw = "\n" + return NEWLINE + } + + // end of file + if c == 0 { + // Emit OUTDENTs for unfinished indentation, + // preceded by a NEWLINE if we haven't just emitted one. + if len(sc.indentstk) > 1 { + if savedLineStart { + sc.dents = 1 - len(sc.indentstk) + sc.indentstk = sc.indentstk[:1] + goto start + } else { + sc.lineStart = true + sc.startToken(val) + val.raw = "\n" + return NEWLINE + } + } + + sc.startToken(val) + sc.endToken(val) + return EOF + } + + // line continuation + if c == '\\' { + sc.readRune() + if sc.peekRune() != '\n' { + sc.errorf(sc.pos, "stray backslash in program") + } + sc.readRune() + goto start + } + + // start of the next token + sc.startToken(val) + + // comma (common case) + if c == ',' { + sc.readRune() + sc.endToken(val) + return COMMA + } + + // string literal + if c == '"' || c == '\'' { + return sc.scanString(val, c) + } + + // identifier or keyword + if isIdentStart(c) { + if (c == 'r' || c == 'b') && len(sc.rest) > 1 && (sc.rest[1] == '"' || sc.rest[1] == '\'') { + // r"..." + // b"..." + sc.readRune() + c = sc.peekRune() + return sc.scanString(val, c) + } else if c == 'r' && len(sc.rest) > 2 && sc.rest[1] == 'b' && (sc.rest[2] == '"' || sc.rest[2] == '\'') { + // rb"..." + sc.readRune() + sc.readRune() + c = sc.peekRune() + return sc.scanString(val, c) + } + + for isIdent(c) { + sc.readRune() + c = sc.peekRune() + } + sc.endToken(val) + if k, ok := keywordToken[val.raw]; ok { + return k + } + + return IDENT + } + + // brackets + switch c { + case '[', '(', '{': + sc.depth++ + sc.readRune() + sc.endToken(val) + switch c { + case '[': + return LBRACK + case '(': + return LPAREN + case '{': + return LBRACE + } + panic("unreachable") + + case ']', ')', '}': + if sc.depth == 0 { + sc.errorf(sc.pos, "unexpected %q", c) + } else { + sc.depth-- + } + sc.readRune() + sc.endToken(val) + switch c { + case ']': + return RBRACK + case ')': + return RPAREN + case '}': + return RBRACE + } + panic("unreachable") + } + + // int or float literal, or period + if isdigit(c) || c == '.' 
{ + return sc.scanNumber(val, c) + } + + // other punctuation + defer sc.endToken(val) + switch c { + case '=', '<', '>', '!', '+', '-', '%', '/', '&', '|', '^': // possibly followed by '=' + start := sc.pos + sc.readRune() + if sc.peekRune() == '=' { + sc.readRune() + switch c { + case '<': + return LE + case '>': + return GE + case '=': + return EQL + case '!': + return NEQ + case '+': + return PLUS_EQ + case '-': + return MINUS_EQ + case '/': + return SLASH_EQ + case '%': + return PERCENT_EQ + case '&': + return AMP_EQ + case '|': + return PIPE_EQ + case '^': + return CIRCUMFLEX_EQ + } + } + switch c { + case '=': + return EQ + case '<': + if sc.peekRune() == '<' { + sc.readRune() + if sc.peekRune() == '=' { + sc.readRune() + return LTLT_EQ + } else { + return LTLT + } + } + return LT + case '>': + if sc.peekRune() == '>' { + sc.readRune() + if sc.peekRune() == '=' { + sc.readRune() + return GTGT_EQ + } else { + return GTGT + } + } + return GT + case '!': + sc.error(start, "unexpected input character '!'") + case '+': + return PLUS + case '-': + return MINUS + case '/': + if sc.peekRune() == '/' { + sc.readRune() + if sc.peekRune() == '=' { + sc.readRune() + return SLASHSLASH_EQ + } else { + return SLASHSLASH + } + } + return SLASH + case '%': + return PERCENT + case '&': + return AMP + case '|': + return PIPE + case '^': + return CIRCUMFLEX + } + panic("unreachable") + + case ':', ';', '~': // single-char tokens (except comma) + sc.readRune() + switch c { + case ':': + return COLON + case ';': + return SEMI + case '~': + return TILDE + } + panic("unreachable") + + case '*': // possibly followed by '*' or '=' + sc.readRune() + switch sc.peekRune() { + case '*': + sc.readRune() + return STARSTAR + case '=': + sc.readRune() + return STAR_EQ + } + return STAR + } + + sc.errorf(sc.pos, "unexpected input character %#q", c) + panic("unreachable") +} + +func (sc *scanner) scanString(val *tokenValue, quote rune) Token { + start := sc.pos + triple := len(sc.rest) >= 3 && sc.rest[0] == byte(quote) && sc.rest[1] == byte(quote) && sc.rest[2] == byte(quote) + sc.readRune() + + // String literals may contain escaped or unescaped newlines, + // causing them to span multiple lines (gulps) of REPL input; + // they are the only such token. Thus we cannot call endToken, + // as it assumes sc.rest is unchanged since startToken. + // Instead, buffer the token here. + // TODO(adonovan): opt: buffer only if we encounter a newline. + raw := new(strings.Builder) + + // Copy the prefix, e.g. r' or " (see startToken). 
+ raw.Write(sc.token[:len(sc.token)-len(sc.rest)]) + + if !triple { + // single-quoted string literal + for { + if sc.eof() { + sc.error(val.pos, "unexpected EOF in string") + } + c := sc.readRune() + raw.WriteRune(c) + if c == quote { + break + } + if c == '\n' { + sc.error(val.pos, "unexpected newline in string") + } + if c == '\\' { + if sc.eof() { + sc.error(val.pos, "unexpected EOF in string") + } + c = sc.readRune() + raw.WriteRune(c) + } + } + } else { + // triple-quoted string literal + sc.readRune() + raw.WriteRune(quote) + sc.readRune() + raw.WriteRune(quote) + + quoteCount := 0 + for { + if sc.eof() { + sc.error(val.pos, "unexpected EOF in string") + } + c := sc.readRune() + raw.WriteRune(c) + if c == quote { + quoteCount++ + if quoteCount == 3 { + break + } + } else { + quoteCount = 0 + } + if c == '\\' { + if sc.eof() { + sc.error(val.pos, "unexpected EOF in string") + } + c = sc.readRune() + raw.WriteRune(c) + } + } + } + val.raw = raw.String() + + s, _, isByte, err := unquote(val.raw) + if err != nil { + sc.error(start, err.Error()) + } + val.string = s + if isByte { + return BYTES + } else { + return STRING + } +} + +func (sc *scanner) scanNumber(val *tokenValue, c rune) Token { + // https://github.com/google/starlark-go/blob/master/doc/spec.md#lexical-elements + // + // Python features not supported: + // - integer literals of >64 bits of precision + // - 123L or 123l long suffix + // - traditional octal: 0755 + // https://docs.python.org/2/reference/lexical_analysis.html#integer-and-long-integer-literals + + start := sc.pos + fraction, exponent := false, false + + if c == '.' { + // dot or start of fraction + sc.readRune() + c = sc.peekRune() + if !isdigit(c) { + sc.endToken(val) + return DOT + } + fraction = true + } else if c == '0' { + // hex, octal, binary or float + sc.readRune() + c = sc.peekRune() + + if c == '.' { + fraction = true + } else if c == 'x' || c == 'X' { + // hex + sc.readRune() + c = sc.peekRune() + if !isxdigit(c) { + sc.error(start, "invalid hex literal") + } + for isxdigit(c) { + sc.readRune() + c = sc.peekRune() + } + } else if c == 'o' || c == 'O' { + // octal + sc.readRune() + c = sc.peekRune() + if !isodigit(c) { + sc.error(sc.pos, "invalid octal literal") + } + for isodigit(c) { + sc.readRune() + c = sc.peekRune() + } + } else if c == 'b' || c == 'B' { + // binary + sc.readRune() + c = sc.peekRune() + if !isbdigit(c) { + sc.error(sc.pos, "invalid binary literal") + } + for isbdigit(c) { + sc.readRune() + c = sc.peekRune() + } + } else { + // float (or obsolete octal "0755") + allzeros, octal := true, true + for isdigit(c) { + if c != '0' { + allzeros = false + } + if c > '7' { + octal = false + } + sc.readRune() + c = sc.peekRune() + } + if c == '.' { + fraction = true + } else if c == 'e' || c == 'E' { + exponent = true + } else if octal && !allzeros { + sc.endToken(val) + sc.errorf(sc.pos, "obsolete form of octal literal; use 0o%s", val.raw[1:]) + } + } + } else { + // decimal + for isdigit(c) { + sc.readRune() + c = sc.peekRune() + } + + if c == '.' { + fraction = true + } else if c == 'e' || c == 'E' { + exponent = true + } + } + + if fraction { + sc.readRune() // consume '.' 
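+ // Note: "0.5", "7.", and "1.5e3" all take this fraction path; for a
+ // leading-dot literal such as ".25" the dot was already consumed
+ // above, so the readRune here consumes the first fraction digit
+ // instead, with the same net effect.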
+ c = sc.peekRune()
+ for isdigit(c) {
+ sc.readRune()
+ c = sc.peekRune()
+ }
+
+ if c == 'e' || c == 'E' {
+ exponent = true
+ }
+ }
+
+ if exponent {
+ sc.readRune() // consume [eE]
+ c = sc.peekRune()
+ if c == '+' || c == '-' {
+ sc.readRune()
+ c = sc.peekRune()
+ if !isdigit(c) {
+ sc.error(sc.pos, "invalid float literal")
+ }
+ }
+ for isdigit(c) {
+ sc.readRune()
+ c = sc.peekRune()
+ }
+ }
+
+ sc.endToken(val)
+ if fraction || exponent {
+ var err error
+ val.float, err = strconv.ParseFloat(val.raw, 64)
+ if err != nil {
+ sc.error(sc.pos, "invalid float literal")
+ }
+ return FLOAT
+ } else {
+ var err error
+ s := val.raw
+ val.bigInt = nil
+ if len(s) > 2 && s[0] == '0' && (s[1] == 'o' || s[1] == 'O') {
+ val.int, err = strconv.ParseInt(s[2:], 8, 64)
+ } else if len(s) > 2 && s[0] == '0' && (s[1] == 'b' || s[1] == 'B') {
+ val.int, err = strconv.ParseInt(s[2:], 2, 64)
+ } else {
+ val.int, err = strconv.ParseInt(s, 0, 64)
+ if err != nil {
+ num := new(big.Int)
+ var ok bool
+ val.bigInt, ok = num.SetString(s, 0)
+ if ok {
+ err = nil
+ }
+ }
+ }
+ if err != nil {
+ sc.error(start, "invalid int literal")
+ }
+ return INT
+ }
+}
+
+// isIdent reports whether c is an identifier rune.
+func isIdent(c rune) bool {
+ return isdigit(c) || isIdentStart(c)
+}
+
+func isIdentStart(c rune) bool {
+ return 'a' <= c && c <= 'z' ||
+ 'A' <= c && c <= 'Z' ||
+ c == '_' ||
+ unicode.IsLetter(c)
+}
+
+func isdigit(c rune) bool { return '0' <= c && c <= '9' }
+func isodigit(c rune) bool { return '0' <= c && c <= '7' }
+func isxdigit(c rune) bool { return isdigit(c) || 'A' <= c && c <= 'F' || 'a' <= c && c <= 'f' }
+func isbdigit(c rune) bool { return '0' == c || c == '1' }
+
+// keywordToken records the special tokens for
+// strings that should not be treated as ordinary identifiers.
+var keywordToken = map[string]Token{
+ "and": AND,
+ "break": BREAK,
+ "continue": CONTINUE,
+ "def": DEF,
+ "elif": ELIF,
+ "else": ELSE,
+ "for": FOR,
+ "if": IF,
+ "in": IN,
+ "lambda": LAMBDA,
+ "load": LOAD,
+ "not": NOT,
+ "or": OR,
+ "pass": PASS,
+ "return": RETURN,
+ "while": WHILE,
+
+ // reserved words:
+ "as": ILLEGAL,
+ // "assert": ILLEGAL, // heavily used by our tests
+ "class": ILLEGAL,
+ "del": ILLEGAL,
+ "except": ILLEGAL,
+ "finally": ILLEGAL,
+ "from": ILLEGAL,
+ "global": ILLEGAL,
+ "import": ILLEGAL,
+ "is": ILLEGAL,
+ "nonlocal": ILLEGAL,
+ "raise": ILLEGAL,
+ "try": ILLEGAL,
+ "with": ILLEGAL,
+ "yield": ILLEGAL,
+}
diff --git a/vendor/go.starlark.net/syntax/syntax.go b/vendor/go.starlark.net/syntax/syntax.go
new file mode 100644
index 000000000..375663758
--- /dev/null
+++ b/vendor/go.starlark.net/syntax/syntax.go
@@ -0,0 +1,525 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syntax provides a Starlark parser and abstract syntax tree.
+package syntax // import "go.starlark.net/syntax"
+
+// A Node is a node in a Starlark syntax tree.
+type Node interface {
+ // Span returns the start and end position of the expression.
+ Span() (start, end Position)
+
+ // Comments returns the comments associated with this node.
+ // It returns nil if RetainComments was not specified during parsing,
+ // or if AllocComments was not called.
+ Comments() *Comments
+
+ // AllocComments allocates a new Comments node if there was none.
+ // This makes it possible to add new comments using the Comments() method.
+ AllocComments()
+}
+
+// A Comment represents a single # comment.
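+//
+// For example, when keepComments is set, the input
+//
+//	# whole-line comment
+//	x = 1  # suffix comment
+//
+// records the first comment in the scanner's lineComments and the
+// second in its suffixComments.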
+type Comment struct {
+ Start Position
+ Text string // without trailing newline
+}
+
+// Comments collects the comments associated with an expression.
+type Comments struct {
+ Before []Comment // whole-line comments before this expression
+ Suffix []Comment // end-of-line comments after this expression (up to 1)
+
+ // For top-level expressions only, After lists whole-line
+ // comments following the expression.
+ After []Comment
+}
+
+// A commentsRef is a possibly-nil reference to a set of comments.
+// A commentsRef is embedded in each type of syntax node,
+// and provides its Comments and AllocComments methods.
+type commentsRef struct{ ref *Comments }
+
+// Comments returns the comments associated with a syntax node,
+// or nil if AllocComments has not yet been called.
+func (cr commentsRef) Comments() *Comments { return cr.ref }
+
+// AllocComments enables comments to be associated with a syntax node.
+func (cr *commentsRef) AllocComments() {
+ if cr.ref == nil {
+ cr.ref = new(Comments)
+ }
+}
+
+// Start returns the start position of the expression.
+func Start(n Node) Position {
+ start, _ := n.Span()
+ return start
+}
+
+// End returns the end position of the expression.
+func End(n Node) Position {
+ _, end := n.Span()
+ return end
+}
+
+// A File represents a Starlark file.
+type File struct {
+ commentsRef
+ Path string
+ Stmts []Stmt
+
+ Module interface{} // a *resolve.Module, set by resolver
+}
+
+func (x *File) Span() (start, end Position) {
+ if len(x.Stmts) == 0 {
+ return
+ }
+ start, _ = x.Stmts[0].Span()
+ _, end = x.Stmts[len(x.Stmts)-1].Span()
+ return start, end
+}
+
+// A Stmt is a Starlark statement.
+type Stmt interface {
+ Node
+ stmt()
+}
+
+func (*AssignStmt) stmt() {}
+func (*BranchStmt) stmt() {}
+func (*DefStmt) stmt() {}
+func (*ExprStmt) stmt() {}
+func (*ForStmt) stmt() {}
+func (*WhileStmt) stmt() {}
+func (*IfStmt) stmt() {}
+func (*LoadStmt) stmt() {}
+func (*ReturnStmt) stmt() {}
+
+// An AssignStmt represents an assignment:
+// x = 0
+// x, y = y, x
+// x += 1
+type AssignStmt struct {
+ commentsRef
+ OpPos Position
+ Op Token // = EQ | {PLUS,MINUS,STAR,PERCENT}_EQ
+ LHS Expr
+ RHS Expr
+}
+
+func (x *AssignStmt) Span() (start, end Position) {
+ start, _ = x.LHS.Span()
+ _, end = x.RHS.Span()
+ return
+}
+
+// A DefStmt represents a function definition.
+type DefStmt struct {
+ commentsRef
+ Def Position
+ Name *Ident
+ Params []Expr // param = ident | ident=expr | * | *ident | **ident
+ Body []Stmt
+
+ Function interface{} // a *resolve.Function, set by resolver
+}
+
+func (x *DefStmt) Span() (start, end Position) {
+ _, end = x.Body[len(x.Body)-1].Span()
+ return x.Def, end
+}
+
+// An ExprStmt is an expression evaluated for side effects.
+type ExprStmt struct {
+ commentsRef
+ X Expr
+}
+
+func (x *ExprStmt) Span() (start, end Position) {
+ return x.X.Span()
+}
+
+// An IfStmt is a conditional: If Cond: True; else: False.
+// 'elif' is desugared into a chain of IfStmts.
+type IfStmt struct {
+ commentsRef
+ If Position // IF or ELIF
+ Cond Expr
+ True []Stmt
+ ElsePos Position // ELSE or ELIF
+ False []Stmt // optional
+}
+
+func (x *IfStmt) Span() (start, end Position) {
+ body := x.False
+ if body == nil {
+ body = x.True
+ }
+ _, end = body[len(body)-1].Span()
+ return x.If, end
+}
+
+// A LoadStmt loads another module and binds names from it:
+// load(Module, "x", y="foo").
+//
+// The AST is slightly unfaithful to the concrete syntax here because
+// Starlark's load statement, so that it can be implemented in Python,
+// binds some names (like y above) with an identifier and some (like x)
+// without. For consistency we create fake identifiers for all the
+// strings.
+type LoadStmt struct {
+ commentsRef
+ Load Position
+ Module *Literal // a string
+ From []*Ident // original name in the loaded module
+ To []*Ident // local name bound in the loading module
+ Rparen Position
+}
+
+func (x *LoadStmt) Span() (start, end Position) {
+ return x.Load, x.Rparen
+}
+
+// ModuleName returns the name of the module loaded by this statement.
+func (x *LoadStmt) ModuleName() string { return x.Module.Value.(string) }
+
+// A BranchStmt changes the flow of control: break, continue, pass.
+type BranchStmt struct {
+ commentsRef
+ Token Token // = BREAK | CONTINUE | PASS
+ TokenPos Position
+}
+
+func (x *BranchStmt) Span() (start, end Position) {
+ return x.TokenPos, x.TokenPos.add(x.Token.String())
+}
+
+// A ReturnStmt returns from a function.
+type ReturnStmt struct {
+ commentsRef
+ Return Position
+ Result Expr // may be nil
+}
+
+func (x *ReturnStmt) Span() (start, end Position) {
+ if x.Result == nil {
+ return x.Return, x.Return.add("return")
+ }
+ _, end = x.Result.Span()
+ return x.Return, end
+}
+
+// An Expr is a Starlark expression.
+type Expr interface {
+ Node
+ expr()
+}
+
+func (*BinaryExpr) expr() {}
+func (*CallExpr) expr() {}
+func (*Comprehension) expr() {}
+func (*CondExpr) expr() {}
+func (*DictEntry) expr() {}
+func (*DictExpr) expr() {}
+func (*DotExpr) expr() {}
+func (*Ident) expr() {}
+func (*IndexExpr) expr() {}
+func (*LambdaExpr) expr() {}
+func (*ListExpr) expr() {}
+func (*Literal) expr() {}
+func (*ParenExpr) expr() {}
+func (*SliceExpr) expr() {}
+func (*TupleExpr) expr() {}
+func (*UnaryExpr) expr() {}
+
+// An Ident represents an identifier.
+type Ident struct {
+ commentsRef
+ NamePos Position
+ Name string
+
+ Binding interface{} // a *resolver.Binding, set by resolver
+}
+
+func (x *Ident) Span() (start, end Position) {
+ return x.NamePos, x.NamePos.add(x.Name)
+}
+
+// A Literal represents a literal string or number.
+type Literal struct {
+ commentsRef
+ Token Token // = STRING | BYTES | INT | FLOAT
+ TokenPos Position
+ Raw string // uninterpreted text
+ Value interface{} // = string | int64 | *big.Int | float64
+}
+
+func (x *Literal) Span() (start, end Position) {
+ return x.TokenPos, x.TokenPos.add(x.Raw)
+}
+
+// A ParenExpr represents a parenthesized expression: (X).
+type ParenExpr struct {
+ commentsRef
+ Lparen Position
+ X Expr
+ Rparen Position
+}
+
+func (x *ParenExpr) Span() (start, end Position) {
+ return x.Lparen, x.Rparen.add(")")
+}
+
+// A CallExpr represents a function call expression: Fn(Args).
+type CallExpr struct {
+ commentsRef
+ Fn Expr
+ Lparen Position
+ Args []Expr // arg = expr | ident=expr | *expr | **expr
+ Rparen Position
+}
+
+func (x *CallExpr) Span() (start, end Position) {
+ start, _ = x.Fn.Span()
+ return start, x.Rparen.add(")")
+}
+
+// A DotExpr represents a field or method selector: X.Name.
+type DotExpr struct {
+ commentsRef
+ X Expr
+ Dot Position
+ NamePos Position
+ Name *Ident
+}
+
+func (x *DotExpr) Span() (start, end Position) {
+ start, _ = x.X.Span()
+ _, end = x.Name.Span()
+ return
+}
+
+// A Comprehension represents a list or dict comprehension:
+// [Body for ... if ...] or {Body for ...
if ...} +type Comprehension struct { + commentsRef + Curly bool // {x:y for ...} or {x for ...}, not [x for ...] + Lbrack Position + Body Expr + Clauses []Node // = *ForClause | *IfClause + Rbrack Position +} + +func (x *Comprehension) Span() (start, end Position) { + return x.Lbrack, x.Rbrack.add("]") +} + +// A ForStmt represents a loop: for Vars in X: Body. +type ForStmt struct { + commentsRef + For Position + Vars Expr // name, or tuple of names + X Expr + Body []Stmt +} + +func (x *ForStmt) Span() (start, end Position) { + _, end = x.Body[len(x.Body)-1].Span() + return x.For, end +} + +// A WhileStmt represents a while loop: while X: Body. +type WhileStmt struct { + commentsRef + While Position + Cond Expr + Body []Stmt +} + +func (x *WhileStmt) Span() (start, end Position) { + _, end = x.Body[len(x.Body)-1].Span() + return x.While, end +} + +// A ForClause represents a for clause in a list comprehension: for Vars in X. +type ForClause struct { + commentsRef + For Position + Vars Expr // name, or tuple of names + In Position + X Expr +} + +func (x *ForClause) Span() (start, end Position) { + _, end = x.X.Span() + return x.For, end +} + +// An IfClause represents an if clause in a list comprehension: if Cond. +type IfClause struct { + commentsRef + If Position + Cond Expr +} + +func (x *IfClause) Span() (start, end Position) { + _, end = x.Cond.Span() + return x.If, end +} + +// A DictExpr represents a dictionary literal: { List }. +type DictExpr struct { + commentsRef + Lbrace Position + List []Expr // all *DictEntrys + Rbrace Position +} + +func (x *DictExpr) Span() (start, end Position) { + return x.Lbrace, x.Rbrace.add("}") +} + +// A DictEntry represents a dictionary entry: Key: Value. +// Used only within a DictExpr. +type DictEntry struct { + commentsRef + Key Expr + Colon Position + Value Expr +} + +func (x *DictEntry) Span() (start, end Position) { + start, _ = x.Key.Span() + _, end = x.Value.Span() + return start, end +} + +// A LambdaExpr represents an inline function abstraction. +type LambdaExpr struct { + commentsRef + Lambda Position + Params []Expr // param = ident | ident=expr | * | *ident | **ident + Body Expr + + Function interface{} // a *resolve.Function, set by resolver +} + +func (x *LambdaExpr) Span() (start, end Position) { + _, end = x.Body.Span() + return x.Lambda, end +} + +// A ListExpr represents a list literal: [ List ]. +type ListExpr struct { + commentsRef + Lbrack Position + List []Expr + Rbrack Position +} + +func (x *ListExpr) Span() (start, end Position) { + return x.Lbrack, x.Rbrack.add("]") +} + +// CondExpr represents the conditional: X if COND else ELSE. +type CondExpr struct { + commentsRef + If Position + Cond Expr + True Expr + ElsePos Position + False Expr +} + +func (x *CondExpr) Span() (start, end Position) { + start, _ = x.True.Span() + _, end = x.False.Span() + return start, end +} + +// A TupleExpr represents a tuple literal: (List). +type TupleExpr struct { + commentsRef + Lparen Position // optional (e.g. in x, y = 0, 1), but required if List is empty + List []Expr + Rparen Position +} + +func (x *TupleExpr) Span() (start, end Position) { + if x.Lparen.IsValid() { + return x.Lparen, x.Rparen + } else { + return Start(x.List[0]), End(x.List[len(x.List)-1]) + } +} + +// A UnaryExpr represents a unary expression: Op X. +// +// As a special case, UnaryOp{Op:Star} may also represent +// the star parameter in def f(*args) or def f(*, x). 
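+//
+// For example, in def f(*, x) the bare star is parsed by parseParams
+// as &UnaryExpr{Op: STAR, X: nil}, while *args yields
+// &UnaryExpr{Op: STAR, X: &Ident{Name: "args"}}.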
+type UnaryExpr struct { + commentsRef + OpPos Position + Op Token + X Expr // may be nil if Op==STAR +} + +func (x *UnaryExpr) Span() (start, end Position) { + if x.X != nil { + _, end = x.X.Span() + } else { + end = x.OpPos.add("*") + } + return x.OpPos, end +} + +// A BinaryExpr represents a binary expression: X Op Y. +// +// As a special case, BinaryExpr{Op:EQ} may also +// represent a named argument in a call f(k=v) +// or a named parameter in a function declaration +// def f(param=default). +type BinaryExpr struct { + commentsRef + X Expr + OpPos Position + Op Token + Y Expr +} + +func (x *BinaryExpr) Span() (start, end Position) { + start, _ = x.X.Span() + _, end = x.Y.Span() + return start, end +} + +// A SliceExpr represents a slice or substring expression: X[Lo:Hi:Step]. +type SliceExpr struct { + commentsRef + X Expr + Lbrack Position + Lo, Hi, Step Expr // all optional + Rbrack Position +} + +func (x *SliceExpr) Span() (start, end Position) { + start, _ = x.X.Span() + return start, x.Rbrack +} + +// An IndexExpr represents an index expression: X[Y]. +type IndexExpr struct { + commentsRef + X Expr + Lbrack Position + Y Expr + Rbrack Position +} + +func (x *IndexExpr) Span() (start, end Position) { + start, _ = x.X.Span() + return start, x.Rbrack +} diff --git a/vendor/go.starlark.net/syntax/walk.go b/vendor/go.starlark.net/syntax/walk.go new file mode 100644 index 000000000..5e6c805c2 --- /dev/null +++ b/vendor/go.starlark.net/syntax/walk.go @@ -0,0 +1,161 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +// Walk traverses a syntax tree in depth-first order. +// It starts by calling f(n); n must not be nil. +// If f returns true, Walk calls itself +// recursively for each non-nil child of n. +// Walk then calls f(nil). +func Walk(n Node, f func(Node) bool) { + if n == nil { + panic("nil") + } + if !f(n) { + return + } + + // TODO(adonovan): opt: order cases using profile data. 
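+ // Example use, a sketch that counts the identifiers in a parsed
+ // file (here file is assumed to be a previously parsed *File):
+ //
+ //	n := 0
+ //	Walk(file, func(node Node) bool {
+ //		if _, ok := node.(*Ident); ok {
+ //			n++
+ //		}
+ //		return true
+ //	})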
+ switch n := n.(type) { + case *File: + walkStmts(n.Stmts, f) + + case *ExprStmt: + Walk(n.X, f) + + case *BranchStmt: + // no-op + + case *IfStmt: + Walk(n.Cond, f) + walkStmts(n.True, f) + walkStmts(n.False, f) + + case *AssignStmt: + Walk(n.LHS, f) + Walk(n.RHS, f) + + case *DefStmt: + Walk(n.Name, f) + for _, param := range n.Params { + Walk(param, f) + } + walkStmts(n.Body, f) + + case *ForStmt: + Walk(n.Vars, f) + Walk(n.X, f) + walkStmts(n.Body, f) + + case *ReturnStmt: + if n.Result != nil { + Walk(n.Result, f) + } + + case *LoadStmt: + Walk(n.Module, f) + for _, from := range n.From { + Walk(from, f) + } + for _, to := range n.To { + Walk(to, f) + } + + case *Ident, *Literal: + // no-op + + case *ListExpr: + for _, x := range n.List { + Walk(x, f) + } + + case *ParenExpr: + Walk(n.X, f) + + case *CondExpr: + Walk(n.Cond, f) + Walk(n.True, f) + Walk(n.False, f) + + case *IndexExpr: + Walk(n.X, f) + Walk(n.Y, f) + + case *DictEntry: + Walk(n.Key, f) + Walk(n.Value, f) + + case *SliceExpr: + Walk(n.X, f) + if n.Lo != nil { + Walk(n.Lo, f) + } + if n.Hi != nil { + Walk(n.Hi, f) + } + if n.Step != nil { + Walk(n.Step, f) + } + + case *Comprehension: + Walk(n.Body, f) + for _, clause := range n.Clauses { + Walk(clause, f) + } + + case *IfClause: + Walk(n.Cond, f) + + case *ForClause: + Walk(n.Vars, f) + Walk(n.X, f) + + case *TupleExpr: + for _, x := range n.List { + Walk(x, f) + } + + case *DictExpr: + for _, entry := range n.List { + Walk(entry, f) + } + + case *UnaryExpr: + if n.X != nil { + Walk(n.X, f) + } + + case *BinaryExpr: + Walk(n.X, f) + Walk(n.Y, f) + + case *DotExpr: + Walk(n.X, f) + Walk(n.Name, f) + + case *CallExpr: + Walk(n.Fn, f) + for _, arg := range n.Args { + Walk(arg, f) + } + + case *LambdaExpr: + for _, param := range n.Params { + Walk(param, f) + } + Walk(n.Body, f) + + default: + panic(n) + } + + f(nil) +} + +func walkStmts(stmts []Stmt, f func(Node) bool) { + for _, stmt := range stmts { + Walk(stmt, f) + } +} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go new file mode 100644 index 000000000..016e90215 --- /dev/null +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -0,0 +1,536 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cast5 implements CAST5, as defined in RFC 2144. +// +// CAST5 is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). 
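+//
+// A minimal usage sketch (assuming key is a 16-byte slice and src and
+// dst are 8-byte blocks):
+//
+//	c, err := cast5.NewCipher(key) // KeySize == 16
+//	if err != nil {
+//		// handle the error
+//	}
+//	c.Encrypt(dst, src) // encrypts exactly one 8-byte block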
+package cast5 + +import ( + "errors" + "math/bits" +) + +const BlockSize = 8 +const KeySize = 16 + +type Cipher struct { + masking [16]uint32 + rotate [16]uint8 +} + +func NewCipher(key []byte) (c *Cipher, err error) { + if len(key) != KeySize { + return nil, errors.New("CAST5: keys must be 16 bytes") + } + + c = new(Cipher) + c.keySchedule(key) + return +} + +func (c *Cipher) BlockSize() int { + return BlockSize +} + +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +type keyScheduleA [4][7]uint8 +type keyScheduleB [4][5]uint8 + +// keyScheduleRound contains the magic values for a round of the key schedule. +// The keyScheduleA deals with the lines like: +// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] +// Conceptually, both x and z are in the same array, x first. The first +// element describes which word of this array gets written to and the +// second, which word gets read. So, for the line above, it's "4, 0", because +// it's writing to the first word of z, which, being after x, is word 4, and +// reading from the first word of x: word 0. +// +// Next are the indexes into the S-boxes. Now the array is treated as bytes. So +// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear +// that it's z that we're indexing. +// +// keyScheduleB deals with lines like: +// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] +// "K1" is ignored because key words are always written in order. So the five +// elements are the S-box indexes. They use the same form as in keyScheduleA, +// above. + +type keyScheduleRound struct{} +type keySchedule []keyScheduleRound + +var schedule = []struct { + a keyScheduleA + b keyScheduleB +}{ + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, + {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, + {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, + {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {3, 2, 0xc, 0xd, 8}, + {1, 0, 0xe, 0xf, 0xd}, + {7, 6, 8, 9, 3}, + {5, 4, 0xa, 0xb, 7}, + }, + }, + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, + {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, + {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, + {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {8, 9, 7, 6, 3}, + {0xa, 0xb, 5, 4, 7}, + {0xc, 0xd, 3, 2, 8}, + {0xe, 0xf, 1, 0, 0xd}, + }, + }, +} + +func (c *Cipher) keySchedule(in []byte) { + var t [8]uint32 + var k [32]uint32 + + for i := 0; i < 4; i++ { + j := i * 4 + t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) + } + + x := []byte{6, 7, 4, 5} + ki := 0 + + for half := 0; half < 2; half++ { + for _, round := range schedule { + for j := 0; j < 4; j++ { + var a [7]uint8 + copy(a[:], round.a[j][:]) + w := t[a[1]] + w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] + w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] + w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] + w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] + w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] + t[a[0]] = w + } + + for j := 0; j < 4; j++ { + var b [5]uint8 + copy(b[:], round.b[j][:]) + w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] + w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] + w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] + w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] + w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] + k[ki] = w + ki++ + } + } + } + + for i := 0; i < 16; i++ { + c.masking[i] = k[i] + c.rotate[i] = uint8(k[16+i] & 0x1f) + } +} + +// These are the three 'f' functions. See RFC 2144, section 2.2. 
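+// Each takes a data half d, a masking key m, and a rotation r; they differ
+// only in how m and d are combined (+, ^, -) and in how the four S-box
+// lookups are folded together. For f1 (a "type 1" round in RFC 2144 terms):
+//
+//	I  = (m + d) <<< r
+//	f1 = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
+//
+// where Ia..Id are the bytes of I from most to least significant.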
+func f1(d, m uint32, r uint8) uint32 { + t := m + d + I := bits.RotateLeft32(t, int(r)) + return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] +} + +func f2(d, m uint32, r uint8) uint32 { + t := m ^ d + I := bits.RotateLeft32(t, int(r)) + return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] +} + +func f3(d, m uint32, r uint8) uint32 { + t := m - d + I := bits.RotateLeft32(t, int(r)) + return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] +} + +var sBox = [8][256]uint32{ + { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 
0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, + }, + { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, + }, + { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 
0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, + }, + { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 
0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, + }, + { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 
0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, + }, + { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 
0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, + }, + { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 
0x4eeb8476, 0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, + }, + { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 
0xb48a2465, 0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, + }, +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go new file mode 100644 index 000000000..e664d127c --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -0,0 +1,233 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. +package armor + +import ( + "bufio" + "bytes" + "encoding/base64" + "io" + + "golang.org/x/crypto/openpgp/errors" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// +// where Headers is a possibly empty sequence of Key: Value lines. +// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. 
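+// (In the Read method below, the checksum line is recognized as exactly five
+// bytes: '=' followed by four base64 characters that decode to the three-byte
+// CRC24, appearing immediately before the "-----END" line.)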
+type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + l.crcSet = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask { + return 0, ArmorCorrupt + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
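+//
+// A minimal usage sketch (illustrative, not part of the upstream source;
+// armoredText is an assumed string holding an armored block):
+//
+//	block, err := armor.Decode(strings.NewReader(armoredText))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(block.Type)             // e.g. "PGP SIGNATURE"
+//	body, err := io.ReadAll(block.Body) // the CRC is checked as EOF is reached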
+func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go new file mode 100644 index 000000000..5b6e16c19 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/encode.go @@ -0,0 +1,161 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. +type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. 
+// +// It's built into a stack of io.Writers: +// +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go new file mode 100644 index 000000000..e601e389f --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/canonical_text.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import "hash" + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go new file mode 100644 index 000000000..cea48efdc --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go @@ -0,0 +1,424 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clearsign generates and processes OpenPGP, clear-signed data. See +// RFC 4880, section 7. 
+// +// Clearsigned messages are cryptographically signed, but the contents of the +// message are kept in plaintext so that it can be read without special tools. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. +package clearsign + +import ( + "bufio" + "bytes" + "crypto" + "fmt" + "hash" + "io" + "net/textproto" + "strconv" + "strings" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// A Block represents a clearsigned message. A signature on a Block can +// be checked by passing Bytes into openpgp.CheckDetachedSignature. +type Block struct { + Headers textproto.MIMEHeader // Optional unverified Hash headers + Plaintext []byte // The original message text + Bytes []byte // The signed message + ArmoredSignature *armor.Block // The signature block +} + +// start is the marker which denotes the beginning of a clearsigned message. +var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----") + +// dashEscape is prefixed to any lines that begin with a hyphen so that they +// can't be confused with endText. +var dashEscape = []byte("- ") + +// endText is a marker which denotes the end of the message and the start of +// an armored signature. +var endText = []byte("-----BEGIN PGP SIGNATURE-----") + +// end is a marker which denotes the end of the armored signature. +var end = []byte("\n-----END PGP SIGNATURE-----") + +var crlf = []byte("\r\n") +var lf = byte('\n') + +// getLine returns the first \r\n or \n delineated line from the given byte +// array. The line does not include the \r\n or \n. The remainder of the byte +// array (also not including the new line bytes) is also returned and this will +// always be smaller than the original argument. +func getLine(data []byte) (line, rest []byte) { + i := bytes.Index(data, []byte{'\n'}) + var j int + if i < 0 { + i = len(data) + j = i + } else { + j = i + 1 + if i > 0 && data[i-1] == '\r' { + i-- + } + } + return data[0:i], data[j:] +} + +// Decode finds the first clearsigned message in data and returns it, as well as +// the suffix of data which remains after the message. Any prefix data is +// discarded. +// +// If no message is found, or if the message is invalid, Decode returns nil and +// the whole data slice. The only allowed header type is Hash, and it is not +// verified against the signature hash. +func Decode(data []byte) (b *Block, rest []byte) { + // start begins with a newline. However, at the very beginning of + // the byte array, we'll accept the start string without it. + rest = data + if bytes.HasPrefix(data, start[1:]) { + rest = rest[len(start)-1:] + } else if i := bytes.Index(data, start); i >= 0 { + rest = rest[i+len(start):] + } else { + return nil, data + } + + // Consume the start line and check it does not have a suffix. + suffix, rest := getLine(rest) + if len(suffix) != 0 { + return nil, data + } + + var line []byte + b = &Block{ + Headers: make(textproto.MIMEHeader), + } + + // Next come a series of header lines. + for { + // This loop terminates because getLine's second result is + // always smaller than its argument. + if len(rest) == 0 { + return nil, data + } + // An empty line marks the end of the headers. 
+ if line, rest = getLine(rest); len(line) == 0 { + break + } + + // Reject headers with control or Unicode characters. + if i := bytes.IndexFunc(line, func(r rune) bool { + return r < 0x20 || r > 0x7e + }); i != -1 { + return nil, data + } + + i := bytes.Index(line, []byte{':'}) + if i == -1 { + return nil, data + } + + key, val := string(line[0:i]), string(line[i+1:]) + key = strings.TrimSpace(key) + if key != "Hash" { + return nil, data + } + val = strings.TrimSpace(val) + b.Headers.Add(key, val) + } + + firstLine := true + for { + start := rest + + line, rest = getLine(rest) + if len(line) == 0 && len(rest) == 0 { + // No armored data was found, so this isn't a complete message. + return nil, data + } + if bytes.Equal(line, endText) { + // Back up to the start of the line because armor expects to see the + // header line. + rest = start + break + } + + // The final CRLF isn't included in the hash so we don't write it until + // we've seen the next line. + if firstLine { + firstLine = false + } else { + b.Bytes = append(b.Bytes, crlf...) + } + + if bytes.HasPrefix(line, dashEscape) { + line = line[2:] + } + line = bytes.TrimRight(line, " \t") + b.Bytes = append(b.Bytes, line...) + + b.Plaintext = append(b.Plaintext, line...) + b.Plaintext = append(b.Plaintext, lf) + } + + // We want to find the extent of the armored data (including any newlines at + // the end). + i := bytes.Index(rest, end) + if i == -1 { + return nil, data + } + i += len(end) + for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') { + i++ + } + armored := rest[:i] + rest = rest[i:] + + var err error + b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored)) + if err != nil { + return nil, data + } + + return b, rest +} + +// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed +// message. The clear-signed message is written to buffered and a hash, suitable +// for signing, is maintained in h. +// +// When closed, an armored signature is created and written to complete the +// message. +type dashEscaper struct { + buffered *bufio.Writer + hashers []hash.Hash // one per key in privateKeys + hashType crypto.Hash + toHash io.Writer // writes to all the hashes in hashers + + atBeginningOfLine bool + isFirstLine bool + + whitespace []byte + byteBuf []byte // a one byte buffer to save allocations + + privateKeys []*packet.PrivateKey + config *packet.Config +} + +func (d *dashEscaper) Write(data []byte) (n int, err error) { + for _, b := range data { + d.byteBuf[0] = b + + if d.atBeginningOfLine { + // The final CRLF isn't included in the hash so we have to wait + // until this point (the start of the next line) before writing it. + if !d.isFirstLine { + d.toHash.Write(crlf) + } + d.isFirstLine = false + } + + // Any whitespace at the end of the line has to be removed so we + // buffer it until we find out whether there's more on this line. + if b == ' ' || b == '\t' || b == '\r' { + d.whitespace = append(d.whitespace, b) + d.atBeginningOfLine = false + continue + } + + if d.atBeginningOfLine { + // At the beginning of a line, hyphens have to be escaped. + if b == '-' { + // The signature isn't calculated over the dash-escaped text so + // the escape is only written to buffered. + if _, err = d.buffered.Write(dashEscape); err != nil { + return + } + d.toHash.Write(d.byteBuf) + d.atBeginningOfLine = false + } else if b == '\n' { + // Nothing to do because we delay writing CRLF to the hash. 
+ } else { + d.toHash.Write(d.byteBuf) + d.atBeginningOfLine = false + } + if err = d.buffered.WriteByte(b); err != nil { + return + } + } else { + if b == '\n' { + // We got a raw \n. Drop any trailing whitespace and write a + // CRLF. + d.whitespace = d.whitespace[:0] + // We delay writing CRLF to the hash until the start of the + // next line. + if err = d.buffered.WriteByte(b); err != nil { + return + } + d.atBeginningOfLine = true + } else { + // Any buffered whitespace wasn't at the end of the line so + // we need to write it out. + if len(d.whitespace) > 0 { + d.toHash.Write(d.whitespace) + if _, err = d.buffered.Write(d.whitespace); err != nil { + return + } + d.whitespace = d.whitespace[:0] + } + d.toHash.Write(d.byteBuf) + if err = d.buffered.WriteByte(b); err != nil { + return + } + } + } + } + + n = len(data) + return +} + +func (d *dashEscaper) Close() (err error) { + if !d.atBeginningOfLine { + if err = d.buffered.WriteByte(lf); err != nil { + return + } + } + + out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil) + if err != nil { + return + } + + t := d.config.Now() + for i, k := range d.privateKeys { + sig := new(packet.Signature) + sig.SigType = packet.SigTypeText + sig.PubKeyAlgo = k.PubKeyAlgo + sig.Hash = d.hashType + sig.CreationTime = t + sig.IssuerKeyId = &k.KeyId + + if err = sig.Sign(d.hashers[i], k, d.config); err != nil { + return + } + if err = sig.Serialize(out); err != nil { + return + } + } + + if err = out.Close(); err != nil { + return + } + if err = d.buffered.Flush(); err != nil { + return + } + return +} + +// Encode returns a WriteCloser which will clear-sign a message with privateKey +// and write it to w. If config is nil, sensible defaults are used. +func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + return EncodeMulti(w, []*packet.PrivateKey{privateKey}, config) +} + +// EncodeMulti returns a WriteCloser which will clear-sign a message with all the +// private keys indicated and write it to w. If config is nil, sensible defaults +// are used. +func EncodeMulti(w io.Writer, privateKeys []*packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + for _, k := range privateKeys { + if k.Encrypted { + return nil, errors.InvalidArgumentError(fmt.Sprintf("signing key %s is encrypted", k.KeyIdString())) + } + } + + hashType := config.Hash() + name := nameOfHash(hashType) + if len(name) == 0 { + return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType))) + } + + if !hashType.Available() { + return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType))) + } + var hashers []hash.Hash + var ws []io.Writer + for range privateKeys { + h := hashType.New() + hashers = append(hashers, h) + ws = append(ws, h) + } + toHash := io.MultiWriter(ws...) + + buffered := bufio.NewWriter(w) + // start has a \n at the beginning that we don't want here. 
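+	// The writes that follow emit the clearsign header, i.e. (for SHA-256):
+	//
+	//	-----BEGIN PGP SIGNED MESSAGE-----
+	//	Hash: SHA256
+	//
+	// including the blank line that separates the headers from the body.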
+ if _, err = buffered.Write(start[1:]); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if _, err = buffered.WriteString("Hash: "); err != nil { + return + } + if _, err = buffered.WriteString(name); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + + plaintext = &dashEscaper{ + buffered: buffered, + hashers: hashers, + hashType: hashType, + toHash: toHash, + + atBeginningOfLine: true, + isFirstLine: true, + + byteBuf: make([]byte, 1), + + privateKeys: privateKeys, + config: config, + } + + return +} + +// nameOfHash returns the OpenPGP name for the given hash, or the empty string +// if the name isn't known. See RFC 4880, section 9.4. +func nameOfHash(h crypto.Hash) string { + switch h { + case crypto.MD5: + return "MD5" + case crypto.SHA1: + return "SHA1" + case crypto.RIPEMD160: + return "RIPEMD160" + case crypto.SHA224: + return "SHA224" + case crypto.SHA256: + return "SHA256" + case crypto.SHA384: + return "SHA384" + case crypto.SHA512: + return "SHA512" + } + return "" +} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 000000000..f922bdbca --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,130 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. +// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +// +// Deprecated: this package was only provided to support ElGamal encryption in +// OpenPGP. The golang.org/x/crypto/openpgp package is now deprecated (see +// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has +// compatibility and security issues (see https://eprint.iacr.org/2021/923). +// Moreover, this package doesn't protect against side-channel attacks. +package elgamal + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. 
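+//
+// A round-trip sketch (illustrative, not part of the upstream source; priv
+// is an assumed, already-initialized *PrivateKey):
+//
+//	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("hi"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	msg, err := elgamal.Decrypt(priv, c1, c2) // msg is []byte("hi") again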
+func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
+	pLen := (pub.P.BitLen() + 7) / 8
+	if len(msg) > pLen-11 {
+		err = errors.New("elgamal: message too long")
+		return
+	}
+
+	// EM = 0x02 || PS || 0x00 || M
+	em := make([]byte, pLen-1)
+	em[0] = 2
+	ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
+	err = nonZeroRandomBytes(ps, random)
+	if err != nil {
+		return
+	}
+	em[len(em)-len(msg)-1] = 0
+	copy(mm, msg)
+
+	m := new(big.Int).SetBytes(em)
+
+	k, err := rand.Int(random, pub.P)
+	if err != nil {
+		return
+	}
+
+	c1 = new(big.Int).Exp(pub.G, k, pub.P)
+	s := new(big.Int).Exp(pub.Y, k, pub.P)
+	c2 = s.Mul(s, m)
+	c2.Mod(c2, pub.P)
+
+	return
+}
+
+// Decrypt takes two integers, resulting from an ElGamal encryption, and
+// returns the plaintext of the message. An error can result only if the
+// ciphertext is invalid. Users should keep in mind that this is a padding
+// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
+// be used to break the cryptosystem. See “Chosen Ciphertext Attacks
+// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel
+// Bleichenbacher, Advances in Cryptology (Crypto '98).
+func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
+	s := new(big.Int).Exp(c1, priv.X, priv.P)
+	if s.ModInverse(s, priv.P) == nil {
+		return nil, errors.New("elgamal: invalid private key")
+	}
+	s.Mul(s, c2)
+	s.Mod(s, priv.P)
+	em := s.Bytes()
+
+	firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
+
+	// The remainder of the plaintext must be a string of non-zero random
+	// octets, followed by a 0, followed by the message.
+	//   lookingForIndex: 1 iff we are still looking for the zero.
+	//   index: the offset of the first zero byte.
+	var lookingForIndex, index int
+	lookingForIndex = 1
+
+	for i := 1; i < len(em); i++ {
+		equals0 := subtle.ConstantTimeByteEq(em[i], 0)
+		index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
+		lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
+	}
+
+	if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
+		return nil, errors.New("elgamal: decryption error")
+	}
+	return em[index+1:], nil
+}
+
+// nonZeroRandomBytes fills the given slice with non-zero random octets.
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
+	_, err = io.ReadFull(rand, s)
+	if err != nil {
+		return
+	}
+
+	for i := 0; i < len(s); i++ {
+		for s[i] == 0 {
+			_, err = io.ReadFull(rand, s[i:i+1])
+			if err != nil {
+				return
+			}
+		}
+	}
+
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
new file mode 100644
index 000000000..a32874947
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go
@@ -0,0 +1,78 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errors contains common error types for the OpenPGP packages.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
+package errors
+
+import (
+	"strconv"
+)
+
+// A StructuralError is returned when OpenPGP data is found to be syntactically
+// invalid.
+type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go new file mode 100644 index 000000000..d62f787e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/keys.go @@ -0,0 +1,693 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto/rsa" + "io" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) <email@example.com>" + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey.
+type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + // KeysById returns the set of keys that have the given key id. + KeysById(id uint64) []Key + // KeysByIdUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + KeysByIdUsage(id uint64, requiredUsage byte) []Key + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// primaryIdentity returns the Identity marked as primary or the first identity +// if none are so marked. +func (e *Entity) primaryIdentity() *Identity { + var firstIdentity *Identity + for _, ident := range e.Identities { + if firstIdentity == nil { + firstIdentity = ident + } + if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + return ident + } + } + return firstIdentity +} + +// encryptionKey returns the best candidate Key for encrypting a message to the +// given Entity. +func (e *Entity) encryptionKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + // Iterate the keys to find the newest key + var maxTime time.Time + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagEncryptCommunications && + subkey.PublicKey.PubKeyAlgo.CanEncrypt() && + !subkey.Sig.KeyExpired(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + } + + // If we don't have any candidate subkeys for encryption and + // the primary key doesn't have any usage metadata then we + // assume that the primary key is ok. Or, if the primary key is + // marked as ok to encrypt to, then we can obviously use it. + i := e.primaryIdentity() + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + } + + // This Entity appears to be signing only. + return Key{}, false +} + +// signingKey returns the best candidate Key for signing a message with this +// Entity. +func (e *Entity) signingKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagSign && + subkey.PublicKey.PubKeyAlgo.CanSign() && + !subkey.Sig.KeyExpired(now) { + candidateSubkey = i + break + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + } + + // If we have no candidate subkey then we assume that it's ok to sign + // with the primary key. + i := e.primaryIdentity() + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + } + + return Key{}, false +} + +// An EntityList contains one or more Entities. +type EntityList []*Entity + +// KeysById returns the set of keys that have the given key id.
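+// +// For example (editor's sketch; el is an EntityList and the argument a +// 64-bit OpenPGP key ID, e.g. one taken from a signature's IssuerKeyId): +// +// keys := el.KeysById(0x1234567890abcdef) +// for _, k := range keys { +// // k.PublicKey, k.PrivateKey and k.SelfSignature describe the match +// }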
+func (el EntityList) KeysById(id uint64) (keys []Key) { + for _, e := range el { + if e.PrimaryKey.KeyId == id { + var selfSig *packet.Signature + for _, ident := range e.Identities { + if selfSig == nil { + selfSig = ident.SelfSignature + } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + selfSig = ident.SelfSignature + break + } + } + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + } + + for _, subKey := range e.Subkeys { + if subKey.PublicKey.KeyId == id { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// KeysByIdUsage returns the set of keys with the given id that also meet +// the key usage given by requiredUsage. The requiredUsage is expressed as +// the bitwise-OR of packet.KeyFlag* values. +func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id) { + if len(key.Entity.Revocations) > 0 { + continue + } + + if key.SelfSignature.RevocationReason != nil { + continue + } + + if key.SelfSignature.FlagsValid && requiredUsage != 0 { + var usage byte + if key.SelfSignature.FlagCertify { + usage |= packet.KeyFlagCertify + } + if key.SelfSignature.FlagSign { + usage |= packet.KeyFlagSign + } + if key.SelfSignature.FlagEncryptCommunications { + usage |= packet.KeyFlagEncryptCommunications + } + if key.SelfSignature.FlagEncryptStorage { + usage |= packet.KeyFlagEncryptStorage + } + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. 
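+// +// A hedged usage sketch (f is an assumed io.Reader over a binary keyring, +// such as an exported GnuPG pubring; ReadArmoredKeyRing is the equivalent +// for ASCII-armored input): +// +// el, err := openpgp.ReadKeyRing(f) +// if err != nil { +// // no usable key was found +// } +// for _, e := range el { +// fmt.Printf("%X\n", e.PrimaryKey.KeyId) +// }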
+func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). + } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. 
+ return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. + identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + identity.SelfSignature = sig + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Sig = sig + case packet.SigTypeSubkeyBinding: + + if shouldReplaceSubkeySig(subKey.Sig, sig) { + subKey.Sig = sig + } + } + } + + if subKey.Sig == nil { + return errors.StructuralError("subkey packet not followed by signature") + } + + e.Subkeys = append(e.Subkeys, subKey) + + return nil +} + +func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { + if potentialNewSig == nil { + return false + } + + if existingSig == nil { + return true + } + + if existingSig.SigType == packet.SigTypeSubkeyRevocation { + return false // never override a revocation signature + } + + return potentialNewSig.CreationTime.After(existingSig.CreationTime) +} + +const defaultRSAKeyBits = 2048 + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. 
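+// +// For instance (editor's illustration; with a nil config this generates a +// 2048-bit RSA signing key plus an encryption subkey): +// +// e, err := openpgp.NewEntity("Alice Example", "test key", "alice@example.com", nil) +// if err != nil { +// // key generation failed +// }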
+func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + creationTime := config.Now() + + bits := defaultRSAKeyBits + if config != nil && config.RSABits != 0 { + bits = config.RSABits + } + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + signingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + + e := &Entity{ + PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv), + Identities: make(map[string]*Identity), + } + isPrimaryId := true + e.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: &packet.Signature{ + CreationTime: creationTime, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return nil, err + } + + // If the user passes in a DefaultHash via packet.Config, + // set the PreferredHash for the SelfSignature. + if config != nil && config.DefaultHash != 0 { + e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} + } + + // Likewise for DefaultCipher. + if config != nil && config.DefaultCipher != 0 { + e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} + } + + e.Subkeys = make([]Subkey, 1) + e.Subkeys[0] = Subkey{ + PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv), + Sig: &packet.Signature{ + CreationTime: creationTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + e.Subkeys[0].PublicKey.IsSubkey = true + e.Subkeys[0].PrivateKey.IsSubkey = true + err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) + if err != nil { + return nil, err + } + return e, nil +} + +// SerializePrivate serializes an Entity, including private key material, but +// excluding signatures from other entities, to the given Writer. +// Identities and subkeys are re-signed in case they changed since NewEntity. +// If config is nil, sensible defaults will be used.
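+// +// A small sketch (assumes e was produced by NewEntity; ASCII armoring is +// optional and omitted here): +// +// var buf bytes.Buffer +// if err := e.SerializePrivate(&buf, nil); err != nil { +// // handle error +// } +// // buf now holds the binary private keyring for e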
+func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go new file mode 100644 index 000000000..353f94524 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "golang.org/x/crypto/openpgp/errors" + "io" + "strconv" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
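+// +// An illustrative read path (editor's sketch; p is assumed to come from +// Read on a stream positioned at a compressed packet): +// +// if c, ok := p.(*Compressed); ok { +// inner, err := Read(c.Body) // first packet inside the compressed data +// ... +// }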
+type Compressed struct { + Body io.Reader +} + +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression +) + +// CompressionConfig contains compressor configuration settings. +type CompressionConfig struct { + // Level is the compression level to use. It must be set to + // between -1 and 9, with -1 causing the compressor to use the + // default compression level, 0 causing the compressor to use + // no compression and 1 to 9 representing increasing (better, + // slower) compression levels. If Level is less than -1 or + // more than 9, a non-nil error will be returned during + // encryption. See the constants above for convenient common + // settings for Level. + Level int +} + +func (c *Compressed) parse(r io.Reader) error { + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case 1: + c.Body = flate.NewReader(r) + case 2: + c.Body, err = zlib.NewReader(r) + case 3: + c.Body = bzip2.NewReader(r) + default: + err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) + } + + return err +} + +// compressedWriteCloser represents the serialized compression stream +// header and the compressor. Its Close() method ensures that both the +// compressor and serialized stream header are closed. Its Write() +// method writes to the compressor. +type compressedWriteCloser struct { + sh io.Closer // Stream Header + c io.WriteCloser // Compressor +} + +func (cwc compressedWriteCloser) Write(p []byte) (int, error) { + return cwc.c.Write(p) +} + +func (cwc compressedWriteCloser) Close() (err error) { + err = cwc.c.Close() + if err != nil { + return err + } + + return cwc.sh.Close() +} + +// SerializeCompressed serializes a compressed data packet to w and +// returns a WriteCloser to which the literal data packets themselves +// can be written and which MUST be closed on completion. If cc is +// nil, sensible defaults will be used to configure the compression +// algorithm. +func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { + compressed, err := serializeStreamHeader(w, packetTypeCompressed) + if err != nil { + return + } + + _, err = compressed.Write([]byte{uint8(algo)}) + if err != nil { + return + } + + level := DefaultCompression + if cc != nil { + level = cc.Level + } + + var compressor io.WriteCloser + switch algo { + case CompressionZIP: + compressor, err = flate.NewWriter(compressed, level) + case CompressionZLIB: + compressor, err = zlib.NewWriterLevel(compressed, level) + default: + s := strconv.Itoa(int(algo)) + err = errors.UnsupportedError("Unsupported compression algorithm: " + s) + } + if err != nil { + return + } + + literaldata = compressedWriteCloser{compressed, compressor} + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go new file mode 100644 index 000000000..c76eecc96 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/config.go @@ -0,0 +1,91 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/rand" + "io" + "time" +) + +// Config collects a number of parameters along with sensible defaults. +// A nil *Config is valid and results in all default values.
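+// +// A non-nil Config only needs the fields that should differ from those +// defaults, e.g. (editor's sketch): +// +// cfg := &Config{ +// DefaultHash: crypto.SHA256, +// DefaultCipher: CipherAES256, +// RSABits: 4096, +// }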
+type Config struct { + // Rand provides the source of entropy. + // If nil, the crypto/rand Reader is used. + Rand io.Reader + // DefaultHash is the default hash function to be used. + // If zero, SHA-256 is used. + DefaultHash crypto.Hash + // DefaultCipher is the cipher to be used. + // If zero, AES-128 is used. + DefaultCipher CipherFunction + // Time returns the current time as the number of seconds since the + // epoch. If Time is nil, time.Now is used. + Time func() time.Time + // DefaultCompressionAlgo is the compression algorithm to be + // applied to the plaintext before encryption. If zero, no + // compression is done. + DefaultCompressionAlgo CompressionAlgo + // CompressionConfig configures the compression settings. + CompressionConfig *CompressionConfig + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 1024 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 65536 is used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + S2KCount int + // RSABits is the number of bits in new RSA keys made with NewEntity. + // If zero, then 2048 bit keys are created. + RSABits int +} + +func (c *Config) Random() io.Reader { + if c == nil || c.Rand == nil { + return rand.Reader + } + return c.Rand +} + +func (c *Config) Hash() crypto.Hash { + if c == nil || uint(c.DefaultHash) == 0 { + return crypto.SHA256 + } + return c.DefaultHash +} + +func (c *Config) Cipher() CipherFunction { + if c == nil || uint8(c.DefaultCipher) == 0 { + return CipherAES128 + } + return c.DefaultCipher +} + +func (c *Config) Now() time.Time { + if c == nil || c.Time == nil { + return time.Now() + } + return c.Time() +} + +func (c *Config) Compression() CompressionAlgo { + if c == nil { + return CompressionNone + } + return c.DefaultCompressionAlgo +} + +func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 000000000..6d7639722 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,208 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/rsa" + "encoding/binary" + "io" + "math/big" + "strconv" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1.
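+// +// Typical use (hedged sketch; ek is assumed to be an *EncryptedKey returned +// by Read, and priv the matching, already-decrypted *PrivateKey): +// +// if err := ek.Decrypt(priv, nil); err == nil { +// // ek.Key and ek.CipherFunc now describe the session key +// }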
+type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 parsedMPI +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != encryptedKeyVersion { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } + e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) + if err != nil { + return + } + } + _, err = consumeAll(r) + return +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. +// If config is nil, sensible defaults will be used. +func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + // Supports both *rsa.PrivateKey and crypto.Decrypter + k := priv.PrivateKey.(crypto.Decrypter) + b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + default: + err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + if err != nil { + return err + } + + e.CipherFunc = CipherFunction(b[0]) + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + checksum := checksumKeyMaterial(e.Key) + if checksum != expectedChecksum { + return errors.StructuralError("EncryptedKey checksum incorrect") + } + + return nil +} + +// Serialize writes the encrypted key packet, e, to w.
+func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + writeMPIs(w, e.encryptedMPI1) + case PubKeyAlgoElGamal: + writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) + default: + panic("internal error") + } + + return nil +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + return writeMPI(w, 8*uint16(len(cipherText)), cipherText) +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + err = writeBig(w, c1) + if err != nil { + return err + } + return writeBig(w, c2) +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go new file mode 100644 
index 000000000..1a9ec6e51 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/literal.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" +) + +// LiteralData represents an encrypted file. See RFC 4880, section 5.9. +type LiteralData struct { + IsBinary bool + FileName string + Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. + Body io.Reader +} + +// ForEyesOnly returns whether the contents of the LiteralData have been marked +// as especially sensitive. +func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.IsBinary = buf[0] == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} + +// SerializeLiteral serializes a literal data packet to w and returns a +// WriteCloser to which the data itself can be written and which MUST be closed +// on completion. The fileName is truncated to 255 bytes. +func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { + var buf [4]byte + buf[0] = 't' + if isBinary { + buf[0] = 'b' + } + if len(fileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + + inner, err := serializeStreamHeader(w, packetTypeLiteralData) + if err != nil { + return + } + + _, err = inner.Write(buf[:2]) + if err != nil { + return + } + _, err = inner.Write([]byte(fileName)) + if err != nil { + return + } + binary.BigEndian.PutUint32(buf[:], time) + _, err = inner.Write(buf[:]) + if err != nil { + return + } + + plaintext = inner + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go new file mode 100644 index 000000000..ce2a33a54 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go @@ -0,0 +1,143 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. 
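+// +// A usage sketch (assumes key is a valid AES key and that, per OpenPGP, the +// returned prefix is written out before any ciphertext): +// +// block, _ := aes.NewCipher(key) +// randData := make([]byte, block.BlockSize()) +// io.ReadFull(rand.Reader, randData) +// stream, prefix := NewOCFBEncrypter(block, randData, OCFBResync) +// w.Write(prefix) +// stream.XORKeyStream(ciphertext, plaintext)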
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. If an incorrect key is detected then nil is returned. On +// successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if prefixCopy[blockSize-2] != prefixCopy[blockSize] || + prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { + return nil + } + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 000000000..171350339 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. 
+type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go new file mode 100644 index 000000000..398447731 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + + "golang.org/x/crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = io.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. +type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. 
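+// +// Callers typically loop until io.EOF (editor's sketch): +// +// or := NewOpaqueReader(r) +// for { +// op, err := or.Next() +// if err == io.EOF { +// break +// } else if err != nil { +// // handle structural error +// } +// // inspect op.Tag and op.Contents, or call op.Parse() +// }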
+func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go new file mode 100644 index 000000000..a84a1a214 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -0,0 +1,590 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +// +// Deprecated: this package is unmaintained except for security fixes. New +// applications should consider a more focused, modern alternative to OpenPGP +// for their specific task. If you are required to interoperate with OpenPGP +// systems and need a maintained package, consider a community fork. +// See https://golang.org/issue/44226. 
+package packet + +import ( + "bufio" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rsa" + "io" + "math/big" + "math/bits" + + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/openpgp/errors" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + lengthByte [1]byte + sentFirst bool + buf []byte +} + +// RFC 4880 4.2.2.4: the first partial length MUST be at least 512 octets long. +const minFirstPartialWrite = 512 + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + off := 0 + if !w.sentFirst { + if len(w.buf) > 0 || len(p) < minFirstPartialWrite { + off = len(w.buf) + w.buf = append(w.buf, p...) + if len(w.buf) < minFirstPartialWrite { + return len(p), nil + } + p = w.buf + w.buf = nil + } + w.sentFirst = true + } + + power := uint8(30) + for len(p) > 0 { + l := 1 << power + if len(p) < l { + power = uint8(bits.Len32(uint32(len(p)))) - 1 + l = 1 << power + } + w.lengthByte[0] = 224 + power + _, err = w.w.Write(w.lengthByte[:]) + if err == nil { + var m int + m, err = w.w.Write(p[:l]) + n += m + } + if err != nil { + if n < off { + return 0, err + } + return n - off, err + } + p = p[l:] + } + return n - off, nil +} + +func (w *partialLengthWriter) Close() error { + if len(w.buf) > 0 { + // In this case we can't send a 512 byte packet. + // Just send what we have. 
+ p := w.buf + w.sentFirst = true + w.buf = nil + if _, err := w.Write(p); err != nil { + return err + } + } + + w.lengthByte[0] = 0 + _, err := w.w.Write(w.lengthByte[:]) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. +func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + var buf [6]byte + var n int + + buf[0] = 0x80 | 0x40 | byte(ptype) + if length < 192 { + buf[1] = byte(length) + n = 2 + } else if length < 8384 { + length -= 192 + buf[1] = 192 + byte(length>>8) + buf[2] = byte(length) + n = 3 + } else { + buf[1] = 255 + buf[2] = byte(length >> 24) + buf[3] = byte(length >> 16) + buf[4] = byte(length >> 8) + buf[5] = byte(length) + n = 6 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 +) + +// peekVersion detects the version of a public key packet about to +// be read. A bufio.Reader at the original position of the io.Reader +// is returned. +func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { + bufr = bufio.NewReader(r) + var verBuf []byte + if verBuf, err = bufr.Peek(1); err != nil { + return + } + ver = verBuf[0] + return +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + var version byte + // Detect signature version + if contents, version, err = peekVersion(contents); err != nil { + return + } + if version < 4 { + p = new(SignatureV3) + } else { + p = new(Signature) + } + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + var version byte + if contents, version, err = peekVersion(contents); err != nil { + return + } + isSubkey := tag == packetTypePublicSubkey + if version < 4 { + p = &PublicKeyV3{IsSubkey: isSubkey} + } else { + p = &PublicKey{IsSubkey: isSubkey} + } + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + + // Deprecated in RFC 4880, Section 13.5. Use key flags instead. + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. +func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction uint8 + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case Cipher3DES: + return 24 + case CipherCAST5: + return cast5.KeySize + case CipherAES128: + return 16 + case CipherAES192: + return 24 + case CipherAES256: + return 32 + } + return 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + switch cipher { + case Cipher3DES: + return des.BlockSize + case CipherCAST5: + return 8 + case CipherAES128, CipherAES192, CipherAES256: + return 16 + } + return 0 +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + switch cipher { + case Cipher3DES: + block, _ = des.NewTripleDESCipher(key) + case CipherCAST5: + block, _ = cast5.NewCipher(key) + case CipherAES128, CipherAES192, CipherAES256: + block, _ = aes.NewCipher(key) + } + return +} + +// readMPI reads a big integer from r. The bit length returned is the bit +// length that was specified in r. This is preserved so that the integer can be +// reserialized exactly. +func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { + var buf [2]byte + _, err = readFull(r, buf[0:]) + if err != nil { + return + } + bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + numBytes := (int(bitLength) + 7) / 8 + mpi = make([]byte, numBytes) + _, err = readFull(r, mpi) + // According to RFC 4880 3.2. we should check that the MPI has no leading + // zeroes (at least when not an encrypted MPI?), but this implementation + // does generate leading zeroes, so we keep accepting them. + return +} + +// writeMPI serializes a big integer to w. +func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { + // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. + // Implementations seem to be tolerant of them, and stripping them would + // make it complex to guarantee matching re-serialization. 
+	_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
+	if err == nil {
+		_, err = w.Write(mpiBytes)
+	}
+	return
+}
+
+// writeBig serializes a *big.Int to w.
+func writeBig(w io.Writer, i *big.Int) error {
+	return writeMPI(w, uint16(i.BitLen()), i.Bytes())
+}
+
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+	k := (pub.N.BitLen() + 7) / 8
+	if len(b) >= k {
+		return b
+	}
+	bb := make([]byte, k)
+	copy(bb[len(bb)-len(b):], b)
+	return bb
+}
+
+// CompressionAlgo represents the different compression algorithms
+// supported by OpenPGP (except for BZIP2, which is not currently
+// supported). See Section 9.3 of RFC 4880.
+type CompressionAlgo uint8
+
+const (
+	CompressionNone CompressionAlgo = 0
+	CompressionZIP  CompressionAlgo = 1
+	CompressionZLIB CompressionAlgo = 2
+)
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
new file mode 100644
index 000000000..192aac376
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -0,0 +1,384 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/cipher"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/sha1"
+	"io"
+	"math/big"
+	"strconv"
+	"time"
+
+	"golang.org/x/crypto/openpgp/elgamal"
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/s2k"
+)
+
+// PrivateKey represents a possibly encrypted private key. See RFC 4880,
+// section 5.5.3.
+type PrivateKey struct {
+	PublicKey
+	Encrypted     bool // if true then the private key is unavailable until Decrypt has been called.
+	encryptedData []byte
+	cipher        CipherFunction
+	s2k           func(out, in []byte)
+	PrivateKey    interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (decryption is RSA only).
+	sha1Checksum  bool
+	iv            []byte
+}
+
+func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
+// implements RSA or ECDSA.
+func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey {
+	pk := new(PrivateKey)
+	// In general, the public keys should be used as pointers. We still
+	// type-switch on the values, for backwards-compatibility.
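+	//
+	// Illustrative use (hsmSigner is an assumed crypto.Signer, e.g. a
+	// hardware-backed key; it is not part of this package):
+	//
+	//	pk := NewSignerPrivateKey(time.Now(), hsmSigner)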
+ switch pubkey := signer.Public().(type) { + case *rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) + case rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) + case *ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) + case ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) + default: + panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + s2kType := buf[0] + + switch s2kType { + case 0: + pk.s2k = nil + pk.Encrypted = false + case 254, 255: + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.Encrypted = true + pk.s2k, err = s2k.Parse(r) + if err != nil { + return + } + if s2kType == 254 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + pk.encryptedData, err = io.ReadAll(r) + if err != nil { + return + } + + if !pk.Encrypted { + return pk.parsePrivateKey(pk.encryptedData) + } + + return +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + // TODO(agl): support encrypted private keys + buf := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(buf) + if err != nil { + return + } + buf.WriteByte(0 /* no encryption */) + + privateKeyBuf := bytes.NewBuffer(nil) + + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(privateKeyBuf, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(privateKeyBuf, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(privateKeyBuf, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(privateKeyBuf, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + if err != nil { + return + } + + ptype := packetTypePrivateKey + contents := buf.Bytes() + privateKeyBytes := privateKeyBuf.Bytes() + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) + if err != nil { + return + } + _, err = w.Write(contents) + if err != nil { + return + } + _, err = w.Write(privateKeyBytes) + if err != nil { + return + } + + checksum := mod64kHash(privateKeyBytes) + var checksumBytes [2]byte + checksumBytes[0] = byte(checksum >> 8) + checksumBytes[1] = byte(checksum) + _, err = w.Write(checksumBytes[:]) + + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + err := writeBig(w, priv.D) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[1]) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[0]) + if err != nil { + return err + } + return writeBig(w, priv.Precomputed.Qinv) +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) 
error { + return writeBig(w, priv.X) +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + return writeBig(w, priv.D) +} + +// Decrypt decrypts an encrypted private key using a passphrase. +func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + return pk.parsePrivateKey(data) +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + p, _, err := readMPI(buf) + if err != nil { + return + } + q, _, err := readMPI(buf) + if err != nil { + return + } + + rsaPriv.D = new(big.Int).SetBytes(d) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q) + if err := rsaPriv.Validate(); err != nil { + return err + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + dsaPriv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = dsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + pk.PrivateKey = 
&ecdsa.PrivateKey{ + PublicKey: *ecdsaPub, + D: new(big.Int).SetBytes(d), + } + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go new file mode 100644 index 000000000..fcd5f5251 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go @@ -0,0 +1,753 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +var ( + // NIST curve P-256 + oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} + // NIST curve P-384 + oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} + // NIST curve P-521 + oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} +) + +const maxOIDLength = 8 + +// ecdsaKey stores the algorithm-specific fields for ECDSA keys. +// as defined in RFC 6637, Section 9. +type ecdsaKey struct { + // oid contains the OID byte sequence identifying the elliptic curve used + oid []byte + // p contains the elliptic curve point that represents the public key + p parsedMPI +} + +// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. +func parseOID(r io.Reader) (oid []byte, err error) { + buf := make([]byte, maxOIDLength) + if _, err = readFull(r, buf[:1]); err != nil { + return + } + oidLen := buf[0] + if int(oidLen) > len(buf) { + err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) + return + } + oid = buf[:oidLen] + _, err = readFull(r, oid) + return +} + +func (f *ecdsaKey) parse(r io.Reader) (err error) { + if f.oid, err = parseOID(r); err != nil { + return err + } + f.p.bytes, f.p.bitLength, err = readMPI(r) + return +} + +func (f *ecdsaKey) serialize(w io.Writer) (err error) { + buf := make([]byte, maxOIDLength+1) + buf[0] = byte(len(f.oid)) + copy(buf[1:], f.oid) + if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { + return + } + return writeMPIs(w, f.p) +} + +func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { + var c elliptic.Curve + if bytes.Equal(f.oid, oidCurveP256) { + c = elliptic.P256() + } else if bytes.Equal(f.oid, oidCurveP384) { + c = elliptic.P384() + } else if bytes.Equal(f.oid, oidCurveP521) { + c = elliptic.P521() + } else { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + x, y := elliptic.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) byteLen() int { + return 1 + len(f.oid) + 2 + len(f.p.bytes) +} + +type kdfHashFunction byte +type kdfAlgorithm byte + +// ecdhKdf stores key derivation function parameters +// used for ECDH encryption. See RFC 6637, Section 9. 
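+//
+// On the wire this is a length octet (0x03), a reserved octet that must
+// currently be 0x01, the KDF hash ID and the key-wrapping cipher ID. As
+// an illustrative example (values assumed, not taken from this file),
+// the bytes 03 01 08 07 select SHA-256 (hash ID 8) as the KDF hash and
+// AES-128 (cipher ID 7) as the key-wrapping cipher.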
+type ecdhKdf struct { + KdfHash kdfHashFunction + KdfAlgo kdfAlgorithm +} + +func (f *ecdhKdf) parse(r io.Reader) (err error) { + buf := make([]byte, 1) + if _, err = readFull(r, buf); err != nil { + return + } + kdfLen := int(buf[0]) + if kdfLen < 3 { + return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + buf = make([]byte, kdfLen) + if _, err = readFull(r, buf); err != nil { + return + } + reserved := int(buf[0]) + f.KdfHash = kdfHashFunction(buf[1]) + f.KdfAlgo = kdfAlgorithm(buf[2]) + if reserved != 0x01 { + return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) + } + return +} + +func (f *ecdhKdf) serialize(w io.Writer) (err error) { + buf := make([]byte, 4) + // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. + buf[0] = byte(0x03) // Length of the following fields + buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now + buf[2] = byte(f.KdfHash) + buf[3] = byte(f.KdfAlgo) + _, err = w.Write(buf[:]) + return +} + +func (f *ecdhKdf) byteLen() int { + return 4 +} + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI + + // RFC 6637 fields + ec *ecdsaKey + ecdh *ecdhKdf +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +func fromBig(n *big.Int) parsedMPI { + return parsedMPI{ + bytes: n.Bytes(), + bitLength: uint16(n.BitLen()), + } +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: fromBig(pub.P), + q: fromBig(pub.Q), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: fromBig(pub.P), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + + switch pub.Curve { + case elliptic.P256(): + pk.ec.oid = oidCurveP256 + case elliptic.P384(): + pk.ec.oid = oidCurveP384 + case elliptic.P521(): + pk.ec.oid = oidCurveP521 + default: + panic("unknown elliptic curve") + } + + pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + + // The bit length is 3 (for the 0x04 specifying an uncompressed key) + // plus two field elements (for x and y), which are rounded up to the + // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 + fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 + pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return err + } + pk.PublicKey, err = pk.ec.newECDSA() + case PubKeyAlgoECDH: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return + } + pk.ecdh = new(ecdhKdf) + if err = pk.ecdh.parse(r); err != nil { + return + } + // The ECDH key is stored in an ecdsa.PublicKey for convenience. + pk.PublicKey, err = pk.ec.newECDSA() + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKey) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.serializeWithoutHeaders(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n.bytes, pk.n.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.e.bytes, pk.e.bitLength, err = readMPI(r) + if err != nil { + return + } + + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.bytes), + E: 0, + } + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. 
+func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.q.bytes, pk.q.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.bytes) + dsa.Q = new(big.Int).SetBytes(pk.q.bytes) + dsa.G = new(big.Int).SetBytes(pk.g.bytes) + dsa.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.bytes) + elgamal.G = new(big.Int).SetBytes(pk.g.bytes) + elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = elgamal + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + case PubKeyAlgoDSA: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.q.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoElGamal: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoECDSA: + pLength += uint16(pk.ec.byteLen()) + case PubKeyAlgoECDH: + pLength += uint16(pk.ec.byteLen()) + pLength += uint16(pk.ecdh.byteLen()) + default: + panic("unknown public key algorithm") + } + pLength += 6 + h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + case PubKeyAlgoDSA: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.q.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoElGamal: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoECDSA: + length += pk.ec.byteLen() + case PubKeyAlgoECDH: + length += pk.ec.byteLen() + length += pk.ecdh.byteLen() + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
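+//
+// The body laid out below is the version octet (4), the big-endian
+// 32-bit creation time, the algorithm octet, and then the
+// algorithm-specific MPIs. As a sketch with assumed values, an RSA key
+// (algorithm 1) created at Unix time 0x60000000 would begin
+// 04 60 00 00 00 01, followed by the MPIs n and e.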
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [6]byte + buf[0] = 4 + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + buf[5] = byte(pk.PubKeyAlgo) + + _, err = w.Write(buf[:]) + if err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + case PubKeyAlgoDSA: + return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) + case PubKeyAlgoElGamal: + return writeMPIs(w, pk.p, pk.g, pk.y) + case PubKeyAlgoECDSA: + return pk.ec.serialize(w) + case PubKeyAlgoECDH: + if err = pk.ec.serialize(w); err != nil { + return + } + return pk.ecdh.serialize(w) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. 
+func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + case PubKeyAlgoDSA: + dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + signed.SerializeSignaturePrefix(h) + signed.serializeWithoutHeaders(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. 
+func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// A parsedMPI is used to store the contents of a big integer, along with the +// bit length that was specified in the original input. This allows the MPI to +// be reserialized exactly. +type parsedMPI struct { + bytes []byte + bitLength uint16 +} + +// writeMPIs is a utility function for serializing several big integers to the +// given Writer. +func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { + for _, mpi := range mpis { + err = writeMPI(w, mpi.bitLength, mpi.bytes) + if err != nil { + return + } + } + return +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + case PubKeyAlgoDSA: + bitLength = pk.p.bitLength + case PubKeyAlgoElGamal: + bitLength = pk.p.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go new file mode 100644 index 000000000..5daf7b6cf --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go @@ -0,0 +1,279 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "crypto" + "crypto/md5" + "crypto/rsa" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" +) + +// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and +// should not be used for signing or encrypting. They are supported here only for +// parsing version 3 key material and validating signatures. +// See RFC 4880, section 5.5.2. +type PublicKeyV3 struct { + CreationTime time.Time + DaysToExpire uint16 + PubKeyAlgo PublicKeyAlgorithm + PublicKey *rsa.PublicKey + Fingerprint [16]byte + KeyId uint64 + IsSubkey bool + + n, e parsedMPI +} + +// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. +// Included here for testing purposes only. RFC 4880, section 5.5.2: +// "an implementation MUST NOT generate a V3 key, but MAY accept it." +func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { + pk := &PublicKeyV3{ + CreationTime: creationTime, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKeyV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [8]byte + if _, err = readFull(r, buf[:]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKeyV3) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := md5.New() + fingerPrint.Write(pk.n.bytes) + fingerPrint.Write(pk.e.bytes) + fingerPrint.Sum(pk.Fingerprint[:0]) + pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { + if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { + return + } + if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { + return + } + + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. 
+func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + default: + panic("unknown public key algorithm") + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { + length := 8 // 8 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + if err = serializeHeader(w, packetType, length); err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [8]byte + // Version 3 + buf[0] = 3 + // Creation time + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + // Days to expire + buf[5] = byte(pk.DaysToExpire >> 8) + buf[6] = byte(pk.DaysToExpire) + // Public key algorithm + buf[7] = byte(pk.PubKeyAlgo) + + if _, err = w.Write(buf[:]); err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKeyV3) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + default: + // V3 public keys only support RSA. + panic("shouldn't happen") + } +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of signed. 
+func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// userIdSignatureV3Hash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { + if !hfn.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hfn.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + h.Write([]byte(id)) + + return +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKeyV3) KeyIdString() string { + return fmt.Sprintf("%X", pk.KeyId) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKeyV3) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go new file mode 100644 index 000000000..34bc7c613 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/reader.go @@ -0,0 +1,76 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. 
+func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go new file mode 100644 index 000000000..b2a24a532 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature.go @@ -0,0 +1,731 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + CreationTime time.Time + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI + ECDSASigR, ECDSASigS parsedMPI + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + IssuerKeyId *uint64 + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *uint8 + RevocationReasonText string + + // MDC is set if this signature has a feature packet that indicates + // support for MDC subpackets. + MDC bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. 
+ EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoECDSA: + sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. 
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if 
len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. In practice, the subpacket is used exclusively to + // indicate support for MDC-protected encryption. + sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. 
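+	//
+	// Worked example (illustrative): a length of 200 falls in the
+	// two-octet range and encodes as 0xC0 0x08, since
+	// ((0xC0-192)<<8) + 0x08 + 192 == 200.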
+ if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired. +func (sig *Signature) KeyExpired(currentTime time.Time) bool { + if sig.KeyLifetimeSecs == nil { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. 
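+//
+// A minimal usage sketch (hypothetical caller code; assumes priv is already
+// decrypted and the chosen hash is linked in via its crypto/* import):
+//
+//	sig := &Signature{
+//		SigType:      SigTypeBinary,
+//		PubKeyAlgo:   priv.PubKeyAlgo,
+//		Hash:         crypto.SHA256,
+//		CreationTime: time.Now(),
+//		IssuerKeyId:  &priv.KeyId,
+//	}
+//	h := sig.Hash.New()
+//	h.Write(message)
+//	if err := sig.Sign(h, priv, nil); err == nil {
+//		err = sig.Serialize(w)
+//	}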
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
+	sig.outSubpackets = sig.buildSubpackets()
+	digest, err := sig.signPrepareHash(h)
+	if err != nil {
+		return
+	}
+
+	switch priv.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		// supports both *rsa.PrivateKey and crypto.Signer
+		sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
+		sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
+	case PubKeyAlgoDSA:
+		dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
+
+		// Need to truncate digest to match FIPS 186-3 section 4.6.
+		subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
+		if len(digest) > subgroupSize {
+			digest = digest[:subgroupSize]
+		}
+		var r, s *big.Int
+		r, s, err = dsa.Sign(config.Random(), dsaPriv, digest)
+		if err == nil {
+			sig.DSASigR.bytes = r.Bytes()
+			sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
+			sig.DSASigS.bytes = s.Bytes()
+			sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
+		}
+	case PubKeyAlgoECDSA:
+		var r, s *big.Int
+		if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
+			// direct support, avoid asn1 wrapping/unwrapping
+			r, s, err = ecdsa.Sign(config.Random(), pk, digest)
+		} else {
+			var b []byte
+			b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
+			if err == nil {
+				r, s, err = unwrapECDSASig(b)
+			}
+		}
+		if err == nil {
+			sig.ECDSASigR = fromBig(r)
+			sig.ECDSASigS = fromBig(s)
+		}
+	default:
+		err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
+	}
+
+	return
+}
+
+// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
+// signature.
+func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
+	var ecdsaSig struct {
+		R, S *big.Int
+	}
+	_, err = asn1.Unmarshal(b, &ecdsaSig)
+	if err != nil {
+		return
+	}
+	return ecdsaSig.R, ecdsaSig.S, nil
+}
+
+// SignUserId computes a signature from priv, asserting that pub is a valid
+// key for the identity id. On success, the signature is stored in sig. Call
+// Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
+	h, err := userIdSignatureHash(id, pub, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return sig.Sign(h, priv, config)
+}
+
+// SignKey computes a signature from priv, asserting that pub is a subkey. On
+// success, the signature is stored in sig. Call Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
+	h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return sig.Sign(h, priv, config)
+}
+
+// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
+// called first.
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = 2 + len(sig.RSASignature.bytes) + case PubKeyAlgoDSA: + sigLength = 2 + len(sig.DSASigR.bytes) + sigLength += 2 + len(sig.DSASigS.bytes) + case PubKeyAlgoECDSA: + sigLength = 2 + len(sig.ECDSASigR.bytes) + sigLength += 2 + len(sig.ECDSASigS.bytes) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + case PubKeyAlgoECDSA: + err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. 
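+	// (Per RFC 4880, section 5.2.3.21, the flag octet assembled below is
+	// 0x01 certify | 0x02 sign | 0x04 encrypt communications | 0x08
+	// encrypt storage, matching the KeyFlag* constants.)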
+ + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go new file mode 100644 index 000000000..6edff8893 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go @@ -0,0 +1,146 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// SignatureV3 represents older version 3 signatures. These signatures are less secure +// than version 4 and should not be used to create new signatures. They are included +// here for backwards compatibility to read and validate with older key material. +// See RFC 4880, section 5.2.2. +type SignatureV3 struct { + SigType SignatureType + CreationTime time.Time + IssuerKeyId uint64 + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + HashTag [2]byte + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI +} + +func (sig *SignatureV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.2 + var buf [8]byte + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] != 5 { + err = errors.UnsupportedError( + "invalid hashed material length " + strconv.Itoa(int(buf[0]))) + return + } + + // Read hashed material: signature type + creation time + if _, err = readFull(r, buf[:5]); err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + t := binary.BigEndian.Uint32(buf[1:5]) + sig.CreationTime = time.Unix(int64(t), 0) + + // Eight-octet Key ID of signer. 
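+	// (Only the five octets above, signature type and creation time, form
+	// the v3 hashed material; the key id and algorithm fields below are
+	// not covered by the hash.)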
+	if _, err = readFull(r, buf[:8]); err != nil {
+		return
+	}
+	sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
+
+	// Public-key and hash algorithm
+	if _, err = readFull(r, buf[:2]); err != nil {
+		return
+	}
+	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
+	default:
+		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
+		return
+	}
+	var ok bool
+	if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
+		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[1])))
+	}
+
+	// Two-octet field holding left 16 bits of signed hash value.
+	if _, err = readFull(r, sig.HashTag[:2]); err != nil {
+		return
+	}
+
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
+	case PubKeyAlgoDSA:
+		if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
+			return
+		}
+		sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
+	default:
+		panic("unreachable")
+	}
+	return
+}
+
+// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
+// called first.
+func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
+	buf := make([]byte, 8)
+
+	// Write the sig type and creation time
+	buf[0] = byte(sig.SigType)
+	binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
+	if _, err = w.Write(buf[:5]); err != nil {
+		return
+	}
+
+	// Write the issuer long key ID
+	binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
+	if _, err = w.Write(buf[:8]); err != nil {
+		return
+	}
+
+	// Write public key algorithm, hash ID, and hash value
+	buf[0] = byte(sig.PubKeyAlgo)
+	hashId, ok := s2k.HashToHashId(sig.Hash)
+	if !ok {
+		return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
+	}
+	buf[1] = hashId
+	copy(buf[2:4], sig.HashTag[:])
+	if _, err = w.Write(buf[:4]); err != nil {
+		return
+	}
+
+	if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
+		return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
+	}
+
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		err = writeMPIs(w, sig.RSASignature)
+	case PubKeyAlgoDSA:
+		err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
+	default:
+		panic("impossible")
+	}
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
new file mode 100644
index 000000000..744c2d2c4
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -0,0 +1,155 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/s2k"
+)
+
+// This is the largest session key that we'll support. Since no 512-bit cipher
+// has ever been seriously used, this is comfortably large.
+const maxSessionKeySizeInBytes = 64
+
+// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
+// 4880, section 5.3.
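+// The packet body carries an S2K specifier and, optionally, the session key
+// encrypted with the S2K-derived key; when the encrypted key is absent, the
+// S2K output itself is used as the session key (see Decrypt below).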
+type SymmetricKeyEncrypted struct { + CipherFunc CipherFunction + s2k func(out, in []byte) + encryptedKey []byte +} + +const symmetricKeyEncryptedVersion = 4 + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3. + var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + if buf[0] != symmetricKeyEncryptedVersion { + return errors.UnsupportedError("SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + var err error + ske.s2k, err = s2k.Parse(r) + if err != nil { + return err + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + + "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") + } + return plaintextKey, cipherFunc, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The +// packet contains a random session key, encrypted by a key derived from the +// given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
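+	// (The packet body written below is therefore: a version octet (4), a
+	// cipher algorithm octet, the S2K descriptor, and then the cipher octet
+	// plus session key encrypted under the derived key-encrypting key.)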
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + var buf [2]byte + buf[0] = symmetricKeyEncryptedVersion + buf[1] = byte(cipherFunc) + _, err = w.Write(buf[:]) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + + key = sessionKey + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 000000000..1a1a62964 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "golang.org/x/crypto/openpgp/errors" + "hash" + "io" + "strconv" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. 
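+		// (That is, OCFB without the resynchronization step of RFC 4880,
+		// section 13.9; see section 5.13 for the MDC variant.)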
+ ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + if s == nil { + return nil, errors.ErrKeyIncorrect + } + + plaintext := cipher.StreamReader{S: s, R: se.contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. +const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.SignatureError("error during reading") + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.SignatureError("error during reading") + } + } + + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.SignatureError("MDC packet not found") + } + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.SignatureError("hash mismatch") + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. 
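+// The trailer it emits is 0xD3 0x14 (new-format tag 19, length 20) followed
+// by the 20-byte SHA-1 digest, mirroring what seMDCReader.Close checks above.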
+type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an io.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go new file mode 100644 index 000000000..ff7ef5307 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go @@ -0,0 +1,90 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
+		0, 0, 0, 0,
+		0, 0, 0, 0}
+		if _, err = buf.Write(data); err != nil {
+			return
+		}
+		if err = jpeg.Encode(&buf, photo, nil); err != nil {
+			return
+		}
+		uat.Contents = append(uat.Contents, &OpaqueSubpacket{
+			SubType:  UserAttrImageSubpacket,
+			Contents: buf.Bytes()})
+	}
+	return
+}
+
+// NewUserAttribute creates a new user attribute packet containing the given subpackets.
+func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
+	return &UserAttribute{Contents: contents}
+}
+
+func (uat *UserAttribute) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.12
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return
+	}
+	uat.Contents, err = OpaqueSubpackets(b)
+	return
+}
+
+// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
+// header.
+func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
+	var buf bytes.Buffer
+	for _, sp := range uat.Contents {
+		sp.Serialize(&buf)
+	}
+	if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
+		return err
+	}
+	_, err = w.Write(buf.Bytes())
+	return
+}
+
+// ImageData returns zero or more byte slices, each containing
+// JPEG File Interchange Format (JFIF), for each photo in the
+// user attribute packet.
+func (uat *UserAttribute) ImageData() (imageData [][]byte) {
+	for _, sp := range uat.Contents {
+		if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
+			imageData = append(imageData, sp.Contents[16:])
+		}
+	}
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go
new file mode 100644
index 000000000..359a462eb
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/packet/userid.go
@@ -0,0 +1,159 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"io"
+	"strings"
+)
+
+// UserId contains text that is intended to represent the name and email
+// address of the key holder. See RFC 4880, section 5.11. By convention, this
+// takes the form "Full Name (Comment) <email@example.com>"
+type UserId struct {
+	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
+
+	Name, Comment, Email string
+}
+
+func hasInvalidCharacters(s string) bool {
+	for _, c := range s {
+		switch c {
+		case '(', ')', '<', '>', 0:
+			return true
+		}
+	}
+	return false
+}
+
+// NewUserId returns a UserId or nil if any of the arguments contain invalid
+// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
+func NewUserId(name, comment, email string) *UserId {
+	// RFC 4880 doesn't deal with the structure of userid strings; the
+	// name, comment and email form is just a convention. However, there's
+	// no convention about escaping the metacharacters and GPG just refuses
+	// to create user ids where, say, the name contains a '('. We mirror
+	// this behaviour.
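+	// For example, NewUserId("Alice", "work", "alice@example.com") yields
+	// the Id `Alice (work) <alice@example.com>`, while an argument that
+	// contains any metacharacter makes the function return nil. (The
+	// values are illustrative only.)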
+
+	if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
+		return nil
+	}
+
+	uid := new(UserId)
+	uid.Name, uid.Comment, uid.Email = name, comment, email
+	uid.Id = name
+	if len(comment) > 0 {
+		if len(uid.Id) > 0 {
+			uid.Id += " "
+		}
+		uid.Id += "("
+		uid.Id += comment
+		uid.Id += ")"
+	}
+	if len(email) > 0 {
+		if len(uid.Id) > 0 {
+			uid.Id += " "
+		}
+		uid.Id += "<"
+		uid.Id += email
+		uid.Id += ">"
+	}
+	return uid
+}
+
+func (uid *UserId) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.11
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return
+	}
+	uid.Id = string(b)
+	uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
+	return
+}
+
+// Serialize marshals uid to w in the form of an OpenPGP packet, including
+// header.
+func (uid *UserId) Serialize(w io.Writer) error {
+	err := serializeHeader(w, packetTypeUserId, len(uid.Id))
+	if err != nil {
+		return err
+	}
+	_, err = w.Write([]byte(uid.Id))
+	return err
+}
+
+// parseUserId extracts the name, comment and email from a user id string that
+// is formatted as "Full Name (Comment) <email@example.com>".
+func parseUserId(id string) (name, comment, email string) {
+	var n, c, e struct {
+		start, end int
+	}
+	var state int
+
+	for offset, rune := range id {
+		switch state {
+		case 0:
+			// Entering name
+			n.start = offset
+			state = 1
+			fallthrough
+		case 1:
+			// In name
+			if rune == '(' {
+				state = 2
+				n.end = offset
+			} else if rune == '<' {
+				state = 5
+				n.end = offset
+			}
+		case 2:
+			// Entering comment
+			c.start = offset
+			state = 3
+			fallthrough
+		case 3:
+			// In comment
+			if rune == ')' {
+				state = 4
+				c.end = offset
+			}
+		case 4:
+			// Between comment and email
+			if rune == '<' {
+				state = 5
+			}
+		case 5:
+			// Entering email
+			e.start = offset
+			state = 6
+			fallthrough
+		case 6:
+			// In email
+			if rune == '>' {
+				state = 7
+				e.end = offset
+			}
+		default:
+			// After email
+		}
+	}
+	switch state {
+	case 1:
+		// ended in the name
+		n.end = len(id)
+	case 3:
+		// ended in comment
+		c.end = len(id)
+	case 6:
+		// ended in email
+		e.end = len(id)
+	}
+
+	name = strings.TrimSpace(id[n.start:n.end])
+	comment = strings.TrimSpace(id[c.start:c.end])
+	email = strings.TrimSpace(id[e.start:e.end])
+	return
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go
new file mode 100644
index 000000000..cff3db919
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/read.go
@@ -0,0 +1,448 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package openpgp implements high level operations on OpenPGP messages.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
+package openpgp
+
+import (
+	"crypto"
+	_ "crypto/sha256"
+	"hash"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/armor"
+	"golang.org/x/crypto/openpgp/errors"
+	"golang.org/x/crypto/openpgp/packet"
+)
+
+// SignatureType is the armor type for a PGP signature.
+var SignatureType = "PGP SIGNATURE"
+
+// readArmored reads an armored block with the given type.
+func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + SignatureError error // nil if the signature is good. + Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + var se *packet.SymmetricallyEncrypted + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. 
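+	// (Concretely: SymmetricKeyEncrypted is RFC 4880's tag 3 packet and
+	// EncryptedKey its tag 1 packet; the loop stops at the encrypted
+	// payload itself, tag 9 or 18.)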
+ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted: + se = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. +FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + } + if len(pk.encryptedKey.Key) == 0 { + continue + } + decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + return readSignedMessage(packets, md, keyring) +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. 
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if !p.IsLast { + return nil, errors.UnsupportedError("nested signatures") + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md = nil + return + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.SignedBy != nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (n int, err error) { + n, err = cr.md.LiteralData.Body.Read(buf) + if err == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + return +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails +} + +func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { + n, err = scr.md.LiteralData.Body.Read(buf) + scr.wrappedHash.Write(buf[:n]) + if err == io.EOF { + var p packet.Packet + p, scr.md.SignatureError = scr.packets.Next() + if scr.md.SignatureError != nil { + return + } + + var ok bool + if scr.md.Signature, ok = p.(*packet.Signature); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) + } else { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") + return + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + } + return +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + switch sig := p.(type) { + case *packet.Signature: + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + case *packet.SignatureV3: + issuerKeyId = sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + default: + return nil, errors.StructuralError("non signature packet found") + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + switch sig := p.(type) { + case *packet.Signature: + err = key.PublicKey.VerifySignature(h, sig) + case *packet.SignatureV3: + err = key.PublicKey.VerifySignatureV3(h, sig) + default: + panic("unreachable") + } + + if err == nil { + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. +func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body) +} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go new file mode 100644 index 000000000..fa1a91907 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -0,0 +1,279 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1.
+//
+// Deprecated: this package is unmaintained except for security fixes. New
+// applications should consider a more focused, modern alternative to OpenPGP
+// for their specific task. If you are required to interoperate with OpenPGP
+// systems and need a maintained package, consider a community fork.
+// See https://golang.org/issue/44226.
+package s2k
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+
+	"golang.org/x/crypto/openpgp/errors"
+)
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values. Currently, Config is used only by the Serialize function in
+// this package.
+type Config struct {
+	// Hash is the default hash function to be used. If
+	// nil, SHA1 is used.
+	Hash crypto.Hash
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		// SHA1 is the historical default in this package.
+		return crypto.SHA1
+	}
+
+	return c.Hash
+}
+
+func (c *Config) encodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 96 // The common case. Corresponding to 65536
+	}
+
+	i := c.S2KCount
+	switch {
+	// Behave like GPG. Should we make 65536 the lowest value used?
+	case i < 1024:
+		i = 1024
+	case i > 65011712:
+		i = 65011712
+	}
+
+	return encodeCount(i)
+}
+
+// encodeCount converts an iterative "count" in the range 1024 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (encodedCount above takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+	if i < 1024 || i > 65011712 {
+		panic("count arg i outside the required range")
+	}
+
+	for encoded := 0; encoded < 256; encoded++ {
+		count := decodeCount(uint8(encoded))
+		if count >= i {
+			return uint8(encoded)
+		}
+	}
+
+	return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+	return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
+func Simple(out []byte, h hash.Hash, in []byte) {
+	Salted(out, h, in, nil)
+}
+
+var zero [1]byte
+
+// Salted writes to out the result of computing the Salted S2K function (RFC
+// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
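+// On pass i the hash is preloaded with i zero octets, so a hash whose output
+// is shorter than out can still fill it (see the loop below).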
+func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. +func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + hash, ok := HashIdToHash(buf[1]) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) + } + if !hash.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) + } + h := hash.New() + + switch buf[0] { + case 0: + f := func(out, in []byte) { + Simple(out, h, in) + } + return f, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return + } + f := func(out, in []byte) { + Salted(out, h, in, buf[:8]) + } + return f, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return + } + count := decodeCount(buf[8]) + f := func(out, in []byte) { + Iterated(out, h, in, buf[:8], count) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. +func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { + var buf [11]byte + buf[0] = 3 /* iterated and salted */ + buf[1], _ = HashToHashId(c.hash()) + salt := buf[2:10] + if _, err := io.ReadFull(rand, salt); err != nil { + return err + } + encodedCount := c.encodedCount() + count := decodeCount(encodedCount) + buf[10] = encodedCount + if _, err := w.Write(buf[:]); err != nil { + return err + } + + Iterated(key, c.hash().New(), passphrase, salt, count) + return nil +} + +// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. 
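+// Ids 4 through 7 are reserved in that section of the RFC, which is why they
+// are absent from the table.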
+var hashToHashIdMapping = []struct { + id byte + hash crypto.Hash + name string +}{ + {1, crypto.MD5, "MD5"}, + {2, crypto.SHA1, "SHA1"}, + {3, crypto.RIPEMD160, "RIPEMD160"}, + {8, crypto.SHA256, "SHA256"}, + {9, crypto.SHA384, "SHA384"}, + {10, crypto.SHA512, "SHA512"}, + {11, crypto.SHA224, "SHA224"}, +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.hash, true + } + } + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.name, true + } + } + + return "", false +} + +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for _, m := range hashToHashIdMapping { + if m.hash == h { + return m.id, true + } + } + return 0, false +} diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go new file mode 100644 index 000000000..b89d48b81 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/write.go @@ -0,0 +1,418 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" + "golang.org/x/crypto/openpgp/s2k" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. 
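+//
+// A hypothetical round trip through these helpers (signer, keyring and msg
+// are placeholders; error handling elided):
+//
+//	var buf bytes.Buffer
+//	_ = ArmoredDetachSign(&buf, signer, bytes.NewReader(msg), nil)
+//	_, err := CheckArmoredDetachedSignature(keyring, bytes.NewReader(msg), &buf)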
+func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &signer.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + io.Copy(wrappedHash, message) + + err = sig.Sign(h, signer.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + + literaldata := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literaldata, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. 
+func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.signingKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: packet.SigTypeBinary, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. + w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil + } + return literalData, nil +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. 
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] + defaultHashes := candidateHashes[len(candidateHashes)-1:] + + encryptKeys := make([]Key, len(to)) + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].encryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + } + + sig := to[i].primaryIdentity().SelfSignature + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) + if err != nil { + return + } + + return writeAndSign(payload, candidateHashes, signed, hints, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. 
+ candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + defaultHashes := candidateHashes[len(candidateHashes)-1:] + preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + h hash.Hash + signer *packet.PrivateKey + config *packet.Config +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.h.Write(data) + return s.literalData.Write(data) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + SigType: packet.SigTypeBinary, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an io.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 000000000..948a3ee63 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,135 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. 
+func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := withCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. +// +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. +func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) +} diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 000000000..f93c740b6 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 000000000..88ce33434 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go new file mode 100644 index 000000000..3bf40fdfe --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package execabs is a drop-in replacement for os/exec +// that requires PATH lookups to find absolute paths. +// That is, execabs.Command("cmd") runs the same PATH lookup +// as exec.Command("cmd"), but if the result is a path +// which is relative, the Run and Start methods will report +// an error instead of running the executable. +// +// See https://blog.golang.org/path-security for more information +// about when it may be necessary or appropriate to use this package. +package execabs + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "reflect" + "unsafe" +) + +// ErrNotFound is the error resulting if a path search failed to find an executable file. +// It is an alias for exec.ErrNotFound. +var ErrNotFound = exec.ErrNotFound + +// Cmd represents an external command being prepared or run. +// It is an alias for exec.Cmd. +type Cmd = exec.Cmd + +// Error is returned by LookPath when it fails to classify a file as an executable. +// It is an alias for exec.Error. +type Error = exec.Error + +// An ExitError reports an unsuccessful exit by a command. +// It is an alias for exec.ExitError. +type ExitError = exec.ExitError + +func relError(file, path string) error { + return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) +} + +// LookPath searches for an executable named file in the directories +// named by the PATH environment variable. If file contains a slash, +// it is tried directly and the PATH is not consulted. The result will be +// an absolute path. +// +// LookPath differs from exec.LookPath in its handling of PATH lookups, +// which are used for file names without slashes. If exec.LookPath's +// PATH lookup would have returned an executable from the current directory, +// LookPath instead returns an error. +func LookPath(file string) (string, error) { + path, err := exec.LookPath(file) + if err != nil && !isGo119ErrDot(err) { + return "", err + } + if filepath.Base(file) == file && !filepath.IsAbs(path) { + return "", relError(file, path) + } + return path, nil +} + +func fixCmd(name string, cmd *exec.Cmd) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { + // exec.Command was called with a bare binary name and + // exec.LookPath returned a path which is not absolute. + // Set cmd.lookPathErr and clear cmd.Path so that it + // cannot be run. + lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) + if *lookPathErr == nil { + *lookPathErr = relError(name, cmd.Path) + } + cmd.Path = "" + } +} + +// CommandContext is like Command but includes a context. +// +// The provided context is used to kill the process (by calling os.Process.Kill) +// if the context becomes done before the command completes on its own. 
+func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, name, arg...) + fixCmd(name, cmd) + return cmd + +} + +// Command returns the Cmd struct to execute the named program with the given arguments. +// See exec.Command for most details. +// +// Command differs from exec.Command in its handling of PATH lookups, +// which are used when the program name contains no slashes. +// If exec.Command would have returned an exec.Cmd configured to run an +// executable from the current directory, Command instead +// returns an exec.Cmd that will return an error from Start or Run. +func Command(name string, arg ...string) *exec.Cmd { + cmd := exec.Command(name, arg...) + fixCmd(name, cmd) + return cmd +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go new file mode 100644 index 000000000..5627d70e3 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 + +package execabs + +import "os/exec" + +func isGo119ErrDot(err error) bool { + return false +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return false +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go new file mode 100644 index 000000000..d60ab1b41 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -0,0 +1,20 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 + +package execabs + +import ( + "errors" + "os/exec" +) + +func isGo119ErrDot(err error) bool { + return errors.Is(err, exec.ErrDot) +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return cmd.Err != nil +} diff --git a/vendor/helm.sh/helm/v3/LICENSE b/vendor/helm.sh/helm/v3/LICENSE new file mode 100644 index 000000000..21c57fae2 --- /dev/null +++ b/vendor/helm.sh/helm/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Kubernetes Authors All Rights Reserved + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go b/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go new file mode 100644 index 000000000..4ea09cca4 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go @@ -0,0 +1,50 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fileutil + +import ( + "io" + "os" + "path/filepath" + + "helm.sh/helm/v3/internal/third_party/dep/fs" +) + +// AtomicWriteFile atomically (as atomic as os.Rename allows) writes a file to a +// disk. +func AtomicWriteFile(filename string, reader io.Reader, mode os.FileMode) error { + tempFile, err := os.CreateTemp(filepath.Split(filename)) + if err != nil { + return err + } + tempName := tempFile.Name() + + if _, err := io.Copy(tempFile, reader); err != nil { + tempFile.Close() // return value is ignored as we are already on error path + return err + } + + if err := tempFile.Close(); err != nil { + return err + } + + if err := os.Chmod(tempName, mode); err != nil { + return err + } + + return fs.RenameWithFallback(tempName, filename) +} diff --git a/vendor/helm.sh/helm/v3/internal/resolver/resolver.go b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go new file mode 100644 index 000000000..5e8921f96 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go @@ -0,0 +1,263 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/provenance" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" +) + +// Resolver resolves dependencies from semantic version ranges to a particular version. +type Resolver struct { + chartpath string + cachepath string + registryClient *registry.Client +} + +// New creates a new resolver for a given chart, helm home and registry client. +func New(chartpath, cachepath string, registryClient *registry.Client) *Resolver { + return &Resolver{ + chartpath: chartpath, + cachepath: cachepath, + registryClient: registryClient, + } +} + +// Resolve resolves dependencies and returns a lock file with the resolution. +func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) { + + // Now we clone the dependencies, locking as we go. 
+ locked := make([]*chart.Dependency, len(reqs)) + missing := []string{} + for i, d := range reqs { + constraint, err := semver.NewConstraint(d.Version) + if err != nil { + return nil, errors.Wrapf(err, "dependency %q has an invalid version/constraint format", d.Name) + } + + if d.Repository == "" { + // Local chart subfolder + if _, err := GetLocalPath(filepath.Join("charts", d.Name), r.chartpath); err != nil { + return nil, err + } + + locked[i] = &chart.Dependency{ + Name: d.Name, + Repository: "", + Version: d.Version, + } + continue + } + if strings.HasPrefix(d.Repository, "file://") { + + chartpath, err := GetLocalPath(d.Repository, r.chartpath) + if err != nil { + return nil, err + } + + ch, err := loader.LoadDir(chartpath) + if err != nil { + return nil, err + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + // Not a legit entry. + continue + } + + if !constraint.Check(v) { + missing = append(missing, d.Name) + continue + } + + locked[i] = &chart.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: ch.Metadata.Version, + } + continue + } + + repoName := repoNames[d.Name] + // if the repository was not defined, but the dependency defines a repository url, bypass the cache + if repoName == "" && d.Repository != "" { + locked[i] = &chart.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: d.Version, + } + continue + } + + var vs repo.ChartVersions + var version string + var ok bool + found := true + if !registry.IsOCI(d.Repository) { + repoIndex, err := repo.LoadIndexFile(filepath.Join(r.cachepath, helmpath.CacheIndexFile(repoName))) + if err != nil { + return nil, errors.Wrapf(err, "no cached repository for %s found. (try 'helm repo update')", repoName) + } + + vs, ok = repoIndex.Entries[d.Name] + if !ok { + return nil, errors.Errorf("%s chart not found in repo %s", d.Name, d.Repository) + } + found = false + } else { + version = d.Version + + // Check to see if an explicit version has been provided + _, err := semver.NewVersion(version) + + // Use an explicit version, otherwise search for tags + if err == nil { + vs = []*repo.ChartVersion{{ + Metadata: &chart.Metadata{ + Version: version, + }, + }} + + } else { + // Retrieve list of tags for repository + ref := fmt.Sprintf("%s/%s", strings.TrimPrefix(d.Repository, fmt.Sprintf("%s://", registry.OCIScheme)), d.Name) + tags, err := r.registryClient.Tags(ref) + if err != nil { + return nil, errors.Wrapf(err, "could not retrieve list of tags for repository %s", d.Repository) + } + + vs = make(repo.ChartVersions, len(tags)) + for ti, t := range tags { + // Mock chart version objects + version := &repo.ChartVersion{ + Metadata: &chart.Metadata{ + Version: t, + }, + } + vs[ti] = version + } + } + } + + locked[i] = &chart.Dependency{ + Name: d.Name, + Repository: d.Repository, + Version: version, + } + // The version are already sorted and hence the first one to satisfy the constraint is used + for _, ver := range vs { + v, err := semver.NewVersion(ver.Version) + // OCI does not need URLs + if err != nil || (!registry.IsOCI(d.Repository) && len(ver.URLs) == 0) { + // Not a legit entry. + continue + } + if constraint.Check(v) { + found = true + locked[i].Version = v.Original() + break + } + } + + if !found { + missing = append(missing, d.Name) + } + } + if len(missing) > 0 { + return nil, errors.Errorf("can't get a valid version for repositories %s. 
Try changing the version constraint in Chart.yaml", strings.Join(missing, ", ")) + } + + digest, err := HashReq(reqs, locked) + if err != nil { + return nil, err + } + + return &chart.Lock{ + Generated: time.Now(), + Digest: digest, + Dependencies: locked, + }, nil +} + +// HashReq generates a hash of the dependencies. +// +// This should be used only to compare against another hash generated by this +// function. +func HashReq(req, lock []*chart.Dependency) (string, error) { + data, err := json.Marshal([2][]*chart.Dependency{req, lock}) + if err != nil { + return "", err + } + s, err := provenance.Digest(bytes.NewBuffer(data)) + return "sha256:" + s, err +} + +// HashV2Req generates a hash of requirements generated in Helm v2. +// +// This should be used only to compare against another hash generated by the +// Helm v2 hash function. It is to handle issue: +// https://github.com/helm/helm/issues/7233 +func HashV2Req(req []*chart.Dependency) (string, error) { + dep := make(map[string][]*chart.Dependency) + dep["dependencies"] = req + data, err := json.Marshal(dep) + if err != nil { + return "", err + } + s, err := provenance.Digest(bytes.NewBuffer(data)) + return "sha256:" + s, err +} + +// GetLocalPath generates absolute local path when use +// "file://" in repository of dependencies +func GetLocalPath(repo, chartpath string) (string, error) { + var depPath string + var err error + p := strings.TrimPrefix(repo, "file://") + + // root path is absolute + if strings.HasPrefix(p, "/") { + if depPath, err = filepath.Abs(p); err != nil { + return "", err + } + } else { + depPath = filepath.Join(chartpath, p) + } + + if _, err = os.Stat(depPath); os.IsNotExist(err) { + return "", errors.Errorf("directory %s not found", depPath) + } else if err != nil { + return "", err + } + + return depPath, nil +} diff --git a/vendor/k8s.io/helm/pkg/sympath/walk.go b/vendor/helm.sh/helm/v3/internal/sympath/walk.go similarity index 91% rename from vendor/k8s.io/helm/pkg/sympath/walk.go rename to vendor/helm.sh/helm/v3/internal/sympath/walk.go index 9a62261d0..a276cfeff 100644 --- a/vendor/k8s.io/helm/pkg/sympath/walk.go +++ b/vendor/helm.sh/helm/v3/internal/sympath/walk.go @@ -1,6 +1,6 @@ /* -Copyright (c) for portions of walk.go are held by The Go Authors, 2009 and are provided under -the BSD license. +Copyright (c) for portions of walk.go are held by The Go Authors, 2009 and are +provided under the BSD license. https://github.com/golang/go/blob/master/LICENSE @@ -21,11 +21,12 @@ limitations under the License. package sympath import ( - "fmt" "log" "os" "path/filepath" "sort" + + "github.com/pkg/errors" ) // Walk walks the file tree rooted at root, calling walkFn for each file or directory @@ -68,9 +69,9 @@ func symwalk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { if IsSymlink(info) { resolved, err := filepath.EvalSymlinks(path) if err != nil { - return fmt.Errorf("error evaluating symlink %s: %s", path, err) + return errors.Wrapf(err, "error evaluating symlink %s", path) } - log.Printf("found symbolic link in path: %s resolves to %s", path, resolved) + log.Printf("found symbolic link in path: %s resolves to %s. 
Contents of linked file included and used", path, resolved) if info, err = os.Lstat(resolved); err != nil { return err } diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go new file mode 100644 index 000000000..4e4eacc60 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go @@ -0,0 +1,372 @@ +/* +Copyright (c) for portions of fs.go are held by The Go Authors, 2016 and are provided under +the BSD license. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package fs + +import ( + "io" + "os" + "path/filepath" + "runtime" + "syscall" + + "github.com/pkg/errors" +) + +// fs contains a copy of a few functions from dep tool code to avoid a dependency on golang/dep. +// This code is copied from https://github.com/golang/dep/blob/37d6c560cdf407be7b6cd035b23dba89df9275cf/internal/fs/fs.go +// No changes to the code were made other than removing some unused functions + +// RenameWithFallback attempts to rename a file or directory, but falls back to +// copying in the event of a cross-device link error. If the fallback copy +// succeeds, src is still removed, emulating normal rename behavior. +func RenameWithFallback(src, dst string) error { + _, err := os.Stat(src) + if err != nil { + return errors.Wrapf(err, "cannot stat %s", src) + } + + err = os.Rename(src, dst) + if err == nil { + return nil + } + + return renameFallback(err, src, dst) +} + +// renameByCopy attempts to rename a file or directory by copying it to the +// destination and then removing the src thus emulating the rename behavior. 
+func renameByCopy(src, dst string) error { + var cerr error + if dir, _ := IsDir(src); dir { + cerr = CopyDir(src, dst) + if cerr != nil { + cerr = errors.Wrap(cerr, "copying directory failed") + } + } else { + cerr = copyFile(src, dst) + if cerr != nil { + cerr = errors.Wrap(cerr, "copying file failed") + } + } + + if cerr != nil { + return errors.Wrapf(cerr, "rename fallback failed: cannot rename %s to %s", src, dst) + } + + return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src) +} + +var ( + errSrcNotDir = errors.New("source is not a directory") + errDstExist = errors.New("destination already exists") +) + +// CopyDir recursively copies a directory tree, attempting to preserve permissions. +// Source directory must exist, destination directory must *not* exist. +func CopyDir(src, dst string) error { + src = filepath.Clean(src) + dst = filepath.Clean(dst) + + // We use os.Lstat() here to ensure we don't fall in a loop where a symlink + // actually links to a one of its parent directories. + fi, err := os.Lstat(src) + if err != nil { + return err + } + if !fi.IsDir() { + return errSrcNotDir + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + return errDstExist + } + + if err = os.MkdirAll(dst, fi.Mode()); err != nil { + return errors.Wrapf(err, "cannot mkdir %s", dst) + } + + entries, err := os.ReadDir(src) + if err != nil { + return errors.Wrapf(err, "cannot read directory %s", dst) + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if entry.IsDir() { + if err = CopyDir(srcPath, dstPath); err != nil { + return errors.Wrap(err, "copying directory failed") + } + } else { + // This will include symlinks, which is what we want when + // copying things. + if err = copyFile(srcPath, dstPath); err != nil { + return errors.Wrap(err, "copying file failed") + } + } + } + + return nil +} + +// copyFile copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all its contents will be replaced by the contents +// of the source file. The file mode will be copied from the source. +func copyFile(src, dst string) (err error) { + if sym, err := IsSymlink(src); err != nil { + return errors.Wrap(err, "symlink check failed") + } else if sym { + if err := cloneSymlink(src, dst); err != nil { + if runtime.GOOS == "windows" { + // If cloning the symlink fails on Windows because the user + // does not have the required privileges, ignore the error and + // fall back to copying the file contents. 
+ // + // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522): + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx + if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) { + return err + } + } else { + return err + } + } else { + return nil + } + } + + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return + } + + if _, err = io.Copy(out, in); err != nil { + out.Close() + return + } + + // Check for write errors on Close + if err = out.Close(); err != nil { + return + } + + si, err := os.Stat(src) + if err != nil { + return + } + + // Temporary fix for Go < 1.9 + // + // See: https://github.com/golang/dep/issues/774 + // and https://github.com/golang/go/issues/20829 + if runtime.GOOS == "windows" { + dst = fixLongPath(dst) + } + err = os.Chmod(dst, si.Mode()) + + return +} + +// cloneSymlink will create a new symlink that points to the resolved path of sl. +// If sl is a relative symlink, dst will also be a relative symlink. +func cloneSymlink(sl, dst string) error { + resolved, err := os.Readlink(sl) + if err != nil { + return err + } + + return os.Symlink(resolved, dst) +} + +// IsDir determines is the path given is a directory or not. +func IsDir(name string) (bool, error) { + fi, err := os.Stat(name) + if err != nil { + return false, err + } + if !fi.IsDir() { + return false, errors.Errorf("%q is not a directory", name) + } + return true, nil +} + +// IsSymlink determines if the given path is a symbolic link. +func IsSymlink(path string) (bool, error) { + l, err := os.Lstat(path) + if err != nil { + return false, err + } + + return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil +} + +// fixLongPath returns the extended-length (\\?\-prefixed) form of +// path when needed, in order to avoid the default 260 character file +// path limit imposed by Windows. If path is not easily converted to +// the extended-length form (for example, if path is a relative path +// or contains .. elements), or is short enough, fixLongPath returns +// path unmodified. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath +func fixLongPath(path string) string { + // Do nothing (and don't allocate) if the path is "short". + // Empirically (at least on the Windows Server 2013 builder), + // the kernel is arbitrarily okay with < 248 bytes. That + // matches what the docs above say: + // "When using an API to create a directory, the specified + // path cannot be so long that you cannot append an 8.3 file + // name (that is, the directory name cannot exceed MAX_PATH + // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. + // + // The MSDN docs appear to say that a normal path that is 248 bytes long + // will work; empirically the path must be less then 248 bytes long. + if len(path) < 248 { + // Don't fix. (This is how Go 1.7 and earlier worked, + // not automatically generating the \\?\ form) + return path + } + + // The extended form begins with \\?\, as in + // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. + // The extended form disables evaluation of . and .. path + // elements and disables the interpretation of / as equivalent + // to \. The conversion here rewrites / to \ and elides + // . elements as well as trailing or duplicate separators. For + // simplicity it avoids the conversion entirely for relative + // paths or paths containing .. elements. 
For now, + // \\server\share paths are not converted to + // \\?\UNC\server\share paths because the rules for doing so + // are less well-specified. + if len(path) >= 2 && path[:2] == `\\` { + // Don't canonicalize UNC paths. + return path + } + if !isAbs(path) { + // Relative path + return path + } + + const prefix = `\\?` + + pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) + copy(pathbuf, prefix) + n := len(path) + r, w := 0, len(prefix) + for r < n { + switch { + case os.IsPathSeparator(path[r]): + // empty block + r++ + case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): + // /./ + r++ + case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): + // /../ is currently unhandled + return path + default: + pathbuf[w] = '\\' + w++ + for ; r < n && !os.IsPathSeparator(path[r]); r++ { + pathbuf[w] = path[r] + w++ + } + } + } + // A drive's root directory needs a trailing \ + if w == len(`\\?\c:`) { + pathbuf[w] = '\\' + w++ + } + return string(pathbuf[:w]) +} + +func isAbs(path string) (b bool) { + v := volumeName(path) + if v == "" { + return false + } + path = path[len(v):] + if path == "" { + return false + } + return os.IsPathSeparator(path[0]) +} + +func volumeName(path string) (v string) { + if len(path) < 2 { + return "" + } + // with drive letter + c := path[0] + if path[1] == ':' && + ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' || + 'A' <= c && c <= 'Z') { + return path[:2] + } + // is it UNC + if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) && + !os.IsPathSeparator(path[2]) && path[2] != '.' { + // first, leading `\\` and next shouldn't be `\`. its server name. + for n := 3; n < l-1; n++ { + // second, next '\' shouldn't be repeated. + if os.IsPathSeparator(path[n]) { + n++ + // third, following something characters. its share name. + if !os.IsPathSeparator(path[n]) { + if path[n] == '.' { + break + } + for ; n < l; n++ { + if os.IsPathSeparator(path[n]) { + break + } + } + return path[:n] + } + break + } + } + } + return "" +} diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go similarity index 55% rename from vendor/github.com/ghodss/yaml/LICENSE rename to vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go index 7805d36de..a3e5e56a6 100644 --- a/vendor/github.com/ghodss/yaml/LICENSE +++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go @@ -1,27 +1,8 @@ -The MIT License (MIT) +//go:build !windows -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. +/* +Copyright (c) for portions of rename.go are held by The Go Authors, 2016 and are provided under +the BSD license. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -48,3 +29,30 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } else if terr.Err != syscall.EXDEV { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + + return renameByCopy(src, dst) +} diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go new file mode 100644 index 000000000..a377720a6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go @@ -0,0 +1,69 @@ +//go:build windows + +/* +Copyright (c) for portions of rename_windows.go are held by The Go Authors, 2016 and are provided under +the BSD license. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +package fs + +import ( + "os" + "syscall" + + "github.com/pkg/errors" +) + +// renameFallback attempts to determine the appropriate fallback to failed rename +// operation depending on the resulting error. +func renameFallback(err error, src, dst string) error { + // Rename may fail if src and dst are on different devices; fall back to + // copy if we detect that case. syscall.EXDEV is the common name for the + // cross device link error which has varying output text across different + // operating systems. + terr, ok := err.(*os.LinkError) + if !ok { + return err + } + + if terr.Err != syscall.EXDEV { + // In windows it can drop down to an operating system call that + // returns an operating system error with a different number and + // message. Checking for that as a fall back. + noerr, ok := terr.Err.(syscall.Errno) + + // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. + // See https://msdn.microsoft.com/en-us/library/cc231199.aspx + if ok && noerr != 0x11 { + return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) + } + } + + return renameByCopy(src, dst) +} diff --git a/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go b/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go new file mode 100644 index 000000000..ae62d0e6f --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go @@ -0,0 +1,178 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "sort" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +// deploymentutil contains a copy of a few functions from Kubernetes controller code to avoid a dependency on k8s.io/kubernetes. +// This code is copied from https://github.com/kubernetes/kubernetes/blob/e856613dd5bb00bcfaca6974431151b5c06cbed5/pkg/controller/deployment/util/deployment_util.go +// No changes to the code were made other than removing some unused functions + +// RsListFunc returns the ReplicaSet from the ReplicaSet namespace and the List metav1.ListOptions. +type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error) + +// ListReplicaSets returns a slice of RSes the given deployment targets. +// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), +// because only the controller itself should do that. +// However, it does filter out anything whose ControllerRef doesn't match. +func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) { + // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. 
the replica set's selector + // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. + namespace := deployment.Namespace + selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) + if err != nil { + return nil, err + } + options := metav1.ListOptions{LabelSelector: selector.String()} + all, err := getRSList(namespace, options) + if err != nil { + return nil, err + } + // Only include those whose ControllerRef matches the Deployment. + owned := make([]*apps.ReplicaSet, 0, len(all)) + for _, rs := range all { + if metav1.IsControlledBy(rs, deployment) { + owned = append(owned, rs) + } + } + return owned, nil +} + +// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker. +type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet + +func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) } +func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) +} + +// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). +func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet { + sort.Sort(ReplicaSetsByCreationTimestamp(rsList)) + for i := range rsList { + if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { + // In rare cases, such as after cluster upgrades, Deployment may end up with + // having more than one new ReplicaSets that have the same template as its template, + // see https://github.com/kubernetes/kubernetes/issues/40415 + // We deterministically choose the oldest new ReplicaSet. + return rsList[i] + } + } + // new ReplicaSet does not exist. + return nil +} + +// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] +// We ignore pod-template-hash because: +// 1. The hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// 2. The deployment template won't have hash labels +func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool { + t1Copy := template1.DeepCopy() + t2Copy := template2.DeepCopy() + // Remove hash labels from template.Labels before comparing + delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) +} + +// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. +// Returns nil if the new replica set doesn't exist yet. +func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) { + rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) + if err != nil { + return nil, err + } + return FindNewReplicaSet(deployment, rsList), nil +} + +// RsListFromClient returns an rsListFunc that wraps the given client. 
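// In-package sketch of the EqualIgnoreHash rule above: two templates that
// differ only in the pod-template-hash label compare as equal. Assumes a
// _test.go file in this package with "fmt" imported; the label values are
// made up.
func ExampleEqualIgnoreHash() {
	t1 := &v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{
				"app":                                "etcd",
				apps.DefaultDeploymentUniqueLabelKey: "abc123",
			},
		},
	}
	t2 := t1.DeepCopy()
	t2.Labels[apps.DefaultDeploymentUniqueLabelKey] = "def456"
	fmt.Println(EqualIgnoreHash(t1, t2))
	// Output: true
}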
+func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc { + return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) { + rsList, err := c.ReplicaSets(namespace).List(context.Background(), options) + if err != nil { + return nil, err + } + var ret []*apps.ReplicaSet + for i := range rsList.Items { + ret = append(ret, &rsList.Items[i]) + } + return ret, err + } +} + +// IsRollingUpdate returns true if the strategy type is a rolling update. +func IsRollingUpdate(deployment *apps.Deployment) bool { + return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType +} + +// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take. +func MaxUnavailable(deployment apps.Deployment) int32 { + if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { + return int32(0) + } + // Error caught by validation + _, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas)) + if maxUnavailable > *deployment.Spec.Replicas { + return *deployment.Spec.Replicas + } + return maxUnavailable +} + +// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one +// step. For example: +// +// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) +// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) +// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) +// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. + unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} diff --git a/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go b/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go new file mode 100644 index 000000000..8b9d4329f --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go @@ -0,0 +1,58 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
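// Worked example for ResolveFenceposts, as an in-package Example test
// (assumes "fmt" is imported): 3 desired replicas, maxSurge=0,
// maxUnavailable=25%. 25% of 3 rounds *down* to 0, and because surge also
// resolved to 0, unavailable is bumped to 1 so the rollout can make progress.
func ExampleResolveFenceposts() {
	surge := intstrutil.FromInt(0)
	unavailable := intstrutil.FromString("25%")
	s, u, err := ResolveFenceposts(&surge, &unavailable, 3)
	fmt.Println(s, u, err)
	// Output: 0 1 <nil>
}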
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "os" + + "github.com/pkg/errors" +) + +// Options represents configurable options used to create client and server TLS configurations. +type Options struct { + CaCertFile string + // If either the KeyFile or CertFile is empty, ClientConfig() will not load them. + KeyFile string + CertFile string + // Client-only options + InsecureSkipVerify bool +} + +// ClientConfig returns a TLS configuration for use by a Helm client. +func ClientConfig(opts Options) (cfg *tls.Config, err error) { + var cert *tls.Certificate + var pool *x509.CertPool + + if opts.CertFile != "" || opts.KeyFile != "" { + if cert, err = CertFromFilePair(opts.CertFile, opts.KeyFile); err != nil { + if os.IsNotExist(err) { + return nil, errors.Wrapf(err, "could not load x509 key pair (cert: %q, key: %q)", opts.CertFile, opts.KeyFile) + } + return nil, errors.Wrapf(err, "could not read x509 key pair (cert: %q, key: %q)", opts.CertFile, opts.KeyFile) + } + } + if !opts.InsecureSkipVerify && opts.CaCertFile != "" { + if pool, err = CertPoolFromFile(opts.CaCertFile); err != nil { + return nil, err + } + } + + cfg = &tls.Config{InsecureSkipVerify: opts.InsecureSkipVerify, Certificates: []tls.Certificate{*cert}, RootCAs: pool} + return cfg, nil +} diff --git a/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go b/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go new file mode 100644 index 000000000..dc832ed80 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go @@ -0,0 +1,78 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "os" + + "github.com/pkg/errors" +) + +// NewClientTLS returns tls.Config appropriate for client auth. +func NewClientTLS(certFile, keyFile, caFile string, insecureSkipTLSverify bool) (*tls.Config, error) { + config := tls.Config{ + InsecureSkipVerify: insecureSkipTLSverify, + } + + if certFile != "" && keyFile != "" { + cert, err := CertFromFilePair(certFile, keyFile) + if err != nil { + return nil, err + } + config.Certificates = []tls.Certificate{*cert} + } + + if caFile != "" { + cp, err := CertPoolFromFile(caFile) + if err != nil { + return nil, err + } + config.RootCAs = cp + } + + return &config, nil +} + +// CertPoolFromFile returns an x509.CertPool containing the certificates +// in the given PEM-encoded file. 
+// Returns an error if the file could not be read, a certificate could not +// be parsed, or if the file does not contain any certificates +func CertPoolFromFile(filename string) (*x509.CertPool, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, errors.Errorf("can't read CA file: %v", filename) + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, errors.Errorf("failed to append certificates from file: %s", filename) + } + return cp, nil +} + +// CertFromFilePair returns an tls.Certificate containing the +// certificates public/private key pair from a pair of given PEM-encoded files. +// Returns an error if the file could not be read, a certificate could not +// be parsed, or if the file does not contain any certificates +func CertFromFilePair(certFile, keyFile string) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, errors.Wrapf(err, "can't load key pair from cert %s and key %s", certFile, keyFile) + } + return &cert, err +} diff --git a/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go b/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go new file mode 100644 index 000000000..a8cf7398c --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go @@ -0,0 +1,73 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package urlutil + +import ( + "net/url" + "path" + "path/filepath" +) + +// URLJoin joins a base URL to one or more path components. +// +// It's like filepath.Join for URLs. If the baseURL is pathish, this will still +// perform a join. +// +// If the URL is unparsable, this returns an error. +func URLJoin(baseURL string, paths ...string) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + // We want path instead of filepath because path always uses /. + all := []string{u.Path} + all = append(all, paths...) + u.Path = path.Join(all...) + return u.String(), nil +} + +// Equal normalizes two URLs and then compares for equality. +func Equal(a, b string) bool { + au, err := url.Parse(a) + if err != nil { + a = filepath.Clean(a) + b = filepath.Clean(b) + // If urls are paths, return true only if they are an exact match + return a == b + } + bu, err := url.Parse(b) + if err != nil { + return false + } + + for _, u := range []*url.URL{au, bu} { + if u.Path == "" { + u.Path = "/" + } + u.Path = filepath.Clean(u.Path) + } + return au.String() == bu.String() +} + +// ExtractHostname returns hostname from URL +func ExtractHostname(addr string) (string, error) { + u, err := url.Parse(addr) + if err != nil { + return "", err + } + return u.Hostname(), nil +} diff --git a/vendor/helm.sh/helm/v3/internal/version/version.go b/vendor/helm.sh/helm/v3/internal/version/version.go new file mode 100644 index 000000000..414957bc9 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/version/version.go @@ -0,0 +1,81 @@ +/* +Copyright The Helm Authors. 
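// Behavior sketch for URLJoin and Equal above, written in-package because
// urlutil is internal to helm.sh/helm/v3 and cannot be imported from other
// modules (assumes "fmt" is imported; the host is a placeholder).
func ExampleURLJoin() {
	u, _ := URLJoin("https://charts.example.com/stable", "etcd", "index.yaml")
	fmt.Println(u)
	fmt.Println(Equal("https://charts.example.com/", "https://charts.example.com"))
	// Output:
	// https://charts.example.com/stable/etcd/index.yaml
	// true
}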
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version // import "helm.sh/helm/v3/internal/version" + +import ( + "flag" + "runtime" + "strings" +) + +var ( + // version is the current version of Helm. + // Update this whenever making a new release. + // The version is of the format Major.Minor.Patch[-Prerelease][+BuildMetadata] + // + // Increment major number for new feature additions and behavioral changes. + // Increment minor number for bug fixes and performance enhancements. + version = "v3.14" + + // metadata is extra build time data + metadata = "" + // gitCommit is the git sha1 + gitCommit = "" + // gitTreeState is the state of the git tree + gitTreeState = "" +) + +// BuildInfo describes the compile time information. +type BuildInfo struct { + // Version is the current semver. + Version string `json:"version,omitempty"` + // GitCommit is the git sha1. + GitCommit string `json:"git_commit,omitempty"` + // GitTreeState is the state of the git tree. + GitTreeState string `json:"git_tree_state,omitempty"` + // GoVersion is the version of the Go compiler used. + GoVersion string `json:"go_version,omitempty"` +} + +// GetVersion returns the semver string of the version +func GetVersion() string { + if metadata == "" { + return version + } + return version + "+" + metadata +} + +// GetUserAgent returns a user agent for user with an HTTP client +func GetUserAgent() string { + return "Helm/" + strings.TrimPrefix(GetVersion(), "v") +} + +// Get returns build info +func Get() BuildInfo { + v := BuildInfo{ + Version: GetVersion(), + GitCommit: gitCommit, + GitTreeState: gitTreeState, + GoVersion: runtime.Version(), + } + + // HACK(bacongobbler): strip out GoVersion during a test run for consistent test output + if flag.Lookup("test.v") != nil { + v.GoVersion = "" + } + return v +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/action.go b/vendor/helm.sh/helm/v3/pkg/action/action.go new file mode 100644 index 000000000..5693f4838 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/action.go @@ -0,0 +1,424 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
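// Small in-package sketch of how these values combine (assumes "fmt" is
// imported). metadata and gitCommit are normally injected via -ldflags at
// build time, so with the defaults in this file:
func ExampleGetUserAgent() {
	fmt.Println(GetVersion())   // metadata is empty, so just the version
	fmt.Println(GetUserAgent()) // leading "v" stripped for the UA string
	// Output:
	// v3.14
	// Helm/3.14
}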
+*/ + +package action + +import ( + "bytes" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/engine" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/postrender" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/storage" + "helm.sh/helm/v3/pkg/storage/driver" + "helm.sh/helm/v3/pkg/time" +) + +// Timestamper is a function capable of producing a timestamp.Timestamper. +// +// By default, this is a time.Time function from the Helm time package. This can +// be overridden for testing though, so that timestamps are predictable. +var Timestamper = time.Now + +var ( + // errMissingChart indicates that a chart was not provided. + errMissingChart = errors.New("no chart provided") + // errMissingRelease indicates that a release (name) was not provided. + errMissingRelease = errors.New("no release provided") + // errInvalidRevision indicates that an invalid release revision number was provided. + errInvalidRevision = errors.New("invalid release revision") + // errPending indicates that another instance of Helm is already applying an operation on a release. + errPending = errors.New("another operation (install/upgrade/rollback) is in progress") +) + +// ValidName is a regular expression for resource names. +// +// DEPRECATED: This will be removed in Helm 4, and is no longer used here. See +// pkg/lint/rules.validateMetadataNameFunc for the replacement. +// +// According to the Kubernetes help text, the regular expression it uses is: +// +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// +// This follows the above regular expression (but requires a full string match, not partial). +// +// The Kubernetes documentation is here, though it is not entirely correct: +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +var ValidName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`) + +// Configuration injects the dependencies that all actions share. +type Configuration struct { + // RESTClientGetter is an interface that loads Kubernetes clients. + RESTClientGetter RESTClientGetter + + // Releases stores records of releases. + Releases *storage.Storage + + // KubeClient is a Kubernetes API client. + KubeClient kube.Interface + + // RegistryClient is a client for working with registries + RegistryClient *registry.Client + + // Capabilities describes the capabilities of the Kubernetes cluster. + Capabilities *chartutil.Capabilities + + Log func(string, ...interface{}) +} + +// renderResources renders the templates in a chart +// +// TODO: This function is badly in need of a refactor. +// TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed +// +// This code has to do with writing files to disk. 
+func (cfg *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrender.PostRenderer, interactWithRemote, enableDNS bool) ([]*release.Hook, *bytes.Buffer, string, error) { + hs := []*release.Hook{} + b := bytes.NewBuffer(nil) + + caps, err := cfg.getCapabilities() + if err != nil { + return hs, b, "", err + } + + if ch.Metadata.KubeVersion != "" { + if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) { + return hs, b, "", errors.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String()) + } + } + + var files map[string]string + var err2 error + + // A `helm template` should not talk to the remote cluster. However, commands with the flag + //`--dry-run` with the value of `false`, `none`, or `server` should try to interact with the cluster. + // It may break in interesting and exotic ways because other data (e.g. discovery) is mocked. + if interactWithRemote && cfg.RESTClientGetter != nil { + restConfig, err := cfg.RESTClientGetter.ToRESTConfig() + if err != nil { + return hs, b, "", err + } + e := engine.New(restConfig) + e.EnableDNS = enableDNS + files, err2 = e.Render(ch, values) + } else { + var e engine.Engine + e.EnableDNS = enableDNS + files, err2 = e.Render(ch, values) + } + + if err2 != nil { + return hs, b, "", err2 + } + + // NOTES.txt gets rendered like all the other files, but because it's not a hook nor a resource, + // pull it out of here into a separate file so that we can actually use the output of the rendered + // text file. We have to spin through this map because the file contains path information, so we + // look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip + // it in the sortHooks. + var notesBuffer bytes.Buffer + for k, v := range files { + if strings.HasSuffix(k, notesFileSuffix) { + if subNotes || (k == path.Join(ch.Name(), "templates", notesFileSuffix)) { + // If buffer contains data, add newline before adding more + if notesBuffer.Len() > 0 { + notesBuffer.WriteString("\n") + } + notesBuffer.WriteString(v) + } + delete(files, k) + } + } + notes := notesBuffer.String() + + // Sort hooks, manifests, and partials. Only hooks and manifests are returned, + // as partials are not used after renderer.Render. Empty manifests are also + // removed here. + hs, manifests, err := releaseutil.SortManifests(files, caps.APIVersions, releaseutil.InstallOrder) + if err != nil { + // By catching parse errors here, we can prevent bogus releases from going + // to Kubernetes. + // + // We return the files as a big blob of data to help the user debug parser + // errors. + for name, content := range files { + if strings.TrimSpace(content) == "" { + continue + } + fmt.Fprintf(b, "---\n# Source: %s\n%s\n", name, content) + } + return hs, b, "", err + } + + // Aggregate all valid manifests into one big doc. 
+ fileWritten := make(map[string]bool) + + if includeCrds { + for _, crd := range ch.CRDObjects() { + if outputDir == "" { + fmt.Fprintf(b, "---\n# Source: %s\n%s\n", crd.Filename, string(crd.File.Data[:])) + } else { + err = writeToFile(outputDir, crd.Filename, string(crd.File.Data[:]), fileWritten[crd.Filename]) + if err != nil { + return hs, b, "", err + } + fileWritten[crd.Filename] = true + } + } + } + + for _, m := range manifests { + if outputDir == "" { + fmt.Fprintf(b, "---\n# Source: %s\n%s\n", m.Name, m.Content) + } else { + newDir := outputDir + if useReleaseName { + newDir = filepath.Join(outputDir, releaseName) + } + // NOTE: We do not have to worry about the post-renderer because + // output dir is only used by `helm template`. In the next major + // release, we should move this logic to template only as it is not + // used by install or upgrade + err = writeToFile(newDir, m.Name, m.Content, fileWritten[m.Name]) + if err != nil { + return hs, b, "", err + } + fileWritten[m.Name] = true + } + } + + if pr != nil { + b, err = pr.Run(b) + if err != nil { + return hs, b, notes, errors.Wrap(err, "error while running post render on files") + } + } + + return hs, b, notes, nil +} + +// RESTClientGetter gets the rest client +type RESTClientGetter interface { + ToRESTConfig() (*rest.Config, error) + ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) + ToRESTMapper() (meta.RESTMapper, error) +} + +// DebugLog sets the logger that writes debug strings +type DebugLog func(format string, v ...interface{}) + +// capabilities builds a Capabilities from discovery information. +func (cfg *Configuration) getCapabilities() (*chartutil.Capabilities, error) { + if cfg.Capabilities != nil { + return cfg.Capabilities, nil + } + dc, err := cfg.RESTClientGetter.ToDiscoveryClient() + if err != nil { + return nil, errors.Wrap(err, "could not get Kubernetes discovery client") + } + // force a discovery cache invalidation to always fetch the latest server version/capabilities. + dc.Invalidate() + kubeVersion, err := dc.ServerVersion() + if err != nil { + return nil, errors.Wrap(err, "could not get server version from Kubernetes") + } + // Issue #6361: + // Client-Go emits an error when an API service is registered but unimplemented. + // We trap that error here and print a warning. But since the discovery client continues + // building the API object, it is correctly populated with all valid APIs. + // See https://github.com/kubernetes/kubernetes/issues/72051#issuecomment-521157642 + apiVersions, err := GetVersionSet(dc) + if err != nil { + if discovery.IsGroupDiscoveryFailedError(err) { + cfg.Log("WARNING: The Kubernetes server has an orphaned API service. 
Server reports: %s", err) + cfg.Log("WARNING: To fix this, kubectl delete apiservice ") + } else { + return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes") + } + } + + cfg.Capabilities = &chartutil.Capabilities{ + APIVersions: apiVersions, + KubeVersion: chartutil.KubeVersion{ + Version: kubeVersion.GitVersion, + Major: kubeVersion.Major, + Minor: kubeVersion.Minor, + }, + HelmVersion: chartutil.DefaultCapabilities.HelmVersion, + } + return cfg.Capabilities, nil +} + +// KubernetesClientSet creates a new kubernetes ClientSet based on the configuration +func (cfg *Configuration) KubernetesClientSet() (kubernetes.Interface, error) { + conf, err := cfg.RESTClientGetter.ToRESTConfig() + if err != nil { + return nil, errors.Wrap(err, "unable to generate config for kubernetes client") + } + + return kubernetes.NewForConfig(conf) +} + +// Now generates a timestamp +// +// If the configuration has a Timestamper on it, that will be used. +// Otherwise, this will use time.Now(). +func (cfg *Configuration) Now() time.Time { + return Timestamper() +} + +func (cfg *Configuration) releaseContent(name string, version int) (*release.Release, error) { + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, errors.Errorf("releaseContent: Release name is invalid: %s", name) + } + + if version <= 0 { + return cfg.Releases.Last(name) + } + + return cfg.Releases.Get(name, version) +} + +// GetVersionSet retrieves a set of available k8s API versions +func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.VersionSet, error) { + groups, resources, err := client.ServerGroupsAndResources() + if err != nil && !discovery.IsGroupDiscoveryFailedError(err) { + return chartutil.DefaultVersionSet, errors.Wrap(err, "could not get apiVersions from Kubernetes") + } + + // FIXME: The Kubernetes test fixture for cli appears to always return nil + // for calls to Discovery().ServerGroupsAndResources(). So in this case, we + // return the default API list. This is also a safe value to return in any + // other odd-ball case. + if len(groups) == 0 && len(resources) == 0 { + return chartutil.DefaultVersionSet, nil + } + + versionMap := make(map[string]interface{}) + versions := []string{} + + // Extract the groups + for _, g := range groups { + for _, gv := range g.Versions { + versionMap[gv.GroupVersion] = struct{}{} + } + } + + // Extract the resources + var id string + var ok bool + for _, r := range resources { + for _, rl := range r.APIResources { + + // A Kind at a GroupVersion can show up more than once. We only want + // it displayed once in the final output. + id = path.Join(r.GroupVersion, rl.Kind) + if _, ok = versionMap[id]; !ok { + versionMap[id] = struct{}{} + } + } + } + + // Convert to a form that NewVersionSet can use + for k := range versionMap { + versions = append(versions, k) + } + + return chartutil.VersionSet(versions), nil +} + +// recordRelease with an update operation in case reuse has been set. 
+func (cfg *Configuration) recordRelease(r *release.Release) { + if err := cfg.Releases.Update(r); err != nil { + cfg.Log("warning: Failed to update release %s: %s", r.Name, err) + } +} + +// Init initializes the action configuration +func (cfg *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error { + kc := kube.New(getter) + kc.Log = log + + lazyClient := &lazyClient{ + namespace: namespace, + clientFn: kc.Factory.KubernetesClientSet, + } + + var store *storage.Storage + switch helmDriver { + case "secret", "secrets", "": + d := driver.NewSecrets(newSecretClient(lazyClient)) + d.Log = log + store = storage.Init(d) + case "configmap", "configmaps": + d := driver.NewConfigMaps(newConfigMapClient(lazyClient)) + d.Log = log + store = storage.Init(d) + case "memory": + var d *driver.Memory + if cfg.Releases != nil { + if mem, ok := cfg.Releases.Driver.(*driver.Memory); ok { + // This function can be called more than once (e.g., helm list --all-namespaces). + // If a memory driver was already initialized, re-use it but set the possibly new namespace. + // We re-use it in case some releases where already created in the existing memory driver. + d = mem + } + } + if d == nil { + d = driver.NewMemory() + } + d.SetNamespace(namespace) + store = storage.Init(d) + case "sql": + d, err := driver.NewSQL( + os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"), + log, + namespace, + ) + if err != nil { + panic(fmt.Sprintf("Unable to instantiate SQL driver: %v", err)) + } + store = storage.Init(d) + default: + // Not sure what to do here. + panic("Unknown driver in HELM_DRIVER: " + helmDriver) + } + + cfg.RESTClientGetter = getter + cfg.KubeClient = kc + cfg.Releases = store + cfg.Log = log + + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/dependency.go b/vendor/helm.sh/helm/v3/pkg/action/dependency.go new file mode 100644 index 000000000..3265f1f17 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/dependency.go @@ -0,0 +1,230 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Masterminds/semver/v3" + "github.com/gosuri/uitable" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" +) + +// Dependency is the action for building a given chart's dependency tree. +// +// It provides the implementation of 'helm dependency' and its respective subcommands. +type Dependency struct { + Verify bool + Keyring string + SkipRefresh bool + ColumnWidth uint +} + +// NewDependency creates a new Dependency object with the given configuration. +func NewDependency() *Dependency { + return &Dependency{ + ColumnWidth: 80, + } +} + +// List executes 'helm dependency list'. 
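// Typical out-of-tree wiring for Init above, sketched under the assumption
// that kubeconfig handling comes from helm.sh/helm/v3/pkg/cli. HELM_DRIVER
// selects the storage backend handled by the switch in Init (Secrets when
// empty).
package main

import (
	"log"
	"os"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/cli"
)

func main() {
	settings := cli.New()
	cfg := new(action.Configuration)
	if err := cfg.Init(settings.RESTClientGetter(), settings.Namespace(),
		os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
		log.Fatalf("init action configuration: %v", err)
	}

	// Any action can now share cfg; listing releases is the simplest probe.
	releases, err := action.NewList(cfg).Run()
	if err != nil {
		log.Fatalf("list releases: %v", err)
	}
	for _, r := range releases {
		log.Printf("%s\trevision %d\t%s", r.Name, r.Version, r.Info.Status)
	}
}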
+func (d *Dependency) List(chartpath string, out io.Writer) error { + c, err := loader.Load(chartpath) + if err != nil { + return err + } + + if c.Metadata.Dependencies == nil { + fmt.Fprintf(out, "WARNING: no dependencies at %s\n", filepath.Join(chartpath, "charts")) + return nil + } + + d.printDependencies(chartpath, out, c) + fmt.Fprintln(out) + d.printMissing(chartpath, out, c.Metadata.Dependencies) + return nil +} + +// dependencyStatus returns a string describing the status of a dependency viz a viz the parent chart. +func (d *Dependency) dependencyStatus(chartpath string, dep *chart.Dependency, parent *chart.Chart) string { + filename := fmt.Sprintf("%s-%s.tgz", dep.Name, "*") + + // If a chart is unpacked, this will check the unpacked chart's `charts/` directory for tarballs. + // Technically, this is COMPLETELY unnecessary, and should be removed in Helm 4. It is here + // to preserved backward compatibility. In Helm 2/3, there is a "difference" between + // the tgz version (which outputs "ok" if it unpacks) and the loaded version (which outputs + // "unpacked"). Early in Helm 2's history, this would have made a difference. But it no + // longer does. However, since this code shipped with Helm 3, the output must remain stable + // until Helm 4. + switch archives, err := filepath.Glob(filepath.Join(chartpath, "charts", filename)); { + case err != nil: + return "bad pattern" + case len(archives) > 1: + // See if the second part is a SemVer + found := []string{} + for _, arc := range archives { + // we need to trip the prefix dirs and the extension off. + filename = strings.TrimSuffix(filepath.Base(arc), ".tgz") + maybeVersion := strings.TrimPrefix(filename, fmt.Sprintf("%s-", dep.Name)) + + if _, err := semver.StrictNewVersion(maybeVersion); err == nil { + // If the version parsed without an error, it is possibly a valid + // version. + found = append(found, arc) + } + } + + if l := len(found); l == 1 { + // If we get here, we do the same thing as in len(archives) == 1. + if r := statArchiveForStatus(found[0], dep); r != "" { + return r + } + + // Fall through and look for directories + } else if l > 1 { + return "too many matches" + } + + // The sanest thing to do here is to fall through and see if we have any directory + // matches. + + case len(archives) == 1: + archive := archives[0] + if r := statArchiveForStatus(archive, dep); r != "" { + return r + } + + } + // End unnecessary code. + + var depChart *chart.Chart + for _, item := range parent.Dependencies() { + if item.Name() == dep.Name { + depChart = item + } + } + + if depChart == nil { + return "missing" + } + + if depChart.Metadata.Version != dep.Version { + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + return "invalid version" + } + + v, err := semver.NewVersion(depChart.Metadata.Version) + if err != nil { + return "invalid version" + } + + if !constraint.Check(v) { + return "wrong version" + } + } + + return "unpacked" +} + +// stat an archive and return a message if the stat is successful +// +// This is a refactor of the code originally in dependencyStatus. It is here to +// support legacy behavior, and should be removed in Helm 4. 
+func statArchiveForStatus(archive string, dep *chart.Dependency) string { + if _, err := os.Stat(archive); err == nil { + c, err := loader.Load(archive) + if err != nil { + return "corrupt" + } + if c.Name() != dep.Name { + return "misnamed" + } + + if c.Metadata.Version != dep.Version { + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + return "invalid version" + } + + v, err := semver.NewVersion(c.Metadata.Version) + if err != nil { + return "invalid version" + } + + if !constraint.Check(v) { + return "wrong version" + } + } + return "ok" + } + return "" +} + +// printDependencies prints all of the dependencies in the yaml file. +func (d *Dependency) printDependencies(chartpath string, out io.Writer, c *chart.Chart) { + table := uitable.New() + table.MaxColWidth = d.ColumnWidth + table.AddRow("NAME", "VERSION", "REPOSITORY", "STATUS") + for _, row := range c.Metadata.Dependencies { + table.AddRow(row.Name, row.Version, row.Repository, d.dependencyStatus(chartpath, row, c)) + } + fmt.Fprintln(out, table) +} + +// printMissing prints warnings about charts that are present on disk, but are +// not in Chart.yaml. +func (d *Dependency) printMissing(chartpath string, out io.Writer, reqs []*chart.Dependency) { + folder := filepath.Join(chartpath, "charts/*") + files, err := filepath.Glob(folder) + if err != nil { + fmt.Fprintln(out, err) + return + } + + for _, f := range files { + fi, err := os.Stat(f) + if err != nil { + fmt.Fprintf(out, "Warning: %s\n", err) + } + // Skip anything that is not a directory and not a tgz file. + if !fi.IsDir() && filepath.Ext(f) != ".tgz" { + continue + } + c, err := loader.Load(f) + if err != nil { + fmt.Fprintf(out, "WARNING: %q is not a chart.\n", f) + continue + } + found := false + for _, d := range reqs { + if d.Name == c.Name() { + found = true + break + } + } + if !found { + fmt.Fprintf(out, "WARNING: %q is not in Chart.yaml.\n", f) + } + } +} diff --git a/vendor/k8s.io/helm/pkg/manifest/doc.go b/vendor/helm.sh/helm/v3/pkg/action/doc.go similarity index 64% rename from vendor/k8s.io/helm/pkg/manifest/doc.go rename to vendor/helm.sh/helm/v3/pkg/action/doc.go index c2f127cda..3c91bd618 100644 --- a/vendor/k8s.io/helm/pkg/manifest/doc.go +++ b/vendor/helm.sh/helm/v3/pkg/action/doc.go @@ -14,10 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -/*Package manifest contains tools for working with kubernetes manifests. - -Much like other parts of helm, it does not generally require that the manifests -be correct yaml, so these functions can be run on broken manifests to aid in -user debugging -*/ -package manifest // import "k8s.io/helm/pkg/manifest" +// Package action contains the logic for each action that Helm can perform. +// +// This is a library for calling top-level Helm actions like 'install', +// 'upgrade', or 'list'. Actions approximately match the command line +// invocations that the Helm client uses. +package action diff --git a/vendor/helm.sh/helm/v3/pkg/action/get.go b/vendor/helm.sh/helm/v3/pkg/action/get.go new file mode 100644 index 000000000..f44b53307 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/get.go @@ -0,0 +1,47 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "helm.sh/helm/v3/pkg/release" +) + +// Get is the action for checking a given release's information. +// +// It provides the implementation of 'helm get' and its respective subcommands (except `helm get values`). +type Get struct { + cfg *Configuration + + // Initializing Version to 0 will get the latest revision of the release. + Version int +} + +// NewGet creates a new Get object with the given configuration. +func NewGet(cfg *Configuration) *Get { + return &Get{ + cfg: cfg, + } +} + +// Run executes 'helm get' against the given release. +func (g *Get) Run(name string) (*release.Release, error) { + if err := g.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + return g.cfg.releaseContent(name, g.Version) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go b/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go new file mode 100644 index 000000000..ec096ae16 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/get_metadata.go @@ -0,0 +1,69 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import "time" + +// GetMetadata is the action for checking a given release's metadata. +// +// It provides the implementation of 'helm get metadata'. +type GetMetadata struct { + cfg *Configuration + + Version int +} + +type Metadata struct { + Name string `json:"name" yaml:"name"` + Chart string `json:"chart" yaml:"chart"` + Version string `json:"version" yaml:"version"` + AppVersion string `json:"appVersion" yaml:"appVersion"` + Namespace string `json:"namespace" yaml:"namespace"` + Revision int `json:"revision" yaml:"revision"` + Status string `json:"status" yaml:"status"` + DeployedAt string `json:"deployedAt" yaml:"deployedAt"` +} + +// NewGetMetadata creates a new GetMetadata object with the given configuration. +func NewGetMetadata(cfg *Configuration) *GetMetadata { + return &GetMetadata{ + cfg: cfg, + } +} + +// Run executes 'helm get metadata' against the given release. 
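// Caller-side sketch for the Get action above ("my-release" is a
// placeholder, cfg is an already-initialized *action.Configuration, and
// "fmt" plus the action package are assumed imported):
func printRelease(cfg *action.Configuration) error {
	g := action.NewGet(cfg)
	g.Version = 0 // zero fetches the latest revision, per the field comment
	rel, err := g.Run("my-release")
	if err != nil {
		return err
	}
	fmt.Printf("%s revision %d: %s\n", rel.Name, rel.Version, rel.Info.Status)
	return nil
}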
+func (g *GetMetadata) Run(name string) (*Metadata, error) { + if err := g.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + rel, err := g.cfg.releaseContent(name, g.Version) + if err != nil { + return nil, err + } + + return &Metadata{ + Name: rel.Name, + Chart: rel.Chart.Metadata.Name, + Version: rel.Chart.Metadata.Version, + AppVersion: rel.Chart.Metadata.AppVersion, + Namespace: rel.Namespace, + Revision: rel.Version, + Status: rel.Info.Status.String(), + DeployedAt: rel.Info.LastDeployed.Format(time.RFC3339), + }, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/get_values.go b/vendor/helm.sh/helm/v3/pkg/action/get_values.go new file mode 100644 index 000000000..9c32db213 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/get_values.go @@ -0,0 +1,60 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "helm.sh/helm/v3/pkg/chartutil" +) + +// GetValues is the action for checking a given release's values. +// +// It provides the implementation of 'helm get values'. +type GetValues struct { + cfg *Configuration + + Version int + AllValues bool +} + +// NewGetValues creates a new GetValues object with the given configuration. +func NewGetValues(cfg *Configuration) *GetValues { + return &GetValues{ + cfg: cfg, + } +} + +// Run executes 'helm get values' against the given release. +func (g *GetValues) Run(name string) (map[string]interface{}, error) { + if err := g.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + rel, err := g.cfg.releaseContent(name, g.Version) + if err != nil { + return nil, err + } + + // If the user wants all values, compute the values and return. + if g.AllValues { + cfg, err := chartutil.CoalesceValues(rel.Chart, rel.Config) + if err != nil { + return nil, err + } + return cfg, nil + } + return rel.Config, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/history.go b/vendor/helm.sh/helm/v3/pkg/action/history.go new file mode 100644 index 000000000..0430aaf7a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/history.go @@ -0,0 +1,58 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" +) + +// History is the action for checking the release's ledger. +// +// It provides the implementation of 'helm history'. +// It returns all the revisions for a specific release. 
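// Companion sketch for GetValues above (same assumptions as the Get sketch:
// cfg is initialized, "my-release" is a placeholder). With AllValues set,
// chart defaults are coalesced with the user-supplied overrides; otherwise
// only the overrides stored in the release (rel.Config) are returned.
func releaseValues(cfg *action.Configuration, all bool) (map[string]interface{}, error) {
	gv := action.NewGetValues(cfg)
	gv.AllValues = all
	return gv.Run("my-release")
}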
+// To list up to one revision of every release in one specific, or in all, +// namespaces, see the List action. +type History struct { + cfg *Configuration + + Max int + Version int +} + +// NewHistory creates a new History object with the given configuration. +func NewHistory(cfg *Configuration) *History { + return &History{ + cfg: cfg, + } +} + +// Run executes 'helm history' against the given release. +func (h *History) Run(name string) ([]*release.Release, error) { + if err := h.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, errors.Errorf("release name is invalid: %s", name) + } + + h.cfg.Log("getting history for release %s", name) + return h.cfg.Releases.History(name) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/hooks.go b/vendor/helm.sh/helm/v3/pkg/action/hooks.go new file mode 100644 index 000000000..0af625dff --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/hooks.go @@ -0,0 +1,159 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "sort" + "time" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/release" + helmtime "helm.sh/helm/v3/pkg/time" +) + +// execHook executes all of the hooks for the given hook event. +func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error { + executingHooks := []*release.Hook{} + + for _, h := range rl.Hooks { + for _, e := range h.Events { + if e == hook { + executingHooks = append(executingHooks, h) + } + } + } + + // hooke are pre-ordered by kind, so keep order stable + sort.Stable(hookByWeight(executingHooks)) + + for _, h := range executingHooks { + // Set default delete policy to before-hook-creation + if h.DeletePolicies == nil || len(h.DeletePolicies) == 0 { + // TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion + // resources. For all other resource types update in place if a + // resource with the same name already exists and is owned by the + // current release. + h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation} + } + + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil { + return err + } + + resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true) + if err != nil { + return errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path) + } + + // Record the time at which the hook was applied to the cluster + h.LastRun = release.HookExecution{ + StartedAt: helmtime.Now(), + Phase: release.HookPhaseRunning, + } + cfg.recordRelease(rl) + + // As long as the implementation of WatchUntilReady does not panic, HookPhaseFailed or HookPhaseSucceeded + // should always be set by this function. If we fail to do that for any reason, then HookPhaseUnknown is + // the most appropriate value to surface. 
+ h.LastRun.Phase = release.HookPhaseUnknown + + // Create hook resources + if _, err := cfg.KubeClient.Create(resources); err != nil { + h.LastRun.CompletedAt = helmtime.Now() + h.LastRun.Phase = release.HookPhaseFailed + return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path) + } + + // Watch hook resources until they have completed + err = cfg.KubeClient.WatchUntilReady(resources, timeout) + // Note the time of success/failure + h.LastRun.CompletedAt = helmtime.Now() + // Mark hook as succeeded or failed + if err != nil { + h.LastRun.Phase = release.HookPhaseFailed + // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted + // under failed condition. If so, then clear the corresponding resource object in the hook + if err := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); err != nil { + return err + } + return err + } + h.LastRun.Phase = release.HookPhaseSucceeded + } + + // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted + // under succeeded condition. If so, then clear the corresponding resource object in each hook + for _, h := range executingHooks { + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { + return err + } + } + + return nil +} + +// hookByWeight is a sorter for hooks +type hookByWeight []*release.Hook + +func (x hookByWeight) Len() int { return len(x) } +func (x hookByWeight) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x hookByWeight) Less(i, j int) bool { + if x[i].Weight == x[j].Weight { + return x[i].Name < x[j].Name + } + return x[i].Weight < x[j].Weight +} + +// deleteHookByPolicy deletes a hook if the hook policy instructs it to +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { + // Never delete CustomResourceDefinitions; this could cause lots of + // cascading garbage collection. + if h.Kind == "CustomResourceDefinition" { + return nil + } + if hookHasDeletePolicy(h, policy) { + resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), false) + if err != nil { + return errors.Wrapf(err, "unable to build kubernetes object for deleting hook %s", h.Path) + } + _, errs := cfg.KubeClient.Delete(resources) + if len(errs) > 0 { + return errors.New(joinErrors(errs)) + } + + //wait for resources until they are deleted to avoid conflicts + if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { + if err := kubeClient.WaitForDelete(resources, timeout); err != nil { + return err + } + } + } + return nil +} + +// hookHasDeletePolicy determines whether the defined hook deletion policy matches the hook deletion polices +// supported by helm. If so, mark the hook as one should be deleted. +func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool { + for _, v := range h.DeletePolicies { + if policy == v { + return true + } + } + return false +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/install.go b/vendor/helm.sh/helm/v3/pkg/action/install.go new file mode 100644 index 000000000..e3538a4f5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/install.go @@ -0,0 +1,815 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
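// In-package sketch of the ordering rule implemented by hookByWeight above:
// ascending weight, hook name as tie-breaker. The names and weights are
// made up.
func demoHookOrder() []*release.Hook {
	hooks := []*release.Hook{
		{Name: "b-job", Weight: 0},
		{Name: "a-job", Weight: 0},
		{Name: "pre-db", Weight: -5},
	}
	sort.Stable(hookByWeight(hooks))
	// hooks is now ordered: pre-db (-5), a-job (0), b-job (0).
	return hooks
}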
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "context" + "fmt" + "io" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "sync" + "text/template" + "time" + + "github.com/Masterminds/sprig/v3" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/resource" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/downloader" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/kube" + kubefake "helm.sh/helm/v3/pkg/kube/fake" + "helm.sh/helm/v3/pkg/postrender" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/repo" + "helm.sh/helm/v3/pkg/storage" + "helm.sh/helm/v3/pkg/storage/driver" +) + +// NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine +// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually +// wants to see this file after rendering in the status command. However, it must be a suffix +// since there can be filepath in front of it. +const notesFileSuffix = "NOTES.txt" + +const defaultDirectoryPermission = 0755 + +// Install performs an installation operation. +type Install struct { + cfg *Configuration + + ChartPathOptions + + ClientOnly bool + Force bool + CreateNamespace bool + DryRun bool + DryRunOption string + DisableHooks bool + Replace bool + Wait bool + WaitForJobs bool + Devel bool + DependencyUpdate bool + Timeout time.Duration + Namespace string + ReleaseName string + GenerateName bool + NameTemplate string + Description string + OutputDir string + Atomic bool + SkipCRDs bool + SubNotes bool + DisableOpenAPIValidation bool + IncludeCRDs bool + Labels map[string]string + // KubeVersion allows specifying a custom kubernetes version to use and + // APIVersions allows a manual set of supported API Versions to be passed + // (for things like templating). These are ignored if ClientOnly is false + KubeVersion *chartutil.KubeVersion + APIVersions chartutil.VersionSet + // Used by helm template to render charts with .Release.IsUpgrade. 
Ignored if Dry-Run is false + IsUpgrade bool + // Enable DNS lookups when rendering templates + EnableDNS bool + // Used by helm template to add the release as part of OutputDir path + // OutputDir/ + UseReleaseName bool + PostRenderer postrender.PostRenderer + // Lock to control raceconditions when the process receives a SIGTERM + Lock sync.Mutex +} + +// ChartPathOptions captures common options used for controlling chart paths +type ChartPathOptions struct { + CaFile string // --ca-file + CertFile string // --cert-file + KeyFile string // --key-file + InsecureSkipTLSverify bool // --insecure-skip-verify + PlainHTTP bool // --plain-http + Keyring string // --keyring + Password string // --password + PassCredentialsAll bool // --pass-credentials + RepoURL string // --repo + Username string // --username + Verify bool // --verify + Version string // --version + + // registryClient provides a registry client but is not added with + // options from a flag + registryClient *registry.Client +} + +// NewInstall creates a new Install object with the given configuration. +func NewInstall(cfg *Configuration) *Install { + in := &Install{ + cfg: cfg, + } + in.ChartPathOptions.registryClient = cfg.RegistryClient + + return in +} + +// SetRegistryClient sets the registry client for the install action +func (i *Install) SetRegistryClient(registryClient *registry.Client) { + i.ChartPathOptions.registryClient = registryClient +} + +// GetRegistryClient get the registry client. +func (i *Install) GetRegistryClient() *registry.Client { + return i.ChartPathOptions.registryClient +} + +func (i *Install) installCRDs(crds []chart.CRD) error { + // We do these one file at a time in the order they were read. + totalItems := []*resource.Info{} + for _, obj := range crds { + // Read in the resources + res, err := i.cfg.KubeClient.Build(bytes.NewBuffer(obj.File.Data), false) + if err != nil { + return errors.Wrapf(err, "failed to install CRD %s", obj.Name) + } + + // Send them to Kube + if _, err := i.cfg.KubeClient.Create(res); err != nil { + // If the error is CRD already exists, continue. + if apierrors.IsAlreadyExists(err) { + crdName := res[0].Name + i.cfg.Log("CRD %s is already present. Skipping.", crdName) + continue + } + return errors.Wrapf(err, "failed to install CRD %s", obj.Name) + } + totalItems = append(totalItems, res...) + } + if len(totalItems) > 0 { + // Give time for the CRD to be recognized. + if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil { + return err + } + + // If we have already gathered the capabilities, we need to invalidate + // the cache so that the new CRDs are recognized. This should only be + // the case when an action configuration is reused for multiple actions, + // as otherwise it is later loaded by ourselves when getCapabilities + // is called later on in the installation process. + if i.cfg.Capabilities != nil { + discoveryClient, err := i.cfg.RESTClientGetter.ToDiscoveryClient() + if err != nil { + return err + } + + i.cfg.Log("Clearing discovery cache") + discoveryClient.Invalidate() + + _, _ = discoveryClient.ServerGroups() + } + + // Invalidate the REST mapper, since it will not have the new CRDs + // present. 
+ restMapper, err := i.cfg.RESTClientGetter.ToRESTMapper()
+ if err != nil {
+ return err
+ }
+ if resettable, ok := restMapper.(meta.ResettableRESTMapper); ok {
+ i.cfg.Log("Clearing REST mapper cache")
+ resettable.Reset()
+ }
+ }
+ return nil
+}
+
+// Run executes the installation
+//
+// If DryRun is set to true, this will prepare the release, but not install it
+
+func (i *Install) Run(chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+ ctx := context.Background()
+ return i.RunWithContext(ctx, chrt, vals)
+}
+
+// RunWithContext executes the installation with the given Context
+//
+// When the task is cancelled through ctx, the function returns and the install
+// proceeds in the background.
+func (i *Install) RunWithContext(ctx context.Context, chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+ // Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`)
+ if !i.ClientOnly {
+ if err := i.cfg.KubeClient.IsReachable(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := i.availableName(); err != nil {
+ return nil, err
+ }
+
+ if err := chartutil.ProcessDependenciesWithMerge(chrt, vals); err != nil {
+ return nil, err
+ }
+
+ var interactWithRemote bool
+ if !i.isDryRun() || i.DryRunOption == "server" || i.DryRunOption == "none" || i.DryRunOption == "false" {
+ interactWithRemote = true
+ }
+
+ // Pre-install anything in the crd/ directory. We do this before Helm
+ // contacts the upstream server and builds the capabilities object.
+ if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 {
+ // On dry run, bail here
+ if i.isDryRun() {
+ i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.")
+ } else if err := i.installCRDs(crds); err != nil {
+ return nil, err
+ }
+ }
+
+ if i.ClientOnly {
+ // Add mock objects in here so it doesn't use Kube API server
+ // NOTE(bacongobbler): used for `helm template`
+ i.cfg.Capabilities = chartutil.DefaultCapabilities.Copy()
+ if i.KubeVersion != nil {
+ i.cfg.Capabilities.KubeVersion = *i.KubeVersion
+ }
+ i.cfg.Capabilities.APIVersions = append(i.cfg.Capabilities.APIVersions, i.APIVersions...)
+ i.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard}
+
+ mem := driver.NewMemory()
+ mem.SetNamespace(i.Namespace)
+ i.cfg.Releases = storage.Init(mem)
+ } else if !i.ClientOnly && len(i.APIVersions) > 0 {
+ i.cfg.Log("API Version list given outside of client only mode, this list will be ignored")
+ }
+
+ // Make sure if Atomic is set, that wait is set as well. This makes it so
+ // the user doesn't have to specify both
+ i.Wait = i.Wait || i.Atomic
+
+ caps, err := i.cfg.getCapabilities()
+ if err != nil {
+ return nil, err
+ }
+
+ // special case for helm template --is-upgrade
+ isUpgrade := i.IsUpgrade && i.isDryRun()
+ options := chartutil.ReleaseOptions{
+ Name: i.ReleaseName,
+ Namespace: i.Namespace,
+ Revision: 1,
+ IsInstall: !isUpgrade,
+ IsUpgrade: isUpgrade,
+ }
+ valuesToRender, err := chartutil.ToRenderValues(chrt, vals, options, caps)
+ if err != nil {
+ return nil, err
+ }
+
+ if driver.ContainsSystemLabels(i.Labels) {
+ return nil, fmt.Errorf("user supplied labels contain system reserved label name. 
System labels: %+v", driver.GetSystemLabels()) + } + + rel := i.createRelease(chrt, vals, i.Labels) + + var manifestDoc *bytes.Buffer + rel.Hooks, manifestDoc, rel.Info.Notes, err = i.cfg.renderResources(chrt, valuesToRender, i.ReleaseName, i.OutputDir, i.SubNotes, i.UseReleaseName, i.IncludeCRDs, i.PostRenderer, interactWithRemote, i.EnableDNS) + // Even for errors, attach this if available + if manifestDoc != nil { + rel.Manifest = manifestDoc.String() + } + // Check error from render + if err != nil { + rel.SetStatus(release.StatusFailed, fmt.Sprintf("failed to render resource: %s", err.Error())) + // Return a release with partial data so that the client can show debugging information. + return rel, err + } + + // Mark this release as in-progress + rel.SetStatus(release.StatusPendingInstall, "Initial install underway") + + var toBeAdopted kube.ResourceList + resources, err := i.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), !i.DisableOpenAPIValidation) + if err != nil { + return nil, errors.Wrap(err, "unable to build kubernetes objects from release manifest") + } + + // It is safe to use "force" here because these are resources currently rendered by the chart. + err = resources.Visit(setMetadataVisitor(rel.Name, rel.Namespace, true)) + if err != nil { + return nil, err + } + + // Install requires an extra validation step of checking that resources + // don't already exist before we actually create resources. If we continue + // forward and create the release object with resources that already exist, + // we'll end up in a state where we will delete those resources upon + // deleting the release because the manifest will be pointing at that + // resource + if !i.ClientOnly && !isUpgrade && len(resources) > 0 { + toBeAdopted, err = existingResourceConflict(resources, rel.Name, rel.Namespace) + if err != nil { + return nil, errors.Wrap(err, "Unable to continue with install") + } + } + + // Bail out here if it is a dry run + if i.isDryRun() { + rel.Info.Description = "Dry run complete" + return rel, nil + } + + if i.CreateNamespace { + ns := &v1.Namespace{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Namespace", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: i.Namespace, + Labels: map[string]string{ + "name": i.Namespace, + }, + }, + } + buf, err := yaml.Marshal(ns) + if err != nil { + return nil, err + } + resourceList, err := i.cfg.KubeClient.Build(bytes.NewBuffer(buf), true) + if err != nil { + return nil, err + } + if _, err := i.cfg.KubeClient.Create(resourceList); err != nil && !apierrors.IsAlreadyExists(err) { + return nil, err + } + } + + // If Replace is true, we need to supercede the last release. + if i.Replace { + if err := i.replaceRelease(rel); err != nil { + return nil, err + } + } + + // Store the release in history before continuing (new in Helm 3). We always know + // that this is a create operation. + if err := i.cfg.Releases.Create(rel); err != nil { + // We could try to recover gracefully here, but since nothing has been installed + // yet, this is probably safer than trying to continue when we know storage is + // not working. 
+ return rel, err
+ }
+
+ rel, err = i.performInstallCtx(ctx, rel, toBeAdopted, resources)
+ if err != nil {
+ rel, err = i.failRelease(rel, err)
+ }
+ return rel, err
+}
+
+func (i *Install) performInstallCtx(ctx context.Context, rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) (*release.Release, error) {
+ type Msg struct {
+ r *release.Release
+ e error
+ }
+ resultChan := make(chan Msg, 1)
+
+ go func() {
+ rel, err := i.performInstall(rel, toBeAdopted, resources)
+ resultChan <- Msg{rel, err}
+ }()
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+ return rel, err
+ case msg := <-resultChan:
+ return msg.r, msg.e
+ }
+}
+
+// isDryRun returns true if the install is configured to run as a dry run
+func (i *Install) isDryRun() bool {
+ if i.DryRun || i.DryRunOption == "client" || i.DryRunOption == "server" || i.DryRunOption == "true" {
+ return true
+ }
+ return false
+}
+
+func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.ResourceList, resources kube.ResourceList) (*release.Release, error) {
+ var err error
+ // pre-install hooks
+ if !i.DisableHooks {
+ if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
+ return rel, fmt.Errorf("failed pre-install: %s", err)
+ }
+ }
+
+ // At this point, we can do the install. Note that before we were detecting whether to
+ // do an update, but it's not clear whether we WANT to do an update if the re-use is set
+ // to true, since that is basically an upgrade operation.
+ if len(toBeAdopted) == 0 && len(resources) > 0 {
+ _, err = i.cfg.KubeClient.Create(resources)
+ } else if len(resources) > 0 {
+ _, err = i.cfg.KubeClient.Update(toBeAdopted, resources, i.Force)
+ }
+ if err != nil {
+ return rel, err
+ }
+
+ if i.Wait {
+ if i.WaitForJobs {
+ err = i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout)
+ } else {
+ err = i.cfg.KubeClient.Wait(resources, i.Timeout)
+ }
+ if err != nil {
+ return rel, err
+ }
+ }
+
+ if !i.DisableHooks {
+ if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
+ return rel, fmt.Errorf("failed post-install: %s", err)
+ }
+ }
+
+ if len(i.Description) > 0 {
+ rel.SetStatus(release.StatusDeployed, i.Description)
+ } else {
+ rel.SetStatus(release.StatusDeployed, "Install complete")
+ }
+
+ // This is a tricky case. The release has been created, but the result
+ // cannot be recorded. The truest thing to tell the user is that the
+ // release was created. However, the user will not be able to do anything
+ // further with this release.
+ //
+ // One possible strategy would be to do a timed retry to see if we can get
+ // this stored in the future.
+ if err := i.recordRelease(rel); err != nil {
+ i.cfg.Log("failed to record the release: %s", err)
+ }
+
+ return rel, nil
+}
+
+func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) {
+ rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error()))
+ if i.Atomic {
+ i.cfg.Log("Install failed and atomic is set, uninstalling release")
+ uninstall := NewUninstall(i.cfg)
+ uninstall.DisableHooks = i.DisableHooks
+ uninstall.KeepHistory = false
+ uninstall.Timeout = i.Timeout
+ if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil {
+ return rel, errors.Wrapf(uninstallErr, "an error occurred while uninstalling the release. 
original install error: %s", err)
+ }
+ return rel, errors.Wrapf(err, "release %s failed, and has been uninstalled due to atomic being set", i.ReleaseName)
+ }
+ i.recordRelease(rel) // Ignore the error, since we have another error to deal with.
+ return rel, err
+}
+
+// availableName tests whether a name is available
+//
+// Roughly, this will return an error if name is
+//
+// - empty
+// - too long
+// - already in use, and not deleted
+// - used by a deleted release, and i.Replace is false
+func (i *Install) availableName() error {
+ start := i.ReleaseName
+
+ if err := chartutil.ValidateReleaseName(start); err != nil {
+ return errors.Wrapf(err, "release name %q", start)
+ }
+ // On dry run, bail here
+ if i.isDryRun() {
+ return nil
+ }
+
+ h, err := i.cfg.Releases.History(start)
+ if err != nil || len(h) < 1 {
+ return nil
+ }
+ releaseutil.Reverse(h, releaseutil.SortByRevision)
+ rel := h[0]
+
+ if st := rel.Info.Status; i.Replace && (st == release.StatusUninstalled || st == release.StatusFailed) {
+ return nil
+ }
+ return errors.New("cannot re-use a name that is still in use")
+}
+
+// createRelease creates a new release object
+func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{}, labels map[string]string) *release.Release {
+ ts := i.cfg.Now()
+ return &release.Release{
+ Name: i.ReleaseName,
+ Namespace: i.Namespace,
+ Chart: chrt,
+ Config: rawVals,
+ Info: &release.Info{
+ FirstDeployed: ts,
+ LastDeployed: ts,
+ Status: release.StatusUnknown,
+ },
+ Version: 1,
+ Labels: labels,
+ }
+}
+
+// recordRelease with an update operation in case reuse has been set.
+func (i *Install) recordRelease(r *release.Release) error {
+ // This is a legacy function which has been reduced to a one-liner. Could probably
+ // refactor it out.
+ return i.cfg.Releases.Update(r)
+}
+
+// replaceRelease replaces an older release with this one
+//
+// This allows us to re-use names by superseding an existing release with a new one
+func (i *Install) replaceRelease(rel *release.Release) error {
+ hist, err := i.cfg.Releases.History(rel.Name)
+ if err != nil || len(hist) == 0 {
+ // No releases exist for this name, so we can return early
+ return nil
+ }
+
+ releaseutil.Reverse(hist, releaseutil.SortByRevision)
+ last := hist[0]
+
+ // Update version to the next available
+ rel.Version = last.Version + 1
+
+ // Do not change the status of a failed release.
+ if last.Info.Status == release.StatusFailed {
+ return nil
+ }
+
+ // For any other status, mark it as superseded and store the old record
+ last.SetStatus(release.StatusSuperseded, "superseded by new release")
+ return i.recordRelease(last)
+}
+
+// writeToFile writes the given data to <output-dir>/<name>. The append argument controls whether the file is created or appended to.
+func writeToFile(outputDir string, name string, data string, append bool) error {
+ outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))
+
+ err := ensureDirectoryForFile(outfileName)
+ if err != nil {
+ return err
+ }
+
+ f, err := createOrOpenFile(outfileName, append)
+ if err != nil {
+ return err
+ }
+
+ defer f.Close()
+
+ _, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data))
+
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("wrote %s\n", outfileName)
+ return nil
+}
+
+func createOrOpenFile(filename string, append bool) (*os.File, error) {
+ if append {
+ return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0600)
+ }
+ return os.Create(filename)
+}
+
+// ensureDirectoryForFile checks whether the directory for the given file exists,
and creates it if it does not.
+func ensureDirectoryForFile(file string) error {
+ baseDir := path.Dir(file)
+ _, err := os.Stat(baseDir)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ return os.MkdirAll(baseDir, defaultDirectoryPermission)
+}
+
+// NameAndChart returns the name and chart that should be used.
+//
+// This will read the flags and handle name generation if necessary.
+func (i *Install) NameAndChart(args []string) (string, string, error) {
+ flagsNotSet := func() error {
+ if i.GenerateName {
+ return errors.New("cannot set --generate-name and also specify a name")
+ }
+ if i.NameTemplate != "" {
+ return errors.New("cannot set --name-template and also specify a name")
+ }
+ return nil
+ }
+
+ if len(args) > 2 {
+ return args[0], args[1], errors.Errorf("expected at most two arguments, unexpected arguments: %v", strings.Join(args[2:], ", "))
+ }
+
+ if len(args) == 2 {
+ return args[0], args[1], flagsNotSet()
+ }
+
+ if i.NameTemplate != "" {
+ name, err := TemplateName(i.NameTemplate)
+ return name, args[0], err
+ }
+
+ if i.ReleaseName != "" {
+ return i.ReleaseName, args[0], nil
+ }
+
+ if !i.GenerateName {
+ return "", args[0], errors.New("must either provide a name or specify --generate-name")
+ }
+
+ base := filepath.Base(args[0])
+ if base == "." || base == "" {
+ base = "chart"
+ }
+ // if present, strip out the file extension from the name
+ if idx := strings.Index(base, "."); idx != -1 {
+ base = base[0:idx]
+ }
+
+ return fmt.Sprintf("%s-%d", base, time.Now().Unix()), args[0], nil
+}
+
+// TemplateName renders a name template, returning the name or an error.
+func TemplateName(nameTemplate string) (string, error) {
+ if nameTemplate == "" {
+ return "", nil
+ }
+
+ t, err := template.New("name-template").Funcs(sprig.TxtFuncMap()).Parse(nameTemplate)
+ if err != nil {
+ return "", err
+ }
+ var b bytes.Buffer
+ if err := t.Execute(&b, nil); err != nil {
+ return "", err
+ }
+
+ return b.String(), nil
+}
+
+// CheckDependencies checks the dependencies for a chart.
+func CheckDependencies(ch *chart.Chart, reqs []*chart.Dependency) error {
+ var missing []string
+
+OUTER:
+ for _, r := range reqs {
+ for _, d := range ch.Dependencies() {
+ if d.Name() == r.Name {
+ continue OUTER
+ }
+ }
+ missing = append(missing, r.Name)
+ }
+
+ if len(missing) > 0 {
+ return errors.Errorf("found in Chart.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", "))
+ }
+ return nil
+}
+
+// LocateChart looks for a chart directory in known places, and returns either the full path or an error.
+//
+// This does not ensure that the chart is well-formed; only that the requested filename exists.
+//
+// Order of resolution:
+// - relative to current working directory
+// - if path is absolute or begins with '.', error out here
+// - URL
+//
+// If 'verify' was set on ChartPathOptions, this will attempt to also verify the chart.
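+//
+// A minimal usage sketch (added for illustration; not upstream Helm code). It
+// assumes default CLI settings and a hypothetical "repo/mychart" reference:
+//
+//	opts := &ChartPathOptions{Version: "1.2.3"}
+//	chartPath, err := opts.LocateChart("repo/mychart", cli.New())
+//	if err != nil {
+//		// not found locally, as an absolute/relative path, or in a repository
+//	}
+//	_ = chartPath // absolute path to the located (possibly downloaded) chart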
+func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (string, error) { + if registry.IsOCI(name) && c.registryClient == nil { + return "", fmt.Errorf("unable to lookup chart %q, missing registry client", name) + } + + name = strings.TrimSpace(name) + version := strings.TrimSpace(c.Version) + + if _, err := os.Stat(name); err == nil { + abs, err := filepath.Abs(name) + if err != nil { + return abs, err + } + if c.Verify { + if _, err := downloader.VerifyChart(abs, c.Keyring); err != nil { + return "", err + } + } + return abs, nil + } + if filepath.IsAbs(name) || strings.HasPrefix(name, ".") { + return name, errors.Errorf("path %q not found", name) + } + + dl := downloader.ChartDownloader{ + Out: os.Stdout, + Keyring: c.Keyring, + Getters: getter.All(settings), + Options: []getter.Option{ + getter.WithPassCredentialsAll(c.PassCredentialsAll), + getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile), + getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSverify), + getter.WithPlainHTTP(c.PlainHTTP), + }, + RepositoryConfig: settings.RepositoryConfig, + RepositoryCache: settings.RepositoryCache, + RegistryClient: c.registryClient, + } + + if registry.IsOCI(name) { + dl.Options = append(dl.Options, getter.WithRegistryClient(c.registryClient)) + } + + if c.Verify { + dl.Verify = downloader.VerifyAlways + } + if c.RepoURL != "" { + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(c.RepoURL, c.Username, c.Password, name, version, + c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, c.PassCredentialsAll, getter.All(settings)) + if err != nil { + return "", err + } + name = chartURL + + // Only pass the user/pass on when the user has said to or when the + // location of the chart repo and the chart are the same domain. + u1, err := url.Parse(c.RepoURL) + if err != nil { + return "", err + } + u2, err := url.Parse(chartURL) + if err != nil { + return "", err + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. + if c.PassCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth("", "")) + } + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) + } + + if err := os.MkdirAll(settings.RepositoryCache, 0755); err != nil { + return "", err + } + + filename, _, err := dl.DownloadTo(name, version, settings.RepositoryCache) + if err != nil { + return "", err + } + + lname, err := filepath.Abs(filename) + if err != nil { + return filename, err + } + return lname, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go b/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go new file mode 100644 index 000000000..9037782bb --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go @@ -0,0 +1,197 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "context"
+ "sync"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ "k8s.io/client-go/kubernetes"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// lazyClient is a workaround to deal with Kubernetes having an unstable client API.
+// In Kubernetes v1.18 the defaults were removed, which broke creating a
+// client without an explicit configuration. ಠ_ಠ
+type lazyClient struct {
+ // client caches an initialized kubernetes client
+ initClient sync.Once
+ client kubernetes.Interface
+ clientErr error
+
+ // clientFn loads a kubernetes client
+ clientFn func() (*kubernetes.Clientset, error)
+
+ // namespace passed to each client request
+ namespace string
+}
+
+func (s *lazyClient) init() error {
+ s.initClient.Do(func() {
+ s.client, s.clientErr = s.clientFn()
+ })
+ return s.clientErr
+}
+
+// secretClient implements a corev1.SecretInterface
+type secretClient struct{ *lazyClient }
+
+var _ corev1.SecretInterface = (*secretClient)(nil)
+
+func newSecretClient(lc *lazyClient) *secretClient {
+ return &secretClient{lazyClient: lc}
+}
+
+func (s *secretClient) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Create(ctx, secret, opts)
+}
+
+func (s *secretClient) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Update(ctx, secret, opts)
+}
+
+func (s *secretClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ if err := s.init(); err != nil {
+ return err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Delete(ctx, name, opts)
+}
+
+func (s *secretClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ if err := s.init(); err != nil {
+ return err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).DeleteCollection(ctx, opts, listOpts)
+}
+
+func (s *secretClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Get(ctx, name, opts)
+}
+
+func (s *secretClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).List(ctx, opts)
+}
+
+func (s *secretClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Watch(ctx, opts)
+}
+
+func (s *secretClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.Secret, error) {
+ if err := s.init(); err != nil {
+ return nil, err
+ }
+ return s.client.CoreV1().Secrets(s.namespace).Patch(ctx, name, pt, data, opts, subresources...)
+} + +func (s *secretClient) Apply(ctx context.Context, secretConfiguration *applycorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (*v1.Secret, error) { + if err := s.init(); err != nil { + return nil, err + } + return s.client.CoreV1().Secrets(s.namespace).Apply(ctx, secretConfiguration, opts) +} + +// configMapClient implements a corev1.ConfigMapInterface +type configMapClient struct{ *lazyClient } + +var _ corev1.ConfigMapInterface = (*configMapClient)(nil) + +func newConfigMapClient(lc *lazyClient) *configMapClient { + return &configMapClient{lazyClient: lc} +} + +func (c *configMapClient) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Create(ctx, configMap, opts) +} + +func (c *configMapClient) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Update(ctx, configMap, opts) +} + +func (c *configMapClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + if err := c.init(); err != nil { + return err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Delete(ctx, name, opts) +} + +func (c *configMapClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + if err := c.init(); err != nil { + return err + } + return c.client.CoreV1().ConfigMaps(c.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (c *configMapClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Get(ctx, name, opts) +} + +func (c *configMapClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).List(ctx, opts) +} + +func (c *configMapClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Watch(ctx, opts) +} + +func (c *configMapClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Patch(ctx, name, pt, data, opts, subresources...) +} + +func (c *configMapClient) Apply(ctx context.Context, configMap *applycorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Apply(ctx, configMap, opts) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/lint.go b/vendor/helm.sh/helm/v3/pkg/action/lint.go new file mode 100644 index 000000000..ca497f2b8 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/lint.go @@ -0,0 +1,129 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/lint"
+ "helm.sh/helm/v3/pkg/lint/support"
+)
+
+// Lint is the action for checking that the semantics of a chart are well-formed.
+//
+// It provides the implementation of 'helm lint'.
+type Lint struct {
+ Strict bool
+ Namespace string
+ WithSubcharts bool
+ Quiet bool
+ KubeVersion *chartutil.KubeVersion
+}
+
+// LintResult is the result of Lint
+type LintResult struct {
+ TotalChartsLinted int
+ Messages []support.Message
+ Errors []error
+}
+
+// NewLint creates a new Lint object with the given configuration.
+func NewLint() *Lint {
+ return &Lint{}
+}
+
+// Run executes 'helm lint' against the given chart paths.
+func (l *Lint) Run(paths []string, vals map[string]interface{}) *LintResult {
+ lowestTolerance := support.ErrorSev
+ if l.Strict {
+ lowestTolerance = support.WarningSev
+ }
+ result := &LintResult{}
+ for _, path := range paths {
+ linter, err := lintChart(path, vals, l.Namespace, l.KubeVersion)
+ if err != nil {
+ result.Errors = append(result.Errors, err)
+ continue
+ }
+
+ result.Messages = append(result.Messages, linter.Messages...)
+ result.TotalChartsLinted++
+ for _, msg := range linter.Messages {
+ if msg.Severity >= lowestTolerance {
+ result.Errors = append(result.Errors, msg.Err)
+ }
+ }
+ }
+ return result
+}
+
+// HasWarningsOrErrors checks whether a LintResult has any warnings or errors
+func HasWarningsOrErrors(result *LintResult) bool {
+ for _, msg := range result.Messages {
+ if msg.Severity > support.InfoSev {
+ return true
+ }
+ }
+ return len(result.Errors) > 0
+}
+
+func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) (support.Linter, error) {
+ var chartPath string
+ linter := support.Linter{}
+
+ if strings.HasSuffix(path, ".tgz") || strings.HasSuffix(path, ".tar.gz") {
+ tempDir, err := os.MkdirTemp("", "helm-lint")
+ if err != nil {
+ return linter, errors.Wrap(err, "unable to create temp dir to extract tarball")
+ }
+ defer os.RemoveAll(tempDir)
+
+ file, err := os.Open(path)
+ if err != nil {
+ return linter, errors.Wrap(err, "unable to open tarball")
+ }
+ defer file.Close()
+
+ if err = chartutil.Expand(tempDir, file); err != nil {
+ return linter, errors.Wrap(err, "unable to extract tarball")
+ }
+
+ files, err := os.ReadDir(tempDir)
+ if err != nil {
+ return linter, errors.Wrapf(err, "unable to read temporary output directory %s", tempDir)
+ }
+ if !files[0].IsDir() {
+ return linter, errors.Errorf("unexpected file %s in temporary output directory %s", files[0].Name(), tempDir)
+ }
+
+ chartPath = filepath.Join(tempDir, files[0].Name())
+ } else {
+ chartPath = path
+ }
+
+ // Guard: Error out if this is not a chart.
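+ // (Added note, not upstream: every valid chart must carry a Chart.yaml at
+ // its root, so probing for that file cheaply rejects non-chart directories.)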
+ if _, err := os.Stat(filepath.Join(chartPath, "Chart.yaml")); err != nil { + return linter, errors.Wrap(err, "unable to check Chart.yaml file in chart") + } + + return lint.AllWithKubeVersion(chartPath, vals, namespace, kubeVersion), nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/list.go b/vendor/helm.sh/helm/v3/pkg/action/list.go new file mode 100644 index 000000000..af0725c4a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/list.go @@ -0,0 +1,324 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "path" + "regexp" + + "k8s.io/apimachinery/pkg/labels" + + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" +) + +// ListStates represents zero or more status codes that a list item may have set +// +// Because this is used as a bitmask filter, more than one bit can be flipped +// in the ListStates. +type ListStates uint + +const ( + // ListDeployed filters on status "deployed" + ListDeployed ListStates = 1 << iota + // ListUninstalled filters on status "uninstalled" + ListUninstalled + // ListUninstalling filters on status "uninstalling" (uninstall in progress) + ListUninstalling + // ListPendingInstall filters on status "pending" (deployment in progress) + ListPendingInstall + // ListPendingUpgrade filters on status "pending_upgrade" (upgrade in progress) + ListPendingUpgrade + // ListPendingRollback filters on status "pending_rollback" (rollback in progress) + ListPendingRollback + // ListSuperseded filters on status "superseded" (historical release version that is no longer deployed) + ListSuperseded + // ListFailed filters on status "failed" (release version not deployed because of error) + ListFailed + // ListUnknown filters on an unknown status + ListUnknown +) + +// FromName takes a state name and returns a ListStates representation. +// +// Currently, there are only names for individual flipped bits, so the returned +// ListStates will only match one of the constants. However, it is possible that +// this behavior could change in the future. 
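+//
+// A small sketch of the intended use (illustrative, not upstream code):
+//
+//	var s ListStates
+//	mask := s.FromName("deployed") | s.FromName("failed")
+//	// mask now matches releases that are either deployed or failed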
+func (s ListStates) FromName(str string) ListStates { + switch str { + case "deployed": + return ListDeployed + case "uninstalled": + return ListUninstalled + case "superseded": + return ListSuperseded + case "failed": + return ListFailed + case "uninstalling": + return ListUninstalling + case "pending-install": + return ListPendingInstall + case "pending-upgrade": + return ListPendingUpgrade + case "pending-rollback": + return ListPendingRollback + } + return ListUnknown +} + +// ListAll is a convenience for enabling all list filters +const ListAll = ListDeployed | ListUninstalled | ListUninstalling | ListPendingInstall | ListPendingRollback | ListPendingUpgrade | ListSuperseded | ListFailed + +// Sorter is a top-level sort +type Sorter uint + +const ( + // ByNameDesc sorts by descending lexicographic order + ByNameDesc Sorter = iota + 1 + // ByDateAsc sorts by ascending dates (oldest updated release first) + ByDateAsc + // ByDateDesc sorts by descending dates (latest updated release first) + ByDateDesc +) + +// List is the action for listing releases. +// +// It provides, for example, the implementation of 'helm list'. +// It returns no more than one revision of every release in one specific, or in +// all, namespaces. +// To list all the revisions of a specific release, see the History action. +type List struct { + cfg *Configuration + + // All ignores the limit/offset + All bool + // AllNamespaces searches across namespaces + AllNamespaces bool + // Sort indicates the sort to use + // + // see pkg/releaseutil for several useful sorters + Sort Sorter + // Overrides the default lexicographic sorting + ByDate bool + SortReverse bool + // StateMask accepts a bitmask of states for items to show. + // The default is ListDeployed + StateMask ListStates + // Limit is the number of items to return per Run() + Limit int + // Offset is the starting index for the Run() call + Offset int + // Filter is a filter that is applied to the results + Filter string + Short bool + NoHeaders bool + TimeFormat string + Uninstalled bool + Superseded bool + Uninstalling bool + Deployed bool + Failed bool + Pending bool + Selector string +} + +// NewList constructs a new *List +func NewList(cfg *Configuration) *List { + return &List{ + StateMask: ListDeployed | ListFailed, + cfg: cfg, + } +} + +// Run executes the list command, returning a set of matches. +func (l *List) Run() ([]*release.Release, error) { + if err := l.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + var filter *regexp.Regexp + if l.Filter != "" { + var err error + filter, err = regexp.Compile(l.Filter) + if err != nil { + return nil, err + } + } + + results, err := l.cfg.Releases.List(func(rel *release.Release) bool { + // Skip anything that doesn't match the filter. + if filter != nil && !filter.MatchString(rel.Name) { + return false + } + + return true + }) + + if err != nil { + return nil, err + } + + if results == nil { + return results, nil + } + + // by definition, superseded releases are never shown if + // only the latest releases are returned. 
so if requested statemask + // is _only_ ListSuperseded, skip the latest release filter + if l.StateMask != ListSuperseded { + results = filterLatestReleases(results) + } + + // State mask application must occur after filtering to + // latest releases, otherwise outdated entries can be returned + results = l.filterStateMask(results) + + // Skip anything that doesn't match the selector + selectorObj, err := labels.Parse(l.Selector) + if err != nil { + return nil, err + } + results = l.filterSelector(results, selectorObj) + + // Unfortunately, we have to sort before truncating, which can incur substantial overhead + l.sort(results) + + // Guard on offset + if l.Offset >= len(results) { + return []*release.Release{}, nil + } + + // Calculate the limit and offset, and then truncate results if necessary. + limit := len(results) + if l.Limit > 0 && l.Limit < limit { + limit = l.Limit + } + last := l.Offset + limit + if l := len(results); l < last { + last = l + } + results = results[l.Offset:last] + + return results, err +} + +// sort is an in-place sort where order is based on the value of a.Sort +func (l *List) sort(rels []*release.Release) { + if l.SortReverse { + l.Sort = ByNameDesc + } + + if l.ByDate { + l.Sort = ByDateDesc + if l.SortReverse { + l.Sort = ByDateAsc + } + } + + switch l.Sort { + case ByDateDesc: + releaseutil.SortByDate(rels) + case ByDateAsc: + releaseutil.Reverse(rels, releaseutil.SortByDate) + case ByNameDesc: + releaseutil.Reverse(rels, releaseutil.SortByName) + default: + releaseutil.SortByName(rels) + } +} + +// filterLatestReleases returns a list scrubbed of old releases. +func filterLatestReleases(releases []*release.Release) []*release.Release { + latestReleases := make(map[string]*release.Release) + + for _, rls := range releases { + name, namespace := rls.Name, rls.Namespace + key := path.Join(namespace, name) + if latestRelease, exists := latestReleases[key]; exists && latestRelease.Version > rls.Version { + continue + } + latestReleases[key] = rls + } + + var list = make([]*release.Release, 0, len(latestReleases)) + for _, rls := range latestReleases { + list = append(list, rls) + } + return list +} + +func (l *List) filterStateMask(releases []*release.Release) []*release.Release { + desiredStateReleases := make([]*release.Release, 0) + + for _, rls := range releases { + currentStatus := l.StateMask.FromName(rls.Info.Status.String()) + mask := l.StateMask & currentStatus + if mask == 0 { + continue + } + desiredStateReleases = append(desiredStateReleases, rls) + } + + return desiredStateReleases +} + +func (l *List) filterSelector(releases []*release.Release, selector labels.Selector) []*release.Release { + desiredStateReleases := make([]*release.Release, 0) + + for _, rls := range releases { + if selector.Matches(labels.Set(rls.Labels)) { + desiredStateReleases = append(desiredStateReleases, rls) + } + } + + return desiredStateReleases +} + +// SetStateMask calculates the state mask based on parameters. 
+func (l *List) SetStateMask() {
+ if l.All {
+ l.StateMask = ListAll
+ return
+ }
+
+ state := ListStates(0)
+ if l.Deployed {
+ state |= ListDeployed
+ }
+ if l.Uninstalled {
+ state |= ListUninstalled
+ }
+ if l.Uninstalling {
+ state |= ListUninstalling
+ }
+ if l.Pending {
+ state |= ListPendingInstall | ListPendingRollback | ListPendingUpgrade
+ }
+ if l.Failed {
+ state |= ListFailed
+ }
+ if l.Superseded {
+ state |= ListSuperseded
+ }
+
+ // Apply a default
+ if state == 0 {
+ state = ListDeployed | ListFailed
+ }
+
+ l.StateMask = state
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/package.go b/vendor/helm.sh/helm/v3/pkg/action/package.go
new file mode 100644
index 000000000..b79fcb54f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/package.go
@@ -0,0 +1,181 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "syscall"
+
+ "github.com/Masterminds/semver/v3"
+ "github.com/pkg/errors"
+ "golang.org/x/term"
+
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/chartutil"
+ "helm.sh/helm/v3/pkg/provenance"
+)
+
+// Package is the action for packaging a chart.
+//
+// It provides the implementation of 'helm package'.
+type Package struct {
+ Sign bool
+ Key string
+ Keyring string
+ PassphraseFile string
+ Version string
+ AppVersion string
+ Destination string
+ DependencyUpdate bool
+
+ RepositoryConfig string
+ RepositoryCache string
+}
+
+// NewPackage creates a new Package object with the given configuration.
+func NewPackage() *Package {
+ return &Package{}
+}
+
+// Run executes 'helm package' against the given chart and returns the path to the packaged chart.
+func (p *Package) Run(path string, _ map[string]interface{}) (string, error) {
+ ch, err := loader.LoadDir(path)
+ if err != nil {
+ return "", err
+ }
+
+ // If version is set, modify the version.
+ if p.Version != "" {
+ ch.Metadata.Version = p.Version
+ }
+
+ if err := validateVersion(ch.Metadata.Version); err != nil {
+ return "", err
+ }
+
+ if p.AppVersion != "" {
+ ch.Metadata.AppVersion = p.AppVersion
+ }
+
+ if reqs := ch.Metadata.Dependencies; reqs != nil {
+ if err := CheckDependencies(ch, reqs); err != nil {
+ return "", err
+ }
+ }
+
+ var dest string
+ if p.Destination == "." {
+ // Save to the current working directory.
+ dest, err = os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ } else {
+ // Otherwise save to the set destination
+ dest = p.Destination
+ }
+
+ name, err := chartutil.Save(ch, dest)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to save")
+ }
+
+ if p.Sign {
+ err = p.Clearsign(name)
+ }
+
+ return name, err
+}
+
+// validateVersion verifies that the given version string is valid SemVer, and errors out if it is not.
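+//
+// For instance (illustrative): validateVersion("1.2.3") returns nil, while
+// validateVersion("not-a-version") returns a semver parse error.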
+func validateVersion(ver string) error { + if _, err := semver.NewVersion(ver); err != nil { + return err + } + return nil +} + +// Clearsign signs a chart +func (p *Package) Clearsign(filename string) error { + // Load keyring + signer, err := provenance.NewFromKeyring(p.Keyring, p.Key) + if err != nil { + return err + } + + passphraseFetcher := promptUser + if p.PassphraseFile != "" { + passphraseFetcher, err = passphraseFileFetcher(p.PassphraseFile, os.Stdin) + if err != nil { + return err + } + } + + if err := signer.DecryptKey(passphraseFetcher); err != nil { + return err + } + + sig, err := signer.ClearSign(filename) + if err != nil { + return err + } + + return os.WriteFile(filename+".prov", []byte(sig), 0644) +} + +// promptUser implements provenance.PassphraseFetcher +func promptUser(name string) ([]byte, error) { + fmt.Printf("Password for key %q > ", name) + // syscall.Stdin is not an int in all environments and needs to be coerced + // into one there (e.g., Windows) + pw, err := term.ReadPassword(int(syscall.Stdin)) + fmt.Println() + return pw, err +} + +func passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) { + file, err := openPassphraseFile(passphraseFile, stdin) + if err != nil { + return nil, err + } + defer file.Close() + + reader := bufio.NewReader(file) + passphrase, _, err := reader.ReadLine() + if err != nil { + return nil, err + } + return func(name string) ([]byte, error) { + return passphrase, nil + }, nil +} + +func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) { + if passphraseFile == "-" { + stat, err := stdin.Stat() + if err != nil { + return nil, err + } + if (stat.Mode() & os.ModeNamedPipe) == 0 { + return nil, errors.New("specified reading passphrase from stdin, without input on stdin") + } + return stdin, nil + } + return os.Open(passphraseFile) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/pull.go b/vendor/helm.sh/helm/v3/pkg/action/pull.go new file mode 100644 index 000000000..787553125 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/pull.go @@ -0,0 +1,172 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/downloader" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" +) + +// Pull is the action for checking a given release's information. +// +// It provides the implementation of 'helm pull'. +type Pull struct { + ChartPathOptions + + Settings *cli.EnvSettings // TODO: refactor this out of pkg/action + + Devel bool + Untar bool + VerifyLater bool + UntarDir string + DestDir string + cfg *Configuration +} + +type PullOpt func(*Pull) + +func WithConfig(cfg *Configuration) PullOpt { + return func(p *Pull) { + p.cfg = cfg + } +} + +// NewPull creates a new Pull object. 
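+//
+// A minimal sketch (added for illustration; assumes cfg is a *Configuration
+// wired with a registry client, and default CLI settings via cli.New()):
+//
+//	p := NewPullWithOpts(WithConfig(cfg))
+//	p.Settings = cli.New()
+//	p.DestDir = "."
+//	out, err := p.Run("repo/mychart")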
+func NewPull() *Pull { + return NewPullWithOpts() +} + +// NewPullWithOpts creates a new pull, with configuration options. +func NewPullWithOpts(opts ...PullOpt) *Pull { + p := &Pull{} + for _, fn := range opts { + fn(p) + } + + return p +} + +// SetRegistryClient sets the registry client on the pull configuration object. +func (p *Pull) SetRegistryClient(client *registry.Client) { + p.cfg.RegistryClient = client +} + +// Run executes 'helm pull' against the given release. +func (p *Pull) Run(chartRef string) (string, error) { + var out strings.Builder + + c := downloader.ChartDownloader{ + Out: &out, + Keyring: p.Keyring, + Verify: downloader.VerifyNever, + Getters: getter.All(p.Settings), + Options: []getter.Option{ + getter.WithBasicAuth(p.Username, p.Password), + getter.WithPassCredentialsAll(p.PassCredentialsAll), + getter.WithTLSClientConfig(p.CertFile, p.KeyFile, p.CaFile), + getter.WithInsecureSkipVerifyTLS(p.InsecureSkipTLSverify), + getter.WithPlainHTTP(p.PlainHTTP), + }, + RegistryClient: p.cfg.RegistryClient, + RepositoryConfig: p.Settings.RepositoryConfig, + RepositoryCache: p.Settings.RepositoryCache, + } + + if registry.IsOCI(chartRef) { + c.Options = append(c.Options, + getter.WithRegistryClient(p.cfg.RegistryClient)) + c.RegistryClient = p.cfg.RegistryClient + } + + if p.Verify { + c.Verify = downloader.VerifyAlways + } else if p.VerifyLater { + c.Verify = downloader.VerifyLater + } + + // If untar is set, we fetch to a tempdir, then untar and copy after + // verification. + dest := p.DestDir + if p.Untar { + var err error + dest, err = os.MkdirTemp("", "helm-") + if err != nil { + return out.String(), errors.Wrap(err, "failed to untar") + } + defer os.RemoveAll(dest) + } + + if p.RepoURL != "" { + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, p.PassCredentialsAll, getter.All(p.Settings)) + if err != nil { + return out.String(), err + } + chartRef = chartURL + } + + saved, v, err := c.DownloadTo(chartRef, p.Version, dest) + if err != nil { + return out.String(), err + } + + if p.Verify { + for name := range v.SignedBy.Identities { + fmt.Fprintf(&out, "Signed by: %v\n", name) + } + fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", v.SignedBy.PrimaryKey.Fingerprint) + fmt.Fprintf(&out, "Chart Hash Verified: %s\n", v.FileHash) + } + + // After verification, untar the chart into the requested directory. + if p.Untar { + ud := p.UntarDir + if !filepath.IsAbs(ud) { + ud = filepath.Join(p.DestDir, ud) + } + // Let udCheck to check conflict file/dir without replacing ud when untarDir is the current directory(.). + udCheck := ud + if udCheck == "." { + _, udCheck = filepath.Split(chartRef) + } else { + _, chartName := filepath.Split(chartRef) + udCheck = filepath.Join(udCheck, chartName) + } + + if _, err := os.Stat(udCheck); err != nil { + if err := os.MkdirAll(udCheck, 0755); err != nil { + return out.String(), errors.Wrap(err, "failed to untar (mkdir)") + } + + } else { + return out.String(), errors.Errorf("failed to untar: a file or directory with the name %s already exists", udCheck) + } + + return out.String(), chartutil.ExpandFile(ud, saved) + } + return out.String(), nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/push.go b/vendor/helm.sh/helm/v3/pkg/action/push.go new file mode 100644 index 000000000..68d2ba42d --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/push.go @@ -0,0 +1,112 @@ +/* +Copyright The Helm Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "io" + "strings" + + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/pusher" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/uploader" +) + +// Push is the action for uploading a chart. +// +// It provides the implementation of 'helm push'. +type Push struct { + Settings *cli.EnvSettings + cfg *Configuration + certFile string + keyFile string + caFile string + insecureSkipTLSverify bool + plainHTTP bool + out io.Writer +} + +// PushOpt is a type of function that sets options for a push action. +type PushOpt func(*Push) + +// WithPushConfig sets the cfg field on the push configuration object. +func WithPushConfig(cfg *Configuration) PushOpt { + return func(p *Push) { + p.cfg = cfg + } +} + +// WithTLSClientConfig sets the certFile, keyFile, and caFile fields on the push configuration object. +func WithTLSClientConfig(certFile, keyFile, caFile string) PushOpt { + return func(p *Push) { + p.certFile = certFile + p.keyFile = keyFile + p.caFile = caFile + } +} + +// WithInsecureSkipTLSVerify determines if a TLS Certificate will be checked +func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) PushOpt { + return func(p *Push) { + p.insecureSkipTLSverify = insecureSkipTLSVerify + } +} + +// WithPlainHTTP configures the use of plain HTTP connections. +func WithPlainHTTP(plainHTTP bool) PushOpt { + return func(p *Push) { + p.plainHTTP = plainHTTP + } +} + +// WithOptWriter sets the registryOut field on the push configuration object. +func WithPushOptWriter(out io.Writer) PushOpt { + return func(p *Push) { + p.out = out + } +} + +// NewPushWithOpts creates a new push, with configuration options. +func NewPushWithOpts(opts ...PushOpt) *Push { + p := &Push{} + for _, fn := range opts { + fn(p) + } + return p +} + +// Run executes 'helm push' against the given chart archive. +func (p *Push) Run(chartRef string, remote string) (string, error) { + var out strings.Builder + + c := uploader.ChartUploader{ + Out: &out, + Pushers: pusher.All(p.Settings), + Options: []pusher.Option{ + pusher.WithTLSClientConfig(p.certFile, p.keyFile, p.caFile), + pusher.WithInsecureSkipTLSVerify(p.insecureSkipTLSverify), + pusher.WithPlainHTTP(p.plainHTTP), + }, + } + + if registry.IsOCI(remote) { + // Don't use the default registry client if tls options are set. + c.Options = append(c.Options, pusher.WithRegistryClient(p.cfg.RegistryClient)) + } + + return out.String(), c.UploadTo(chartRef, remote) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/registry_login.go b/vendor/helm.sh/helm/v3/pkg/action/registry_login.go new file mode 100644 index 000000000..cd144e1e7 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/registry_login.go @@ -0,0 +1,88 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "io"
+
+ "helm.sh/helm/v3/pkg/registry"
+)
+
+// RegistryLogin performs a registry login operation.
+type RegistryLogin struct {
+ cfg *Configuration
+ certFile string
+ keyFile string
+ caFile string
+ insecure bool
+}
+
+type RegistryLoginOpt func(*RegistryLogin) error
+
+// WithCertFile specifies the path to the certificate file to use for TLS.
+func WithCertFile(certFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.certFile = certFile
+ return nil
+ }
+}
+
+// WithInsecure specifies whether to skip verification of certificates when communicating.
+func WithInsecure(insecure bool) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.insecure = insecure
+ return nil
+ }
+}
+
+// WithKeyFile specifies the path to the key file to use for TLS.
+func WithKeyFile(keyFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.keyFile = keyFile
+ return nil
+ }
+}
+
+// WithCAFile specifies the path to the CA file to use for TLS.
+func WithCAFile(caFile string) RegistryLoginOpt {
+ return func(r *RegistryLogin) error {
+ r.caFile = caFile
+ return nil
+ }
+}
+
+// NewRegistryLogin creates a new RegistryLogin object with the given configuration.
+func NewRegistryLogin(cfg *Configuration) *RegistryLogin {
+ return &RegistryLogin{
+ cfg: cfg,
+ }
+}
+
+// Run executes the registry login operation
+func (a *RegistryLogin) Run(_ io.Writer, hostname string, username string, password string, opts ...RegistryLoginOpt) error {
+ for _, opt := range opts {
+ if err := opt(a); err != nil {
+ return err
+ }
+ }
+
+ return a.cfg.RegistryClient.Login(
+ hostname,
+ registry.LoginOptBasicAuth(username, password),
+ registry.LoginOptInsecure(a.insecure),
+ registry.LoginOptTLSClientConfig(a.certFile, a.keyFile, a.caFile))
+}
diff --git a/vendor/k8s.io/helm/pkg/renderutil/doc.go b/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go
similarity index 53%
rename from vendor/k8s.io/helm/pkg/renderutil/doc.go
rename to vendor/helm.sh/helm/v3/pkg/action/registry_logout.go
index 38c3ae60d..7ce92defc 100644
--- a/vendor/k8s.io/helm/pkg/renderutil/doc.go
+++ b/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go
@@ -14,11 +14,25 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-/*Package renderutil contains tools related to the local rendering of charts.
+package action
-Local rendering means rendering without the tiller; this is generally used for
-local debugging and testing (see the `helm template` command for examples of
-use). This package will not render charts exactly the same way as the tiller
-will, but will be generally close enough for local debug purposes.
-*/
-package renderutil // import "k8s.io/helm/pkg/renderutil"
+import (
+ "io"
+)
+
+// RegistryLogout performs a registry logout operation.
+type RegistryLogout struct {
+ cfg *Configuration
+}
+
+// NewRegistryLogout creates a new RegistryLogout object with the given configuration.
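+//
+// For example (illustrative, not upstream code):
+//
+//	logout := NewRegistryLogout(cfg) // cfg is an initialized *Configuration
+//	err := logout.Run(io.Discard, "registry.example.com")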
+func NewRegistryLogout(cfg *Configuration) *RegistryLogout { + return &RegistryLogout{ + cfg: cfg, + } +} + +// Run executes the registry logout operation +func (a *RegistryLogout) Run(_ io.Writer, hostname string) error { + return a.cfg.RegistryClient.Logout(hostname) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/release_testing.go b/vendor/helm.sh/helm/v3/pkg/action/release_testing.go new file mode 100644 index 000000000..3c10cecf8 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/release_testing.go @@ -0,0 +1,152 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "context" + "fmt" + "io" + "sort" + "time" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" +) + +const ( + ExcludeNameFilter = "!name" + IncludeNameFilter = "name" +) + +// ReleaseTesting is the action for testing a release. +// +// It provides the implementation of 'helm test'. +type ReleaseTesting struct { + cfg *Configuration + Timeout time.Duration + // Used for fetching logs from test pods + Namespace string + Filters map[string][]string +} + +// NewReleaseTesting creates a new ReleaseTesting object with the given configuration. +func NewReleaseTesting(cfg *Configuration) *ReleaseTesting { + return &ReleaseTesting{ + cfg: cfg, + Filters: map[string][]string{}, + } +} + +// Run executes 'helm test' against the given release. +func (r *ReleaseTesting) Run(name string) (*release.Release, error) { + if err := r.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, errors.Errorf("releaseTest: Release name is invalid: %s", name) + } + + // finds the non-deleted release with the given name + rel, err := r.cfg.Releases.Last(name) + if err != nil { + return rel, err + } + + skippedHooks := []*release.Hook{} + executingHooks := []*release.Hook{} + if len(r.Filters[ExcludeNameFilter]) != 0 { + for _, h := range rel.Hooks { + if contains(r.Filters[ExcludeNameFilter], h.Name) { + skippedHooks = append(skippedHooks, h) + } else { + executingHooks = append(executingHooks, h) + } + } + rel.Hooks = executingHooks + } + if len(r.Filters[IncludeNameFilter]) != 0 { + executingHooks = nil + for _, h := range rel.Hooks { + if contains(r.Filters[IncludeNameFilter], h.Name) { + executingHooks = append(executingHooks, h) + } else { + skippedHooks = append(skippedHooks, h) + } + } + rel.Hooks = executingHooks + } + + if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil { + rel.Hooks = append(skippedHooks, rel.Hooks...) + r.cfg.Releases.Update(rel) + return rel, err + } + + rel.Hooks = append(skippedHooks, rel.Hooks...) + return rel, r.cfg.Releases.Update(rel) +} + +// GetPodLogs will write the logs for all test pods in the given release into +// the given writer. 
These can be immediately output to the user or captured for
+// other uses.
+func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error {
+ client, err := r.cfg.KubernetesClientSet()
+ if err != nil {
+ return errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+ }
+
+ hooksByWeight := append([]*release.Hook{}, rel.Hooks...)
+ sort.Stable(hookByWeight(hooksByWeight))
+ for _, h := range hooksByWeight {
+ for _, e := range h.Events {
+ if e == release.HookTest {
+ if contains(r.Filters[ExcludeNameFilter], h.Name) {
+ continue
+ }
+ if len(r.Filters[IncludeNameFilter]) > 0 && !contains(r.Filters[IncludeNameFilter], h.Name) {
+ continue
+ }
+ req := client.CoreV1().Pods(r.Namespace).GetLogs(h.Name, &v1.PodLogOptions{})
+ logReader, err := req.Stream(context.Background())
+ if err != nil {
+ return errors.Wrapf(err, "unable to get pod logs for %s", h.Name)
+ }
+
+ fmt.Fprintf(out, "POD LOGS: %s\n", h.Name)
+ _, err = io.Copy(out, logReader)
+ fmt.Fprintln(out)
+ if err != nil {
+ return errors.Wrapf(err, "unable to write pod logs for %s", h.Name)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func contains(arr []string, value string) bool {
+ for _, item := range arr {
+ if item == value {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go b/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go
new file mode 100644
index 000000000..63e83f3d9
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+ "strings"
+
+ "helm.sh/helm/v3/pkg/kube"
+ "helm.sh/helm/v3/pkg/releaseutil"
+)
+
+func filterManifestsToKeep(manifests []releaseutil.Manifest) (keep, remaining []releaseutil.Manifest) {
+ for _, m := range manifests {
+ if m.Head.Metadata == nil || m.Head.Metadata.Annotations == nil || len(m.Head.Metadata.Annotations) == 0 {
+ remaining = append(remaining, m)
+ continue
+ }
+
+ resourcePolicyType, ok := m.Head.Metadata.Annotations[kube.ResourcePolicyAnno]
+ if !ok {
+ remaining = append(remaining, m)
+ continue
+ }
+
+ resourcePolicyType = strings.ToLower(strings.TrimSpace(resourcePolicyType))
+ if resourcePolicyType == kube.KeepPolicy {
+ keep = append(keep, m)
+ }
+ }
+ return keep, remaining
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/rollback.go b/vendor/helm.sh/helm/v3/pkg/action/rollback.go
new file mode 100644
index 000000000..b0be17d13
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/rollback.go
@@ -0,0 +1,265 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" + helmtime "helm.sh/helm/v3/pkg/time" +) + +// Rollback is the action for rolling back to a given release. +// +// It provides the implementation of 'helm rollback'. +type Rollback struct { + cfg *Configuration + + Version int + Timeout time.Duration + Wait bool + WaitForJobs bool + DisableHooks bool + DryRun bool + Recreate bool // will (if true) recreate pods after a rollback. + Force bool // will (if true) force resource upgrade through uninstall/recreate if needed + CleanupOnFail bool + MaxHistory int // MaxHistory limits the maximum number of revisions saved per release +} + +// NewRollback creates a new Rollback object with the given configuration. +func NewRollback(cfg *Configuration) *Rollback { + return &Rollback{ + cfg: cfg, + } +} + +// Run executes 'helm rollback' against the given release. +func (r *Rollback) Run(name string) error { + if err := r.cfg.KubeClient.IsReachable(); err != nil { + return err + } + + r.cfg.Releases.MaxHistory = r.MaxHistory + + r.cfg.Log("preparing rollback of %s", name) + currentRelease, targetRelease, err := r.prepareRollback(name) + if err != nil { + return err + } + + if !r.DryRun { + r.cfg.Log("creating rolled back release for %s", name) + if err := r.cfg.Releases.Create(targetRelease); err != nil { + return err + } + } + + r.cfg.Log("performing rollback of %s", name) + if _, err := r.performRollback(currentRelease, targetRelease); err != nil { + return err + } + + if !r.DryRun { + r.cfg.Log("updating status for rolled back release for %s", name) + if err := r.cfg.Releases.Update(targetRelease); err != nil { + return err + } + } + return nil +} + +// prepareRollback finds the previous release and prepares a new release object with +// the previous release's configuration +func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) { + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, nil, errors.Errorf("prepareRollback: Release name is invalid: %s", name) + } + + if r.Version < 0 { + return nil, nil, errInvalidRevision + } + + currentRelease, err := r.cfg.Releases.Last(name) + if err != nil { + return nil, nil, err + } + + previousVersion := r.Version + if r.Version == 0 { + previousVersion = currentRelease.Version - 1 + } + + historyReleases, err := r.cfg.Releases.History(name) + if err != nil { + return nil, nil, err + } + + // Check if the history version to be rolled back exists + previousVersionExist := false + for _, historyRelease := range historyReleases { + version := historyRelease.Version + if previousVersion == version { + previousVersionExist = true + break + } + } + if !previousVersionExist { + return nil, nil, errors.Errorf("release has no %d version", previousVersion) + } + + r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion) + + previousRelease, err := r.cfg.Releases.Get(name, previousVersion) + if err != nil { + return nil, nil, err + } + + // Store a new release object with previous release's configuration + targetRelease := &release.Release{ + Name: name, + Namespace: currentRelease.Namespace, + Chart: previousRelease.Chart, + Config: previousRelease.Config, + Info: &release.Info{ + FirstDeployed: currentRelease.Info.FirstDeployed, + LastDeployed: 
helmtime.Now(), + Status: release.StatusPendingRollback, + Notes: previousRelease.Info.Notes, + // Because we lose the reference to previous version elsewhere, we set the + // message here, and only override it later if we experience failure. + Description: fmt.Sprintf("Rollback to %d", previousVersion), + }, + Version: currentRelease.Version + 1, + Labels: previousRelease.Labels, + Manifest: previousRelease.Manifest, + Hooks: previousRelease.Hooks, + } + + return currentRelease, targetRelease, nil +} + +func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) { + if r.DryRun { + r.cfg.Log("dry run for %s", targetRelease.Name) + return targetRelease, nil + } + + current, err := r.cfg.KubeClient.Build(bytes.NewBufferString(currentRelease.Manifest), false) + if err != nil { + return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest") + } + target, err := r.cfg.KubeClient.Build(bytes.NewBufferString(targetRelease.Manifest), false) + if err != nil { + return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest") + } + + // pre-rollback hooks + if !r.DisableHooks { + if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil { + return targetRelease, err + } + } else { + r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name) + } + + // It is safe to use "force" here because these are resources currently rendered by the chart. + err = target.Visit(setMetadataVisitor(targetRelease.Name, targetRelease.Namespace, true)) + if err != nil { + return targetRelease, errors.Wrap(err, "unable to set metadata visitor from target release") + } + results, err := r.cfg.KubeClient.Update(current, target, r.Force) + + if err != nil { + msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err) + r.cfg.Log("warning: %s", msg) + currentRelease.Info.Status = release.StatusSuperseded + targetRelease.Info.Status = release.StatusFailed + targetRelease.Info.Description = msg + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + if r.CleanupOnFail { + r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created)) + _, errs := r.cfg.KubeClient.Delete(results.Created) + if errs != nil { + var errorList []string + for _, e := range errs { + errorList = append(errorList, e.Error()) + } + return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err) + } + r.cfg.Log("Resource cleanup complete") + } + return targetRelease, err + } + + if r.Recreate { + // NOTE: Because this is not critical for a release to succeed, we just + // log if an error occurs and continue onward. 
If we ever introduce log + // levels, we should make these error level logs so users are notified + // that they'll need to go do the cleanup on their own + if err := recreate(r.cfg, results.Updated); err != nil { + r.cfg.Log(err.Error()) + } + } + + if r.Wait { + if r.WaitForJobs { + if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil { + targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) + } + } else { + if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil { + targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error())) + r.cfg.recordRelease(currentRelease) + r.cfg.recordRelease(targetRelease) + return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name) + } + } + } + + // post-rollback hooks + if !r.DisableHooks { + if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil { + return targetRelease, err + } + } + + deployed, err := r.cfg.Releases.DeployedAll(currentRelease.Name) + if err != nil && !strings.Contains(err.Error(), "has no deployed releases") { + return nil, err + } + // Supersede all previous deployments, see issue #2941. + for _, rel := range deployed { + r.cfg.Log("superseding previous deployment %d", rel.Version) + rel.Info.Status = release.StatusSuperseded + r.cfg.recordRelease(rel) + } + + targetRelease.Info.Status = release.StatusDeployed + + return targetRelease, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/show.go b/vendor/helm.sh/helm/v3/pkg/action/show.go new file mode 100644 index 000000000..6ed855b83 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/show.go @@ -0,0 +1,165 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package action + +import ( + "bytes" + "fmt" + "strings" + + "github.com/pkg/errors" + "k8s.io/cli-runtime/pkg/printers" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/registry" +) + +// ShowOutputFormat is the format of the output of `helm show` +type ShowOutputFormat string + +const ( + // ShowAll is the format which shows all the information of a chart + ShowAll ShowOutputFormat = "all" + // ShowChart is the format which only shows the chart's definition + ShowChart ShowOutputFormat = "chart" + // ShowValues is the format which only shows the chart's values + ShowValues ShowOutputFormat = "values" + // ShowReadme is the format which only shows the chart's README + ShowReadme ShowOutputFormat = "readme" + // ShowCRDs is the format which only shows the chart's CRDs + ShowCRDs ShowOutputFormat = "crds" +) + +var readmeFileNames = []string{"readme.md", "readme.txt", "readme"} + +func (o ShowOutputFormat) String() string { + return string(o) +} + +// Show is the action for checking a given release's information. +// +// It provides the implementation of 'helm show' and its respective subcommands. +type Show struct { + ChartPathOptions + Devel bool + OutputFormat ShowOutputFormat + JSONPathTemplate string + chart *chart.Chart // for testing +} + +// NewShow creates a new Show object with the given configuration. +// Deprecated: Use NewShowWithConfig +// TODO Helm 4: Fold NewShowWithConfig back into NewShow +func NewShow(output ShowOutputFormat) *Show { + return &Show{ + OutputFormat: output, + } +} + +// NewShowWithConfig creates a new Show object with the given configuration. +func NewShowWithConfig(output ShowOutputFormat, cfg *Configuration) *Show { + sh := &Show{ + OutputFormat: output, + } + sh.ChartPathOptions.registryClient = cfg.RegistryClient + + return sh +} + +// SetRegistryClient sets the registry client to use when pulling a chart from a registry. +func (s *Show) SetRegistryClient(client *registry.Client) { + s.ChartPathOptions.registryClient = client +} + +// Run executes 'helm show' against the given release. 
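+//
+// A minimal usage sketch; cfg and the chart path are illustrative
+// assumptions:
+//
+// show := action.NewShowWithConfig(action.ShowValues, cfg)
+// out, err := show.Run("mychart-0.1.0.tgz")
+// if err != nil {
+// return err
+// }
+// fmt.Print(out)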
+func (s *Show) Run(chartpath string) (string, error) { + if s.chart == nil { + chrt, err := loader.Load(chartpath) + if err != nil { + return "", err + } + s.chart = chrt + } + cf, err := yaml.Marshal(s.chart.Metadata) + if err != nil { + return "", err + } + + var out strings.Builder + if s.OutputFormat == ShowChart || s.OutputFormat == ShowAll { + fmt.Fprintf(&out, "%s\n", cf) + } + + if (s.OutputFormat == ShowValues || s.OutputFormat == ShowAll) && s.chart.Values != nil { + if s.OutputFormat == ShowAll { + fmt.Fprintln(&out, "---") + } + if s.JSONPathTemplate != "" { + printer, err := printers.NewJSONPathPrinter(s.JSONPathTemplate) + if err != nil { + return "", errors.Wrapf(err, "error parsing jsonpath %s", s.JSONPathTemplate) + } + printer.Execute(&out, s.chart.Values) + } else { + for _, f := range s.chart.Raw { + if f.Name == chartutil.ValuesfileName { + fmt.Fprintln(&out, string(f.Data)) + } + } + } + } + + if s.OutputFormat == ShowReadme || s.OutputFormat == ShowAll { + readme := findReadme(s.chart.Files) + if readme != nil { + if s.OutputFormat == ShowAll { + fmt.Fprintln(&out, "---") + } + fmt.Fprintf(&out, "%s\n", readme.Data) + } + } + + if s.OutputFormat == ShowCRDs || s.OutputFormat == ShowAll { + crds := s.chart.CRDObjects() + if len(crds) > 0 { + if s.OutputFormat == ShowAll && !bytes.HasPrefix(crds[0].File.Data, []byte("---")) { + fmt.Fprintln(&out, "---") + } + for _, crd := range crds { + fmt.Fprintf(&out, "%s\n", string(crd.File.Data)) + } + } + } + return out.String(), nil +} + +func findReadme(files []*chart.File) (file *chart.File) { + for _, file := range files { + for _, n := range readmeFileNames { + if file == nil { + continue + } + if strings.EqualFold(file.Name, n) { + return file + } + } + } + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/status.go b/vendor/helm.sh/helm/v3/pkg/action/status.go new file mode 100644 index 000000000..ee1c9d613 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/status.go @@ -0,0 +1,95 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "errors" + + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/release" +) + +// Status is the action for checking the deployment status of releases. +// +// It provides the implementation of 'helm status'. +type Status struct { + cfg *Configuration + + Version int + + // If true, display description to output format, + // only affect print type table. + // TODO Helm 4: Remove this flag and output the description by default. + ShowDescription bool + + // ShowResources sets if the resources should be retrieved with the status. + // TODO Helm 4: Remove this flag and output the resources by default. + ShowResources bool + + // ShowResourcesTable is used with ShowResources. When true this will cause + // the resulting objects to be retrieved as a kind=table. + ShowResourcesTable bool +} + +// NewStatus creates a new Status object with the given configuration. 
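+//
+// A minimal usage sketch; cfg and the release name are illustrative
+// assumptions:
+//
+// status := action.NewStatus(cfg)
+// status.ShowResources = true
+// rel, err := status.Run("my-release")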
+func NewStatus(cfg *Configuration) *Status { + return &Status{ + cfg: cfg, + } +} + +// Run executes 'helm status' against the given release. +func (s *Status) Run(name string) (*release.Release, error) { + if err := s.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + if !s.ShowResources { + return s.cfg.releaseContent(name, s.Version) + } + + rel, err := s.cfg.releaseContent(name, s.Version) + if err != nil { + return nil, err + } + + if kubeClient, ok := s.cfg.KubeClient.(kube.InterfaceResources); ok { + var resources kube.ResourceList + if s.ShowResourcesTable { + resources, err = kubeClient.BuildTable(bytes.NewBufferString(rel.Manifest), false) + if err != nil { + return nil, err + } + } else { + resources, err = s.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), false) + if err != nil { + return nil, err + } + } + + resp, err := kubeClient.Get(resources, true) + if err != nil { + return nil, err + } + + rel.Info.Resources = resp + + return rel, nil + } + return nil, errors.New("unable to get kubeClient with interface InterfaceResources") +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/uninstall.go b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go new file mode 100644 index 000000000..40d82243e --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go @@ -0,0 +1,251 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "strings" + "time" + + "github.com/pkg/errors" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + helmtime "helm.sh/helm/v3/pkg/time" +) + +// Uninstall is the action for uninstalling releases. +// +// It provides the implementation of 'helm uninstall'. +type Uninstall struct { + cfg *Configuration + + DisableHooks bool + DryRun bool + IgnoreNotFound bool + KeepHistory bool + Wait bool + DeletionPropagation string + Timeout time.Duration + Description string +} + +// NewUninstall creates a new Uninstall object with the given configuration. +func NewUninstall(cfg *Configuration) *Uninstall { + return &Uninstall{ + cfg: cfg, + } +} + +// Run uninstalls the given release. 
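+//
+// A minimal usage sketch; cfg and the release name are illustrative
+// assumptions:
+//
+// uninstall := action.NewUninstall(cfg)
+// uninstall.Timeout = 5 * time.Minute
+// res, err := uninstall.Run("my-release")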
+func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) { + if err := u.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + if u.DryRun { + // In the dry run case, just see if the release exists + r, err := u.cfg.releaseContent(name, 0) + if err != nil { + return &release.UninstallReleaseResponse{}, err + } + return &release.UninstallReleaseResponse{Release: r}, nil + } + + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, errors.Errorf("uninstall: Release name is invalid: %s", name) + } + + rels, err := u.cfg.Releases.History(name) + if err != nil { + if u.IgnoreNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "uninstall: Release not loaded: %s", name) + } + if len(rels) < 1 { + return nil, errMissingRelease + } + + releaseutil.SortByRevision(rels) + rel := rels[len(rels)-1] + + // TODO: Are there any cases where we want to force a delete even if it's + // already marked deleted? + if rel.Info.Status == release.StatusUninstalled { + if !u.KeepHistory { + if err := u.purgeReleases(rels...); err != nil { + return nil, errors.Wrap(err, "uninstall: Failed to purge the release") + } + return &release.UninstallReleaseResponse{Release: rel}, nil + } + return nil, errors.Errorf("the release named %q is already deleted", name) + } + + u.cfg.Log("uninstall: Deleting %s", name) + rel.Info.Status = release.StatusUninstalling + rel.Info.Deleted = helmtime.Now() + rel.Info.Description = "Deletion in progress (or silently failed)" + res := &release.UninstallReleaseResponse{Release: rel} + + if !u.DisableHooks { + if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil { + return res, err + } + } else { + u.cfg.Log("delete hooks disabled for %s", name) + } + + // From here on out, the release is currently considered to be in StatusUninstalling + // state. + if err := u.cfg.Releases.Update(rel); err != nil { + u.cfg.Log("uninstall: Failed to store updated release: %s", err) + } + + deletedResources, kept, errs := u.deleteRelease(rel) + if errs != nil { + u.cfg.Log("uninstall: Failed to delete release: %s", errs) + return nil, errors.Errorf("failed to delete release: %s", name) + } + + if kept != "" { + kept = "These resources were kept due to the resource policy:\n" + kept + } + res.Info = kept + + if u.Wait { + if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceExt); ok { + if err := kubeClient.WaitForDelete(deletedResources, u.Timeout); err != nil { + errs = append(errs, err) + } + } + } + + if !u.DisableHooks { + if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil { + errs = append(errs, err) + } + } + + rel.Info.Status = release.StatusUninstalled + if len(u.Description) > 0 { + rel.Info.Description = u.Description + } else { + rel.Info.Description = "Uninstallation complete" + } + + if !u.KeepHistory { + u.cfg.Log("purge requested for %s", name) + err := u.purgeReleases(rels...) 
+ if err != nil {
+ errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release"))
+ }
+
+ // Return the errors that occurred while deleting the release, if any
+ if len(errs) > 0 {
+ return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs))
+ }
+
+ return res, nil
+ }
+
+ if err := u.cfg.Releases.Update(rel); err != nil {
+ u.cfg.Log("uninstall: Failed to store updated release: %s", err)
+ }
+
+ if len(errs) > 0 {
+ return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs))
+ }
+ return res, nil
+}
+
+func (u *Uninstall) purgeReleases(rels ...*release.Release) error {
+ for _, rel := range rels {
+ if _, err := u.cfg.Releases.Delete(rel.Name, rel.Version); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func joinErrors(errs []error) string {
+ es := make([]string, 0, len(errs))
+ for _, e := range errs {
+ es = append(es, e.Error())
+ }
+ return strings.Join(es, "; ")
+}
+
+// deleteRelease deletes the release and returns a list of deleted resources and the manifests that were kept during deletion
+func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, string, []error) {
+ var errs []error
+ caps, err := u.cfg.getCapabilities()
+ if err != nil {
+ return nil, rel.Manifest, []error{errors.Wrap(err, "could not get apiVersions from Kubernetes")}
+ }
+
+ manifests := releaseutil.SplitManifests(rel.Manifest)
+ _, files, err := releaseutil.SortManifests(manifests, caps.APIVersions, releaseutil.UninstallOrder)
+ if err != nil {
+ // We could instead just delete everything in no particular order.
+ // FIXME: One way to delete at this point would be to try a label-based
+ // deletion. The problem with this is that we could get a false positive
+ // and delete something that was not legitimately part of this release.
+ return nil, rel.Manifest, []error{errors.Wrap(err, "corrupted release record.
You must manually delete the resources")} + } + + filesToKeep, filesToDelete := filterManifestsToKeep(files) + var kept string + for _, f := range filesToKeep { + kept += "[" + f.Head.Kind + "] " + f.Head.Metadata.Name + "\n" + } + + var builder strings.Builder + for _, file := range filesToDelete { + builder.WriteString("\n---\n" + file.Content) + } + + resources, err := u.cfg.KubeClient.Build(strings.NewReader(builder.String()), false) + if err != nil { + return nil, "", []error{errors.Wrap(err, "unable to build kubernetes objects for delete")} + } + if len(resources) > 0 { + if kubeClient, ok := u.cfg.KubeClient.(kube.InterfaceDeletionPropagation); ok { + _, errs = kubeClient.DeleteWithPropagationPolicy(resources, parseCascadingFlag(u.cfg, u.DeletionPropagation)) + return resources, kept, errs + } + _, errs = u.cfg.KubeClient.Delete(resources) + } + return resources, kept, errs +} + +func parseCascadingFlag(cfg *Configuration, cascadingFlag string) v1.DeletionPropagation { + switch cascadingFlag { + case "orphan": + return v1.DeletePropagationOrphan + case "foreground": + return v1.DeletePropagationForeground + case "background": + return v1.DeletePropagationBackground + default: + cfg.Log("uninstall: given cascade value: %s, defaulting to delete propagation background", cascadingFlag) + return v1.DeletePropagationBackground + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/upgrade.go b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go new file mode 100644 index 000000000..ffb7538a6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go @@ -0,0 +1,627 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bytes" + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/resource" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/postrender" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/storage/driver" +) + +// Upgrade is the action for upgrading releases. +// +// It provides the implementation of 'helm upgrade'. +type Upgrade struct { + cfg *Configuration + + ChartPathOptions + + // Install is a purely informative flag that indicates whether this upgrade was done in "install" mode. + // + // Applications may use this to determine whether this Upgrade operation was done as part of a + // pure upgrade (Upgrade.Install == false) or as part of an install-or-upgrade operation + // (Upgrade.Install == true). + // + // Setting this to `true` will NOT cause `Upgrade` to perform an install if the release does not exist. + // That process must be handled by creating an Install action directly. See cmd/upgrade.go for an + // example of how this flag is used. + Install bool + // Devel indicates that the operation is done in devel mode. 
+ Devel bool
+ // Namespace is the namespace in which this operation should be performed.
+ Namespace string
+ // SkipCRDs skips installing CRDs when install flag is enabled during upgrade
+ SkipCRDs bool
+ // Timeout is the timeout for this operation
+ Timeout time.Duration
+ // Wait determines whether the wait operation should be performed after the upgrade is requested.
+ Wait bool
+ // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested.
+ WaitForJobs bool
+ // DisableHooks disables hook processing if set to true.
+ DisableHooks bool
+ // DryRun controls whether the operation is prepared, but not executed.
+ DryRun bool
+ // DryRunOption controls whether the operation is prepared but not executed, with options on whether or not to interact with the remote cluster.
+ DryRunOption string
+ // Force will, if set to `true`, ignore certain warnings and perform the upgrade anyway.
+ //
+ // This should be used with caution.
+ Force bool
+ // ResetValues will reset the values to the chart's built-ins rather than merging with existing.
+ ResetValues bool
+ // ReuseValues will re-use the user's last supplied values.
+ ReuseValues bool
+ // ResetThenReuseValues will reset the values to the chart's built-ins then merge with user's last supplied values.
+ ResetThenReuseValues bool
+ // Recreate will (if true) recreate pods after a rollback.
+ Recreate bool
+ // MaxHistory limits the maximum number of revisions saved per release
+ MaxHistory int
+ // Atomic, if true, will roll back on failure.
+ Atomic bool
+ // CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update.
+ CleanupOnFail bool
+ // SubNotes determines whether sub-notes are rendered in the chart.
+ SubNotes bool
+ // Description is the description of this operation
+ Description string
+ Labels map[string]string
+ // PostRender is an optional post-renderer
+ //
+ // If this is non-nil, then after templates are rendered, they will be sent to the
+ // post renderer before sending to the Kubernetes API server.
+ PostRenderer postrender.PostRenderer
+ // DisableOpenAPIValidation controls whether OpenAPI validation is enforced.
+ DisableOpenAPIValidation bool
+ // Get missing dependencies
+ DependencyUpdate bool
+ // Lock to control race conditions when the process receives a SIGTERM
+ Lock sync.Mutex
+ // Enable DNS lookups when rendering templates
+ EnableDNS bool
+}
+
+type resultMessage struct {
+ r *release.Release
+ e error
+}
+
+// NewUpgrade creates a new Upgrade object with the given configuration.
+func NewUpgrade(cfg *Configuration) *Upgrade {
+ up := &Upgrade{
+ cfg: cfg,
+ }
+ up.ChartPathOptions.registryClient = cfg.RegistryClient
+
+ return up
+}
+
+// SetRegistryClient sets the registry client to use when fetching charts.
+func (u *Upgrade) SetRegistryClient(client *registry.Client) {
+ u.ChartPathOptions.registryClient = client
+}
+
+// Run executes the upgrade on the given release.
+func (u *Upgrade) Run(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+ ctx := context.Background()
+ return u.RunWithContext(ctx, name, chart, vals)
+}
+
+// RunWithContext executes the upgrade on the given release with context.
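+//
+// A minimal usage sketch; cfg, the loaded chart, and the values map are
+// illustrative assumptions:
+//
+// up := action.NewUpgrade(cfg)
+// up.Namespace = "default"
+// up.Atomic = true
+// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+// defer cancel()
+// rel, err := up.RunWithContext(ctx, "my-release", chrt, vals)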
+func (u *Upgrade) RunWithContext(ctx context.Context, name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) { + if err := u.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + // Make sure if Atomic is set, that wait is set as well. This makes it so + // the user doesn't have to specify both + u.Wait = u.Wait || u.Atomic + + if err := chartutil.ValidateReleaseName(name); err != nil { + return nil, errors.Errorf("release name is invalid: %s", name) + } + + u.cfg.Log("preparing upgrade for %s", name) + currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals) + if err != nil { + return nil, err + } + + u.cfg.Releases.MaxHistory = u.MaxHistory + + u.cfg.Log("performing update for %s", name) + res, err := u.performUpgrade(ctx, currentRelease, upgradedRelease) + if err != nil { + return res, err + } + + // Do not update for dry runs + if !u.isDryRun() { + u.cfg.Log("updating status for upgraded release for %s", name) + if err := u.cfg.Releases.Update(upgradedRelease); err != nil { + return res, err + } + } + + return res, nil +} + +// isDryRun returns true if Upgrade is set to run as a DryRun +func (u *Upgrade) isDryRun() bool { + if u.DryRun || u.DryRunOption == "client" || u.DryRunOption == "server" || u.DryRunOption == "true" { + return true + } + return false +} + +// prepareUpgrade builds an upgraded release for an upgrade operation. +func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, *release.Release, error) { + if chart == nil { + return nil, nil, errMissingChart + } + + // finds the last non-deleted release with the given name + lastRelease, err := u.cfg.Releases.Last(name) + if err != nil { + // to keep existing behavior of returning the "%q has no deployed releases" error when an existing release does not exist + if errors.Is(err, driver.ErrReleaseNotFound) { + return nil, nil, driver.NewErrNoDeployedReleases(name) + } + return nil, nil, err + } + + // Concurrent `helm upgrade`s will either fail here with `errPending` or when creating the release with "already exists". This should act as a pessimistic lock. + if lastRelease.Info.Status.IsPending() { + return nil, nil, errPending + } + + var currentRelease *release.Release + if lastRelease.Info.Status == release.StatusDeployed { + // no need to retrieve the last deployed release from storage as the last release is deployed + currentRelease = lastRelease + } else { + // finds the deployed release with the given name + currentRelease, err = u.cfg.Releases.Deployed(name) + if err != nil { + if errors.Is(err, driver.ErrNoDeployedReleases) && + (lastRelease.Info.Status == release.StatusFailed || lastRelease.Info.Status == release.StatusSuperseded) { + currentRelease = lastRelease + } else { + return nil, nil, err + } + } + } + + // determine if values will be reused + vals, err = u.reuseValues(chart, currentRelease, vals) + if err != nil { + return nil, nil, err + } + + if err := chartutil.ProcessDependenciesWithMerge(chart, vals); err != nil { + return nil, nil, err + } + + // Increment revision count. This is passed to templates, and also stored on + // the release object. 
+ revision := lastRelease.Version + 1
+
+ options := chartutil.ReleaseOptions{
+ Name: name,
+ Namespace: currentRelease.Namespace,
+ Revision: revision,
+ IsUpgrade: true,
+ }
+
+ caps, err := u.cfg.getCapabilities()
+ if err != nil {
+ return nil, nil, err
+ }
+ valuesToRender, err := chartutil.ToRenderValues(chart, vals, options, caps)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Determine whether or not to interact with remote
+ var interactWithRemote bool
+ if !u.isDryRun() || u.DryRunOption == "server" || u.DryRunOption == "none" || u.DryRunOption == "false" {
+ interactWithRemote = true
+ }
+
+ hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, u.PostRenderer, interactWithRemote, u.EnableDNS)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if driver.ContainsSystemLabels(u.Labels) {
+ return nil, nil, fmt.Errorf("user supplied labels contain a system reserved label name. System labels: %+v", driver.GetSystemLabels())
+ }
+
+ // Store an upgraded release.
+ upgradedRelease := &release.Release{
+ Name: name,
+ Namespace: currentRelease.Namespace,
+ Chart: chart,
+ Config: vals,
+ Info: &release.Info{
+ FirstDeployed: currentRelease.Info.FirstDeployed,
+ LastDeployed: Timestamper(),
+ Status: release.StatusPendingUpgrade,
+ Description: "Preparing upgrade", // This should be overwritten later.
+ },
+ Version: revision,
+ Manifest: manifestDoc.String(),
+ Hooks: hooks,
+ Labels: mergeCustomLabels(lastRelease.Labels, u.Labels),
+ }
+
+ if len(notesTxt) > 0 {
+ upgradedRelease.Info.Notes = notesTxt
+ }
+ err = validateManifest(u.cfg.KubeClient, manifestDoc.Bytes(), !u.DisableOpenAPIValidation)
+ return currentRelease, upgradedRelease, err
+}
+
+func (u *Upgrade) performUpgrade(ctx context.Context, originalRelease, upgradedRelease *release.Release) (*release.Release, error) {
+ current, err := u.cfg.KubeClient.Build(bytes.NewBufferString(originalRelease.Manifest), false)
+ if err != nil {
+ // Checking for removed Kubernetes API error so we can provide a more informative error message to the user
+ // Ref: https://github.com/helm/helm/issues/7219
+ if strings.Contains(err.Error(), "unable to recognize \"\": no matches for kind") {
+ return upgradedRelease, errors.Wrap(err, "current release manifest contains removed kubernetes api(s) for this "+
+ "kubernetes version and it is therefore unable to build the kubernetes "+
+ "objects for performing the diff. error from kubernetes")
+ }
+ return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest")
+ }
+ target, err := u.cfg.KubeClient.Build(bytes.NewBufferString(upgradedRelease.Manifest), !u.DisableOpenAPIValidation)
+ if err != nil {
+ return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
+ }
+
+ // It is safe to use force only on target because these are resources currently rendered by the chart.
+ err = target.Visit(setMetadataVisitor(upgradedRelease.Name, upgradedRelease.Namespace, true))
+ if err != nil {
+ return upgradedRelease, err
+ }
+
+ // Do a basic diff using gvk + name to figure out what new resources are being created so we can validate they don't already exist
+ existingResources := make(map[string]bool)
+ for _, r := range current {
+ existingResources[objectKey(r)] = true
+ }
+
+ var toBeCreated kube.ResourceList
+ for _, r := range target {
+ if !existingResources[objectKey(r)] {
+ toBeCreated = append(toBeCreated, r)
+ }
+ }
+
+ toBeUpdated, err := existingResourceConflict(toBeCreated, upgradedRelease.Name, upgradedRelease.Namespace)
+ if err != nil {
+ return nil, errors.Wrap(err, "Unable to continue with update")
+ }
+
+ toBeUpdated.Visit(func(r *resource.Info, err error) error {
+ if err != nil {
+ return err
+ }
+ current.Append(r)
+ return nil
+ })
+
+ // Return early if this is a dry run
+ if u.isDryRun() {
+ u.cfg.Log("dry run for %s", upgradedRelease.Name)
+ if len(u.Description) > 0 {
+ upgradedRelease.Info.Description = u.Description
+ } else {
+ upgradedRelease.Info.Description = "Dry run complete"
+ }
+ return upgradedRelease, nil
+ }
+
+ u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name)
+ if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
+ return nil, err
+ }
+ rChan := make(chan resultMessage)
+ ctxChan := make(chan resultMessage)
+ doneChan := make(chan interface{})
+ defer close(doneChan)
+ go u.releasingUpgrade(rChan, upgradedRelease, current, target, originalRelease)
+ go u.handleContext(ctx, doneChan, ctxChan, upgradedRelease)
+ select {
+ case result := <-rChan:
+ return result.r, result.e
+ case result := <-ctxChan:
+ return result.r, result.e
+ }
+}
+
+// reportToPerformUpgrade locks the Mutex before reporting a result. This is
+// important when the atomic flag is set: the upgrade can finish before the
+// rollback is finished, so it is necessary to wait for the rollback to
+// complete. The rollback is triggered by the function failRelease.
+func (u *Upgrade) reportToPerformUpgrade(c chan<- resultMessage, rel *release.Release, created kube.ResourceList, err error) {
+ u.Lock.Lock()
+ if err != nil {
+ rel, err = u.failRelease(rel, created, err)
+ }
+ c <- resultMessage{r: rel, e: err}
+ u.Lock.Unlock()
+}
+
+// handleContext sets up a listener for context cancellation (e.g. on SIGINT and SIGTERM).
+func (u *Upgrade) handleContext(ctx context.Context, done chan interface{}, c chan<- resultMessage, upgradedRelease *release.Release) {
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+
+ // When the atomic flag is set, the ongoing release finishes first and doesn't leave time for the rollback to happen.
+ u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, err) + case <-done: + return + } +} +func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *release.Release, current kube.ResourceList, target kube.ResourceList, originalRelease *release.Release) { + // pre-upgrade hooks + + if !u.DisableHooks { + if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil { + u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err)) + return + } + } else { + u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name) + } + + results, err := u.cfg.KubeClient.Update(current, target, u.Force) + if err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } + + if u.Recreate { + // NOTE: Because this is not critical for a release to succeed, we just + // log if an error occurs and continue onward. If we ever introduce log + // levels, we should make these error level logs so users are notified + // that they'll need to go do the cleanup on their own + if err := recreate(u.cfg, results.Updated); err != nil { + u.cfg.Log(err.Error()) + } + } + + if u.Wait { + u.cfg.Log( + "waiting for release %s resources (created: %d updated: %d deleted: %d)", + upgradedRelease.Name, len(results.Created), len(results.Updated), len(results.Deleted)) + if u.WaitForJobs { + if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } + } else { + if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil { + u.cfg.recordRelease(originalRelease) + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err) + return + } + } + } + + // post-upgrade hooks + if !u.DisableHooks { + if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil { + u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err)) + return + } + } + + originalRelease.Info.Status = release.StatusSuperseded + u.cfg.recordRelease(originalRelease) + + upgradedRelease.Info.Status = release.StatusDeployed + if len(u.Description) > 0 { + upgradedRelease.Info.Description = u.Description + } else { + upgradedRelease.Info.Description = "Upgrade complete" + } + u.reportToPerformUpgrade(c, upgradedRelease, nil, nil) +} + +func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) { + msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err) + u.cfg.Log("warning: %s", msg) + + rel.Info.Status = release.StatusFailed + rel.Info.Description = msg + u.cfg.recordRelease(rel) + if u.CleanupOnFail && len(created) > 0 { + u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created)) + _, errs := u.cfg.KubeClient.Delete(created) + if errs != nil { + var errorList []string + for _, e := range errs { + errorList = append(errorList, e.Error()) + } + return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err) + } + u.cfg.Log("Resource cleanup complete") + } + if u.Atomic { + u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release") + + // As a protection, get the last successful release before rollback. 
+ // If there are no successful releases, bail out + hist := NewHistory(u.cfg) + fullHistory, herr := hist.Run(rel.Name) + if herr != nil { + return rel, errors.Wrapf(herr, "an error occurred while finding last successful release. original upgrade error: %s", err) + } + + // There isn't a way to tell if a previous release was successful, but + // generally failed releases do not get superseded unless the next + // release is successful, so this should be relatively safe + filteredHistory := releaseutil.FilterFunc(func(r *release.Release) bool { + return r.Info.Status == release.StatusSuperseded || r.Info.Status == release.StatusDeployed + }).Filter(fullHistory) + if len(filteredHistory) == 0 { + return rel, errors.Wrap(err, "unable to find a previously successful release when attempting to rollback. original upgrade error") + } + + releaseutil.Reverse(filteredHistory, releaseutil.SortByRevision) + + rollin := NewRollback(u.cfg) + rollin.Version = filteredHistory[0].Version + rollin.Wait = true + rollin.WaitForJobs = u.WaitForJobs + rollin.DisableHooks = u.DisableHooks + rollin.Recreate = u.Recreate + rollin.Force = u.Force + rollin.Timeout = u.Timeout + if rollErr := rollin.Run(rel.Name); rollErr != nil { + return rel, errors.Wrapf(rollErr, "an error occurred while rolling back the release. original upgrade error: %s", err) + } + return rel, errors.Wrapf(err, "release %s failed, and has been rolled back due to atomic being set", rel.Name) + } + + return rel, err +} + +// reuseValues copies values from the current release to a new release if the +// new release does not have any values. +// +// If the request already has values, or if there are no values in the current +// release, this does nothing. +// +// This is skipped if the u.ResetValues flag is set, in which case the +// request values are not altered. +func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) { + if u.ResetValues { + // If ResetValues is set, we completely ignore current.Config. + u.cfg.Log("resetting values to the chart's original version") + return newVals, nil + } + + // If the ReuseValues flag is set, we always copy the old values over the new config's values. + if u.ReuseValues { + u.cfg.Log("reusing the old release's values") + + // We have to regenerate the old coalesced values: + oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config) + if err != nil { + return nil, errors.Wrap(err, "failed to rebuild old values") + } + + newVals = chartutil.CoalesceTables(newVals, current.Config) + + chart.Values = oldVals + + return newVals, nil + } + + // If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values. + if u.ResetThenReuseValues { + u.cfg.Log("merging values from old release to new values") + + newVals = chartutil.CoalesceTables(newVals, current.Config) + + return newVals, nil + } + + if len(newVals) == 0 && len(current.Config) > 0 { + u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version) + newVals = current.Config + } + return newVals, nil +} + +func validateManifest(c kube.Interface, manifest []byte, openAPIValidation bool) error { + _, err := c.Build(bytes.NewReader(manifest), openAPIValidation) + return err +} + +// recreate captures all the logic for recreating pods for both upgrade and +// rollback. 
If we end up refactoring rollback to use upgrade, this can just be
+// made an unexported method on the upgrade action.
+func recreate(cfg *Configuration, resources kube.ResourceList) error {
+ for _, res := range resources {
+ versioned := kube.AsVersioned(res)
+ selector, err := kube.SelectorsForObject(versioned)
+ if err != nil {
+ // If no selector is returned, it means this object is
+ // definitely not a pod, so continue onward
+ continue
+ }
+
+ client, err := cfg.KubernetesClientSet()
+ if err != nil {
+ return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+ }
+
+ pods, err := client.CoreV1().Pods(res.Namespace).List(context.Background(), metav1.ListOptions{
+ LabelSelector: selector.String(),
+ })
+ if err != nil {
+ return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+ }
+
+ // Restart pods
+ for _, pod := range pods.Items {
+ // Delete each pod to get it restarted with the changed spec.
+ if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, *metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil {
+ return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+ }
+ }
+ }
+ return nil
+}
+
+func objectKey(r *resource.Info) string {
+ gvk := r.Object.GetObjectKind().GroupVersionKind()
+ return fmt.Sprintf("%s/%s/%s/%s", gvk.GroupVersion().String(), gvk.Kind, r.Namespace, r.Name)
+}
+
+func mergeCustomLabels(current, desired map[string]string) map[string]string {
+ labels := mergeStrStrMaps(current, desired)
+ for k, v := range labels {
+ if v == "null" {
+ delete(labels, k)
+ }
+ }
+ return labels
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/validate.go b/vendor/helm.sh/helm/v3/pkg/action/validate.go
new file mode 100644
index 000000000..73eb1937b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/validate.go
@@ -0,0 +1,184 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package action + +import ( + "fmt" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/resource" + + "helm.sh/helm/v3/pkg/kube" +) + +var accessor = meta.NewAccessor() + +const ( + appManagedByLabel = "app.kubernetes.io/managed-by" + appManagedByHelm = "Helm" + helmReleaseNameAnnotation = "meta.helm.sh/release-name" + helmReleaseNamespaceAnnotation = "meta.helm.sh/release-namespace" +) + +func existingResourceConflict(resources kube.ResourceList, releaseName, releaseNamespace string) (kube.ResourceList, error) { + var requireUpdate kube.ResourceList + + err := resources.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + helper := resource.NewHelper(info.Client, info.Mapping) + existing, err := helper.Get(info.Namespace, info.Name) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return errors.Wrapf(err, "could not get information about the resource %s", resourceString(info)) + } + + // Allow adoption of the resource if it is managed by Helm and is annotated with correct release name and namespace. + if err := checkOwnership(existing, releaseName, releaseNamespace); err != nil { + return fmt.Errorf("%s exists and cannot be imported into the current release: %s", resourceString(info), err) + } + + requireUpdate.Append(info) + return nil + }) + + return requireUpdate, err +} + +func checkOwnership(obj runtime.Object, releaseName, releaseNamespace string) error { + lbls, err := accessor.Labels(obj) + if err != nil { + return err + } + annos, err := accessor.Annotations(obj) + if err != nil { + return err + } + + var errs []error + if err := requireValue(lbls, appManagedByLabel, appManagedByHelm); err != nil { + errs = append(errs, fmt.Errorf("label validation error: %s", err)) + } + if err := requireValue(annos, helmReleaseNameAnnotation, releaseName); err != nil { + errs = append(errs, fmt.Errorf("annotation validation error: %s", err)) + } + if err := requireValue(annos, helmReleaseNamespaceAnnotation, releaseNamespace); err != nil { + errs = append(errs, fmt.Errorf("annotation validation error: %s", err)) + } + + if len(errs) > 0 { + err := errors.New("invalid ownership metadata") + for _, e := range errs { + err = fmt.Errorf("%w; %s", err, e) + } + return err + } + + return nil +} + +func requireValue(meta map[string]string, k, v string) error { + actual, ok := meta[k] + if !ok { + return fmt.Errorf("missing key %q: must be set to %q", k, v) + } + if actual != v { + return fmt.Errorf("key %q must equal %q: current value is %q", k, v, actual) + } + return nil +} + +// setMetadataVisitor adds release tracking metadata to all resources. If force is enabled, existing +// ownership metadata will be overwritten. Otherwise an error will be returned if any resource has an +// existing and conflicting value for the managed by label or Helm release/namespace annotations. 
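+//
+// A minimal usage sketch; resources is assumed to be a kube.ResourceList built
+// from a release manifest, and the name and namespace are illustrative:
+//
+// err := resources.Visit(setMetadataVisitor("my-release", "default", false))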
+func setMetadataVisitor(releaseName, releaseNamespace string, force bool) resource.VisitorFunc { + return func(info *resource.Info, err error) error { + if err != nil { + return err + } + + if !force { + if err := checkOwnership(info.Object, releaseName, releaseNamespace); err != nil { + return fmt.Errorf("%s cannot be owned: %s", resourceString(info), err) + } + } + + if err := mergeLabels(info.Object, map[string]string{ + appManagedByLabel: appManagedByHelm, + }); err != nil { + return fmt.Errorf( + "%s labels could not be updated: %s", + resourceString(info), err, + ) + } + + if err := mergeAnnotations(info.Object, map[string]string{ + helmReleaseNameAnnotation: releaseName, + helmReleaseNamespaceAnnotation: releaseNamespace, + }); err != nil { + return fmt.Errorf( + "%s annotations could not be updated: %s", + resourceString(info), err, + ) + } + + return nil + } +} + +func resourceString(info *resource.Info) string { + _, k := info.Mapping.GroupVersionKind.ToAPIVersionAndKind() + return fmt.Sprintf( + "%s %q in namespace %q", + k, info.Name, info.Namespace, + ) +} + +func mergeLabels(obj runtime.Object, labels map[string]string) error { + current, err := accessor.Labels(obj) + if err != nil { + return err + } + return accessor.SetLabels(obj, mergeStrStrMaps(current, labels)) +} + +func mergeAnnotations(obj runtime.Object, annotations map[string]string) error { + current, err := accessor.Annotations(obj) + if err != nil { + return err + } + return accessor.SetAnnotations(obj, mergeStrStrMaps(current, annotations)) +} + +// merge two maps, always taking the value on the right +func mergeStrStrMaps(current, desired map[string]string) map[string]string { + result := make(map[string]string) + for k, v := range current { + result[k] = v + } + for k, desiredVal := range desired { + result[k] = desiredVal + } + return result +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/verify.go b/vendor/helm.sh/helm/v3/pkg/action/verify.go new file mode 100644 index 000000000..f36239496 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/verify.go @@ -0,0 +1,59 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "fmt" + "strings" + + "helm.sh/helm/v3/pkg/downloader" +) + +// Verify is the action for building a given chart's Verify tree. +// +// It provides the implementation of 'helm verify'. +type Verify struct { + Keyring string + Out string +} + +// NewVerify creates a new Verify object with the given configuration. +func NewVerify() *Verify { + return &Verify{} +} + +// Run executes 'helm verify'. 
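+//
+// A rough usage sketch (the chart path and keyring are placeholder values):
+//
+//	v := NewVerify()
+//	v.Keyring = "pubring.gpg"
+//	if err := v.Run("mychart-0.1.0.tgz"); err != nil {
+//		return err
+//	}
+//	fmt.Print(v.Out)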
+func (v *Verify) Run(chartfile string) error {
+	var out strings.Builder
+	p, err := downloader.VerifyChart(chartfile, v.Keyring)
+	if err != nil {
+		return err
+	}
+
+	for name := range p.SignedBy.Identities {
+		fmt.Fprintf(&out, "Signed by: %v\n", name)
+	}
+	fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", p.SignedBy.PrimaryKey.Fingerprint)
+	fmt.Fprintf(&out, "Chart Hash Verified: %s\n", p.FileHash)
+
+	// TODO(mattfarina): The output is set as a property rather than returned
+	// to maintain the Go API. In Helm v4 this function should return the out
+	// and the property on the struct can be removed.
+	v.Out = out.String()
+
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/chart.go b/vendor/helm.sh/helm/v3/pkg/chart/chart.go
new file mode 100644
index 000000000..a3bed63a3
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/chart.go
@@ -0,0 +1,173 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import (
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+// APIVersionV1 is the API version number for version 1.
+const APIVersionV1 = "v1"
+
+// APIVersionV2 is the API version number for version 2.
+const APIVersionV2 = "v2"
+
+// aliasNameFormat defines the characters that are legal in an alias name.
+var aliasNameFormat = regexp.MustCompile("^[a-zA-Z0-9_-]+$")
+
+// Chart is a helm package that contains metadata, a default config, zero or more
+// optionally parameterizable templates, and zero or more charts (dependencies).
+type Chart struct {
+	// Raw contains the raw contents of the files originally contained in the chart archive.
+	//
+	// This should not be used except in special cases like `helm show values`,
+	// where we want to display the raw values, comments and all.
+	Raw []*File `json:"-"`
+	// Metadata is the contents of the Chartfile.
+	Metadata *Metadata `json:"metadata"`
+	// Lock is the contents of Chart.lock.
+	Lock *Lock `json:"lock"`
+	// Templates for this chart.
+	Templates []*File `json:"templates"`
+	// Values are default config for this chart.
+	Values map[string]interface{} `json:"values"`
+	// Schema is an optional JSON schema for imposing structure on Values
+	Schema []byte `json:"schema"`
+	// Files are miscellaneous files in a chart archive,
+	// e.g. README, LICENSE, etc.
+	Files []*File `json:"files"`
+
+	parent       *Chart
+	dependencies []*Chart
+}
+
+// CRD describes a CRD file found in the crds/ directory of a chart.
+type CRD struct {
+	// Name is the File.Name for the crd file
+	Name string
+	// Filename is the File obj Name including (sub-)chart.ChartFullPath
+	Filename string
+	// File is the File obj for the crd
+	File *File
+}
+
+// SetDependencies replaces the chart dependencies.
+func (ch *Chart) SetDependencies(charts ...*Chart) {
+	ch.dependencies = nil
+	ch.AddDependency(charts...)
+}
+
+// Name returns the name of the chart.
+func (ch *Chart) Name() string {
+	if ch.Metadata == nil {
+		return ""
+	}
+	return ch.Metadata.Name
+}
+
+// AddDependency adds one or more charts as dependencies of this chart and
+// marks this chart as their parent, turning each of them into a subchart.
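+//
+// A small sketch (both charts are assumed to be already loaded):
+//
+//	parent.AddDependency(child)
+//	child.Parent() == parent // true
+//	child.IsRoot()           // false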
+func (ch *Chart) AddDependency(charts ...*Chart) { + for i, x := range charts { + charts[i].parent = ch + ch.dependencies = append(ch.dependencies, x) + } +} + +// Root finds the root chart. +func (ch *Chart) Root() *Chart { + if ch.IsRoot() { + return ch + } + return ch.Parent().Root() +} + +// Dependencies are the charts that this chart depends on. +func (ch *Chart) Dependencies() []*Chart { return ch.dependencies } + +// IsRoot determines if the chart is the root chart. +func (ch *Chart) IsRoot() bool { return ch.parent == nil } + +// Parent returns a subchart's parent chart. +func (ch *Chart) Parent() *Chart { return ch.parent } + +// ChartPath returns the full path to this chart in dot notation. +func (ch *Chart) ChartPath() string { + if !ch.IsRoot() { + return ch.Parent().ChartPath() + "." + ch.Name() + } + return ch.Name() +} + +// ChartFullPath returns the full path to this chart. +func (ch *Chart) ChartFullPath() string { + if !ch.IsRoot() { + return ch.Parent().ChartFullPath() + "/charts/" + ch.Name() + } + return ch.Name() +} + +// Validate validates the metadata. +func (ch *Chart) Validate() error { + return ch.Metadata.Validate() +} + +// AppVersion returns the appversion of the chart. +func (ch *Chart) AppVersion() string { + if ch.Metadata == nil { + return "" + } + return ch.Metadata.AppVersion +} + +// CRDs returns a list of File objects in the 'crds/' directory of a Helm chart. +// Deprecated: use CRDObjects() +func (ch *Chart) CRDs() []*File { + files := []*File{} + // Find all resources in the crds/ directory + for _, f := range ch.Files { + if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) { + files = append(files, f) + } + } + // Get CRDs from dependencies, too. + for _, dep := range ch.Dependencies() { + files = append(files, dep.CRDs()...) + } + return files +} + +// CRDObjects returns a list of CRD objects in the 'crds/' directory of a Helm chart & subcharts +func (ch *Chart) CRDObjects() []CRD { + crds := []CRD{} + // Find all resources in the crds/ directory + for _, f := range ch.Files { + if strings.HasPrefix(f.Name, "crds/") && hasManifestExtension(f.Name) { + mycrd := CRD{Name: f.Name, Filename: filepath.Join(ch.ChartFullPath(), f.Name), File: f} + crds = append(crds, mycrd) + } + } + // Get CRDs from dependencies, too. + for _, dep := range ch.Dependencies() { + crds = append(crds, dep.CRDObjects()...) + } + return crds +} + +func hasManifestExtension(fname string) bool { + ext := filepath.Ext(fname) + return strings.EqualFold(ext, ".yaml") || strings.EqualFold(ext, ".yml") || strings.EqualFold(ext, ".json") +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/dependency.go b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go new file mode 100644 index 000000000..4ef5eeb32 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go @@ -0,0 +1,82 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import "time" + +// Dependency describes a chart upon which another chart depends. 
+//
+// Dependencies can be used to express developer intent, or to capture the state
+// of a chart.
+type Dependency struct {
+	// Name is the name of the dependency.
+	//
+	// This must match the name in the dependency's Chart.yaml.
+	Name string `json:"name"`
+	// Version is the version (range) of this chart.
+	//
+	// A lock file will always produce a single version, while a dependency
+	// may contain a semantic version range.
+	Version string `json:"version,omitempty"`
+	// The URL to the repository.
+	//
+	// Appending `index.yaml` to this string should result in a URL that can be
+	// used to fetch the repository index.
+	Repository string `json:"repository"`
+	// A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled)
+	Condition string `json:"condition,omitempty"`
+	// Tags can be used to group charts for enabling/disabling together
+	Tags []string `json:"tags,omitempty"`
+	// Enabled determines if the chart should be loaded
+	Enabled bool `json:"enabled,omitempty"`
+	// ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
+	// string or pair of child/parent sublist items.
+	ImportValues []interface{} `json:"import-values,omitempty"`
+	// Alias is a usable alias for the chart
+	Alias string `json:"alias,omitempty"`
+}
+
+// Validate checks for common problems with the dependency data structure in
+// the chart. This check must be done at load time before the dependency's charts are
+// loaded.
+func (d *Dependency) Validate() error {
+	if d == nil {
+		return ValidationError("dependencies must not contain empty or null nodes")
+	}
+	d.Name = sanitizeString(d.Name)
+	d.Version = sanitizeString(d.Version)
+	d.Repository = sanitizeString(d.Repository)
+	d.Condition = sanitizeString(d.Condition)
+	for i := range d.Tags {
+		d.Tags[i] = sanitizeString(d.Tags[i])
+	}
+	if d.Alias != "" && !aliasNameFormat.MatchString(d.Alias) {
+		return ValidationErrorf("dependency %q has disallowed characters in the alias", d.Name)
+	}
+	return nil
+}
+
+// Lock is a lock file for dependencies.
+//
+// It represents the state that the dependencies should be in.
+type Lock struct {
+	// Generated is the date the lock file was last generated.
+	Generated time.Time `json:"generated"`
+	// Digest is a hash of the dependencies in Chart.yaml.
+	Digest string `json:"digest"`
+	// Dependencies is the list of dependencies that this lock file has locked.
+	Dependencies []*Dependency `json:"dependencies"`
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chart/errors.go b/vendor/helm.sh/helm/v3/pkg/chart/errors.go
new file mode 100644
index 000000000..2fad5f370
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chart/errors.go
@@ -0,0 +1,30 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package chart
+
+import "fmt"
+
+// ValidationError represents a data validation error.
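+//
+// For instance, surfaced through Error() it reads (the message is illustrative):
+//
+//	err := ValidationErrorf("chart.metadata.version %q is invalid", "abc")
+//	err.Error() // validation: chart.metadata.version "abc" is invalid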
+type ValidationError string + +func (v ValidationError) Error() string { + return "validation: " + string(v) +} + +// ValidationErrorf takes a message and formatting options and creates a ValidationError +func ValidationErrorf(msg string, args ...interface{}) ValidationError { + return ValidationError(fmt.Sprintf(msg, args...)) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/file.go b/vendor/helm.sh/helm/v3/pkg/chart/file.go new file mode 100644 index 000000000..9dd7c08d5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/file.go @@ -0,0 +1,27 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +// File represents a file as a name/value pair. +// +// By convention, name is a relative path within the scope of the chart's +// base directory. +type File struct { + // Name is the path-like name of the template. + Name string `json:"name"` + // Data is the template as byte data. + Data []byte `json:"data"` +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go new file mode 100644 index 000000000..196e5f81d --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/archive.go @@ -0,0 +1,205 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "os" + "path" + "regexp" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" +) + +var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`) + +// FileLoader loads a chart from a file +type FileLoader string + +// Load loads a chart +func (l FileLoader) Load() (*chart.Chart, error) { + return LoadFile(string(l)) +} + +// LoadFile loads from an archive file. +func LoadFile(name string) (*chart.Chart, error) { + if fi, err := os.Stat(name); err != nil { + return nil, err + } else if fi.IsDir() { + return nil, errors.New("cannot load a directory") + } + + raw, err := os.Open(name) + if err != nil { + return nil, err + } + defer raw.Close() + + err = ensureArchive(name, raw) + if err != nil { + return nil, err + } + + c, err := LoadArchive(raw) + if err != nil { + if err == gzip.ErrHeader { + return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err) + } + } + return c, err +} + +// ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive. +// +// Sometimes users will provide a values.yaml for an argument where a chart is expected. 
One common occurrence
+// of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error
+// if we didn't check for this.
+func ensureArchive(name string, raw *os.File) error {
+	defer raw.Seek(0, 0) // reset read offset to allow archive loading to proceed.
+
+	// Check the file format to give us a chance to provide the user with more actionable feedback.
+	buffer := make([]byte, 512)
+	_, err := raw.Read(buffer)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("file '%s' cannot be read: %s", name, err)
+	}
+
+	// Helm may identify an archive of type application/x-gzip as application/vnd.ms-fontobject.
+	// Fix for: https://github.com/helm/helm/issues/12261
+	if contentType := http.DetectContentType(buffer); contentType != "application/x-gzip" && !isGZipApplication(buffer) {
+		// TODO: Is there a way to reliably test if a file content is YAML? ghodss/yaml accepts a wide
+		// variety of content (Makefile, .zshrc) as valid YAML without errors.
+
+		// Wrong content type. Let's check if it's yaml and give an extra hint?
+		if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") {
+			return fmt.Errorf("file '%s' seems to be a YAML file, but expected a gzipped archive", name)
+		}
+		return fmt.Errorf("file '%s' does not appear to be a gzipped archive; got '%s'", name, contentType)
+	}
+	return nil
+}
+
+// isGZipApplication checks whether the archive is of the application/x-gzip type.
+func isGZipApplication(data []byte) bool {
+	sig := []byte("\x1F\x8B\x08")
+	return bytes.HasPrefix(data, sig)
+}
+
+// LoadArchiveFiles reads in files out of an archive into memory. This function
+// performs important path security checks and should always be used before
+// expanding a tarball.
+func LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {
+	unzipped, err := gzip.NewReader(in)
+	if err != nil {
+		return nil, err
+	}
+	defer unzipped.Close()
+
+	files := []*BufferedFile{}
+	tr := tar.NewReader(unzipped)
+	for {
+		b := bytes.NewBuffer(nil)
+		hd, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		if hd.FileInfo().IsDir() {
+			// Use this instead of hd.Typeflag because we don't have to do any
+			// inference chasing.
+			continue
+		}
+
+		switch hd.Typeflag {
+		// We don't want to process these extension header files.
+		case tar.TypeXGlobalHeader, tar.TypeXHeader:
+			continue
+		}
+
+		// Archive could contain \ if generated on Windows
+		delimiter := "/"
+		if strings.ContainsRune(hd.Name, '\\') {
+			delimiter = "\\"
+		}
+
+		parts := strings.Split(hd.Name, delimiter)
+		n := strings.Join(parts[1:], delimiter)
+
+		// Normalize the path to the / delimiter
+		n = strings.ReplaceAll(n, delimiter, "/")
+
+		if path.IsAbs(n) {
+			return nil, errors.New("chart illegally contains absolute paths")
+		}
+
+		n = path.Clean(n)
+		if n == "." {
+			// In this case, the original path was relative when it should have been absolute.
+			return nil, errors.Errorf("chart illegally contains content outside the base directory: %q", hd.Name)
+		}
+		if strings.HasPrefix(n, "..") {
+			return nil, errors.New("chart illegally references parent directory")
+		}
+
+		// In some particularly arcane acts of path creativity, it is possible to intermix
+		// UNIX and Windows style paths in such a way that you produce a result of the form
+		// c:/foo even after all the built-in absolute path checks. So we explicitly check
+		// for this condition.
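+		// For instance, a tar entry named `chart\c:/foo` is split on `\`,
+		// reduced to `c:/foo`, and passes the checks above even though it
+		// names a Windows drive path.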
+ if drivePathPattern.MatchString(n) { + return nil, errors.New("chart contains illegally named files") + } + + if parts[0] == "Chart.yaml" { + return nil, errors.New("chart yaml not in base directory") + } + + if _, err := io.Copy(b, tr); err != nil { + return nil, err + } + + data := bytes.TrimPrefix(b.Bytes(), utf8bom) + + files = append(files, &BufferedFile{Name: n, Data: data}) + b.Reset() + } + + if len(files) == 0 { + return nil, errors.New("no files in chart archive") + } + return files, nil +} + +// LoadArchive loads from a reader containing a compressed tar archive. +func LoadArchive(in io.Reader) (*chart.Chart, error) { + files, err := LoadArchiveFiles(in) + if err != nil { + return nil, err + } + + return LoadFiles(files) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go new file mode 100644 index 000000000..9bcbee60c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go @@ -0,0 +1,119 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/sympath" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/ignore" +) + +var utf8bom = []byte{0xEF, 0xBB, 0xBF} + +// DirLoader loads a chart from a directory +type DirLoader string + +// Load loads the chart +func (l DirLoader) Load() (*chart.Chart, error) { + return LoadDir(string(l)) +} + +// LoadDir loads from a directory. +// +// This loads charts only from directories. +func LoadDir(dir string) (*chart.Chart, error) { + topdir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + + // Just used for errors. + c := &chart.Chart{} + + rules := ignore.Empty() + ifile := filepath.Join(topdir, ignore.HelmIgnore) + if _, err := os.Stat(ifile); err == nil { + r, err := ignore.ParseFile(ifile) + if err != nil { + return c, err + } + rules = r + } + rules.AddDefaults() + + files := []*BufferedFile{} + topdir += string(filepath.Separator) + + walk := func(name string, fi os.FileInfo, err error) error { + n := strings.TrimPrefix(name, topdir) + if n == "" { + // No need to process top level. Avoid bug with helmignore .* matching + // empty names. See issue 1779. + return nil + } + + // Normalize to / since it will also work on Windows + n = filepath.ToSlash(n) + + if err != nil { + return err + } + if fi.IsDir() { + // Directory-based ignore rules should involve skipping the entire + // contents of that directory. + if rules.Ignore(n, fi) { + return filepath.SkipDir + } + return nil + } + + // If a .helmignore file matches, skip this file. + if rules.Ignore(n, fi) { + return nil + } + + // Irregular files include devices, sockets, and other uses of files that + // are not regular files. In Go they have a file mode type bit set. + // See https://golang.org/pkg/os/#FileMode for examples. 
+ if !fi.Mode().IsRegular() { + return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name) + } + + data, err := os.ReadFile(name) + if err != nil { + return errors.Wrapf(err, "error reading %s", n) + } + + data = bytes.TrimPrefix(data, utf8bom) + + files = append(files, &BufferedFile{Name: n, Data: data}) + return nil + } + if err = sympath.Walk(topdir, walk); err != nil { + return c, err + } + + return LoadFiles(files) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go new file mode 100644 index 000000000..7cc8878a8 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/load.go @@ -0,0 +1,200 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "bytes" + "log" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// ChartLoader loads a chart. +type ChartLoader interface { + Load() (*chart.Chart, error) +} + +// Loader returns a new ChartLoader appropriate for the given chart name +func Loader(name string) (ChartLoader, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if fi.IsDir() { + return DirLoader(name), nil + } + return FileLoader(name), nil + +} + +// Load takes a string name, tries to resolve it to a file or directory, and then loads it. +// +// This is the preferred way to load a chart. It will discover the chart encoding +// and hand off to the appropriate chart reader. +// +// If a .helmignore file is present, the directory loader will skip loading any files +// matching it. But .helmignore is not evaluated when reading out of an archive. +func Load(name string) (*chart.Chart, error) { + l, err := Loader(name) + if err != nil { + return nil, err + } + return l.Load() +} + +// BufferedFile represents an archive file buffered for later processing. +type BufferedFile struct { + Name string + Data []byte +} + +// LoadFiles loads from in-memory files. +func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { + c := new(chart.Chart) + subcharts := make(map[string][]*BufferedFile) + + // do not rely on assumed ordering of files in the chart and crash + // if Chart.yaml was not coming early enough to initialize metadata + for _, f := range files { + c.Raw = append(c.Raw, &chart.File{Name: f.Name, Data: f.Data}) + if f.Name == "Chart.yaml" { + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil { + return c, errors.Wrap(err, "cannot load Chart.yaml") + } + // NOTE(bacongobbler): while the chart specification says that APIVersion must be set, + // Helm 2 accepted charts that did not provide an APIVersion in their chart metadata. + // Because of that, if APIVersion is unset, we should assume we're loading a v1 chart. 
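+			// (For example, a Chart.yaml containing only name: and
+			// version: lines is loaded as an APIVersionV1 chart.)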
+ if c.Metadata.APIVersion == "" { + c.Metadata.APIVersion = chart.APIVersionV1 + } + } + } + for _, f := range files { + switch { + case f.Name == "Chart.yaml": + // already processed + continue + case f.Name == "Chart.lock": + c.Lock = new(chart.Lock) + if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil { + return c, errors.Wrap(err, "cannot load Chart.lock") + } + case f.Name == "values.yaml": + c.Values = make(map[string]interface{}) + if err := yaml.Unmarshal(f.Data, &c.Values); err != nil { + return c, errors.Wrap(err, "cannot load values.yaml") + } + case f.Name == "values.schema.json": + c.Schema = f.Data + + // Deprecated: requirements.yaml is deprecated use Chart.yaml. + // We will handle it for you because we are nice people + case f.Name == "requirements.yaml": + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if c.Metadata.APIVersion != chart.APIVersionV1 { + log.Printf("Warning: Dependencies are handled in Chart.yaml since apiVersion \"v2\". We recommend migrating dependencies to Chart.yaml.") + } + if err := yaml.Unmarshal(f.Data, c.Metadata); err != nil { + return c, errors.Wrap(err, "cannot load requirements.yaml") + } + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + // Deprecated: requirements.lock is deprecated use Chart.lock. + case f.Name == "requirements.lock": + c.Lock = new(chart.Lock) + if err := yaml.Unmarshal(f.Data, &c.Lock); err != nil { + return c, errors.Wrap(err, "cannot load requirements.lock") + } + if c.Metadata == nil { + c.Metadata = new(chart.Metadata) + } + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + + case strings.HasPrefix(f.Name, "templates/"): + c.Templates = append(c.Templates, &chart.File{Name: f.Name, Data: f.Data}) + case strings.HasPrefix(f.Name, "charts/"): + if filepath.Ext(f.Name) == ".prov" { + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + continue + } + + fname := strings.TrimPrefix(f.Name, "charts/") + cname := strings.SplitN(fname, "/", 2)[0] + subcharts[cname] = append(subcharts[cname], &BufferedFile{Name: fname, Data: f.Data}) + default: + c.Files = append(c.Files, &chart.File{Name: f.Name, Data: f.Data}) + } + } + + if c.Metadata == nil { + return c, errors.New("Chart.yaml file is missing") + } + + if err := c.Validate(); err != nil { + return c, err + } + + for n, files := range subcharts { + var sc *chart.Chart + var err error + switch { + case strings.IndexAny(n, "_.") == 0: + continue + case filepath.Ext(n) == ".tgz": + file := files[0] + if file.Name != n { + return c, errors.Errorf("error unpacking tar in %s: expected %s, got %s", c.Name(), n, file.Name) + } + // Untar the chart and add to c.Dependencies + sc, err = LoadArchive(bytes.NewBuffer(file.Data)) + default: + // We have to trim the prefix off of every file, and ignore any file + // that is in charts/, but isn't actually a chart. 
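+			// e.g. a buffered file named "mysubchart/templates/svc.yaml"
+			// is handed to the subchart loader as "templates/svc.yaml".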
+ buff := make([]*BufferedFile, 0, len(files)) + for _, f := range files { + parts := strings.SplitN(f.Name, "/", 2) + if len(parts) < 2 { + continue + } + f.Name = parts[1] + buff = append(buff, f) + } + sc, err = LoadFiles(buff) + } + + if err != nil { + return c, errors.Wrapf(err, "error unpacking %s in %s", n, c.Name()) + } + c.AddDependency(sc) + } + + return c, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go new file mode 100644 index 000000000..97bfc2c0c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go @@ -0,0 +1,172 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "strings" + "unicode" + + "github.com/Masterminds/semver/v3" +) + +// Maintainer describes a Chart maintainer. +type Maintainer struct { + // Name is a user name or organization name + Name string `json:"name,omitempty"` + // Email is an optional email address to contact the named maintainer + Email string `json:"email,omitempty"` + // URL is an optional URL to an address for the named maintainer + URL string `json:"url,omitempty"` +} + +// Validate checks valid data and sanitizes string characters. +func (m *Maintainer) Validate() error { + if m == nil { + return ValidationError("maintainers must not contain empty or null nodes") + } + m.Name = sanitizeString(m.Name) + m.Email = sanitizeString(m.Email) + m.URL = sanitizeString(m.URL) + return nil +} + +// Metadata for a Chart file. This models the structure of a Chart.yaml file. +type Metadata struct { + // The name of the chart. Required. + Name string `json:"name,omitempty"` + // The URL to a relevant project page, git repo, or contact person + Home string `json:"home,omitempty"` + // Source is the URL to the source code of this chart + Sources []string `json:"sources,omitempty"` + // A SemVer 2 conformant version string of the chart. Required. + Version string `json:"version,omitempty"` + // A one-sentence description of the chart + Description string `json:"description,omitempty"` + // A list of string keywords + Keywords []string `json:"keywords,omitempty"` + // A list of name and URL/email address combinations for the maintainer(s) + Maintainers []*Maintainer `json:"maintainers,omitempty"` + // The URL to an icon file. + Icon string `json:"icon,omitempty"` + // The API Version of this chart. Required. + APIVersion string `json:"apiVersion,omitempty"` + // The condition to check to enable chart + Condition string `json:"condition,omitempty"` + // The tags to check to enable chart + Tags string `json:"tags,omitempty"` + // The version of the application enclosed inside of this chart. + AppVersion string `json:"appVersion,omitempty"` + // Whether or not this chart is deprecated + Deprecated bool `json:"deprecated,omitempty"` + // Annotations are additional mappings uninterpreted by Helm, + // made available for inspection by other applications. 
+ Annotations map[string]string `json:"annotations,omitempty"` + // KubeVersion is a SemVer constraint specifying the version of Kubernetes required. + KubeVersion string `json:"kubeVersion,omitempty"` + // Dependencies are a list of dependencies for a chart. + Dependencies []*Dependency `json:"dependencies,omitempty"` + // Specifies the chart type: application or library + Type string `json:"type,omitempty"` +} + +// Validate checks the metadata for known issues and sanitizes string +// characters. +func (md *Metadata) Validate() error { + if md == nil { + return ValidationError("chart.metadata is required") + } + + md.Name = sanitizeString(md.Name) + md.Description = sanitizeString(md.Description) + md.Home = sanitizeString(md.Home) + md.Icon = sanitizeString(md.Icon) + md.Condition = sanitizeString(md.Condition) + md.Tags = sanitizeString(md.Tags) + md.AppVersion = sanitizeString(md.AppVersion) + md.KubeVersion = sanitizeString(md.KubeVersion) + for i := range md.Sources { + md.Sources[i] = sanitizeString(md.Sources[i]) + } + for i := range md.Keywords { + md.Keywords[i] = sanitizeString(md.Keywords[i]) + } + + if md.APIVersion == "" { + return ValidationError("chart.metadata.apiVersion is required") + } + if md.Name == "" { + return ValidationError("chart.metadata.name is required") + } + if md.Version == "" { + return ValidationError("chart.metadata.version is required") + } + if !isValidSemver(md.Version) { + return ValidationErrorf("chart.metadata.version %q is invalid", md.Version) + } + if !isValidChartType(md.Type) { + return ValidationError("chart.metadata.type must be application or library") + } + + for _, m := range md.Maintainers { + if err := m.Validate(); err != nil { + return err + } + } + + // Aliases need to be validated here to make sure that the alias name does + // not contain any illegal characters. + dependencies := map[string]*Dependency{} + for _, dependency := range md.Dependencies { + if err := dependency.Validate(); err != nil { + return err + } + key := dependency.Name + if dependency.Alias != "" { + key = dependency.Alias + } + if dependencies[key] != nil { + return ValidationErrorf("more than one dependency with name or alias %q", key) + } + dependencies[key] = dependency + } + return nil +} + +func isValidChartType(in string) bool { + switch in { + case "", "application", "library": + return true + } + return false +} + +func isValidSemver(v string) bool { + _, err := semver.NewVersion(v) + return err == nil +} + +// sanitizeString normalize spaces and removes non-printable characters. +func sanitizeString(str string) string { + return strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return ' ' + } + if unicode.IsPrint(r) { + return r + } + return -1 + }, str) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go new file mode 100644 index 000000000..5f57e11a5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go @@ -0,0 +1,126 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "strconv" + + "github.com/Masterminds/semver/v3" + "k8s.io/client-go/kubernetes/scheme" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + helmversion "helm.sh/helm/v3/internal/version" +) + +var ( + // The Kubernetes version can be set by LDFLAGS. In order to do that the value + // must be a string. + k8sVersionMajor = "1" + k8sVersionMinor = "20" + + // DefaultVersionSet is the default version set, which includes only Core V1 ("v1"). + DefaultVersionSet = allKnownVersions() + + // DefaultCapabilities is the default set of capabilities. + DefaultCapabilities = &Capabilities{ + KubeVersion: KubeVersion{ + Version: fmt.Sprintf("v%s.%s.0", k8sVersionMajor, k8sVersionMinor), + Major: k8sVersionMajor, + Minor: k8sVersionMinor, + }, + APIVersions: DefaultVersionSet, + HelmVersion: helmversion.Get(), + } +) + +// Capabilities describes the capabilities of the Kubernetes cluster. +type Capabilities struct { + // KubeVersion is the Kubernetes version. + KubeVersion KubeVersion + // APIversions are supported Kubernetes API versions. + APIVersions VersionSet + // HelmVersion is the build information for this helm version + HelmVersion helmversion.BuildInfo +} + +func (capabilities *Capabilities) Copy() *Capabilities { + return &Capabilities{ + KubeVersion: capabilities.KubeVersion, + APIVersions: capabilities.APIVersions, + HelmVersion: capabilities.HelmVersion, + } +} + +// KubeVersion is the Kubernetes version. +type KubeVersion struct { + Version string // Kubernetes version + Major string // Kubernetes major version + Minor string // Kubernetes minor version +} + +// String implements fmt.Stringer +func (kv *KubeVersion) String() string { return kv.Version } + +// GitVersion returns the Kubernetes version string. +// +// Deprecated: use KubeVersion.Version. +func (kv *KubeVersion) GitVersion() string { return kv.Version } + +// ParseKubeVersion parses kubernetes version from string +func ParseKubeVersion(version string) (*KubeVersion, error) { + sv, err := semver.NewVersion(version) + if err != nil { + return nil, err + } + return &KubeVersion{ + Version: "v" + sv.String(), + Major: strconv.FormatUint(sv.Major(), 10), + Minor: strconv.FormatUint(sv.Minor(), 10), + }, nil +} + +// VersionSet is a set of Kubernetes API versions. +type VersionSet []string + +// Has returns true if the version string is in the set. +// +// vs.Has("apps/v1") +func (v VersionSet) Has(apiVersion string) bool { + for _, x := range v { + if x == apiVersion { + return true + } + } + return false +} + +func allKnownVersions() VersionSet { + // We should register the built in extension APIs as well so CRDs are + // supported in the default version set. 
This has caused problems with `helm + // template` in the past, so let's be safe + apiextensionsv1beta1.AddToScheme(scheme.Scheme) + apiextensionsv1.AddToScheme(scheme.Scheme) + + groups := scheme.Scheme.PrioritizedVersionsAllGroups() + vs := make(VersionSet, 0, len(groups)) + for _, gv := range groups { + vs = append(vs, gv.String()) + } + return vs +} diff --git a/vendor/k8s.io/helm/pkg/chartutil/chartfile.go b/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go similarity index 55% rename from vendor/k8s.io/helm/pkg/chartutil/chartfile.go rename to vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go index f997d15fe..4f537a6e7 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/chartfile.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go @@ -17,50 +17,44 @@ limitations under the License. package chartutil import ( - "errors" - "fmt" - "io/ioutil" "os" "path/filepath" - "github.com/ghodss/yaml" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" - "k8s.io/helm/pkg/proto/hapi/chart" + "helm.sh/helm/v3/pkg/chart" ) -// ApiVersionV1 is the API version number for version 1. -// -// This is ApiVersionV1 instead of APIVersionV1 to match the protobuf-generated name. -const ApiVersionV1 = "v1" // nolint - -// UnmarshalChartfile takes raw Chart.yaml data and unmarshals it. -func UnmarshalChartfile(data []byte) (*chart.Metadata, error) { - y := &chart.Metadata{} - err := yaml.Unmarshal(data, y) - if err != nil { - return nil, err - } - return y, nil -} - // LoadChartfile loads a Chart.yaml file into a *chart.Metadata. func LoadChartfile(filename string) (*chart.Metadata, error) { - b, err := ioutil.ReadFile(filename) + b, err := os.ReadFile(filename) if err != nil { return nil, err } - return UnmarshalChartfile(b) + y := new(chart.Metadata) + err = yaml.Unmarshal(b, y) + return y, err } // SaveChartfile saves the given metadata as a Chart.yaml file at the given path. // // 'filename' should be the complete path and filename ('foo/Chart.yaml') func SaveChartfile(filename string, cf *chart.Metadata) error { + // Pull out the dependencies of a v1 Chart, since there's no way + // to tell the serializer to skip a field for just this use case + savedDependencies := cf.Dependencies + if cf.APIVersion == chart.APIVersionV1 { + cf.Dependencies = nil + } out, err := yaml.Marshal(cf) + if cf.APIVersion == chart.APIVersionV1 { + cf.Dependencies = savedDependencies + } if err != nil { return err } - return ioutil.WriteFile(filename, out, 0644) + return os.WriteFile(filename, out, 0644) } // IsChartDir validate a chart directory. 
@@ -70,28 +64,28 @@ func IsChartDir(dirName string) (bool, error) { if fi, err := os.Stat(dirName); err != nil { return false, err } else if !fi.IsDir() { - return false, fmt.Errorf("%q is not a directory", dirName) + return false, errors.Errorf("%q is not a directory", dirName) } - chartYaml := filepath.Join(dirName, "Chart.yaml") + chartYaml := filepath.Join(dirName, ChartfileName) if _, err := os.Stat(chartYaml); os.IsNotExist(err) { - return false, fmt.Errorf("no Chart.yaml exists in directory %q", dirName) + return false, errors.Errorf("no %s exists in directory %q", ChartfileName, dirName) } - chartYamlContent, err := ioutil.ReadFile(chartYaml) + chartYamlContent, err := os.ReadFile(chartYaml) if err != nil { - return false, fmt.Errorf("cannot read Chart.Yaml in directory %q", dirName) + return false, errors.Errorf("cannot read %s in directory %q", ChartfileName, dirName) } - chartContent, err := UnmarshalChartfile(chartYamlContent) - if err != nil { + chartContent := new(chart.Metadata) + if err := yaml.Unmarshal(chartYamlContent, &chartContent); err != nil { return false, err } if chartContent == nil { - return false, errors.New("chart metadata (Chart.yaml) missing") + return false, errors.Errorf("chart metadata (%s) missing", ChartfileName) } if chartContent.Name == "" { - return false, errors.New("invalid chart (Chart.yaml): name must not be empty") + return false, errors.Errorf("invalid chart (%s): name must not be empty", ChartfileName) } return true, nil diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go new file mode 100644 index 000000000..f0272fd6a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go @@ -0,0 +1,293 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "log" + + "github.com/mitchellh/copystructure" + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" +) + +func concatPrefix(a, b string) string { + if a == "" { + return b + } + return fmt.Sprintf("%s.%s", a, b) +} + +// CoalesceValues coalesces all of the values in a chart (and its subcharts). +// +// Values are coalesced together using the following rules: +// +// - Values in a higher level chart always override values in a lower-level +// dependency chart +// - Scalar values and arrays are replaced, maps are merged +// - A chart has access to all of the variables for it, as well as all of +// the values destined for its dependencies. +func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { + valsCopy, err := copyValues(vals) + if err != nil { + return vals, err + } + return coalesce(log.Printf, chrt, valsCopy, "", false) +} + +// MergeValues is used to merge the values in a chart and its subcharts. This +// is different from Coalescing as nil/null values are preserved. 
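+//
+// For example, given chart values {a: 1, b: 2} and user-supplied values
+// {b: null}, CoalesceValues drops the b key entirely while MergeValues
+// keeps b: null in the result.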
+//
+// Values are coalesced together using the following rules:
+//
+// - Values in a higher level chart always override values in a lower-level
+// dependency chart
+// - Scalar values and arrays are replaced, maps are merged
+// - A chart has access to all of the variables for it, as well as all of
+// the values destined for its dependencies.
+//
+// Retaining Nils is useful when processes early in a Helm action or business
+// logic need to retain them for when Coalescing will happen again later in the
+// business logic.
+func MergeValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) {
+	valsCopy, err := copyValues(vals)
+	if err != nil {
+		return vals, err
+	}
+	return coalesce(log.Printf, chrt, valsCopy, "", true)
+}
+
+func copyValues(vals map[string]interface{}) (Values, error) {
+	v, err := copystructure.Copy(vals)
+	if err != nil {
+		return vals, err
+	}
+
+	valsCopy := v.(map[string]interface{})
+	// if we have a nil map, make sure it is initialized
+	if valsCopy == nil {
+		valsCopy = make(map[string]interface{})
+	}
+
+	return valsCopy, nil
+}
+
+type printFn func(format string, v ...interface{})
+
+// coalesce coalesces the dest values and the chart values, giving priority to the dest values.
+//
+// This is a helper function for CoalesceValues and MergeValues.
+//
+// Note, the merge argument specifies whether this is being used by MergeValues
+// or CoalesceValues. Coalescing removes null values and their keys in some
+// situations while merging keeps the null values.
+func coalesce(printf printFn, ch *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) {
+	coalesceValues(printf, ch, dest, prefix, merge)
+	return coalesceDeps(printf, ch, dest, prefix, merge)
+}
+
+// coalesceDeps coalesces the dependencies of the given chart.
+func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{}, prefix string, merge bool) (map[string]interface{}, error) {
+	for _, subchart := range chrt.Dependencies() {
+		if c, ok := dest[subchart.Name()]; !ok {
+			// If dest doesn't already have the key, create it.
+			dest[subchart.Name()] = make(map[string]interface{})
+		} else if !istable(c) {
+			return dest, errors.Errorf("type mismatch on %s: %T", subchart.Name(), c)
+		}
+		if dv, ok := dest[subchart.Name()]; ok {
+			dvmap := dv.(map[string]interface{})
+			subPrefix := concatPrefix(prefix, chrt.Metadata.Name)
+			// Get globals out of dest and merge them into dvmap.
+			coalesceGlobals(printf, dvmap, dest, subPrefix, merge)
+			// Now coalesce the rest of the values.
+			var err error
+			dest[subchart.Name()], err = coalesce(printf, subchart, dvmap, subPrefix, merge)
+			if err != nil {
+				return dest, err
+			}
+		}
+	}
+	return dest, nil
+}
+
+// coalesceGlobals copies the globals out of src and merges them into dest.
+//
+// Globals in src take precedence over globals already present in dest.
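+//
+// A tiny illustration (maps written inline for brevity):
+//
+//	dest = {"global": {"tier": "backend"}}
+//	src  = {"global": {"app": "api"}}
+//	coalesceGlobals(printf, dest, src, "", false)
+//	// dest["global"] is now {"app": "api", "tier": "backend"}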
+func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, _ bool) { + var dg, sg map[string]interface{} + + if destglob, ok := dest[GlobalKey]; !ok { + dg = make(map[string]interface{}) + } else if dg, ok = destglob.(map[string]interface{}); !ok { + printf("warning: skipping globals because destination %s is not a table.", GlobalKey) + return + } + + if srcglob, ok := src[GlobalKey]; !ok { + sg = make(map[string]interface{}) + } else if sg, ok = srcglob.(map[string]interface{}); !ok { + printf("warning: skipping globals because source %s is not a table.", GlobalKey) + return + } + + // EXPERIMENTAL: In the past, we have disallowed globals to test tables. This + // reverses that decision. It may somehow be possible to introduce a loop + // here, but I haven't found a way. So for the time being, let's allow + // tables in globals. + for key, val := range sg { + if istable(val) { + vv := copyMap(val.(map[string]interface{})) + if destv, ok := dg[key]; !ok { + // Here there is no merge. We're just adding. + dg[key] = vv + } else { + if destvmap, ok := destv.(map[string]interface{}); !ok { + printf("Conflict: cannot merge map onto non-map for %q. Skipping.", key) + } else { + // Basically, we reverse order of coalesce here to merge + // top-down. + subPrefix := concatPrefix(prefix, key) + // In this location coalesceTablesFullKey should always have + // merge set to true. The output of coalesceGlobals is run + // through coalesce where any nils will be removed. + coalesceTablesFullKey(printf, vv, destvmap, subPrefix, true) + dg[key] = vv + } + } + } else if dv, ok := dg[key]; ok && istable(dv) { + // It's not clear if this condition can actually ever trigger. + printf("key %s is table. Skipping", key) + } else { + // TODO: Do we need to do any additional checking on the value? + dg[key] = val + } + } + dest[GlobalKey] = dg +} + +func copyMap(src map[string]interface{}) map[string]interface{} { + m := make(map[string]interface{}, len(src)) + for k, v := range src { + m[k] = v + } + return m +} + +// coalesceValues builds up a values map for a particular chart. +// +// Values in v will override the values in the chart. +func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, prefix string, merge bool) { + subPrefix := concatPrefix(prefix, c.Metadata.Name) + + // Using c.Values directly when coalescing a table can cause problems where + // the original c.Values is altered. Creating a deep copy stops the problem. + // This section is fault-tolerant as there is no ability to return an error. + valuesCopy, err := copystructure.Copy(c.Values) + var vc map[string]interface{} + var ok bool + if err != nil { + // If there is an error something is wrong with copying c.Values it + // means there is a problem in the deep copying package or something + // wrong with c.Values. In this case we will use c.Values and report + // an error. + printf("warning: unable to copy values, err: %s", err) + vc = c.Values + } else { + vc, ok = valuesCopy.(map[string]interface{}) + if !ok { + // c.Values has a map[string]interface{} structure. If the copy of + // it cannot be treated as map[string]interface{} there is something + // strangely wrong. Log it and use c.Values + printf("warning: unable to convert values copy to values type") + vc = c.Values + } + } + + for key, val := range vc { + if value, ok := v[key]; ok { + if value == nil && !merge { + // When the YAML value is null and we are coalescing instead of + // merging, we remove the value's key. 
+ // This allows Helm's various sources of values (value files or --set) to + // remove incompatible keys from any previous chart, file, or set values. + delete(v, key) + } else if dest, ok := value.(map[string]interface{}); ok { + // if v[key] is a table, merge nv's val table into v[key]. + src, ok := val.(map[string]interface{}) + if !ok { + // If the original value is nil, there is nothing to coalesce, so we don't print + // the warning + if val != nil { + printf("warning: skipped value for %s.%s: Not a table.", subPrefix, key) + } + } else { + // Because v has higher precedence than nv, dest values override src + // values. + coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key), merge) + } + } + } else { + // If the key is not in v, copy it from nv. + v[key] = val + } + } +} + +// CoalesceTables merges a source map into a destination map. +// +// dest is considered authoritative. +func CoalesceTables(dst, src map[string]interface{}) map[string]interface{} { + return coalesceTablesFullKey(log.Printf, dst, src, "", false) +} + +func MergeTables(dst, src map[string]interface{}) map[string]interface{} { + return coalesceTablesFullKey(log.Printf, dst, src, "", true) +} + +// coalesceTablesFullKey merges a source map into a destination map. +// +// dest is considered authoritative. +func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, prefix string, merge bool) map[string]interface{} { + // When --reuse-values is set but there are no modifications yet, return new values + if src == nil { + return dst + } + if dst == nil { + return src + } + // Because dest has higher precedence than src, dest values override src + // values. + for key, val := range src { + fullkey := concatPrefix(prefix, key) + if dv, ok := dst[key]; ok && !merge && dv == nil { + delete(dst, key) + } else if !ok { + dst[key] = val + } else if istable(val) { + if istable(dv) { + coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey, merge) + } else { + printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val) + } + } else if istable(dv) && val != nil { + printf("warning: destination for %s is a table. Ignoring non-table value (%v)", fullkey, val) + } + } + return dst +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go b/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go new file mode 100644 index 000000000..f4656c913 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go @@ -0,0 +1,34 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import "github.com/Masterminds/semver/v3" + +// IsCompatibleRange compares a version to a constraint. +// It returns true if the version matches the constraint, and false in all other cases. 
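+//
+// For example:
+//
+//	IsCompatibleRange(">= 1.19.0", "v1.20.3")    // true
+//	IsCompatibleRange("~1.19.0", "1.21.0")       // false
+//	IsCompatibleRange(">= 1.19.0", "not-semver") // false (unparsable version)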
+func IsCompatibleRange(constraint, ver string) bool { + sv, err := semver.NewVersion(ver) + if err != nil { + return false + } + + c, err := semver.NewConstraint(constraint) + if err != nil { + return false + } + return c.Check(sv) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go new file mode 100644 index 000000000..0bb5a83cd --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go @@ -0,0 +1,723 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" +) + +// chartName is a regular expression for testing the supplied name of a chart. +// This regular expression is probably stricter than it needs to be. We can relax it +// somewhat. Newline characters, as well as $, quotes, +, parens, and % are known to be +// problematic. +var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$") + +const ( + // ChartfileName is the default Chart file name. + ChartfileName = "Chart.yaml" + // ValuesfileName is the default values file name. + ValuesfileName = "values.yaml" + // SchemafileName is the default values schema file name. + SchemafileName = "values.schema.json" + // TemplatesDir is the relative directory name for templates. + TemplatesDir = "templates" + // ChartsDir is the relative directory name for charts dependencies. + ChartsDir = "charts" + // TemplatesTestsDir is the relative directory name for tests. + TemplatesTestsDir = TemplatesDir + sep + "tests" + // IgnorefileName is the name of the Helm ignore file. + IgnorefileName = ".helmignore" + // IngressFileName is the name of the example ingress file. + IngressFileName = TemplatesDir + sep + "ingress.yaml" + // DeploymentName is the name of the example deployment file. + DeploymentName = TemplatesDir + sep + "deployment.yaml" + // ServiceName is the name of the example service file. + ServiceName = TemplatesDir + sep + "service.yaml" + // ServiceAccountName is the name of the example serviceaccount file. + ServiceAccountName = TemplatesDir + sep + "serviceaccount.yaml" + // HorizontalPodAutoscalerName is the name of the example hpa file. + HorizontalPodAutoscalerName = TemplatesDir + sep + "hpa.yaml" + // NotesName is the name of the example NOTES.txt file. + NotesName = TemplatesDir + sep + "NOTES.txt" + // HelpersName is the name of the example helpers file. + HelpersName = TemplatesDir + sep + "_helpers.tpl" + // TestConnectionName is the name of the example test file. + TestConnectionName = TemplatesTestsDir + sep + "test-connection.yaml" +) + +// maxChartNameLength is lower than the limits we know of with certain file systems, +// and with certain Kubernetes fields. 
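+// (Most Kubernetes object names, for example, are capped at 253 characters
+// by the DNS subdomain naming rules.)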
+const maxChartNameLength = 250 + +const sep = string(filepath.Separator) + +const defaultChartfile = `apiVersion: v2 +name: %s +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" +` + +const defaultValues = `# Default values for %s. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} +podLabels: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +livenessProbe: + httpGet: + path: / + port: http +readinessProbe: + httpGet: + path: / + port: http + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# Additional volumes on the output Deployment definition. +volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# Additional volumeMounts on the output Deployment definition. 
+volumeMounts: []
+# - name: foo
+#   mountPath: "/etc/foo"
+#   readOnly: true
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+`
+
+const defaultIgnore = `# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+`
+
+const defaultIngress = `{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "<CHARTNAME>.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+  {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+  ingressClassName: {{ .Values.ingress.className }}
+  {{- end }}
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+            pathType: {{ .pathType }}
+            {{- end }}
+            backend:
+              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
+{{- end }}
+`
+
+const defaultDeployment = `apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "<CHARTNAME>.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "<CHARTNAME>.labels" . | nindent 8 }}
+        {{- with .Values.podLabels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "<CHARTNAME>.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.service.port }}
+              protocol: TCP
+          livenessProbe:
+            {{- toYaml .Values.livenessProbe | nindent 12 }}
+          readinessProbe:
+            {{- toYaml .Values.readinessProbe | nindent 12 }}
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          {{- with .Values.volumeMounts }}
+          volumeMounts:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+      {{- with .Values.volumes }}
+      volumes:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+`
+
+const defaultService = `apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "<CHARTNAME>.selectorLabels" . | nindent 4 }}
+`
+
+const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "<CHARTNAME>.serviceAccountName" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
+{{- end }}
+`
+
+const defaultHorizontalPodAutoscaler = `{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "<CHARTNAME>.fullname" . }}
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "<CHARTNAME>.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
+`
+
+const defaultNotes = `1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "<CHARTNAME>.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "<CHARTNAME>.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "<CHARTNAME>.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "<CHARTNAME>.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
+`
+
+const defaultHelpers = `{{/*
+Expand the name of the chart.
+*/}}
+{{- define "<CHARTNAME>.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "<CHARTNAME>.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "<CHARTNAME>.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "<CHARTNAME>.labels" -}}
+helm.sh/chart: {{ include "<CHARTNAME>.chart" . }}
+{{ include "<CHARTNAME>.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "<CHARTNAME>.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "<CHARTNAME>.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "<CHARTNAME>.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "<CHARTNAME>.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+`
+
+const defaultTestConnection = `apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "<CHARTNAME>.fullname" . }}-test-connection"
+  labels:
+    {{- include "<CHARTNAME>.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['{{ include "<CHARTNAME>.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
+`
+
+// Stderr is an io.Writer to which error messages can be written
+//
+// In Helm 4, this will be replaced.
It is needed in Helm 3 to preserve API backward +// compatibility. +var Stderr io.Writer = os.Stderr + +// CreateFrom creates a new chart, but scaffolds it from the src chart. +func CreateFrom(chartfile *chart.Metadata, dest, src string) error { + schart, err := loader.Load(src) + if err != nil { + return errors.Wrapf(err, "could not load %s", src) + } + + schart.Metadata = chartfile + + var updatedTemplates []*chart.File + + for _, template := range schart.Templates { + newData := transform(string(template.Data), schart.Name()) + updatedTemplates = append(updatedTemplates, &chart.File{Name: template.Name, Data: newData}) + } + + schart.Templates = updatedTemplates + b, err := yaml.Marshal(schart.Values) + if err != nil { + return errors.Wrap(err, "reading values file") + } + + var m map[string]interface{} + if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil { + return errors.Wrap(err, "transforming values file") + } + schart.Values = m + + // SaveDir looks for the file values.yaml when saving rather than the values + // key in order to preserve the comments in the YAML. The name placeholder + // needs to be replaced on that file. + for _, f := range schart.Raw { + if f.Name == ValuesfileName { + f.Data = transform(string(f.Data), schart.Name()) + } + } + + return SaveDir(schart, dest) +} + +// Create creates a new chart in a directory. +// +// Inside of dir, this will create a directory based on the name of +// chartfile.Name. It will then write the Chart.yaml into this directory and +// create the (empty) appropriate directories. +// +// The returned string will point to the newly created directory. It will be +// an absolute path, even if the provided base directory was relative. +// +// If dir does not exist, this will return an error. +// If Chart.yaml or any directories cannot be created, this will return an +// error. In such a case, this will attempt to clean up by removing the +// new chart directory. +func Create(name, dir string) (string, error) { + + // Sanity-check the name of a chart so user doesn't create one that causes problems. 
+	if err := validateChartName(name); err != nil {
+		return "", err
+	}
+
+	path, err := filepath.Abs(dir)
+	if err != nil {
+		return path, err
+	}
+
+	if fi, err := os.Stat(path); err != nil {
+		return path, err
+	} else if !fi.IsDir() {
+		return path, errors.Errorf("no such directory %s", path)
+	}
+
+	cdir := filepath.Join(path, name)
+	if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() {
+		return cdir, errors.Errorf("file %s already exists and is not a directory", cdir)
+	}
+
+	files := []struct {
+		path    string
+		content []byte
+	}{
+		{
+			// Chart.yaml
+			path:    filepath.Join(cdir, ChartfileName),
+			content: []byte(fmt.Sprintf(defaultChartfile, name)),
+		},
+		{
+			// values.yaml
+			path:    filepath.Join(cdir, ValuesfileName),
+			content: []byte(fmt.Sprintf(defaultValues, name)),
+		},
+		{
+			// .helmignore
+			path:    filepath.Join(cdir, IgnorefileName),
+			content: []byte(defaultIgnore),
+		},
+		{
+			// ingress.yaml
+			path:    filepath.Join(cdir, IngressFileName),
+			content: transform(defaultIngress, name),
+		},
+		{
+			// deployment.yaml
+			path:    filepath.Join(cdir, DeploymentName),
+			content: transform(defaultDeployment, name),
+		},
+		{
+			// service.yaml
+			path:    filepath.Join(cdir, ServiceName),
+			content: transform(defaultService, name),
+		},
+		{
+			// serviceaccount.yaml
+			path:    filepath.Join(cdir, ServiceAccountName),
+			content: transform(defaultServiceAccount, name),
+		},
+		{
+			// hpa.yaml
+			path:    filepath.Join(cdir, HorizontalPodAutoscalerName),
+			content: transform(defaultHorizontalPodAutoscaler, name),
+		},
+		{
+			// NOTES.txt
+			path:    filepath.Join(cdir, NotesName),
+			content: transform(defaultNotes, name),
+		},
+		{
+			// _helpers.tpl
+			path:    filepath.Join(cdir, HelpersName),
+			content: transform(defaultHelpers, name),
+		},
+		{
+			// test-connection.yaml
+			path:    filepath.Join(cdir, TestConnectionName),
+			content: transform(defaultTestConnection, name),
+		},
+	}
+
+	for _, file := range files {
+		if _, err := os.Stat(file.path); err == nil {
+			// There is no handle to a preferred output stream here.
+			fmt.Fprintf(Stderr, "WARNING: File %q already exists. Overwriting.\n", file.path)
+		}
+		if err := writeFile(file.path, file.content); err != nil {
+			return cdir, err
+		}
+	}
+	// Need to add the ChartsDir explicitly as it does not contain any file OOTB
+	if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil {
+		return cdir, err
+	}
+	return cdir, nil
+}
+
+// transform performs a string replacement of the specified source for
+// a given key with the replacement string
+func transform(src, replacement string) []byte {
+	return []byte(strings.ReplaceAll(src, "<CHARTNAME>", replacement))
+}
+
+func writeFile(name string, content []byte) error {
+	if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+		return err
+	}
+	return os.WriteFile(name, content, 0644)
+}
+
+func validateChartName(name string) error {
+	if name == "" || len(name) > maxChartNameLength {
+		return fmt.Errorf("chart name must be between 1 and %d characters", maxChartNameLength)
+	}
+	if !chartName.MatchString(name) {
+		return fmt.Errorf("chart name must match the regular expression %q", chartName.String())
+	}
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
new file mode 100644
index 000000000..205d99e09
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go
@@ -0,0 +1,356 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "log" + "strings" + + "github.com/mitchellh/copystructure" + + "helm.sh/helm/v3/pkg/chart" +) + +// ProcessDependencies checks through this chart's dependencies, processing accordingly. +// +// TODO: For Helm v4 this can be combined with or turned into ProcessDependenciesWithMerge +func ProcessDependencies(c *chart.Chart, v Values) error { + if err := processDependencyEnabled(c, v, ""); err != nil { + return err + } + return processDependencyImportValues(c, false) +} + +// ProcessDependenciesWithMerge checks through this chart's dependencies, processing accordingly. +// It is similar to ProcessDependencies but it does not remove nil values during +// the import/export handling process. +func ProcessDependenciesWithMerge(c *chart.Chart, v Values) error { + if err := processDependencyEnabled(c, v, ""); err != nil { + return err + } + return processDependencyImportValues(c, true) +} + +// processDependencyConditions disables charts based on condition path value in values +func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath string) { + if reqs == nil { + return + } + for _, r := range reqs { + for _, c := range strings.Split(strings.TrimSpace(r.Condition), ",") { + if len(c) > 0 { + // retrieve value + vv, err := cvals.PathValue(cpath + c) + if err == nil { + // if not bool, warn + if bv, ok := vv.(bool); ok { + r.Enabled = bv + break + } + log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) + } else if _, ok := err.(ErrNoValue); !ok { + // this is a real error + log.Printf("Warning: PathValue returned error %v", err) + } + } + } + } +} + +// processDependencyTags disables charts based on tags in values +func processDependencyTags(reqs []*chart.Dependency, cvals Values) { + if reqs == nil { + return + } + vt, err := cvals.Table("tags") + if err != nil { + return + } + for _, r := range reqs { + var hasTrue, hasFalse bool + for _, k := range r.Tags { + if b, ok := vt[k]; ok { + // if not bool, warn + if bv, ok := b.(bool); ok { + if bv { + hasTrue = true + } else { + hasFalse = true + } + } else { + log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name) + } + } + } + if !hasTrue && hasFalse { + r.Enabled = false + } else if hasTrue || !hasTrue && !hasFalse { + r.Enabled = true + } + } +} + +func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart { + for _, c := range charts { + if c == nil { + continue + } + if c.Name() != dep.Name { + continue + } + if !IsCompatibleRange(dep.Version, c.Metadata.Version) { + continue + } + + out := *c + md := *c.Metadata + out.Metadata = &md + + if dep.Alias != "" { + md.Name = dep.Alias + } + return &out + } + return nil +} + +// processDependencyEnabled removes disabled charts from dependencies +func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error { + if c.Metadata.Dependencies == nil { + return nil + } + + var chartDependencies []*chart.Chart + 
// If any dependency is not a part of Chart.yaml
+	// then this should be added to chartDependencies.
+	// However, if the dependency is already specified in Chart.yaml
+	// we should not add it, as it will be processed from Chart.yaml anyway.
+
+Loop:
+	for _, existing := range c.Dependencies() {
+		for _, req := range c.Metadata.Dependencies {
+			if existing.Name() == req.Name && IsCompatibleRange(req.Version, existing.Metadata.Version) {
+				continue Loop
+			}
+		}
+		chartDependencies = append(chartDependencies, existing)
+	}
+
+	for _, req := range c.Metadata.Dependencies {
+		if req == nil {
+			continue
+		}
+		if chartDependency := getAliasDependency(c.Dependencies(), req); chartDependency != nil {
+			chartDependencies = append(chartDependencies, chartDependency)
+		}
+		if req.Alias != "" {
+			req.Name = req.Alias
+		}
+	}
+	c.SetDependencies(chartDependencies...)
+
+	// set all to true
+	for _, lr := range c.Metadata.Dependencies {
+		lr.Enabled = true
+	}
+	cvals, err := CoalesceValues(c, v)
+	if err != nil {
+		return err
+	}
+	// flag dependencies as enabled/disabled
+	processDependencyTags(c.Metadata.Dependencies, cvals)
+	processDependencyConditions(c.Metadata.Dependencies, cvals, path)
+	// make a map of charts to remove
+	rm := map[string]struct{}{}
+	for _, r := range c.Metadata.Dependencies {
+		if !r.Enabled {
+			// remove disabled chart
+			rm[r.Name] = struct{}{}
+		}
+	}
+	// don't keep disabled charts in new slice
+	cd := []*chart.Chart{}
+	copy(cd, c.Dependencies()[:0])
+	for _, n := range c.Dependencies() {
+		if _, ok := rm[n.Metadata.Name]; !ok {
+			cd = append(cd, n)
+		}
+	}
+	// don't keep disabled charts in metadata
+	cdMetadata := []*chart.Dependency{}
+	copy(cdMetadata, c.Metadata.Dependencies[:0])
+	for _, n := range c.Metadata.Dependencies {
+		if _, ok := rm[n.Name]; !ok {
+			cdMetadata = append(cdMetadata, n)
+		}
+	}
+
+	// recursively call self to process sub dependencies
+	for _, t := range cd {
+		subpath := path + t.Metadata.Name + "."
+		if err := processDependencyEnabled(t, cvals, subpath); err != nil {
+			return err
+		}
+	}
+	// set the correct dependencies in metadata
+	c.Metadata.Dependencies = nil
+	c.Metadata.Dependencies = append(c.Metadata.Dependencies, cdMetadata...)
+	c.SetDependencies(cd...)
+
+	return nil
+}
+
+// pathToMap creates a nested map given a YAML path in dot notation.
+func pathToMap(path string, data map[string]interface{}) map[string]interface{} {
+	if path == "." {
+		return data
+	}
+	return set(parsePath(path), data)
+}
+
+func set(path []string, data map[string]interface{}) map[string]interface{} {
+	if len(path) == 0 {
+		return nil
+	}
+	cur := data
+	for i := len(path) - 1; i >= 0; i-- {
+		cur = map[string]interface{}{path[i]: cur}
+	}
+	return cur
+}
+
+// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field.
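+//
+// For illustration (a sketch of the two forms handled in the switch below),
+// a dependency entry in the parent Chart.yaml may declare:
+//
+//	import-values:
+//	  - child: default.data   # map form: copy the child table to the "parent" path
+//	    parent: myimports
+//	  - data                  # string form: shorthand for child "exports.data"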
+func processImportValues(c *chart.Chart, merge bool) error {
+	if c.Metadata.Dependencies == nil {
+		return nil
+	}
+	// combine chart values and empty config to get Values
+	var cvals Values
+	var err error
+	if merge {
+		cvals, err = MergeValues(c, nil)
+	} else {
+		cvals, err = CoalesceValues(c, nil)
+	}
+	if err != nil {
+		return err
+	}
+	b := make(map[string]interface{})
+	// import values from each dependency if specified in import-values
+	for _, r := range c.Metadata.Dependencies {
+		var outiv []interface{}
+		for _, riv := range r.ImportValues {
+			switch iv := riv.(type) {
+			case map[string]interface{}:
+				child := iv["child"].(string)
+				parent := iv["parent"].(string)
+
+				outiv = append(outiv, map[string]string{
+					"child":  child,
+					"parent": parent,
+				})
+
+				// get child table
+				vv, err := cvals.Table(r.Name + "." + child)
+				if err != nil {
+					log.Printf("Warning: ImportValues missing table from chart %s: %v", r.Name, err)
+					continue
+				}
+				// create value map from child to be merged into parent
+				if merge {
+					b = MergeTables(b, pathToMap(parent, vv.AsMap()))
+				} else {
+					b = CoalesceTables(b, pathToMap(parent, vv.AsMap()))
+				}
+			case string:
+				child := "exports." + iv
+				outiv = append(outiv, map[string]string{
+					"child":  child,
+					"parent": ".",
+				})
+				vm, err := cvals.Table(r.Name + "." + child)
+				if err != nil {
+					log.Printf("Warning: ImportValues missing table: %v", err)
+					continue
+				}
+				if merge {
+					b = MergeTables(b, vm.AsMap())
+				} else {
+					b = CoalesceTables(b, vm.AsMap())
+				}
+			}
+		}
+		r.ImportValues = outiv
+	}
+
+	// Imported values from a child to a parent chart have a lower priority than
+	// the parent's values. This enables parent charts to import a large section
+	// from a child and then override select parts. This is why b is merged into
+	// cvals in the code below and not the other way around.
+	if merge {
+		// Deep copy cvals here: pointers can otherwise end up in cvals when
+		// values are copied onto b, in ways that break things.
+		cvals = deepCopyMap(cvals)
+		c.Values = MergeTables(cvals, b)
+	} else {
+		// Trimming the nil values from cvals is needed for backwards compatibility.
+		// Previously, the b value had been populated with cvals along with some
+		// overrides. This caused the coalescing functionality to remove the
+		// nil/null values.
+		cvals = trimNilValues(cvals)
+		c.Values = CoalesceTables(cvals, b)
+	}
+
+	return nil
+}
+
+func deepCopyMap(vals map[string]interface{}) map[string]interface{} {
+	valsCopy, err := copystructure.Copy(vals)
+	if err != nil {
+		return vals
+	}
+	return valsCopy.(map[string]interface{})
+}
+
+func trimNilValues(vals map[string]interface{}) map[string]interface{} {
+	valsCopy, err := copystructure.Copy(vals)
+	if err != nil {
+		return vals
+	}
+	valsCopyMap := valsCopy.(map[string]interface{})
+	for key, val := range valsCopyMap {
+		if val == nil {
+			// Iterate over the values and remove nil keys
+			delete(valsCopyMap, key)
+		} else if istable(val) {
+			// Recursively call into ourselves to remove keys from inner tables
+			valsCopyMap[key] = trimNilValues(val.(map[string]interface{}))
+		}
+	}
+
+	return valsCopyMap
+}
+
+// processDependencyImportValues imports specified chart values from child to parent.
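+// It recurses into each dependency first, so import-values are resolved
+// depth-first: grandchildren are processed before their parents.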
+func processDependencyImportValues(c *chart.Chart, merge bool) error { + for _, d := range c.Dependencies() { + // recurse + if err := processDependencyImportValues(d, merge); err != nil { + return err + } + } + return processImportValues(c, merge) +} diff --git a/vendor/k8s.io/helm/pkg/chartutil/doc.go b/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go similarity index 61% rename from vendor/k8s.io/helm/pkg/chartutil/doc.go rename to vendor/helm.sh/helm/v3/pkg/chartutil/doc.go index ad2224f94..49c55ac52 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/doc.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go @@ -14,31 +14,32 @@ See the License for the specific language governing permissions and limitations under the License. */ -/*Package chartutil contains tools for working with charts. +/* +Package chartutil contains tools for working with charts. -Charts are described in the protocol buffer definition (pkg/proto/hapi/charts). +Charts are described in the chart package (pkg/chart). This package provides utilities for serializing and deserializing charts. A chart can be represented on the file system in one of two ways: - - As a directory that contains a Chart.yaml file and other chart things. - - As a tarred gzipped file containing a directory that then contains a - Chart.yaml file. + - As a directory that contains a Chart.yaml file and other chart things. + - As a tarred gzipped file containing a directory that then contains a + Chart.yaml file. This package provides utilities for working with those file formats. -The preferred way of loading a chart is using 'chartutil.Load`: +The preferred way of loading a chart is using 'loader.Load`: - chart, err := chartutil.Load(filename) + chart, err := loader.Load(filename) This will attempt to discover whether the file at 'filename' is a directory or a chart archive. It will then load accordingly. For accepting raw compressed tar file data from an io.Reader, the -'chartutil.LoadArchive()' will read in the data, uncompress it, and unpack it +'loader.LoadArchive()' will read in the data, uncompress it, and unpack it into a Chart. -When creating charts in memory, use the 'k8s.io/helm/pkg/proto/hapi/chart' +When creating charts in memory, use the 'helm.sh/helm/pkg/chart' package directly. */ -package chartutil // import "k8s.io/helm/pkg/chartutil" +package chartutil // import "helm.sh/helm/v3/pkg/chartutil" diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go new file mode 100644 index 000000000..fcdcc27ea --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go @@ -0,0 +1,35 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" +) + +// ErrNoTable indicates that a chart does not have a matching table. 
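+// It is returned, for example, by Values.Table when a dotted path such as
+// "foo.bar" does not resolve to a map.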
+type ErrNoTable struct { + Key string +} + +func (e ErrNoTable) Error() string { return fmt.Sprintf("%q is not a table", e.Key) } + +// ErrNoValue indicates that Values does not contain a key with a value +type ErrNoValue struct { + Key string +} + +func (e ErrNoValue) Error() string { return fmt.Sprintf("%q is not a value", e.Key) } diff --git a/vendor/k8s.io/helm/pkg/chartutil/expand.go b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go similarity index 82% rename from vendor/k8s.io/helm/pkg/chartutil/expand.go rename to vendor/helm.sh/helm/v3/pkg/chartutil/expand.go index 9ed021d9c..7ae1ae6fa 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/expand.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go @@ -17,18 +17,21 @@ limitations under the License. package chartutil import ( - "errors" "io" - "io/ioutil" "os" "path/filepath" securejoin "github.com/cyphar/filepath-securejoin" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" ) // Expand uncompresses and extracts a chart into the specified directory. func Expand(dir string, r io.Reader) error { - files, err := loadArchiveFiles(r) + files, err := loader.LoadArchiveFiles(r) if err != nil { return err } @@ -37,11 +40,11 @@ func Expand(dir string, r io.Reader) error { var chartName string for _, file := range files { if file.Name == "Chart.yaml" { - ch, err := UnmarshalChartfile(file.Data) - if err != nil { - return err + ch := &chart.Metadata{} + if err := yaml.Unmarshal(file.Data, ch); err != nil { + return errors.Wrap(err, "cannot load Chart.yaml") } - chartName = ch.GetName() + chartName = ch.Name } } if chartName == "" { @@ -68,10 +71,11 @@ func Expand(dir string, r io.Reader) error { return err } - if err := ioutil.WriteFile(outpath, file.Data, 0644); err != nil { + if err := os.WriteFile(outpath, file.Data, 0644); err != nil { return err } } + return nil } diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go new file mode 100644 index 000000000..7b9768fd3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go @@ -0,0 +1,93 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package chartutil + +import ( + "bytes" + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/xeipuuv/gojsonschema" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// ValidateAgainstSchema checks that values does not violate the structure laid out in schema +func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { + var sb strings.Builder + if chrt.Schema != nil { + err := ValidateAgainstSingleSchema(values, chrt.Schema) + if err != nil { + sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name())) + sb.WriteString(err.Error()) + } + } + + // For each dependency, recursively call this function with the coalesced values + for _, subchart := range chrt.Dependencies() { + subchartValues := values[subchart.Name()].(map[string]interface{}) + if err := ValidateAgainstSchema(subchart, subchartValues); err != nil { + sb.WriteString(err.Error()) + } + } + + if sb.Len() > 0 { + return errors.New(sb.String()) + } + + return nil +} + +// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema +func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error) { + defer func() { + if r := recover(); r != nil { + reterr = fmt.Errorf("unable to validate schema: %s", r) + } + }() + + valuesData, err := yaml.Marshal(values) + if err != nil { + return err + } + valuesJSON, err := yaml.YAMLToJSON(valuesData) + if err != nil { + return err + } + if bytes.Equal(valuesJSON, []byte("null")) { + valuesJSON = []byte("{}") + } + schemaLoader := gojsonschema.NewBytesLoader(schemaJSON) + valuesLoader := gojsonschema.NewBytesLoader(valuesJSON) + + result, err := gojsonschema.Validate(schemaLoader, valuesLoader) + if err != nil { + return err + } + + if !result.Valid() { + var sb strings.Builder + for _, desc := range result.Errors() { + sb.WriteString(fmt.Sprintf("- %s\n", desc)) + } + return errors.New(sb.String()) + } + + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/save.go b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go new file mode 100644 index 000000000..2ce4eddaf --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go @@ -0,0 +1,244 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=") + +// SaveDir saves a chart as files in a directory. +// +// This takes the chart name, and creates a new subdirectory inside of the given dest +// directory, writing the chart's contents to that subdirectory. 
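+//
+// For example (illustrative), SaveDir(c, "/tmp") for a chart named "mychart"
+// writes /tmp/mychart/Chart.yaml, values.yaml, any values.schema.json, the
+// templates/ and files/ contents, and each dependency as an archive under
+// /tmp/mychart/charts/.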
+func SaveDir(c *chart.Chart, dest string) error { + // Create the chart directory + outdir := filepath.Join(dest, c.Name()) + if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() { + return errors.Errorf("file %s already exists and is not a directory", outdir) + } + if err := os.MkdirAll(outdir, 0755); err != nil { + return err + } + + // Save the chart file. + if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil { + return err + } + + // Save values.yaml + for _, f := range c.Raw { + if f.Name == ValuesfileName { + vf := filepath.Join(outdir, ValuesfileName) + if err := writeFile(vf, f.Data); err != nil { + return err + } + } + } + + // Save values.schema.json if it exists + if c.Schema != nil { + filename := filepath.Join(outdir, SchemafileName) + if err := writeFile(filename, c.Schema); err != nil { + return err + } + } + + // Save templates and files + for _, o := range [][]*chart.File{c.Templates, c.Files} { + for _, f := range o { + n := filepath.Join(outdir, f.Name) + if err := writeFile(n, f.Data); err != nil { + return err + } + } + } + + // Save dependencies + base := filepath.Join(outdir, ChartsDir) + for _, dep := range c.Dependencies() { + // Here, we write each dependency as a tar file. + if _, err := Save(dep, base); err != nil { + return errors.Wrapf(err, "saving %s", dep.ChartFullPath()) + } + } + return nil +} + +// Save creates an archived chart to the given directory. +// +// This takes an existing chart and a destination directory. +// +// If the directory is /foo, and the chart is named bar, with version 1.0.0, this +// will generate /foo/bar-1.0.0.tgz. +// +// This returns the absolute path to the chart archive file. +func Save(c *chart.Chart, outDir string) (string, error) { + if err := c.Validate(); err != nil { + return "", errors.Wrap(err, "chart validation") + } + + filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version) + filename = filepath.Join(outDir, filename) + dir := filepath.Dir(filename) + if stat, err := os.Stat(dir); err != nil { + if os.IsNotExist(err) { + if err2 := os.MkdirAll(dir, 0755); err2 != nil { + return "", err2 + } + } else { + return "", errors.Wrapf(err, "stat %s", dir) + } + } else if !stat.IsDir() { + return "", errors.Errorf("is not a directory: %s", dir) + } + + f, err := os.Create(filename) + if err != nil { + return "", err + } + + // Wrap in gzip writer + zipper := gzip.NewWriter(f) + zipper.Header.Extra = headerBytes + zipper.Header.Comment = "Helm" + + // Wrap in tar writer + twriter := tar.NewWriter(zipper) + rollback := false + defer func() { + twriter.Close() + zipper.Close() + f.Close() + if rollback { + os.Remove(filename) + } + }() + + if err := writeTarContents(twriter, c, ""); err != nil { + rollback = true + return filename, err + } + return filename, nil +} + +func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error { + base := filepath.Join(prefix, c.Name()) + + // Pull out the dependencies of a v1 Chart, since there's no way + // to tell the serializer to skip a field for just this use case + savedDependencies := c.Metadata.Dependencies + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Metadata.Dependencies = nil + } + // Save Chart.yaml + cdata, err := yaml.Marshal(c.Metadata) + if c.Metadata.APIVersion == chart.APIVersionV1 { + c.Metadata.Dependencies = savedDependencies + } + if err != nil { + return err + } + if err := writeToTar(out, filepath.Join(base, ChartfileName), cdata); err != nil { + return err + } + + // Save Chart.lock + // TODO: 
remove the APIVersion check when APIVersionV1 is not used anymore + if c.Metadata.APIVersion == chart.APIVersionV2 { + if c.Lock != nil { + ldata, err := yaml.Marshal(c.Lock) + if err != nil { + return err + } + if err := writeToTar(out, filepath.Join(base, "Chart.lock"), ldata); err != nil { + return err + } + } + } + + // Save values.yaml + for _, f := range c.Raw { + if f.Name == ValuesfileName { + if err := writeToTar(out, filepath.Join(base, ValuesfileName), f.Data); err != nil { + return err + } + } + } + + // Save values.schema.json if it exists + if c.Schema != nil { + if !json.Valid(c.Schema) { + return errors.New("Invalid JSON in " + SchemafileName) + } + if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema); err != nil { + return err + } + } + + // Save templates + for _, f := range c.Templates { + n := filepath.Join(base, f.Name) + if err := writeToTar(out, n, f.Data); err != nil { + return err + } + } + + // Save files + for _, f := range c.Files { + n := filepath.Join(base, f.Name) + if err := writeToTar(out, n, f.Data); err != nil { + return err + } + } + + // Save dependencies + for _, dep := range c.Dependencies() { + if err := writeTarContents(out, dep, filepath.Join(base, ChartsDir)); err != nil { + return err + } + } + return nil +} + +// writeToTar writes a single file to a tar archive. +func writeToTar(out *tar.Writer, name string, body []byte) error { + // TODO: Do we need to create dummy parent directory names if none exist? + h := &tar.Header{ + Name: filepath.ToSlash(name), + Mode: 0644, + Size: int64(len(body)), + ModTime: time.Now(), + } + if err := out.WriteHeader(h); err != nil { + return err + } + _, err := out.Write(body) + return err +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go new file mode 100644 index 000000000..05c090cb6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go @@ -0,0 +1,112 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "regexp" + + "github.com/pkg/errors" +) + +// validName is a regular expression for resource names. +// +// According to the Kubernetes help text, the regular expression it uses is: +// +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// +// This follows the above regular expression (but requires a full string match, not partial). +// +// The Kubernetes documentation is here, though it is not entirely correct: +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +var validName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`) + +var ( + // errMissingName indicates that a release (name) was not provided. 
+	errMissingName = errors.New("no name provided")
+
+	// errInvalidName indicates that an invalid release name was provided
+	errInvalidName = fmt.Errorf(
+		"invalid release name, must match regex %s and the length must not be longer than 53",
+		validName.String())
+
+	// errInvalidKubernetesName indicates that the name does not meet the Kubernetes
+	// restrictions on metadata names.
+	errInvalidKubernetesName = fmt.Errorf(
+		"invalid metadata name, must match regex %s and the length must not be longer than 253",
+		validName.String())
+)
+
+const (
+	// According to the Kubernetes docs (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#rfc-1035-label-names)
+	// some resource names have a max length of 63 characters while others have a max
+	// length of 253 characters. Since we cannot know which resources a chart will create,
+	// we limit release names to 63 chars minus 10 reserved chars, because chart maintainers
+	// often use the release name as part of a resource name (plus some additional
+	// characters of their own).
+	maxReleaseNameLen = 53
+	// maxMetadataNameLen is the maximum length Kubernetes allows for any name.
+	maxMetadataNameLen = 253
+)
+
+// ValidateReleaseName performs checks on a proposed Helm release name.
+//
+// For Helm to allow a name, it must not exceed a certain character count (53) and must
+// match a regular expression.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+func ValidateReleaseName(name string) error {
+	// This case is preserved for backwards compatibility
+	if name == "" {
+		return errMissingName
+	}
+	if len(name) > maxReleaseNameLen || !validName.MatchString(name) {
+		return errInvalidName
+	}
+	return nil
+}
+
+// ValidateMetadataName validates the name field of a Kubernetes metadata object.
+//
+// Empty strings, strings longer than 253 chars, or strings that don't match the regexp
+// will fail.
+//
+// According to the Kubernetes help text, the regular expression it uses is:
+//
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+//
+// This follows the above regular expression (but requires a full string match, not partial).
+//
+// The Kubernetes documentation is here, though it is not entirely correct:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+//
+// Deprecated: remove in Helm 4. Name validation now uses rules defined in
+// pkg/lint/rules.validateMetadataNameFunc()
+func ValidateMetadataName(name string) error {
+	if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) {
+		return errInvalidKubernetesName
+	}
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/values.go b/vendor/helm.sh/helm/v3/pkg/chartutil/values.go
new file mode 100644
index 000000000..2fa2bdabb
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/chartutil/values.go
@@ -0,0 +1,212 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chartutil + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" +) + +// GlobalKey is the name of the Values key that is used for storing global vars. +const GlobalKey = "global" + +// Values represents a collection of chart values. +type Values map[string]interface{} + +// YAML encodes the Values into a YAML string. +func (v Values) YAML() (string, error) { + b, err := yaml.Marshal(v) + return string(b), err +} + +// Table gets a table (YAML subsection) from a Values object. +// +// The table is returned as a Values. +// +// Compound table names may be specified with dots: +// +// foo.bar +// +// The above will be evaluated as "The table bar inside the table +// foo". +// +// An ErrNoTable is returned if the table does not exist. +func (v Values) Table(name string) (Values, error) { + table := v + var err error + + for _, n := range parsePath(name) { + if table, err = tableLookup(table, n); err != nil { + break + } + } + return table, err +} + +// AsMap is a utility function for converting Values to a map[string]interface{}. +// +// It protects against nil map panics. +func (v Values) AsMap() map[string]interface{} { + if len(v) == 0 { + return map[string]interface{}{} + } + return v +} + +// Encode writes serialized Values information to the given io.Writer. +func (v Values) Encode(w io.Writer) error { + out, err := yaml.Marshal(v) + if err != nil { + return err + } + _, err = w.Write(out) + return err +} + +func tableLookup(v Values, simple string) (Values, error) { + v2, ok := v[simple] + if !ok { + return v, ErrNoTable{simple} + } + if vv, ok := v2.(map[string]interface{}); ok { + return vv, nil + } + + // This catches a case where a value is of type Values, but doesn't (for some + // reason) match the map[string]interface{}. This has been observed in the + // wild, and might be a result of a nil map of type Values. + if vv, ok := v2.(Values); ok { + return vv, nil + } + + return Values{}, ErrNoTable{simple} +} + +// ReadValues will parse YAML byte data into a Values. +func ReadValues(data []byte) (vals Values, err error) { + err = yaml.Unmarshal(data, &vals) + if len(vals) == 0 { + vals = Values{} + } + return vals, err +} + +// ReadValuesFile will parse a YAML file into a map of values. +func ReadValuesFile(filename string) (Values, error) { + data, err := os.ReadFile(filename) + if err != nil { + return map[string]interface{}{}, err + } + return ReadValues(data) +} + +// ReleaseOptions represents the additional release options needed +// for the composition of the final values struct +type ReleaseOptions struct { + Name string + Namespace string + Revision int + IsUpgrade bool + IsInstall bool +} + +// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files +// +// This takes both ReleaseOptions and Capabilities to merge into the render values. 
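+//
+// The returned map exposes the top-level keys templates expect, e.g. .Chart,
+// .Release.Name, .Capabilities and (after coalescing) .Values; see the map
+// constructed below.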
+func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) { + if caps == nil { + caps = DefaultCapabilities + } + top := map[string]interface{}{ + "Chart": chrt.Metadata, + "Capabilities": caps, + "Release": map[string]interface{}{ + "Name": options.Name, + "Namespace": options.Namespace, + "IsUpgrade": options.IsUpgrade, + "IsInstall": options.IsInstall, + "Revision": options.Revision, + "Service": "Helm", + }, + } + + vals, err := CoalesceValues(chrt, chrtVals) + if err != nil { + return top, err + } + + if err := ValidateAgainstSchema(chrt, vals); err != nil { + errFmt := "values don't meet the specifications of the schema(s) in the following chart(s):\n%s" + return top, fmt.Errorf(errFmt, err.Error()) + } + + top["Values"] = vals + return top, nil +} + +// istable is a special-purpose function to see if the present thing matches the definition of a YAML table. +func istable(v interface{}) bool { + _, ok := v.(map[string]interface{}) + return ok +} + +// PathValue takes a path that traverses a YAML structure and returns the value at the end of that path. +// The path starts at the root of the YAML structure and is comprised of YAML keys separated by periods. +// Given the following YAML data the value at path "chapter.one.title" is "Loomings". +// +// chapter: +// one: +// title: "Loomings" +func (v Values) PathValue(path string) (interface{}, error) { + if path == "" { + return nil, errors.New("YAML path cannot be empty") + } + return v.pathValue(parsePath(path)) +} + +func (v Values) pathValue(path []string) (interface{}, error) { + if len(path) == 1 { + // if exists must be root key not table + if _, ok := v[path[0]]; ok && !istable(v[path[0]]) { + return v[path[0]], nil + } + return nil, ErrNoValue{path[0]} + } + + key, path := path[len(path)-1], path[:len(path)-1] + // get our table for table path + t, err := v.Table(joinPath(path...)) + if err != nil { + return nil, ErrNoValue{key} + } + // check table for key and ensure value is not a table + if k, ok := t[key]; ok && !istable(k) { + return k, nil + } + return nil, ErrNoValue{key} +} + +func parsePath(key string) []string { return strings.Split(key, ".") } + +func joinPath(path ...string) string { return strings.Join(path, ".") } diff --git a/vendor/helm.sh/helm/v3/pkg/cli/environment.go b/vendor/helm.sh/helm/v3/pkg/cli/environment.go new file mode 100644 index 000000000..4f74f2642 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/cli/environment.go @@ -0,0 +1,258 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package cli describes the operating environment for the Helm CLI. + +Helm's environment encapsulates all of the service dependencies Helm has. +These dependencies are expressed as interfaces so that alternate implementations +(mocks, etc.) can be easily generated. 
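+
+Settings are resolved from HELM_* environment variables when New() is called,
+and may then be overridden by the command-line flags bound in AddFlags.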
+*/
+package cli
+
+import (
+	"fmt"
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/spf13/pflag"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/client-go/rest"
+
+	"helm.sh/helm/v3/internal/version"
+	"helm.sh/helm/v3/pkg/helmpath"
+)
+
+// defaultMaxHistory sets the default maximum number of releases kept in a
+// release's history (0 means unlimited)
+const defaultMaxHistory = 10
+
+// defaultBurstLimit sets the default client-side throttling limit
+const defaultBurstLimit = 100
+
+// defaultQPS sets the default QPS value to 0 to use library defaults unless specified
+const defaultQPS = float32(0)
+
+// EnvSettings describes all of the environment settings.
+type EnvSettings struct {
+	namespace string
+	config    *genericclioptions.ConfigFlags
+
+	// KubeConfig is the path to the kubeconfig file
+	KubeConfig string
+	// KubeContext is the name of the kubeconfig context.
+	KubeContext string
+	// KubeToken is the bearer token used for authentication
+	KubeToken string
+	// KubeAsUser is the username to impersonate for the operation
+	KubeAsUser string
+	// KubeAsGroups lists the groups to impersonate for the operation, parsed from a comma-delimited list
+	KubeAsGroups []string
+	// KubeAPIServer is the Kubernetes API server endpoint used for authentication
+	KubeAPIServer string
+	// KubeCaFile is a custom certificate authority file.
+	KubeCaFile string
+	// KubeInsecureSkipTLSVerify indicates that the server's certificate will not be checked for validity.
+	// This makes the HTTPS connections insecure
+	KubeInsecureSkipTLSVerify bool
+	// KubeTLSServerName overrides the name to use for server certificate validation.
+	// If it is not provided, the hostname used to contact the server is used
+	KubeTLSServerName string
+	// Debug indicates whether or not Helm is running in Debug mode.
+	Debug bool
+	// RegistryConfig is the path to the registry config file.
+	RegistryConfig string
+	// RepositoryConfig is the path to the repositories file.
+	RepositoryConfig string
+	// RepositoryCache is the path to the repository cache directory.
+	RepositoryCache string
+	// PluginsDirectory is the path to the plugins directory.
+	PluginsDirectory string
+	// MaxHistory is the max release history maintained.
+	MaxHistory int
+	// BurstLimit is the default client-side throttling limit.
+	BurstLimit int
+	// QPS is queries per second which may be used to avoid throttling.
+ QPS float32 +} + +func New() *EnvSettings { + env := &EnvSettings{ + namespace: os.Getenv("HELM_NAMESPACE"), + MaxHistory: envIntOr("HELM_MAX_HISTORY", defaultMaxHistory), + KubeContext: os.Getenv("HELM_KUBECONTEXT"), + KubeToken: os.Getenv("HELM_KUBETOKEN"), + KubeAsUser: os.Getenv("HELM_KUBEASUSER"), + KubeAsGroups: envCSV("HELM_KUBEASGROUPS"), + KubeAPIServer: os.Getenv("HELM_KUBEAPISERVER"), + KubeCaFile: os.Getenv("HELM_KUBECAFILE"), + KubeTLSServerName: os.Getenv("HELM_KUBETLS_SERVER_NAME"), + KubeInsecureSkipTLSVerify: envBoolOr("HELM_KUBEINSECURE_SKIP_TLS_VERIFY", false), + PluginsDirectory: envOr("HELM_PLUGINS", helmpath.DataPath("plugins")), + RegistryConfig: envOr("HELM_REGISTRY_CONFIG", helmpath.ConfigPath("registry/config.json")), + RepositoryConfig: envOr("HELM_REPOSITORY_CONFIG", helmpath.ConfigPath("repositories.yaml")), + RepositoryCache: envOr("HELM_REPOSITORY_CACHE", helmpath.CachePath("repository")), + BurstLimit: envIntOr("HELM_BURST_LIMIT", defaultBurstLimit), + QPS: envFloat32Or("HELM_QPS", defaultQPS), + } + env.Debug, _ = strconv.ParseBool(os.Getenv("HELM_DEBUG")) + + // bind to kubernetes config flags + env.config = &genericclioptions.ConfigFlags{ + Namespace: &env.namespace, + Context: &env.KubeContext, + BearerToken: &env.KubeToken, + APIServer: &env.KubeAPIServer, + CAFile: &env.KubeCaFile, + KubeConfig: &env.KubeConfig, + Impersonate: &env.KubeAsUser, + Insecure: &env.KubeInsecureSkipTLSVerify, + TLSServerName: &env.KubeTLSServerName, + ImpersonateGroup: &env.KubeAsGroups, + WrapConfigFn: func(config *rest.Config) *rest.Config { + config.Burst = env.BurstLimit + config.QPS = env.QPS + config.Wrap(func(rt http.RoundTripper) http.RoundTripper { + return &retryingRoundTripper{wrapped: rt} + }) + config.UserAgent = version.GetUserAgent() + return config + }, + } + return env +} + +// AddFlags binds flags to the given flagset. +func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) { + fs.StringVarP(&s.namespace, "namespace", "n", s.namespace, "namespace scope for this request") + fs.StringVar(&s.KubeConfig, "kubeconfig", "", "path to the kubeconfig file") + fs.StringVar(&s.KubeContext, "kube-context", s.KubeContext, "name of the kubeconfig context to use") + fs.StringVar(&s.KubeToken, "kube-token", s.KubeToken, "bearer token used for authentication") + fs.StringVar(&s.KubeAsUser, "kube-as-user", s.KubeAsUser, "username to impersonate for the operation") + fs.StringArrayVar(&s.KubeAsGroups, "kube-as-group", s.KubeAsGroups, "group to impersonate for the operation, this flag can be repeated to specify multiple groups.") + fs.StringVar(&s.KubeAPIServer, "kube-apiserver", s.KubeAPIServer, "the address and the port for the Kubernetes API server") + fs.StringVar(&s.KubeCaFile, "kube-ca-file", s.KubeCaFile, "the certificate authority file for the Kubernetes API server connection") + fs.StringVar(&s.KubeTLSServerName, "kube-tls-server-name", s.KubeTLSServerName, "server name to use for Kubernetes API server certificate validation. If it is not provided, the hostname used to contact the server is used") + fs.BoolVar(&s.KubeInsecureSkipTLSVerify, "kube-insecure-skip-tls-verify", s.KubeInsecureSkipTLSVerify, "if true, the Kubernetes API server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure") + fs.BoolVar(&s.Debug, "debug", s.Debug, "enable verbose output") + fs.StringVar(&s.RegistryConfig, "registry-config", s.RegistryConfig, "path to the registry config file") + fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs") + fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the file containing cached repository indexes") + fs.IntVar(&s.BurstLimit, "burst-limit", s.BurstLimit, "client-side default throttling limit") + fs.Float32Var(&s.QPS, "qps", s.QPS, "queries per second used when communicating with the Kubernetes API, not including bursting") +} + +func envOr(name, def string) string { + if v, ok := os.LookupEnv(name); ok { + return v + } + return def +} + +func envBoolOr(name string, def bool) bool { + if name == "" { + return def + } + envVal := envOr(name, strconv.FormatBool(def)) + ret, err := strconv.ParseBool(envVal) + if err != nil { + return def + } + return ret +} + +func envIntOr(name string, def int) int { + if name == "" { + return def + } + envVal := envOr(name, strconv.Itoa(def)) + ret, err := strconv.Atoi(envVal) + if err != nil { + return def + } + return ret +} + +func envFloat32Or(name string, def float32) float32 { + if name == "" { + return def + } + envVal := envOr(name, strconv.FormatFloat(float64(def), 'f', 2, 32)) + ret, err := strconv.ParseFloat(envVal, 32) + if err != nil { + return def + } + return float32(ret) +} + +func envCSV(name string) (ls []string) { + trimmed := strings.Trim(os.Getenv(name), ", ") + if trimmed != "" { + ls = strings.Split(trimmed, ",") + } + return +} + +func (s *EnvSettings) EnvVars() map[string]string { + envvars := map[string]string{ + "HELM_BIN": os.Args[0], + "HELM_CACHE_HOME": helmpath.CachePath(""), + "HELM_CONFIG_HOME": helmpath.ConfigPath(""), + "HELM_DATA_HOME": helmpath.DataPath(""), + "HELM_DEBUG": fmt.Sprint(s.Debug), + "HELM_PLUGINS": s.PluginsDirectory, + "HELM_REGISTRY_CONFIG": s.RegistryConfig, + "HELM_REPOSITORY_CACHE": s.RepositoryCache, + "HELM_REPOSITORY_CONFIG": s.RepositoryConfig, + "HELM_NAMESPACE": s.Namespace(), + "HELM_MAX_HISTORY": strconv.Itoa(s.MaxHistory), + "HELM_BURST_LIMIT": strconv.Itoa(s.BurstLimit), + "HELM_QPS": strconv.FormatFloat(float64(s.QPS), 'f', 2, 32), + + // broken, these are populated from helm flags and not kubeconfig. 
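+		// (i.e. they echo the flag/env inputs above rather than anything
+		// resolved from the loaded kubeconfig)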
+ "HELM_KUBECONTEXT": s.KubeContext, + "HELM_KUBETOKEN": s.KubeToken, + "HELM_KUBEASUSER": s.KubeAsUser, + "HELM_KUBEASGROUPS": strings.Join(s.KubeAsGroups, ","), + "HELM_KUBEAPISERVER": s.KubeAPIServer, + "HELM_KUBECAFILE": s.KubeCaFile, + "HELM_KUBEINSECURE_SKIP_TLS_VERIFY": strconv.FormatBool(s.KubeInsecureSkipTLSVerify), + "HELM_KUBETLS_SERVER_NAME": s.KubeTLSServerName, + } + if s.KubeConfig != "" { + envvars["KUBECONFIG"] = s.KubeConfig + } + return envvars +} + +// Namespace gets the namespace from the configuration +func (s *EnvSettings) Namespace() string { + if ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil { + return ns + } + return "default" +} + +// SetNamespace sets the namespace in the configuration +func (s *EnvSettings) SetNamespace(namespace string) { + s.namespace = namespace +} + +// RESTClientGetter gets the kubeconfig from EnvSettings +func (s *EnvSettings) RESTClientGetter() genericclioptions.RESTClientGetter { + return s.config +} diff --git a/vendor/helm.sh/helm/v3/pkg/cli/roundtripper.go b/vendor/helm.sh/helm/v3/pkg/cli/roundtripper.go new file mode 100644 index 000000000..9cd4eacba --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/cli/roundtripper.go @@ -0,0 +1,80 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "strings" +) + +type retryingRoundTripper struct { + wrapped http.RoundTripper +} + +func (rt *retryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.roundTrip(req, 1, nil) +} + +func (rt *retryingRoundTripper) roundTrip(req *http.Request, retry int, prevResp *http.Response) (*http.Response, error) { + if retry < 0 { + return prevResp, nil + } + resp, rtErr := rt.wrapped.RoundTrip(req) + if rtErr != nil { + return resp, rtErr + } + if resp.StatusCode < 500 { + return resp, rtErr + } + if resp.Header.Get("content-type") != "application/json" { + return resp, rtErr + } + b, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return resp, rtErr + } + + var ke kubernetesError + r := bytes.NewReader(b) + err = json.NewDecoder(r).Decode(&ke) + r.Seek(0, io.SeekStart) + resp.Body = io.NopCloser(r) + if err != nil { + return resp, rtErr + } + if ke.Code < 500 { + return resp, rtErr + } + // Matches messages like "etcdserver: leader changed" + if strings.HasSuffix(ke.Message, "etcdserver: leader changed") { + return rt.roundTrip(req, retry-1, resp) + } + // Matches messages like "rpc error: code = Unknown desc = raft proposal dropped" + if strings.HasSuffix(ke.Message, "raft proposal dropped") { + return rt.roundTrip(req, retry-1, resp) + } + return resp, rtErr +} + +type kubernetesError struct { + Message string `json:"message"` + Code int `json:"code"` +} diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go new file mode 100644 index 000000000..a95894e00 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go @@ -0,0 +1,406 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/fileutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/provenance" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" +) + +// VerificationStrategy describes a strategy for determining whether to verify a chart. +type VerificationStrategy int + +const ( + // VerifyNever will skip all verification of a chart. + VerifyNever VerificationStrategy = iota + // VerifyIfPossible will attempt a verification, it will not error if verification + // data is missing. But it will not stop processing if verification fails. + VerifyIfPossible + // VerifyAlways will always attempt a verification, and will fail if the + // verification fails. + VerifyAlways + // VerifyLater will fetch verification data, but not do any verification. + // This is to accommodate the case where another step of the process will + // perform verification. 
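+	// For example, a caller may want the .prov file fetched to disk so that a
+	// separate step (such as a CI job) can run the signature check itself.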
+ VerifyLater +) + +// ErrNoOwnerRepo indicates that a given chart URL can't be found in any repos. +var ErrNoOwnerRepo = errors.New("could not find a repo containing the given URL") + +// ChartDownloader handles downloading a chart. +// +// It is capable of performing verifications on charts as well. +type ChartDownloader struct { + // Out is the location to write warning and info messages. + Out io.Writer + // Verify indicates what verification strategy to use. + Verify VerificationStrategy + // Keyring is the keyring file used for verification. + Keyring string + // Getter collection for the operation + Getters getter.Providers + // Options provide parameters to be passed along to the Getter being initialized. + Options []getter.Option + RegistryClient *registry.Client + RepositoryConfig string + RepositoryCache string +} + +// DownloadTo retrieves a chart. Depending on the settings, it may also download a provenance file. +// +// If Verify is set to VerifyNever, the verification will be nil. +// If Verify is set to VerifyIfPossible, this will return a verification (or nil on failure), and print a warning on failure. +// If Verify is set to VerifyAlways, this will return a verification or an error if the verification fails. +// If Verify is set to VerifyLater, this will download the prov file (if it exists), but not verify it. +// +// For VerifyNever and VerifyIfPossible, the Verification may be empty. +// +// Returns a string path to the location where the file was downloaded and a verification +// (if provenance was verified), or an error if something bad happened. +func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) { + u, err := c.ResolveChartVersion(ref, version) + if err != nil { + return "", nil, err + } + + g, err := c.Getters.ByScheme(u.Scheme) + if err != nil { + return "", nil, err + } + + data, err := g.Get(u.String(), c.Options...) + if err != nil { + return "", nil, err + } + + name := filepath.Base(u.Path) + if u.Scheme == registry.OCIScheme { + idx := strings.LastIndexByte(name, ':') + name = fmt.Sprintf("%s-%s.tgz", name[:idx], name[idx+1:]) + } + + destfile := filepath.Join(dest, name) + if err := fileutil.AtomicWriteFile(destfile, data, 0644); err != nil { + return destfile, nil, err + } + + // If provenance is requested, verify it. + ver := &provenance.Verification{} + if c.Verify > VerifyNever { + body, err := g.Get(u.String() + ".prov") + if err != nil { + if c.Verify == VerifyAlways { + return destfile, ver, errors.Errorf("failed to fetch provenance %q", u.String()+".prov") + } + fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err) + return destfile, ver, nil + } + provfile := destfile + ".prov" + if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil { + return destfile, nil, err + } + + if c.Verify != VerifyLater { + ver, err = VerifyChart(destfile, c.Keyring) + if err != nil { + // Fail always in this case, since it means the verification step + // failed. + return destfile, ver, err + } + } + } + return destfile, ver, nil +} + +func (c *ChartDownloader) getOciURI(ref, version string, u *url.URL) (*url.URL, error) { + var tag string + var err error + + // Evaluate whether an explicit version has been provided. 
Otherwise, determine version to use + _, errSemVer := semver.NewVersion(version) + if errSemVer == nil { + tag = version + } else { + // Retrieve list of repository tags + tags, err := c.RegistryClient.Tags(strings.TrimPrefix(ref, fmt.Sprintf("%s://", registry.OCIScheme))) + if err != nil { + return nil, err + } + if len(tags) == 0 { + return nil, errors.Errorf("Unable to locate any tags in provided repository: %s", ref) + } + + // Determine if version provided + // If empty, try to get the highest available tag + // If exact version, try to find it + // If semver constraint string, try to find a match + tag, err = registry.GetTagMatchingVersionOrConstraint(tags, version) + if err != nil { + return nil, err + } + } + + u.Path = fmt.Sprintf("%s:%s", u.Path, tag) + + return u, err +} + +// ResolveChartVersion resolves a chart reference to a URL. +// +// It returns the URL and sets the ChartDownloader's Options that can fetch +// the URL using the appropriate Getter. +// +// A reference may be an HTTP URL, an oci reference URL, a 'reponame/chartname' +// reference, or a local path. +// +// A version is a SemVer string (1.2.3-beta.1+f334a6789). +// +// - For fully qualified URLs, the version will be ignored (since URLs aren't versioned) +// - For a chart reference +// - If version is non-empty, this will return the URL for that version +// - If version is empty, this will return the URL for the latest version +// - If no version can be found, an error is returned +func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, error) { + u, err := url.Parse(ref) + if err != nil { + return nil, errors.Errorf("invalid chart URL format: %s", ref) + } + + if registry.IsOCI(u.String()) { + return c.getOciURI(ref, version, u) + } + + rf, err := loadRepoConfig(c.RepositoryConfig) + if err != nil { + return u, err + } + + if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 { + // In this case, we have to find the parent repo that contains this chart + // URL. And this is an unfortunate problem, as it requires actually going + // through each repo cache file and finding a matching URL. But basically + // we want to find the repo in case we have special SSL cert config + // for that repo. + + rc, err := c.scanReposForURL(ref, rf) + if err != nil { + // If there is no special config, return the default HTTP client and + // swallow the error. + if err == ErrNoOwnerRepo { + // Make sure to add the ref URL as the URL for the getter + c.Options = append(c.Options, getter.WithURL(ref)) + return u, nil + } + return u, err + } + + // If we get here, we don't need to go through the next phase of looking + // up the URL. We have it already. So we just set the parameters and return. 
+ c.Options = append( + c.Options, + getter.WithURL(rc.URL), + ) + if rc.CertFile != "" || rc.KeyFile != "" || rc.CAFile != "" { + c.Options = append(c.Options, getter.WithTLSClientConfig(rc.CertFile, rc.KeyFile, rc.CAFile)) + } + if rc.Username != "" && rc.Password != "" { + c.Options = append( + c.Options, + getter.WithBasicAuth(rc.Username, rc.Password), + getter.WithPassCredentialsAll(rc.PassCredentialsAll), + ) + } + return u, nil + } + + // See if it's of the form: repo/path_to_chart + p := strings.SplitN(u.Path, "/", 2) + if len(p) < 2 { + return u, errors.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u) + } + + repoName := p[0] + chartName := p[1] + rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories) + + if err != nil { + return u, err + } + + // Now that we have the chart repository information we can use that URL + // to set the URL for the getter. + c.Options = append(c.Options, getter.WithURL(rc.URL)) + + r, err := repo.NewChartRepository(rc, c.Getters) + if err != nil { + return u, err + } + + if r != nil && r.Config != nil { + if r.Config.CertFile != "" || r.Config.KeyFile != "" || r.Config.CAFile != "" { + c.Options = append(c.Options, getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile)) + } + if r.Config.Username != "" && r.Config.Password != "" { + c.Options = append(c.Options, + getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), + ) + } + } + + // Next, we need to load the index, and actually look up the chart. + idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name)) + i, err := repo.LoadIndexFile(idxFile) + if err != nil { + return u, errors.Wrap(err, "no cached repo found. (try 'helm repo update')") + } + + cv, err := i.Get(chartName, version) + if err != nil { + return u, errors.Wrapf(err, "chart %q matching %s not found in %s index. (try 'helm repo update')", chartName, version, r.Config.Name) + } + + if len(cv.URLs) == 0 { + return u, errors.Errorf("chart %q has no downloadable URLs", ref) + } + + // TODO: Seems that picking first URL is not fully correct + resolvedURL, err := repo.ResolveReferenceURL(rc.URL, cv.URLs[0]) + + if err != nil { + return u, errors.Errorf("invalid chart URL format: %s", ref) + } + + return url.Parse(resolvedURL) +} + +// VerifyChart takes a path to a chart archive and a keyring, and verifies the chart. +// +// It assumes that a chart archive file is accompanied by a provenance file whose +// name is the archive file name plus the ".prov" extension. +func VerifyChart(path, keyring string) (*provenance.Verification, error) { + // For now, error out if it's not a tar file. + switch fi, err := os.Stat(path); { + case err != nil: + return nil, err + case fi.IsDir(): + return nil, errors.New("unpacked charts cannot be verified") + case !isTar(path): + return nil, errors.New("chart must be a tgz file") + } + + provfile := path + ".prov" + if _, err := os.Stat(provfile); err != nil { + return nil, errors.Wrapf(err, "could not load provenance file %s", provfile) + } + + sig, err := provenance.NewFromKeyring(keyring, "") + if err != nil { + return nil, errors.Wrap(err, "failed to load keyring") + } + return sig.Verify(path, provfile) +} + +// isTar tests whether the given file is a tar file. +// +// Currently, this simply checks extension, since a subsequent function will +// untar the file and validate its binary format. 
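+//
+// For example:
+//
+//	isTar("nginx-1.2.3.tgz")    // true
+//	isTar("nginx-1.2.3.tar.gz") // false: only the .tgz extension is accepted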
+func isTar(filename string) bool { + return strings.EqualFold(filepath.Ext(filename), ".tgz") +} + +func pickChartRepositoryConfigByName(name string, cfgs []*repo.Entry) (*repo.Entry, error) { + for _, rc := range cfgs { + if rc.Name == name { + if rc.URL == "" { + return nil, errors.Errorf("no URL found for repository %s", name) + } + return rc, nil + } + } + return nil, errors.Errorf("repo %s not found", name) +} + +// scanReposForURL scans all repos to find which repo contains the given URL. +// +// This will attempt to find the given URL in all of the known repositories files. +// +// If the URL is found, this will return the repo entry that contained that URL. +// +// If all of the repos are checked, but the URL is not found, an ErrNoOwnerRepo +// error is returned. +// +// Other errors may be returned when repositories cannot be loaded or searched. +// +// Technically, the fact that a URL is not found in a repo is not a failure indication. +// Charts are not required to be included in an index before they are valid. So +// be mindful of this case. +// +// The same URL can technically exist in two or more repositories. This algorithm +// will return the first one it finds. Order is determined by the order of repositories +// in the repositories.yaml file. +func (c *ChartDownloader) scanReposForURL(u string, rf *repo.File) (*repo.Entry, error) { + // FIXME: This is far from optimal. Larger installations and index files will + // incur a performance hit for this type of scanning. + for _, rc := range rf.Repositories { + r, err := repo.NewChartRepository(rc, c.Getters) + if err != nil { + return nil, err + } + + idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name)) + i, err := repo.LoadIndexFile(idxFile) + if err != nil { + return nil, errors.Wrap(err, "no cached repo found. (try 'helm repo update')") + } + + for _, entry := range i.Entries { + for _, ver := range entry { + for _, dl := range ver.URLs { + if urlutil.Equal(u, dl) { + return rc, nil + } + } + } + } + } + // This means that there is no repo file for the given URL. + return nil, ErrNoOwnerRepo +} + +func loadRepoConfig(file string) (*repo.File, error) { + r, err := repo.LoadFile(file) + if err != nil && !os.IsNotExist(errors.Cause(err)) { + return nil, err + } + return r, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/doc.go b/vendor/helm.sh/helm/v3/pkg/downloader/doc.go new file mode 100644 index 000000000..848468090 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/doc.go @@ -0,0 +1,24 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package downloader provides a library for downloading charts. + +This package contains various tools for downloading charts from repository +servers, and then storing them in Helm-specific directory structures. This +library contains many functions that depend on a specific +filesystem layout. 
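+
+A minimal sketch of fetching a chart with verification disabled (illustrative
+only; "settings" is assumed to be a *cli.EnvSettings):
+
+	dl := downloader.ChartDownloader{
+		Out:              os.Stdout,
+		Verify:           downloader.VerifyNever,
+		Getters:          getter.All(settings),
+		RepositoryConfig: settings.RepositoryConfig,
+		RepositoryCache:  settings.RepositoryCache,
+	}
+	chartPath, _, err := dl.DownloadTo("myrepo/mychart", "1.2.3", os.TempDir())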
+*/ +package downloader diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go new file mode 100644 index 000000000..68c9c6e00 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go @@ -0,0 +1,905 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "crypto" + "encoding/hex" + "fmt" + "io" + "log" + "net/url" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/internal/resolver" + "helm.sh/helm/v3/internal/third_party/dep/fs" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/repo" +) + +// ErrRepoNotFound indicates that chart repositories can't be found in local repo cache. +// The value of Repos is missing repos. +type ErrRepoNotFound struct { + Repos []string +} + +// Error implements the error interface. +func (e ErrRepoNotFound) Error() string { + return fmt.Sprintf("no repository definition for %s", strings.Join(e.Repos, ", ")) +} + +// Manager handles the lifecycle of fetching, resolving, and storing dependencies. +type Manager struct { + // Out is used to print warnings and notifications. + Out io.Writer + // ChartPath is the path to the unpacked base chart upon which this operates. + ChartPath string + // Verification indicates whether the chart should be verified. + Verify VerificationStrategy + // Debug is the global "--debug" flag + Debug bool + // Keyring is the key ring file. + Keyring string + // SkipUpdate indicates that the repository should not be updated first. + SkipUpdate bool + // Getter collection for the operation + Getters []getter.Provider + RegistryClient *registry.Client + RepositoryConfig string + RepositoryCache string +} + +// Build rebuilds a local charts directory from a lockfile. +// +// If the lockfile is not present, this will run a Manager.Update() +// +// If SkipUpdate is set, this will not update the repository. +func (m *Manager) Build() error { + c, err := m.loadChartDir() + if err != nil { + return err + } + + // If a lock file is found, run a build from that. Otherwise, just do + // an update. + lock := c.Lock + if lock == nil { + return m.Update() + } + + // Check that all of the repos we're dependent on actually exist. 
+ req := c.Metadata.Dependencies + + // If using apiVersion v1, calculate the hash before resolve repo names + // because resolveRepoNames will change req if req uses repo alias + // and Helm 2 calculate the digest from the original req + // Fix for: https://github.com/helm/helm/issues/7619 + var v2Sum string + if c.Metadata.APIVersion == chart.APIVersionV1 { + v2Sum, err = resolver.HashV2Req(req) + if err != nil { + return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies") + } + } + + if _, err := m.resolveRepoNames(req); err != nil { + return err + } + + if sum, err := resolver.HashReq(req, lock.Dependencies); err != nil || sum != lock.Digest { + // If lock digest differs and chart is apiVersion v1, it maybe because the lock was built + // with Helm 2 and therefore should be checked with Helm v2 hash + // Fix for: https://github.com/helm/helm/issues/7233 + if c.Metadata.APIVersion == chart.APIVersionV1 { + log.Println("warning: a valid Helm v3 hash was not found. Checking against Helm v2 hash...") + if v2Sum != lock.Digest { + return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies") + } + } else { + return errors.New("the lock file (Chart.lock) is out of sync with the dependencies file (Chart.yaml). Please update the dependencies") + } + } + + // Check that all of the repos we're dependent on actually exist. + if err := m.hasAllRepos(lock.Dependencies); err != nil { + return err + } + + if !m.SkipUpdate { + // For each repo in the file, update the cached copy of that repo + if err := m.UpdateRepositories(); err != nil { + return err + } + } + + // Now we need to fetch every package here into charts/ + return m.downloadAll(lock.Dependencies) +} + +// Update updates a local charts directory. +// +// It first reads the Chart.yaml file, and then attempts to +// negotiate versions based on that. It will download the versions +// from remote chart repositories unless SkipUpdate is true. +func (m *Manager) Update() error { + c, err := m.loadChartDir() + if err != nil { + return err + } + + // If no dependencies are found, we consider this a successful + // completion. + req := c.Metadata.Dependencies + if req == nil { + return nil + } + + // Get the names of the repositories the dependencies need that Helm is + // configured to know about. + repoNames, err := m.resolveRepoNames(req) + if err != nil { + return err + } + + // For the repositories Helm is not configured to know about, ensure Helm + // has some information about them and, when possible, the index files + // locally. + // TODO(mattfarina): Repositories should be explicitly added by end users + // rather than automattic. In Helm v4 require users to add repositories. They + // should have to add them in order to make sure they are aware of the + // repositories and opt-in to any locations, for security. + repoNames, err = m.ensureMissingRepos(repoNames, req) + if err != nil { + return err + } + + // For each of the repositories Helm is configured to know about, update + // the index information locally. 
+ if !m.SkipUpdate { + if err := m.UpdateRepositories(); err != nil { + return err + } + } + + // Now we need to find out which version of a chart best satisfies the + // dependencies in the Chart.yaml + lock, err := m.resolve(req, repoNames) + if err != nil { + return err + } + + // Now we need to fetch every package here into charts/ + if err := m.downloadAll(lock.Dependencies); err != nil { + return err + } + + // downloadAll might overwrite dependency version, recalculate lock digest + newDigest, err := resolver.HashReq(req, lock.Dependencies) + if err != nil { + return err + } + lock.Digest = newDigest + + // If the lock file hasn't changed, don't write a new one. + oldLock := c.Lock + if oldLock != nil && oldLock.Digest == lock.Digest { + return nil + } + + // Finally, we need to write the lockfile. + return writeLock(m.ChartPath, lock, c.Metadata.APIVersion == chart.APIVersionV1) +} + +func (m *Manager) loadChartDir() (*chart.Chart, error) { + if fi, err := os.Stat(m.ChartPath); err != nil { + return nil, errors.Wrapf(err, "could not find %s", m.ChartPath) + } else if !fi.IsDir() { + return nil, errors.New("only unpacked charts can be updated") + } + return loader.LoadDir(m.ChartPath) +} + +// resolve takes a list of dependencies and translates them into an exact version to download. +// +// This returns a lock file, which has all of the dependencies normalized to a specific version. +func (m *Manager) resolve(req []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) { + res := resolver.New(m.ChartPath, m.RepositoryCache, m.RegistryClient) + return res.Resolve(req, repoNames) +} + +// downloadAll takes a list of dependencies and downloads them into charts/ +// +// It will delete versions of the chart that exist on disk and might cause +// a conflict. +func (m *Manager) downloadAll(deps []*chart.Dependency) error { + repos, err := m.loadChartRepositories() + if err != nil { + return err + } + + destPath := filepath.Join(m.ChartPath, "charts") + tmpPath := filepath.Join(m.ChartPath, "tmpcharts") + + // Check if 'charts' directory is not actually a directory. If it does not exist, create it. + if fi, err := os.Stat(destPath); err == nil { + if !fi.IsDir() { + return errors.Errorf("%q is not a directory", destPath) + } + } else if os.IsNotExist(err) { + if err := os.MkdirAll(destPath, 0755); err != nil { + return err + } + } else { + return fmt.Errorf("unable to retrieve file info for '%s': %v", destPath, err) + } + + // Prepare tmpPath + if err := os.MkdirAll(tmpPath, 0755); err != nil { + return err + } + defer os.RemoveAll(tmpPath) + + fmt.Fprintf(m.Out, "Saving %d charts\n", len(deps)) + var saveError error + churls := make(map[string]struct{}) + for _, dep := range deps { + // No repository means the chart is in charts directory + if dep.Repository == "" { + fmt.Fprintf(m.Out, "Dependency %s did not declare a repository. Assuming it exists in the charts directory\n", dep.Name) + // NOTE: we are only validating the local dependency conforms to the constraints. No copying to tmpPath is necessary. 
+ chartPath := filepath.Join(destPath, dep.Name) + ch, err := loader.LoadDir(chartPath) + if err != nil { + return fmt.Errorf("unable to load chart '%s': %v", chartPath, err) + } + + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + return fmt.Errorf("dependency %s has an invalid version/constraint format: %s", dep.Name, err) + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return fmt.Errorf("invalid version %s for dependency %s: %s", dep.Version, dep.Name, err) + } + + if !constraint.Check(v) { + saveError = fmt.Errorf("dependency %s at version %s does not satisfy the constraint %s", dep.Name, ch.Metadata.Version, dep.Version) + break + } + continue + } + if strings.HasPrefix(dep.Repository, "file://") { + if m.Debug { + fmt.Fprintf(m.Out, "Archiving %s from repo %s\n", dep.Name, dep.Repository) + } + ver, err := tarFromLocalDir(m.ChartPath, dep.Name, dep.Repository, dep.Version, tmpPath) + if err != nil { + saveError = err + break + } + dep.Version = ver + continue + } + + // Any failure to resolve/download a chart should fail: + // https://github.com/helm/helm/issues/1439 + churl, username, password, insecureskiptlsverify, passcredentialsall, caFile, certFile, keyFile, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) + if err != nil { + saveError = errors.Wrapf(err, "could not find %s", churl) + break + } + + if _, ok := churls[churl]; ok { + fmt.Fprintf(m.Out, "Already downloaded %s from repo %s\n", dep.Name, dep.Repository) + continue + } + + fmt.Fprintf(m.Out, "Downloading %s from repo %s\n", dep.Name, dep.Repository) + + dl := ChartDownloader{ + Out: m.Out, + Verify: m.Verify, + Keyring: m.Keyring, + RepositoryConfig: m.RepositoryConfig, + RepositoryCache: m.RepositoryCache, + RegistryClient: m.RegistryClient, + Getters: m.Getters, + Options: []getter.Option{ + getter.WithBasicAuth(username, password), + getter.WithPassCredentialsAll(passcredentialsall), + getter.WithInsecureSkipVerifyTLS(insecureskiptlsverify), + getter.WithTLSClientConfig(certFile, keyFile, caFile), + }, + } + + version := "" + if registry.IsOCI(churl) { + churl, version, err = parseOCIRef(churl) + if err != nil { + return errors.Wrapf(err, "could not parse OCI reference") + } + dl.Options = append(dl.Options, + getter.WithRegistryClient(m.RegistryClient), + getter.WithTagName(version)) + } + + if _, _, err = dl.DownloadTo(churl, version, tmpPath); err != nil { + saveError = errors.Wrapf(err, "could not download %s", churl) + break + } + + churls[churl] = struct{}{} + } + + // TODO: this should probably be refactored to be a []error, so we can capture and provide more information rather than "last error wins". + if saveError == nil { + // now we can move all downloaded charts to destPath and delete outdated dependencies + if err := m.safeMoveDeps(deps, tmpPath, destPath); err != nil { + return err + } + } else { + fmt.Fprintln(m.Out, "Save error occurred: ", saveError) + return saveError + } + return nil +} + +func parseOCIRef(chartRef string) (string, string, error) { + refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`) + caps := refTagRegexp.FindStringSubmatch(chartRef) + if len(caps) != 4 { + return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef) + } + chartRef = caps[1] + tag := caps[3] + + return chartRef, tag, nil +} + +// safeMoveDep moves all dependencies in the source and moves them into dest. 
+// +// It does this by first matching the file name to an expected pattern, then loading +// the file to verify that it is a chart. +// +// Any charts in dest that do not exist in source are removed (barring local dependencies) +// +// Because it requires tar file introspection, it is more intensive than a basic move. +// +// This will only return errors that should stop processing entirely. Other errors +// will emit log messages or be ignored. +func (m *Manager) safeMoveDeps(deps []*chart.Dependency, source, dest string) error { + existsInSourceDirectory := map[string]bool{} + isLocalDependency := map[string]bool{} + sourceFiles, err := os.ReadDir(source) + if err != nil { + return err + } + // attempt to read destFiles; fail fast if we can't + destFiles, err := os.ReadDir(dest) + if err != nil { + return err + } + + for _, dep := range deps { + if dep.Repository == "" { + isLocalDependency[dep.Name] = true + } + } + + for _, file := range sourceFiles { + if file.IsDir() { + continue + } + filename := file.Name() + sourcefile := filepath.Join(source, filename) + destfile := filepath.Join(dest, filename) + existsInSourceDirectory[filename] = true + if _, err := loader.LoadFile(sourcefile); err != nil { + fmt.Fprintf(m.Out, "Could not verify %s for moving: %s (Skipping)", sourcefile, err) + continue + } + // NOTE: no need to delete the dest; os.Rename replaces it. + if err := fs.RenameWithFallback(sourcefile, destfile); err != nil { + fmt.Fprintf(m.Out, "Unable to move %s to charts dir %s (Skipping)", sourcefile, err) + continue + } + } + + fmt.Fprintln(m.Out, "Deleting outdated charts") + // find all files that exist in dest that do not exist in source; delete them (outdated dependencies) + for _, file := range destFiles { + if !file.IsDir() && !existsInSourceDirectory[file.Name()] { + fname := filepath.Join(dest, file.Name()) + ch, err := loader.LoadFile(fname) + if err != nil { + fmt.Fprintf(m.Out, "Could not verify %s for deletion: %s (Skipping)\n", fname, err) + continue + } + // local dependency - skip + if isLocalDependency[ch.Name()] { + continue + } + if err := os.Remove(fname); err != nil { + fmt.Fprintf(m.Out, "Could not delete %s: %s (Skipping)", fname, err) + continue + } + } + } + + return nil +} + +// hasAllRepos ensures that all of the referenced deps are in the local repo cache. +func (m *Manager) hasAllRepos(deps []*chart.Dependency) error { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return err + } + repos := rf.Repositories + + // Verify that all repositories referenced in the deps are actually known + // by Helm. + missing := []string{} +Loop: + for _, dd := range deps { + // If repo is from local path or OCI, continue + if strings.HasPrefix(dd.Repository, "file://") || registry.IsOCI(dd.Repository) { + continue + } + + if dd.Repository == "" { + continue + } + for _, repo := range repos { + if urlutil.Equal(repo.URL, strings.TrimSuffix(dd.Repository, "/")) { + continue Loop + } + } + missing = append(missing, dd.Repository) + } + if len(missing) > 0 { + return ErrRepoNotFound{missing} + } + return nil +} + +// ensureMissingRepos attempts to ensure the repository information for repos +// not managed by Helm is present. This takes in the repoNames Helm is configured +// to work with along with the chart dependencies. It will find the deps not +// in a known repo and attempt to ensure the data is present for steps like +// version resolution. 
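+//
+// For example (illustrative), a dependency pointing at
+// https://charts.example.com that is absent from repositories.yaml is cached
+// under a generated name of the form "helm-manager-<sha256 of the URL>".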
+func (m *Manager) ensureMissingRepos(repoNames map[string]string, deps []*chart.Dependency) (map[string]string, error) { + + var ru []*repo.Entry + + for _, dd := range deps { + + // If the chart is in the local charts directory no repository needs + // to be specified. + if dd.Repository == "" { + continue + } + + // When the repoName for a dependency is known we can skip ensuring + if _, ok := repoNames[dd.Name]; ok { + continue + } + + // The generated repository name, which will result in an index being + // locally cached, has a name pattern of "helm-manager-" followed by a + // sha256 of the repo name. This assumes end users will never create + // repositories with these names pointing to other repositories. Using + // this method of naming allows the existing repository pulling and + // resolution code to do most of the work. + rn, err := key(dd.Repository) + if err != nil { + return repoNames, err + } + rn = managerKeyPrefix + rn + + repoNames[dd.Name] = rn + + // Assuming the repository is generally available. For Helm managed + // access controls the repository needs to be added through the user + // managed system. This path will work for public charts, like those + // supplied by Bitnami, but not for protected charts, like corp ones + // behind a username and pass. + ri := &repo.Entry{ + Name: rn, + URL: dd.Repository, + } + ru = append(ru, ri) + } + + // Calls to UpdateRepositories (a public function) will only update + // repositories configured by the user. Here we update repos found in + // the dependencies that are not known to the user if update skipping + // is not configured. + if !m.SkipUpdate && len(ru) > 0 { + fmt.Fprintln(m.Out, "Getting updates for unmanaged Helm repositories...") + if err := m.parallelRepoUpdate(ru); err != nil { + return repoNames, err + } + } + + return repoNames, nil +} + +// resolveRepoNames returns the repo names of the referenced deps which can be used to fetch the cached index file +// and replaces aliased repository URLs into resolved URLs in dependencies. +func (m *Manager) resolveRepoNames(deps []*chart.Dependency) (map[string]string, error) { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + if os.IsNotExist(err) { + return make(map[string]string), nil + } + return nil, err + } + repos := rf.Repositories + + reposMap := make(map[string]string) + + // Verify that all repositories referenced in the deps are actually known + // by Helm. 
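+	// Dependencies may reference a configured repository either by URL or by
+	// alias, e.g. repository: "@stable" or repository: "alias:stable" in
+	// Chart.yaml.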
+ missing := []string{} + for _, dd := range deps { + // Don't map the repository, we don't need to download chart from charts directory + if dd.Repository == "" { + continue + } + // if dep chart is from local path, verify the path is valid + if strings.HasPrefix(dd.Repository, "file://") { + if _, err := resolver.GetLocalPath(dd.Repository, m.ChartPath); err != nil { + return nil, err + } + + if m.Debug { + fmt.Fprintf(m.Out, "Repository from local path: %s\n", dd.Repository) + } + reposMap[dd.Name] = dd.Repository + continue + } + + if registry.IsOCI(dd.Repository) { + reposMap[dd.Name] = dd.Repository + continue + } + + found := false + + for _, repo := range repos { + if (strings.HasPrefix(dd.Repository, "@") && strings.TrimPrefix(dd.Repository, "@") == repo.Name) || + (strings.HasPrefix(dd.Repository, "alias:") && strings.TrimPrefix(dd.Repository, "alias:") == repo.Name) { + found = true + dd.Repository = repo.URL + reposMap[dd.Name] = repo.Name + break + } else if urlutil.Equal(repo.URL, dd.Repository) { + found = true + reposMap[dd.Name] = repo.Name + break + } + } + if !found { + repository := dd.Repository + // Add if URL + _, err := url.ParseRequestURI(repository) + if err == nil { + reposMap[repository] = repository + continue + } + missing = append(missing, repository) + } + } + if len(missing) > 0 { + errorMessage := fmt.Sprintf("no repository definition for %s. Please add them via 'helm repo add'", strings.Join(missing, ", ")) + // It is common for people to try to enter "stable" as a repository instead of the actual URL. + // For this case, let's give them a suggestion. + containsNonURL := false + for _, repo := range missing { + if !strings.Contains(repo, "//") && !strings.HasPrefix(repo, "@") && !strings.HasPrefix(repo, "alias:") { + containsNonURL = true + } + } + if containsNonURL { + errorMessage += ` +Note that repositories must be URLs or aliases. For example, to refer to the "example" +repository, use "https://charts.example.com/" or "@example" instead of +"example". Don't forget to add the repo, too ('helm repo add').` + } + return nil, errors.New(errorMessage) + } + return reposMap, nil +} + +// UpdateRepositories updates all of the local repos to the latest. +func (m *Manager) UpdateRepositories() error { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return err + } + repos := rf.Repositories + if len(repos) > 0 { + fmt.Fprintln(m.Out, "Hang tight while we grab the latest from your chart repositories...") + // This prints warnings straight to out. + if err := m.parallelRepoUpdate(repos); err != nil { + return err + } + fmt.Fprintln(m.Out, "Update Complete. ⎈Happy Helming!⎈") + } + return nil +} + +func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error { + + var wg sync.WaitGroup + for _, c := range repos { + r, err := repo.NewChartRepository(c, m.Getters) + if err != nil { + return err + } + r.CachePath = m.RepositoryCache + wg.Add(1) + go func(r *repo.ChartRepository) { + if _, err := r.DownloadIndexFile(); err != nil { + // For those dependencies that are not known to helm and using a + // generated key name we display the repo url. 
+ if strings.HasPrefix(r.Config.Name, managerKeyPrefix) { + fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository:\n\t%s\n", r.Config.URL, err) + } else { + fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", r.Config.Name, r.Config.URL, err) + } + } else { + // For those dependencies that are not known to helm and using a + // generated key name we display the repo url. + if strings.HasPrefix(r.Config.Name, managerKeyPrefix) { + fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.URL) + } else { + fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.Name) + } + } + wg.Done() + }(r) + } + wg.Wait() + + return nil +} + +// findChartURL searches the cache of repo data for a chart that has the name and the repoURL specified. +// +// 'name' is the name of the chart. Version is an exact semver, or an empty string. If empty, the +// newest version will be returned. +// +// repoURL is the repository to search +// +// If it finds a URL that is "relative", it will prepend the repoURL. +func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, insecureskiptlsverify, passcredentialsall bool, caFile, certFile, keyFile string, err error) { + if registry.IsOCI(repoURL) { + return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", false, false, "", "", "", nil + } + + for _, cr := range repos { + + if urlutil.Equal(repoURL, cr.Config.URL) { + var entry repo.ChartVersions + entry, err = findEntryByName(name, cr) + if err != nil { + // TODO: Where linting is skipped in this function we should + // refactor to remove naked returns while ensuring the same + // behavior + //nolint:nakedret + return + } + var ve *repo.ChartVersion + ve, err = findVersionedEntry(version, entry) + if err != nil { + //nolint:nakedret + return + } + url, err = normalizeURL(repoURL, ve.URLs[0]) + if err != nil { + //nolint:nakedret + return + } + username = cr.Config.Username + password = cr.Config.Password + passcredentialsall = cr.Config.PassCredentialsAll + insecureskiptlsverify = cr.Config.InsecureSkipTLSverify + caFile = cr.Config.CAFile + certFile = cr.Config.CertFile + keyFile = cr.Config.KeyFile + //nolint:nakedret + return + } + } + url, err = repo.FindChartInRepoURL(repoURL, name, version, certFile, keyFile, caFile, m.Getters) + if err == nil { + return url, username, password, false, false, "", "", "", err + } + err = errors.Errorf("chart %s not found in %s: %s", name, repoURL, err) + return url, username, password, false, false, "", "", "", err +} + +// findEntryByName finds an entry in the chart repository whose name matches the given name. +// +// It returns the ChartVersions for that entry. +func findEntryByName(name string, cr *repo.ChartRepository) (repo.ChartVersions, error) { + for ename, entry := range cr.IndexFile.Entries { + if ename == name { + return entry, nil + } + } + return nil, errors.New("entry not found") +} + +// findVersionedEntry takes a ChartVersions list and returns a single chart version that satisfies the version constraints. +// +// If version is empty, the first chart found is returned. +func findVersionedEntry(version string, vers repo.ChartVersions) (*repo.ChartVersion, error) { + for _, verEntry := range vers { + if len(verEntry.URLs) == 0 { + // Not a legit entry. 
+ continue + } + + if version == "" || versionEquals(version, verEntry.Version) { + return verEntry, nil + } + } + return nil, errors.New("no matching version") +} + +func versionEquals(v1, v2 string) bool { + sv1, err := semver.NewVersion(v1) + if err != nil { + // Fallback to string comparison. + return v1 == v2 + } + sv2, err := semver.NewVersion(v2) + if err != nil { + return false + } + return sv1.Equal(sv2) +} + +func normalizeURL(baseURL, urlOrPath string) (string, error) { + u, err := url.Parse(urlOrPath) + if err != nil { + return urlOrPath, err + } + if u.IsAbs() { + return u.String(), nil + } + u2, err := url.Parse(baseURL) + if err != nil { + return urlOrPath, errors.Wrap(err, "base URL failed to parse") + } + + u2.RawPath = path.Join(u2.RawPath, urlOrPath) + u2.Path = path.Join(u2.Path, urlOrPath) + return u2.String(), nil +} + +// loadChartRepositories reads the repositories.yaml, and then builds a map of +// ChartRepositories. +// +// The key is the local name (which is only present in the repositories.yaml). +func (m *Manager) loadChartRepositories() (map[string]*repo.ChartRepository, error) { + indices := map[string]*repo.ChartRepository{} + + // Load repositories.yaml file + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return indices, errors.Wrapf(err, "failed to load %s", m.RepositoryConfig) + } + + for _, re := range rf.Repositories { + lname := re.Name + idxFile := filepath.Join(m.RepositoryCache, helmpath.CacheIndexFile(lname)) + index, err := repo.LoadIndexFile(idxFile) + if err != nil { + return indices, err + } + + // TODO: use constructor + cr := &repo.ChartRepository{ + Config: re, + IndexFile: index, + } + indices[lname] = cr + } + return indices, nil +} + +// writeLock writes a lockfile to disk +func writeLock(chartpath string, lock *chart.Lock, legacyLockfile bool) error { + data, err := yaml.Marshal(lock) + if err != nil { + return err + } + lockfileName := "Chart.lock" + if legacyLockfile { + lockfileName = "requirements.lock" + } + dest := filepath.Join(chartpath, lockfileName) + return os.WriteFile(dest, data, 0644) +} + +// archive a dep chart from local directory and save it into destPath +func tarFromLocalDir(chartpath, name, repo, version, destPath string) (string, error) { + if !strings.HasPrefix(repo, "file://") { + return "", errors.Errorf("wrong format: chart %s repository %s", name, repo) + } + + origPath, err := resolver.GetLocalPath(repo, chartpath) + if err != nil { + return "", err + } + + ch, err := loader.LoadDir(origPath) + if err != nil { + return "", err + } + + constraint, err := semver.NewConstraint(version) + if err != nil { + return "", errors.Wrapf(err, "dependency %s has an invalid version/constraint format", name) + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return "", err + } + + if constraint.Check(v) { + _, err = chartutil.Save(ch, destPath) + return ch.Metadata.Version, err + } + + return "", errors.Errorf("can't get a valid version for dependency %s", name) +} + +// The prefix to use for cache keys created by the manager for repo names +const managerKeyPrefix = "helm-manager-" + +// key is used to turn a name, such as a repository url, into a filesystem +// safe name that is unique for querying. To accomplish this a unique hash of +// the string is used. 
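+//
+// For example:
+//
+//	k, _ := key("https://charts.example.com")
+//	// k is the lowercase hex encoding of the URL's SHA-256 digest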
+func key(name string) (string, error) { + in := strings.NewReader(name) + hash := crypto.SHA256.New() + if _, err := io.Copy(hash, in); err != nil { + return "", nil + } + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/vendor/k8s.io/helm/pkg/engine/doc.go b/vendor/helm.sh/helm/v3/pkg/engine/doc.go similarity index 65% rename from vendor/k8s.io/helm/pkg/engine/doc.go rename to vendor/helm.sh/helm/v3/pkg/engine/doc.go index 63c036605..6b3443aaf 100644 --- a/vendor/k8s.io/helm/pkg/engine/doc.go +++ b/vendor/helm.sh/helm/v3/pkg/engine/doc.go @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -/*Package engine implements the Go template engine as a Tiller Engine. +/* +Package engine implements the Go text template engine as needed for Helm. -Tiller provides a simple interface for taking a Chart and rendering its templates. -The 'engine' package implements this interface using Go's built-in 'text/template' -package. +When Helm renders templates it does so with additional functions and different +modes (e.g., strict, lint mode). This package handles the helm specific +implementation. */ -package engine // import "k8s.io/helm/pkg/engine" +package engine // import "helm.sh/helm/v3/pkg/engine" diff --git a/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/vendor/helm.sh/helm/v3/pkg/engine/engine.go new file mode 100644 index 000000000..61c0782fc --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/engine.go @@ -0,0 +1,441 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "fmt" + "log" + "path" + "path/filepath" + "regexp" + "sort" + "strings" + "text/template" + + "github.com/pkg/errors" + "k8s.io/client-go/rest" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" +) + +// Engine is an implementation of the Helm rendering implementation for templates. +type Engine struct { + // If strict is enabled, template rendering will fail if a template references + // a value that was not passed in. + Strict bool + // In LintMode, some 'required' template values may be missing, so don't fail + LintMode bool + // optional provider of clients to talk to the Kubernetes API + clientProvider *ClientProvider + // EnableDNS tells the engine to allow DNS lookups when rendering templates + EnableDNS bool +} + +// New creates a new instance of Engine using the passed in rest config. +func New(config *rest.Config) Engine { + var clientProvider ClientProvider = clientProviderFromConfig{config} + return Engine{ + clientProvider: &clientProvider, + } +} + +// Render takes a chart, optional values, and value overrides, and attempts to render the Go templates. +// +// Render can be called repeatedly on the same engine. +// +// This will look in the chart's 'templates' data (e.g. the 'templates/' directory) +// and attempt to render the templates there using the values passed in. +// +// Values are scoped to their templates. 
A dependency template will not have +// access to the values set for its parent. If chart "foo" includes chart "bar", +// "bar" will not have access to the values for "foo". +// +// Values should be prepared with something like `chartutils.ReadValues`. +// +// Values are passed through the templates according to scope. If the top layer +// chart includes the chart foo, which includes the chart bar, the values map +// will be examined for a table called "foo". If "foo" is found in vals, +// that section of the values will be passed into the "foo" chart. And if that +// section contains a value named "bar", that value will be passed on to the +// bar chart during render time. +func (e Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { + tmap := allTemplates(chrt, values) + return e.render(tmap) +} + +// Render takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. +func Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { + return new(Engine).Render(chrt, values) +} + +// RenderWithClient takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. This engine is client aware and so can have template +// functions that interact with the client. +func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.Config) (map[string]string, error) { + var clientProvider ClientProvider = clientProviderFromConfig{config} + return Engine{ + clientProvider: &clientProvider, + }.Render(chrt, values) +} + +// RenderWithClientProvider takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. This engine is client aware and so can have template +// functions that interact with the client. +// This function differs from RenderWithClient in that it lets you customize the way a dynamic client is constructed. +func RenderWithClientProvider(chrt *chart.Chart, values chartutil.Values, clientProvider ClientProvider) (map[string]string, error) { + return Engine{ + clientProvider: &clientProvider, + }.Render(chrt, values) +} + +// renderable is an object that can be rendered. +type renderable struct { + // tpl is the current template. + tpl string + // vals are the values to be supplied to the template. + vals chartutil.Values + // namespace prefix to the templates of the current chart + basePath string +} + +const warnStartDelim = "HELM_ERR_START" +const warnEndDelim = "HELM_ERR_END" +const recursionMaxNums = 1000 + +var warnRegex = regexp.MustCompile(warnStartDelim + `((?s).*)` + warnEndDelim) + +func warnWrap(warn string) string { + return warnStartDelim + warn + warnEndDelim +} + +// 'include' needs to be defined in the scope of a 'tpl' template as +// well as regular file-loaded templates. 
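+//
+// For example, template text such as:
+//
+//	{{ include "mychart.labels" . | indent 4 }}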
+func includeFun(t *template.Template, includedNames map[string]int) func(string, interface{}) (string, error) {
+	return func(name string, data interface{}) (string, error) {
+		var buf strings.Builder
+		if v, ok := includedNames[name]; ok {
+			if v > recursionMaxNums {
+				return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name)
+			}
+			includedNames[name]++
+		} else {
+			includedNames[name] = 1
+		}
+		err := t.ExecuteTemplate(&buf, name, data)
+		includedNames[name]--
+		return buf.String(), err
+	}
+}
+
+// As does 'tpl', so that nested calls to 'tpl' see the templates
+// defined by their enclosing contexts.
+func tplFun(parent *template.Template, includedNames map[string]int, strict bool) func(string, interface{}) (string, error) {
+	return func(tpl string, vals interface{}) (string, error) {
+		t, err := parent.Clone()
+		if err != nil {
+			return "", errors.Wrapf(err, "cannot clone template")
+		}
+
+		// Re-inject the missingkey option, see text/template issue https://github.com/golang/go/issues/43022
+		// We have to go by strict from our engine configuration, as the option fields are private in Template.
+		// TODO: Remove workaround (and the strict parameter) once we build only with golang versions with a fix.
+		if strict {
+			t.Option("missingkey=error")
+		} else {
+			t.Option("missingkey=zero")
+		}
+
+		// Re-inject 'include' so that it can close over our clone of t;
+		// this lets any 'define's inside tpl be 'include'd.
+		t.Funcs(template.FuncMap{
+			"include": includeFun(t, includedNames),
+			"tpl":     tplFun(t, includedNames, strict),
+		})
+
+		// We need a .New template, as template text that is just blanks
+		// or comments after parsing out defines merely adds new named
+		// template definitions without changing the main template.
+		// https://pkg.go.dev/text/template#Template.Parse
+		// Use the parent's name for lack of a better way to identify the tpl
+		// text string. (Maybe we could use a hash appended to the name?)
+		t, err = t.New(parent.Name()).Parse(tpl)
+		if err != nil {
+			return "", errors.Wrapf(err, "cannot parse template %q", tpl)
+		}
+
+		var buf strings.Builder
+		if err := t.Execute(&buf, vals); err != nil {
+			return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl)
+		}
+
+		// See comment in renderWithReferences explaining the <no value> hack.
+		return strings.ReplaceAll(buf.String(), "<no value>", ""), nil
+	}
+}
+
+// initFunMap creates the Engine's FuncMap and adds context-specific functions.
+func (e Engine) initFunMap(t *template.Template) {
+	funcMap := funcMap()
+	includedNames := make(map[string]int)
+
+	// Add the template-rendering functions here so we can close over t.
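+	// Editor's illustration (assumed chart snippets, not upstream code): these
+	// bindings are what make idioms like the following work in templates:
+	//
+	//	{{ include "mychart.labels" . | indent 4 }}
+	//	{{ tpl .Values.extraManifest . }}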
+ funcMap["include"] = includeFun(t, includedNames) + funcMap["tpl"] = tplFun(t, includedNames, e.Strict) + + // Add the `required` function here so we can use lintMode + funcMap["required"] = func(warn string, val interface{}) (interface{}, error) { + if val == nil { + if e.LintMode { + // Don't fail on missing required values when linting + log.Printf("[INFO] Missing required value: %s", warn) + return "", nil + } + return val, errors.Errorf(warnWrap(warn)) + } else if _, ok := val.(string); ok { + if val == "" { + if e.LintMode { + // Don't fail on missing required values when linting + log.Printf("[INFO] Missing required value: %s", warn) + return "", nil + } + return val, errors.Errorf(warnWrap(warn)) + } + } + return val, nil + } + + // Override sprig fail function for linting and wrapping message + funcMap["fail"] = func(msg string) (string, error) { + if e.LintMode { + // Don't fail when linting + log.Printf("[INFO] Fail: %s", msg) + return "", nil + } + return "", errors.New(warnWrap(msg)) + } + + // If we are not linting and have a cluster connection, provide a Kubernetes-backed + // implementation. + if !e.LintMode && e.clientProvider != nil { + funcMap["lookup"] = newLookupFunction(*e.clientProvider) + } + + // When DNS lookups are not enabled override the sprig function and return + // an empty string. + if !e.EnableDNS { + funcMap["getHostByName"] = func(name string) string { + return "" + } + } + + t.Funcs(funcMap) +} + +// render takes a map of templates/values and renders them. +func (e Engine) render(tpls map[string]renderable) (rendered map[string]string, err error) { + // Basically, what we do here is start with an empty parent template and then + // build up a list of templates -- one for each file. Once all of the templates + // have been parsed, we loop through again and execute every template. + // + // The idea with this process is to make it possible for more complex templates + // to share common blocks, but to make the entire thing feel like a file-based + // template engine. + defer func() { + if r := recover(); r != nil { + err = errors.Errorf("rendering template failed: %v", r) + } + }() + t := template.New("gotpl") + if e.Strict { + t.Option("missingkey=error") + } else { + // Not that zero will attempt to add default values for types it knows, + // but will still emit for others. We mitigate that later. + t.Option("missingkey=zero") + } + + e.initFunMap(t) + + // We want to parse the templates in a predictable order. The order favors + // higher-level (in file system) templates over deeply nested templates. + keys := sortTemplates(tpls) + + for _, filename := range keys { + r := tpls[filename] + if _, err := t.New(filename).Parse(r.tpl); err != nil { + return map[string]string{}, cleanupParseError(filename, err) + } + } + + rendered = make(map[string]string, len(keys)) + for _, filename := range keys { + // Don't render partials. We don't care out the direct output of partials. + // They are only included from other templates. + if strings.HasPrefix(path.Base(filename), "_") { + continue + } + // At render time, add information about the template that is being rendered. + vals := tpls[filename].vals + vals["Template"] = chartutil.Values{"Name": filename, "BasePath": tpls[filename].basePath} + var buf strings.Builder + if err := t.ExecuteTemplate(&buf, filename, vals); err != nil { + return map[string]string{}, cleanupExecError(filename, err) + } + + // Work around the issue where Go will emit "" even if Options(missing=zero) + // is set. 
Since missing=error will never get here, we do not need to handle + // the Strict case. + rendered[filename] = strings.ReplaceAll(buf.String(), "", "") + } + + return rendered, nil +} + +func cleanupParseError(filename string, err error) error { + tokens := strings.Split(err.Error(), ": ") + if len(tokens) == 1 { + // This might happen if a non-templating error occurs + return fmt.Errorf("parse error in (%s): %s", filename, err) + } + // The first token is "template" + // The second token is either "filename:lineno" or "filename:lineNo:columnNo" + location := tokens[1] + // The remaining tokens make up a stacktrace-like chain, ending with the relevant error + errMsg := tokens[len(tokens)-1] + return fmt.Errorf("parse error at (%s): %s", string(location), errMsg) +} + +func cleanupExecError(filename string, err error) error { + if _, isExecError := err.(template.ExecError); !isExecError { + return err + } + + tokens := strings.SplitN(err.Error(), ": ", 3) + if len(tokens) != 3 { + // This might happen if a non-templating error occurs + return fmt.Errorf("execution error in (%s): %s", filename, err) + } + + // The first token is "template" + // The second token is either "filename:lineno" or "filename:lineNo:columnNo" + location := tokens[1] + + parts := warnRegex.FindStringSubmatch(tokens[2]) + if len(parts) >= 2 { + return fmt.Errorf("execution error at (%s): %s", string(location), parts[1]) + } + + return err +} + +func sortTemplates(tpls map[string]renderable) []string { + keys := make([]string, len(tpls)) + i := 0 + for key := range tpls { + keys[i] = key + i++ + } + sort.Sort(sort.Reverse(byPathLen(keys))) + return keys +} + +type byPathLen []string + +func (p byPathLen) Len() int { return len(p) } +func (p byPathLen) Swap(i, j int) { p[j], p[i] = p[i], p[j] } +func (p byPathLen) Less(i, j int) bool { + a, b := p[i], p[j] + ca, cb := strings.Count(a, "/"), strings.Count(b, "/") + if ca == cb { + return strings.Compare(a, b) == -1 + } + return ca < cb +} + +// allTemplates returns all templates for a chart and its dependencies. +// +// As it goes, it also prepares the values in a scope-sensitive manner. +func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable { + templates := make(map[string]renderable) + recAllTpls(c, templates, vals) + return templates +} + +// recAllTpls recurses through the templates in a chart. +// +// As it recurses, it also sets the values to be appropriate for the template +// scope. +func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil.Values) map[string]interface{} { + subCharts := make(map[string]interface{}) + chartMetaData := struct { + chart.Metadata + IsRoot bool + }{*c.Metadata, c.IsRoot()} + + next := map[string]interface{}{ + "Chart": chartMetaData, + "Files": newFiles(c.Files), + "Release": vals["Release"], + "Capabilities": vals["Capabilities"], + "Values": make(chartutil.Values), + "Subcharts": subCharts, + } + + // If there is a {{.Values.ThisChart}} in the parent metadata, + // copy that into the {{.Values}} for this template. + if c.IsRoot() { + next["Values"] = vals["Values"] + } else if vs, err := vals.Table("Values." 
+ c.Name()); err == nil { + next["Values"] = vs + } + + for _, child := range c.Dependencies() { + subCharts[child.Name()] = recAllTpls(child, templates, next) + } + + newParentID := c.ChartFullPath() + for _, t := range c.Templates { + if t == nil { + continue + } + if !isTemplateValid(c, t.Name) { + continue + } + templates[path.Join(newParentID, t.Name)] = renderable{ + tpl: string(t.Data), + vals: next, + basePath: path.Join(newParentID, "templates"), + } + } + + return next +} + +// isTemplateValid returns true if the template is valid for the chart type +func isTemplateValid(ch *chart.Chart, templateName string) bool { + if isLibraryChart(ch) { + return strings.HasPrefix(filepath.Base(templateName), "_") + } + return true +} + +// isLibraryChart returns true if the chart is a library chart +func isLibraryChart(c *chart.Chart) bool { + return strings.EqualFold(c.Metadata.Type, "library") +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/files.go b/vendor/helm.sh/helm/v3/pkg/engine/files.go new file mode 100644 index 000000000..f2cfdb3f3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/files.go @@ -0,0 +1,165 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "encoding/base64" + "path" + "strings" + + "github.com/gobwas/glob" + + "helm.sh/helm/v3/pkg/chart" +) + +// files is a map of files in a chart that can be accessed from a template. +type files map[string][]byte + +// NewFiles creates a new files from chart files. +// Given an []*chart.File (the format for files in a chart.Chart), extract a map of files. +func newFiles(from []*chart.File) files { + files := make(map[string][]byte) + for _, f := range from { + files[f.Name] = f.Data + } + return files +} + +// GetBytes gets a file by path. +// +// The returned data is raw. In a template context, this is identical to calling +// {{index .Files $path}}. +// +// This is intended to be accessed from within a template, so a missed key returns +// an empty []byte. +func (f files) GetBytes(name string) []byte { + if v, ok := f[name]; ok { + return v + } + return []byte{} +} + +// Get returns a string representation of the given file. +// +// Fetch the contents of a file as a string. It is designed to be called in a +// template. +// +// {{.Files.Get "foo"}} +func (f files) Get(name string) string { + return string(f.GetBytes(name)) +} + +// Glob takes a glob pattern and returns another files object only containing +// matched files. +// +// This is designed to be called from a template. 
+//
+// {{ range $name, $content := .Files.Glob("foo/**") }}
+// {{ $name }}: |
+// {{ .Files.Get($name) | indent 4 }}{{ end }}
+func (f files) Glob(pattern string) files {
+	g, err := glob.Compile(pattern, '/')
+	if err != nil {
+		g, _ = glob.Compile("**")
+	}
+
+	nf := newFiles(nil)
+	for name, contents := range f {
+		if g.Match(name) {
+			nf[name] = contents
+		}
+	}
+
+	return nf
+}
+
+// AsConfig flattens a Files group into a YAML map suitable for
+// including in the 'data' section of a Kubernetes ConfigMap definition.
+// Duplicate keys will be overwritten, so be aware that your file names
+// (regardless of path) should be unique.
+//
+// This is designed to be called from a template, and will return empty string
+// (via toYAML function) if it cannot be serialized to YAML, or if the Files
+// object is nil.
+//
+// The output will not be indented, so you will want to pipe this to the
+// 'indent' template function.
+//
+// data:
+//
+//	{{ .Files.Glob("config/**").AsConfig() | indent 4 }}
func (f files) AsConfig() string {
+	if f == nil {
+		return ""
+	}
+
+	m := make(map[string]string)
+
+	// Explicitly convert to strings, and file names
+	for k, v := range f {
+		m[path.Base(k)] = string(v)
+	}
+
+	return toYAML(m)
+}
+
+// AsSecrets returns the base64-encoded value of a Files object suitable for
+// including in the 'data' section of a Kubernetes Secret definition.
+// Duplicate keys will be overwritten, so be aware that your file names
+// (regardless of path) should be unique.
+//
+// This is designed to be called from a template, and will return empty string
+// (via toYAML function) if it cannot be serialized to YAML, or if the Files
+// object is nil.
+//
+// The output will not be indented, so you will want to pipe this to the
+// 'indent' template function.
+//
+// data:
+//
+//	{{ .Files.Glob("secrets/*").AsSecrets() | indent 4 }}
+func (f files) AsSecrets() string {
+	if f == nil {
+		return ""
+	}
+
+	m := make(map[string]string)
+
+	for k, v := range f {
+		m[path.Base(k)] = base64.StdEncoding.EncodeToString(v)
+	}
+
+	return toYAML(m)
+}
+
+// Lines returns each line of a named file (split by "\n") as a slice, so it can
+// be ranged over in your templates.
+//
+// This is designed to be called from a template.
+//
+// {{ range .Files.Lines "foo/bar.html" }}
+// {{ . }}{{ end }}
+func (f files) Lines(path string) []string {
+	if f == nil || f[path] == nil {
+		return []string{}
+	}
+	// Trim a single trailing newline so templates do not see a spurious empty
+	// final line; TrimSuffix is also safe on empty files, where indexing the
+	// last byte would panic.
+	s := strings.TrimSuffix(string(f[path]), "\n")
+	return strings.Split(s, "\n")
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/engine/funcs.go b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go
new file mode 100644
index 000000000..8f05a3a1d
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go
@@ -0,0 +1,176 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package engine + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" + + "github.com/BurntSushi/toml" + "github.com/Masterminds/sprig/v3" + "sigs.k8s.io/yaml" +) + +// funcMap returns a mapping of all of the functions that Engine has. +// +// Because some functions are late-bound (e.g. contain context-sensitive +// data), the functions may not all perform identically outside of an Engine +// as they will inside of an Engine. +// +// Known late-bound functions: +// +// - "include" +// - "tpl" +// +// These are late-bound in Engine.Render(). The +// version included in the FuncMap is a placeholder. +func funcMap() template.FuncMap { + f := sprig.TxtFuncMap() + delete(f, "env") + delete(f, "expandenv") + + // Add some extra functionality + extra := template.FuncMap{ + "toToml": toTOML, + "toYaml": toYAML, + "fromYaml": fromYAML, + "fromYamlArray": fromYAMLArray, + "toJson": toJSON, + "fromJson": fromJSON, + "fromJsonArray": fromJSONArray, + + // This is a placeholder for the "include" function, which is + // late-bound to a template. By declaring it here, we preserve the + // integrity of the linter. + "include": func(string, interface{}) string { return "not implemented" }, + "tpl": func(string, interface{}) interface{} { return "not implemented" }, + "required": func(string, interface{}) (interface{}, error) { return "not implemented", nil }, + // Provide a placeholder for the "lookup" function, which requires a kubernetes + // connection. + "lookup": func(string, string, string, string) (map[string]interface{}, error) { + return map[string]interface{}{}, nil + }, + } + + for k, v := range extra { + f[k] = v + } + + return f +} + +// toYAML takes an interface, marshals it to yaml, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toYAML(v interface{}) string { + data, err := yaml.Marshal(v) + if err != nil { + // Swallow errors inside of a template. + return "" + } + return strings.TrimSuffix(string(data), "\n") +} + +// fromYAML converts a YAML document into a map[string]interface{}. +// +// This is not a general-purpose YAML parser, and will not parse all valid +// YAML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string into +// m["Error"] in the returned map. +func fromYAML(str string) map[string]interface{} { + m := map[string]interface{}{} + + if err := yaml.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// fromYAMLArray converts a YAML array into a []interface{}. +// +// This is not a general-purpose YAML parser, and will not parse all valid +// YAML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string as +// the first and only item in the returned array. +func fromYAMLArray(str string) []interface{} { + a := []interface{}{} + + if err := yaml.Unmarshal([]byte(str), &a); err != nil { + a = []interface{}{err.Error()} + } + return a +} + +// toTOML takes an interface, marshals it to toml, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. 
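+//
+// For example (editor's illustration; hypothetical values and file):
+//
+//	{{ .Values.server | toToml }}
+//	{{ (.Files.Get "conf.yaml" | fromYaml).port }}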
+func toTOML(v interface{}) string { + b := bytes.NewBuffer(nil) + e := toml.NewEncoder(b) + err := e.Encode(v) + if err != nil { + return err.Error() + } + return b.String() +} + +// toJSON takes an interface, marshals it to json, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toJSON(v interface{}) string { + data, err := json.Marshal(v) + if err != nil { + // Swallow errors inside of a template. + return "" + } + return string(data) +} + +// fromJSON converts a JSON document into a map[string]interface{}. +// +// This is not a general-purpose JSON parser, and will not parse all valid +// JSON documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string into +// m["Error"] in the returned map. +func fromJSON(str string) map[string]interface{} { + m := make(map[string]interface{}) + + if err := json.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// fromJSONArray converts a JSON array into a []interface{}. +// +// This is not a general-purpose JSON parser, and will not parse all valid +// JSON documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string as +// the first and only item in the returned array. +func fromJSONArray(str string) []interface{} { + a := []interface{}{} + + if err := json.Unmarshal([]byte(str), &a); err != nil { + a = []interface{}{err.Error()} + } + return a +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go new file mode 100644 index 000000000..86a7d698c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go @@ -0,0 +1,143 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "log" + "strings" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +type lookupFunc = func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) + +// NewLookupFunction returns a function for looking up objects in the cluster. +// +// If the resource does not exist, no error is raised. +// +// This function is considered deprecated, and will be renamed in Helm 4. It will no +// longer be a public function. +func NewLookupFunction(config *rest.Config) lookupFunc { + return newLookupFunction(clientProviderFromConfig{config: config}) +} + +type ClientProvider interface { + // GetClientFor returns a dynamic.NamespaceableResourceInterface suitable for interacting with resources + // corresponding to the provided apiVersion and kind, as well as a boolean indicating whether the resources + // are namespaced. 
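+	//
+	// Editor's sketch (assumption, not upstream code): tests might satisfy this
+	// interface with a canned dynamic client, e.g.
+	//
+	//	type fakeProvider struct{ dyn dynamic.Interface }
+	//
+	//	func (f fakeProvider) GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error) {
+	//		gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"} // hypothetical fixed mapping
+	//		return f.dyn.Resource(gvr), true, nil
+	//	}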
+	GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error)
+}
+
+type clientProviderFromConfig struct {
+	config *rest.Config
+}
+
+func (c clientProviderFromConfig) GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error) {
+	return getDynamicClientOnKind(apiVersion, kind, c.config)
+}
+
+func newLookupFunction(clientProvider ClientProvider) lookupFunc {
+	return func(apiversion string, kind string, namespace string, name string) (map[string]interface{}, error) {
+		var client dynamic.ResourceInterface
+		c, namespaced, err := clientProvider.GetClientFor(apiversion, kind)
+		if err != nil {
+			return map[string]interface{}{}, err
+		}
+		if namespaced && namespace != "" {
+			client = c.Namespace(namespace)
+		} else {
+			client = c
+		}
+		if name != "" {
+			// this will return a single object
+			obj, err := client.Get(context.Background(), name, metav1.GetOptions{})
+			if err != nil {
+				if apierrors.IsNotFound(err) {
+					// Just return an empty interface when the object was not found.
+					// That way, users can use `if not (lookup ...)` in their templates.
+					return map[string]interface{}{}, nil
+				}
+				return map[string]interface{}{}, err
+			}
+			return obj.UnstructuredContent(), nil
+		}
+		// this will return a list
+		obj, err := client.List(context.Background(), metav1.ListOptions{})
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				// Just return an empty interface when the object was not found.
+				// That way, users can use `if not (lookup ...)` in their templates.
+				return map[string]interface{}{}, nil
+			}
+			return map[string]interface{}{}, err
+		}
+		return obj.UnstructuredContent(), nil
+	}
+}
+
+// getDynamicClientOnKind returns a dynamic client on an Unstructured type. This client can be further namespaced.
+func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) (dynamic.NamespaceableResourceInterface, bool, error) {
+	gvk := schema.FromAPIVersionAndKind(apiversion, kind)
+	apiRes, err := getAPIResourceForGVK(gvk, config)
+	if err != nil {
+		log.Printf("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err)
+		return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String())
+	}
+	gvr := schema.GroupVersionResource{
+		Group:    apiRes.Group,
+		Version:  apiRes.Version,
+		Resource: apiRes.Name,
+	}
+	intf, err := dynamic.NewForConfig(config)
+	if err != nil {
+		log.Printf("[ERROR] unable to get dynamic client %s", err)
+		return nil, false, err
+	}
+	res := intf.Resource(gvr)
+	return res, apiRes.Namespaced, nil
+}
+
+func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (metav1.APIResource, error) {
+	res := metav1.APIResource{}
+	discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
+	if err != nil {
+		log.Printf("[ERROR] unable to create discovery client %s", err)
+		return res, err
+	}
+	resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
+	if err != nil {
+		log.Printf("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err)
+		return res, err
+	}
+	for _, resource := range resList.APIResources {
+		// If a resource name contains a "/" it references a subresource; we don't support subresources for now.
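+		// Editor's note: e.g. "deployments/status" or "pods/log" would be
+		// skipped here; only top-level resource names match.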
+		if resource.Kind == gvk.Kind && !strings.Contains(resource.Name, "/") {
+			res = resource
+			res.Group = gvk.Group
+			res.Version = gvk.Version
+			break
+		}
+	}
+	return res, nil
+}
diff --git a/vendor/k8s.io/helm/pkg/manifest/types.go b/vendor/helm.sh/helm/v3/pkg/getter/doc.go
similarity index 65%
rename from vendor/k8s.io/helm/pkg/manifest/types.go
rename to vendor/helm.sh/helm/v3/pkg/getter/doc.go
index 4c748c9e5..11cf6153e 100644
--- a/vendor/k8s.io/helm/pkg/manifest/types.go
+++ b/vendor/helm.sh/helm/v3/pkg/getter/doc.go
@@ -1,11 +1,10 @@
 /*
 Copyright The Helm Authors.
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
+http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,15 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package manifest
-
-import (
-	"k8s.io/helm/pkg/releaseutil"
-)
+/*
+Package getter provides a generalized tool for fetching data by scheme.
 
-// Manifest represents a manifest file, which has a name and some content.
-type Manifest struct {
-	Name    string
-	Content string
-	Head    *releaseutil.SimpleHead
-}
+This provides a method by which the plugin system can load arbitrary protocol
+handlers based upon a URL scheme.
+*/
+package getter
diff --git a/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/vendor/helm.sh/helm/v3/pkg/getter/getter.go
new file mode 100644
index 000000000..45ab4da7e
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/getter/getter.go
@@ -0,0 +1,212 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package getter
+
+import (
+	"bytes"
+	"net/http"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/pkg/cli"
+	"helm.sh/helm/v3/pkg/registry"
+)
+
+// options are generic parameters to be provided to the getter during instantiation.
+//
+// Getters may or may not ignore these parameters as they are passed in.
+type options struct {
+	url                   string
+	certFile              string
+	keyFile               string
+	caFile                string
+	unTar                 bool
+	insecureSkipVerifyTLS bool
+	plainHTTP             bool
+	username              string
+	password              string
+	passCredentialsAll    bool
+	userAgent             string
+	version               string
+	registryClient        *registry.Client
+	timeout               time.Duration
+	transport             *http.Transport
+}
+
+// Option allows specifying various settings configurable by the user for overriding the defaults
+// used when performing Get operations with the Getter.
+type Option func(*options)
+
+// WithURL informs the getter of the server name that will be used when fetching objects. Used in conjunction with
+// WithTLSClientConfig to set the TLSClientConfig's server name.
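+//
+// For example (editor's illustration; hypothetical repository URL):
+//
+//	g, _ := NewHTTPGetter()
+//	buf, err := g.Get("https://charts.example.com/index.yaml",
+//		WithURL("https://charts.example.com"),
+//		WithTimeout(30*time.Second))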
+func WithURL(url string) Option { + return func(opts *options) { + opts.url = url + } +} + +// WithBasicAuth sets the request's Authorization header to use the provided credentials +func WithBasicAuth(username, password string) Option { + return func(opts *options) { + opts.username = username + opts.password = password + } +} + +func WithPassCredentialsAll(pass bool) Option { + return func(opts *options) { + opts.passCredentialsAll = pass + } +} + +// WithUserAgent sets the request's User-Agent header to use the provided agent name. +func WithUserAgent(userAgent string) Option { + return func(opts *options) { + opts.userAgent = userAgent + } +} + +// WithInsecureSkipVerifyTLS determines if a TLS Certificate will be checked +func WithInsecureSkipVerifyTLS(insecureSkipVerifyTLS bool) Option { + return func(opts *options) { + opts.insecureSkipVerifyTLS = insecureSkipVerifyTLS + } +} + +// WithTLSClientConfig sets the client auth with the provided credentials. +func WithTLSClientConfig(certFile, keyFile, caFile string) Option { + return func(opts *options) { + opts.certFile = certFile + opts.keyFile = keyFile + opts.caFile = caFile + } +} + +func WithPlainHTTP(plainHTTP bool) Option { + return func(opts *options) { + opts.plainHTTP = plainHTTP + } +} + +// WithTimeout sets the timeout for requests +func WithTimeout(timeout time.Duration) Option { + return func(opts *options) { + opts.timeout = timeout + } +} + +func WithTagName(tagname string) Option { + return func(opts *options) { + opts.version = tagname + } +} + +func WithRegistryClient(client *registry.Client) Option { + return func(opts *options) { + opts.registryClient = client + } +} + +func WithUntar() Option { + return func(opts *options) { + opts.unTar = true + } +} + +// WithTransport sets the http.Transport to allow overwriting the HTTPGetter default. +func WithTransport(transport *http.Transport) Option { + return func(opts *options) { + opts.transport = transport + } +} + +// Getter is an interface to support GET to the specified URL. +type Getter interface { + // Get file content by url string + Get(url string, options ...Option) (*bytes.Buffer, error) +} + +// Constructor is the function for every getter which creates a specific instance +// according to the configuration +type Constructor func(options ...Option) (Getter, error) + +// Provider represents any getter and the schemes that it supports. +// +// For example, an HTTP provider may provide one getter that handles both +// 'http' and 'https' schemes. +type Provider struct { + Schemes []string + New Constructor +} + +// Provides returns true if the given scheme is supported by this Provider. +func (p Provider) Provides(scheme string) bool { + for _, i := range p.Schemes { + if i == scheme { + return true + } + } + return false +} + +// Providers is a collection of Provider objects. +type Providers []Provider + +// ByScheme returns a Provider that handles the given scheme. +// +// If no provider handles this scheme, this will return an error. +func (p Providers) ByScheme(scheme string) (Getter, error) { + for _, pp := range p { + if pp.Provides(scheme) { + return pp.New() + } + } + return nil, errors.Errorf("scheme %q not supported", scheme) +} + +const ( + // The cost timeout references curl's default connection timeout. + // https://github.com/curl/curl/blob/master/lib/connect.h#L40C21-L40C21 + // The helm commands are usually executed manually. Considering the acceptable waiting time, we reduced the entire request time to 120s. 
+ DefaultHTTPTimeout = 120 +) + +var defaultOptions = []Option{WithTimeout(time.Second * DefaultHTTPTimeout)} + +var httpProvider = Provider{ + Schemes: []string{"http", "https"}, + New: func(options ...Option) (Getter, error) { + options = append(options, defaultOptions...) + return NewHTTPGetter(options...) + }, +} + +var ociProvider = Provider{ + Schemes: []string{registry.OCIScheme}, + New: NewOCIGetter, +} + +// All finds all of the registered getters as a list of Provider instances. +// Currently, the built-in getters and the discovered plugins with downloader +// notations are collected. +func All(settings *cli.EnvSettings) Providers { + result := Providers{httpProvider, ociProvider} + pluginDownloaders, _ := collectPlugins(settings) + result = append(result, pluginDownloaders...) + return result +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go new file mode 100644 index 000000000..b53e558e3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go @@ -0,0 +1,157 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "bytes" + "crypto/tls" + "io" + "net/http" + "net/url" + "sync" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/tlsutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/internal/version" +) + +// HTTPGetter is the default HTTP(/S) backend handler +type HTTPGetter struct { + opts options + transport *http.Transport + once sync.Once +} + +// Get performs a Get from repo.Getter and returns the body. +func (g *HTTPGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { + for _, opt := range options { + opt(&g.opts) + } + return g.get(href) +} + +func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { + // Set a helm specific user agent so that a repo server and metrics can + // separate helm calls from other tools interacting with repos. + req, err := http.NewRequest(http.MethodGet, href, nil) + if err != nil { + return nil, err + } + + req.Header.Set("User-Agent", version.GetUserAgent()) + if g.opts.userAgent != "" { + req.Header.Set("User-Agent", g.opts.userAgent) + } + + // Before setting the basic auth credentials, make sure the URL associated + // with the basic auth is the one being fetched. + u1, err := url.Parse(g.opts.url) + if err != nil { + return nil, errors.Wrap(err, "Unable to parse getter URL") + } + u2, err := url.Parse(href) + if err != nil { + return nil, errors.Wrap(err, "Unable to parse URL getting from") + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. 
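+	// Editor's note (hypothetical hosts): credentials registered for
+	// https://repo.example.com are not attached when fetching a chart from
+	// https://cdn.example.com unless passCredentialsAll is set.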
+ if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + if g.opts.username != "" && g.opts.password != "" { + req.SetBasicAuth(g.opts.username, g.opts.password) + } + } + + client, err := g.httpClient() + if err != nil { + return nil, err + } + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.Errorf("failed to fetch %s : %s", href, resp.Status) + } + + buf := bytes.NewBuffer(nil) + _, err = io.Copy(buf, resp.Body) + return buf, err +} + +// NewHTTPGetter constructs a valid http/https client as a Getter +func NewHTTPGetter(options ...Option) (Getter, error) { + var client HTTPGetter + + for _, opt := range options { + opt(&client.opts) + } + + return &client, nil +} + +func (g *HTTPGetter) httpClient() (*http.Client, error) { + if g.opts.transport != nil { + return &http.Client{ + Transport: g.opts.transport, + Timeout: g.opts.timeout, + }, nil + } + + g.once.Do(func() { + g.transport = &http.Transport{ + DisableCompression: true, + Proxy: http.ProxyFromEnvironment, + } + }) + + if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" || g.opts.insecureSkipVerifyTLS { + tlsConf, err := tlsutil.NewClientTLS(g.opts.certFile, g.opts.keyFile, g.opts.caFile, g.opts.insecureSkipVerifyTLS) + if err != nil { + return nil, errors.Wrap(err, "can't create TLS config for client") + } + + sni, err := urlutil.ExtractHostname(g.opts.url) + if err != nil { + return nil, err + } + tlsConf.ServerName = sni + + g.transport.TLSClientConfig = tlsConf + } + + if g.opts.insecureSkipVerifyTLS { + if g.transport.TLSClientConfig == nil { + g.transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } else { + g.transport.TLSClientConfig.InsecureSkipVerify = true + } + } + + client := &http.Client{ + Transport: g.transport, + Timeout: g.opts.timeout, + } + + return client, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go b/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go new file mode 100644 index 000000000..209786bd7 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go @@ -0,0 +1,155 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "bytes" + "fmt" + "net" + "net/http" + "strings" + "sync" + "time" + + "helm.sh/helm/v3/internal/tlsutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/registry" +) + +// OCIGetter is the default HTTP(/S) backend handler +type OCIGetter struct { + opts options + transport *http.Transport + once sync.Once +} + +// Get performs a Get from repo.Getter and returns the body. 
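+//
+// Editor's note (hypothetical reference): an href such as
+// "oci://registry.example.com/charts/etcd" has its scheme trimmed before the
+// pull, and a trailing ".prov" switches the pull to the provenance file only.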
+func (g *OCIGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { + for _, opt := range options { + opt(&g.opts) + } + return g.get(href) +} + +func (g *OCIGetter) get(href string) (*bytes.Buffer, error) { + client := g.opts.registryClient + // if the user has already provided a configured registry client, use it, + // this is particularly true when user has his own way of handling the client credentials. + if client == nil { + c, err := g.newRegistryClient() + if err != nil { + return nil, err + } + client = c + } + + ref := strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)) + + var pullOpts []registry.PullOption + requestingProv := strings.HasSuffix(ref, ".prov") + if requestingProv { + ref = strings.TrimSuffix(ref, ".prov") + pullOpts = append(pullOpts, + registry.PullOptWithChart(false), + registry.PullOptWithProv(true)) + } + + result, err := client.Pull(ref, pullOpts...) + if err != nil { + return nil, err + } + + if requestingProv { + return bytes.NewBuffer(result.Prov.Data), nil + } + return bytes.NewBuffer(result.Chart.Data), nil +} + +// NewOCIGetter constructs a valid http/https client as a Getter +func NewOCIGetter(ops ...Option) (Getter, error) { + var client OCIGetter + + for _, opt := range ops { + opt(&client.opts) + } + + return &client, nil +} + +func (g *OCIGetter) newRegistryClient() (*registry.Client, error) { + if g.opts.transport != nil { + client, err := registry.NewClient( + registry.ClientOptHTTPClient(&http.Client{ + Transport: g.opts.transport, + Timeout: g.opts.timeout, + }), + ) + if err != nil { + return nil, err + } + return client, nil + } + + g.once.Do(func() { + g.transport = &http.Transport{ + // From https://github.com/google/go-containerregistry/blob/31786c6cbb82d6ec4fb8eb79cd9387905130534e/pkg/v1/remote/options.go#L87 + DisableCompression: true, + DialContext: (&net.Dialer{ + // By default we wrap the transport in retries, so reduce the + // default dial timeout to 5s to avoid 5x 30s of connection + // timeouts when doing the "ping" on certain http registries. + Timeout: 5 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + }) + + if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" || g.opts.insecureSkipVerifyTLS { + tlsConf, err := tlsutil.NewClientTLS(g.opts.certFile, g.opts.keyFile, g.opts.caFile, g.opts.insecureSkipVerifyTLS) + if err != nil { + return nil, fmt.Errorf("can't create TLS config for client: %w", err) + } + + sni, err := urlutil.ExtractHostname(g.opts.url) + if err != nil { + return nil, err + } + tlsConf.ServerName = sni + + g.transport.TLSClientConfig = tlsConf + } + + opts := []registry.ClientOption{registry.ClientOptHTTPClient(&http.Client{ + Transport: g.transport, + Timeout: g.opts.timeout, + })} + if g.opts.plainHTTP { + opts = append(opts, registry.ClientOptPlainHTTP()) + } + + client, err := registry.NewClient(opts...) + + if err != nil { + return nil, err + } + + return client, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go b/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go new file mode 100644 index 000000000..a371b52eb --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go @@ -0,0 +1,110 @@ +/* +Copyright The Helm Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package getter + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/plugin" +) + +// collectPlugins scans for getter plugins. +// This will load plugins according to the cli. +func collectPlugins(settings *cli.EnvSettings) (Providers, error) { + plugins, err := plugin.FindPlugins(settings.PluginsDirectory) + if err != nil { + return nil, err + } + var result Providers + for _, plugin := range plugins { + for _, downloader := range plugin.Metadata.Downloaders { + result = append(result, Provider{ + Schemes: downloader.Protocols, + New: NewPluginGetter( + downloader.Command, + settings, + plugin.Metadata.Name, + plugin.Dir, + ), + }) + } + } + return result, nil +} + +// pluginGetter is a generic type to invoke custom downloaders, +// implemented in plugins. +type pluginGetter struct { + command string + settings *cli.EnvSettings + name string + base string + opts options +} + +func (p *pluginGetter) setupOptionsEnv(env []string) []string { + env = append(env, fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", p.opts.username)) + env = append(env, fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", p.opts.password)) + env = append(env, fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", p.opts.passCredentialsAll)) + return env +} + +// Get runs downloader plugin command +func (p *pluginGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { + for _, opt := range options { + opt(&p.opts) + } + commands := strings.Split(p.command, " ") + argv := append(commands[1:], p.opts.certFile, p.opts.keyFile, p.opts.caFile, href) + prog := exec.Command(filepath.Join(p.base, commands[0]), argv...) + plugin.SetupPluginEnv(p.settings, p.name, p.base) + prog.Env = p.setupOptionsEnv(os.Environ()) + buf := bytes.NewBuffer(nil) + prog.Stdout = buf + prog.Stderr = os.Stderr + if err := prog.Run(); err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + os.Stderr.Write(eerr.Stderr) + return nil, errors.Errorf("plugin %q exited with error", p.command) + } + return nil, err + } + return buf, nil +} + +// NewPluginGetter constructs a valid plugin getter +func NewPluginGetter(command string, settings *cli.EnvSettings, name, base string) Constructor { + return func(options ...Option) (Getter, error) { + result := &pluginGetter{ + command: command, + settings: settings, + name: name, + base: base, + } + for _, opt := range options { + opt(&result.opts) + } + return result, nil + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/home.go b/vendor/helm.sh/helm/v3/pkg/helmpath/home.go new file mode 100644 index 000000000..bd43e8890 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/home.go @@ -0,0 +1,44 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package helmpath calculates filesystem paths to Helm's configuration, cache and data. +package helmpath + +// This helper builds paths to Helm's configuration, cache and data paths. +const lp = lazypath("helm") + +// ConfigPath returns the path where Helm stores configuration. +func ConfigPath(elem ...string) string { return lp.configPath(elem...) } + +// CachePath returns the path where Helm stores cached objects. +func CachePath(elem ...string) string { return lp.cachePath(elem...) } + +// DataPath returns the path where Helm stores data. +func DataPath(elem ...string) string { return lp.dataPath(elem...) } + +// CacheIndexFile returns the path to an index for the given named repository. +func CacheIndexFile(name string) string { + if name != "" { + name += "-" + } + return name + "index.yaml" +} + +// CacheChartsFile returns the path to a text file listing all the charts +// within the given named repository. +func CacheChartsFile(name string) string { + if name != "" { + name += "-" + } + return name + "charts.txt" +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go new file mode 100644 index 000000000..22d7bf0a1 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go @@ -0,0 +1,72 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helmpath + +import ( + "os" + "path/filepath" + + "helm.sh/helm/v3/pkg/helmpath/xdg" +) + +const ( + // CacheHomeEnvVar is the environment variable used by Helm + // for the cache directory. When no value is set a default is used. + CacheHomeEnvVar = "HELM_CACHE_HOME" + + // ConfigHomeEnvVar is the environment variable used by Helm + // for the config directory. When no value is set a default is used. + ConfigHomeEnvVar = "HELM_CONFIG_HOME" + + // DataHomeEnvVar is the environment variable used by Helm + // for the data directory. When no value is set a default is used. + DataHomeEnvVar = "HELM_DATA_HOME" +) + +// lazypath is an lazy-loaded path buffer for the XDG base directory specification. +type lazypath string + +func (l lazypath) path(helmEnvVar, xdgEnvVar string, defaultFn func() string, elem ...string) string { + + // There is an order to checking for a path. + // 1. See if a Helm specific environment variable has been set. + // 2. Check if an XDG environment variable is set + // 3. 
Fall back to a default + base := os.Getenv(helmEnvVar) + if base != "" { + return filepath.Join(base, filepath.Join(elem...)) + } + base = os.Getenv(xdgEnvVar) + if base == "" { + base = defaultFn() + } + return filepath.Join(base, string(l), filepath.Join(elem...)) +} + +// cachePath defines the base directory relative to which user specific non-essential data files +// should be stored. +func (l lazypath) cachePath(elem ...string) string { + return l.path(CacheHomeEnvVar, xdg.CacheHomeEnvVar, cacheHome, filepath.Join(elem...)) +} + +// configPath defines the base directory relative to which user specific configuration files should +// be stored. +func (l lazypath) configPath(elem ...string) string { + return l.path(ConfigHomeEnvVar, xdg.ConfigHomeEnvVar, configHome, filepath.Join(elem...)) +} + +// dataPath defines the base directory relative to which user specific data files should be stored. +func (l lazypath) dataPath(elem ...string) string { + return l.path(DataHomeEnvVar, xdg.DataHomeEnvVar, dataHome, filepath.Join(elem...)) +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go new file mode 100644 index 000000000..eba6dde15 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go @@ -0,0 +1,34 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin + +package helmpath + +import ( + "path/filepath" + + "k8s.io/client-go/util/homedir" +) + +func dataHome() string { + return filepath.Join(homedir.HomeDir(), "Library") +} + +func configHome() string { + return filepath.Join(homedir.HomeDir(), "Library", "Preferences") +} + +func cacheHome() string { + return filepath.Join(homedir.HomeDir(), "Library", "Caches") +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go new file mode 100644 index 000000000..82fb4b6f1 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go @@ -0,0 +1,45 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !darwin + +package helmpath + +import ( + "path/filepath" + + "k8s.io/client-go/util/homedir" +) + +// dataHome defines the base directory relative to which user specific data files should be stored. +// +// If $XDG_DATA_HOME is either not set or empty, a default equal to $HOME/.local/share is used. 
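+//
+// Editor's sketch of the resulting lookup order for data paths on Linux
+// (assumed example values):
+//
+//	HELM_DATA_HOME=/opt/helm  ->  /opt/helm/<elem>
+//	XDG_DATA_HOME=/xdg        ->  /xdg/helm/<elem>
+//	neither set               ->  $HOME/.local/share/helm/<elem>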
+func dataHome() string { + return filepath.Join(homedir.HomeDir(), ".local", "share") +} + +// configHome defines the base directory relative to which user specific configuration files should +// be stored. +// +// If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config is used. +func configHome() string { + return filepath.Join(homedir.HomeDir(), ".config") +} + +// cacheHome defines the base directory relative to which user specific non-essential data files +// should be stored. +// +// If $XDG_CACHE_HOME is either not set or empty, a default equal to $HOME/.cache is used. +func cacheHome() string { + return filepath.Join(homedir.HomeDir(), ".cache") +} diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go new file mode 100644 index 000000000..230aee2a9 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go @@ -0,0 +1,24 @@ +// Copyright The Helm Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package helmpath + +import "os" + +func dataHome() string { return configHome() } + +func configHome() string { return os.Getenv("APPDATA") } + +func cacheHome() string { return os.Getenv("TEMP") } diff --git a/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go b/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go new file mode 100644 index 000000000..eaa3e6864 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go @@ -0,0 +1,34 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package xdg holds constants pertaining to XDG Base Directory Specification. +// +// The XDG Base Directory Specification https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html +// specifies the environment variables that define user-specific base directories for various categories of files. +package xdg + +const ( + // CacheHomeEnvVar is the environment variable used by the + // XDG base directory specification for the cache directory. + CacheHomeEnvVar = "XDG_CACHE_HOME" + + // ConfigHomeEnvVar is the environment variable used by the + // XDG base directory specification for the config directory. + ConfigHomeEnvVar = "XDG_CONFIG_HOME" + + // DataHomeEnvVar is the environment variable used by the + // XDG base directory specification for the data directory. 
+	DataHomeEnvVar = "XDG_DATA_HOME"
+)
diff --git a/vendor/helm.sh/helm/v3/pkg/ignore/doc.go b/vendor/helm.sh/helm/v3/pkg/ignore/doc.go
new file mode 100644
index 000000000..5245d410e
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/ignore/doc.go
@@ -0,0 +1,68 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package ignore provides tools for writing ignore files (a la .gitignore).
+
+This provides both an ignore parser and a file-aware processor.
+
+The format of ignore files closely follows, but does not exactly match, the
+format for .gitignore files (https://git-scm.com/docs/gitignore).
+
+The formatting rules are as follows:
+
+  - Parsing is line-by-line
+  - Empty lines are ignored
+  - Lines that begin with # (comments) will be ignored
+  - Leading and trailing spaces are always ignored
+  - Inline comments are NOT supported ('foo* # Any foo' does not contain a comment)
+  - There is no support for multi-line patterns
+  - Shell glob patterns are supported. See Go's "path/filepath".Match
+  - If a pattern begins with a leading !, the match will be negated.
+  - If a pattern begins with a leading /, only paths relative to the root will match.
+  - If the pattern ends with a trailing /, only directories will match
+  - If a pattern contains no slashes, file basenames are tested (not paths)
+  - The pattern sequence "**", while legal in a glob, will cause an error here
+    (to indicate incompatibility with .gitignore).
+
+Example:
+
+	# Match any file named foo.txt
+	foo.txt
+
+	# Match any text file
+	*.txt
+
+	# Match only directories named mydir
+	mydir/
+
+	# Match only text files in the top-level directory
+	/*.txt
+
+	# Match only the file foo.txt in the top-level directory
+	/foo.txt
+
+	# Match any file named ab.txt, ac.txt, or ad.txt
+	a[b-d].txt
+
+Notable differences from .gitignore:
+  - The '**' syntax is not supported.
+  - The globbing library is Go's 'filepath.Match', not fnmatch(3)
+  - Trailing spaces are always ignored (there is no supported escape sequence)
+  - The evaluation of escape sequences has not been tested for compatibility
+  - There is no support for '\!' as a special leading sequence.
+*/
+package ignore // import "helm.sh/helm/v3/pkg/ignore"
diff --git a/vendor/k8s.io/helm/pkg/ignore/rules.go b/vendor/helm.sh/helm/v3/pkg/ignore/rules.go
similarity index 94%
rename from vendor/k8s.io/helm/pkg/ignore/rules.go
rename to vendor/helm.sh/helm/v3/pkg/ignore/rules.go
index 9a8e08327..a80923baf 100644
--- a/vendor/k8s.io/helm/pkg/ignore/rules.go
+++ b/vendor/helm.sh/helm/v3/pkg/ignore/rules.go
@@ -18,12 +18,14 @@ package ignore
 
 import (
 	"bufio"
-	"errors"
+	"bytes"
 	"io"
 	"log"
 	"os"
 	"path/filepath"
 	"strings"
+
+	"github.com/pkg/errors"
 )
 
 // HelmIgnore default name of an ignorefile.
@@ -64,19 +66,24 @@ func Parse(file io.Reader) (*Rules, error) { r := &Rules{patterns: []*pattern{}} s := bufio.NewScanner(file) + currentLine := 0 + utf8bom := []byte{0xEF, 0xBB, 0xBF} for s.Scan() { - if err := r.parseRule(s.Text()); err != nil { + scannedBytes := s.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + line := string(scannedBytes) + currentLine++ + + if err := r.parseRule(line); err != nil { return r, err } } return r, s.Err() } -// Len returns the number of patterns in this rule set. -func (r *Rules) Len() int { - return len(r.patterns) -} - // Ignore evaluates the file at the given path, and returns true if it should be ignored. // // Ignore evaluates path against the rules in order. Evaluation stops when a match diff --git a/vendor/helm.sh/helm/v3/pkg/kube/client.go b/vendor/helm.sh/helm/v3/pkg/kube/client.go new file mode 100644 index 000000000..9df833a43 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/client.go @@ -0,0 +1,850 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/pkg/errors" + batch "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + + multierror "github.com/hashicorp/go-multierror" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + cachetools "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + cmdutil "k8s.io/kubectl/pkg/cmd/util" +) + +// ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. +var ErrNoObjectsVisited = errors.New("no objects visited") + +var metadataAccessor = meta.NewAccessor() + +// ManagedFieldsManager is the name of the manager of Kubernetes managedFields +// first introduced in Kubernetes 1.18 +var ManagedFieldsManager string + +// Client represents a client capable of communicating with the Kubernetes API. +type Client struct { + // Factory provides a minimal version of the kubectl Factory interface. If + // you need the full Factory you can type switch to the full interface. 
+	// Since the Kubernetes Go API does not provide backwards compatibility across
+	// minor versions, this API does not follow Helm backwards compatibility.
+	// Helm is exposing Kubernetes in this property and cannot guarantee this
+	// will not change. The minimal interface only has the functions that Helm
+	// needs. The smaller surface area of the interface means there is a lower
+	// chance of it changing.
+	Factory Factory
+	Log func(string, ...interface{})
+	// Namespace allows bypassing the kubeconfig file for the choice of the namespace
+	Namespace string
+
+	kubeClient *kubernetes.Clientset
+}
+
+var addToScheme sync.Once
+
+// New creates a new Client.
+func New(getter genericclioptions.RESTClientGetter) *Client {
+	if getter == nil {
+		getter = genericclioptions.NewConfigFlags(true)
+	}
+	// Add CRDs to the scheme. They are missing by default.
+	addToScheme.Do(func() {
+		if err := apiextv1.AddToScheme(scheme.Scheme); err != nil {
+			// This should never happen.
+			panic(err)
+		}
+		if err := apiextv1beta1.AddToScheme(scheme.Scheme); err != nil {
+			panic(err)
+		}
+	})
+	return &Client{
+		Factory: cmdutil.NewFactory(getter),
+		Log:     nopLogger,
+	}
+}
+
+var nopLogger = func(_ string, _ ...interface{}) {}
+
+// getKubeClient gets or creates a new Kubernetes clientset.
+func (c *Client) getKubeClient() (*kubernetes.Clientset, error) {
+	var err error
+	if c.kubeClient == nil {
+		c.kubeClient, err = c.Factory.KubernetesClientSet()
+	}
+
+	return c.kubeClient, err
+}
+
+// IsReachable tests connectivity to the cluster.
+func (c *Client) IsReachable() error {
+	client, err := c.getKubeClient()
+	if err == genericclioptions.ErrEmptyConfig {
+		// re-replace the kubernetes ErrEmptyConfig error with a friendly error;
+		// one more workaround for Kubernetes API breakage.
+		return errors.New("Kubernetes cluster unreachable")
+	}
+	if err != nil {
+		return errors.Wrap(err, "Kubernetes cluster unreachable")
+	}
+	if _, err := client.ServerVersion(); err != nil {
+		return errors.Wrap(err, "Kubernetes cluster unreachable")
+	}
+	return nil
+}
+
+// Create creates Kubernetes resources specified in the resource list.
+func (c *Client) Create(resources ResourceList) (*Result, error) {
+	c.Log("creating %d resource(s)", len(resources))
+	if err := perform(resources, createResource); err != nil {
+		return nil, err
+	}
+	return &Result{Created: resources}, nil
+}
+
+func transformRequests(req *rest.Request) {
+	tableParam := strings.Join([]string{
+		fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName),
+		fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName),
+		"application/json",
+	}, ",")
+	req.SetHeader("Accept", tableParam)
+
+	// if sorting, ensure we receive the full object in order to introspect its fields via jsonpath
+	req.Param("includeObject", "Object")
+}
+
+// Get retrieves the resource objects supplied. If related is set to true the
+// related pods are fetched as well. If the passed in resources are a table kind
+// the related resources will also be fetched as kind=table.
+func (c *Client) Get(resources ResourceList, related bool) (map[string][]runtime.Object, error) {
+	buf := new(bytes.Buffer)
+	objs := make(map[string][]runtime.Object)
+
+	podSelectors := []map[string]string{}
+	err := resources.Visit(func(info *resource.Info, err error) error {
+		if err != nil {
+			return err
+		}
+
+		gvk := info.ResourceMapping().GroupVersionKind
+		vk := gvk.Version + "/" + gvk.Kind
+		obj, err := getResource(info)
+		if err != nil {
+			fmt.Fprintf(buf, "Get resource %s failed, err:%v\n", info.Name, err)
+		} else {
+			objs[vk] = append(objs[vk], obj)
+
+			// Only fetch related pods if they are requested
+			if related {
+				// Discover if the existing object is a table. If it is, request
+				// the pods as Tables. Otherwise request them normally.
+				objGVK := obj.GetObjectKind().GroupVersionKind()
+				var isTable bool
+				if objGVK.Kind == "Table" {
+					isTable = true
+				}
+
+				objs, err = c.getSelectRelationPod(info, objs, isTable, &podSelectors)
+				if err != nil {
+					c.Log("Warning: failed to get related pods: %s", err.Error())
+				}
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return objs, nil
+}
+
+func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]runtime.Object, table bool, podSelectors *[]map[string]string) (map[string][]runtime.Object, error) {
+	if info == nil {
+		return objs, nil
+	}
+	c.Log("getting related pods of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name)
+	selector, ok, _ := getSelectorFromObject(info.Object)
+	if !ok {
+		return objs, nil
+	}
+
+	for index := range *podSelectors {
+		if reflect.DeepEqual((*podSelectors)[index], selector) {
+			// check if pods for selectors are already added. This avoids duplicate printing of pods
+			return objs, nil
+		}
+	}
+
+	*podSelectors = append(*podSelectors, selector)
+
+	var infos []*resource.Info
+	var err error
+	if table {
+		infos, err = c.Factory.NewBuilder().
+			Unstructured().
+			ContinueOnError().
+			NamespaceParam(info.Namespace).
+			DefaultNamespace().
+			ResourceTypes("pods").
+			LabelSelector(labels.Set(selector).AsSelector().String()).
+			TransformRequests(transformRequests).
+			Do().Infos()
+		if err != nil {
+			return objs, err
+		}
+	} else {
+		infos, err = c.Factory.NewBuilder().
+			Unstructured().
+			ContinueOnError().
+			NamespaceParam(info.Namespace).
+			DefaultNamespace().
+			ResourceTypes("pods").
+			LabelSelector(labels.Set(selector).AsSelector().String()).
+			Do().Infos()
+		if err != nil {
+			return objs, err
+		}
+	}
+	vk := "v1/Pod(related)"
+
+	for _, info := range infos {
+		objs[vk] = append(objs[vk], info.Object)
+	}
+	return objs, nil
+}
+
+func getSelectorFromObject(obj runtime.Object) (map[string]string, bool, error) {
+	typed := obj.(*unstructured.Unstructured)
+	kind := typed.Object["kind"]
+	switch kind {
+	case "ReplicaSet", "Deployment", "StatefulSet", "DaemonSet", "Job":
+		return unstructured.NestedStringMap(typed.Object, "spec", "selector", "matchLabels")
+	case "ReplicationController":
+		return unstructured.NestedStringMap(typed.Object, "spec", "selector")
+	default:
+		return nil, false, nil
+	}
+}
+
+func getResource(info *resource.Info) (runtime.Object, error) {
+	obj, err := resource.NewHelper(info.Client, info.Mapping).Get(info.Namespace, info.Name)
+	if err != nil {
+		return nil, err
+	}
+	return obj, nil
+}
+
+// Wait waits up to the given timeout for the specified resources to be ready.
+func (c *Client) Wait(resources ResourceList, timeout time.Duration) error {
+	cs, err := c.getKubeClient()
+	if err != nil {
+		return err
+	}
+	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
+	w := waiter{
+		c:       checker,
+		log:     c.Log,
+		timeout: timeout,
+	}
+	return w.waitForResources(resources)
+}
+
+// WaitWithJobs waits up to the given timeout for the specified resources to be ready, including jobs.
+func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
+	cs, err := c.getKubeClient()
+	if err != nil {
+		return err
+	}
+	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true))
+	w := waiter{
+		c:       checker,
+		log:     c.Log,
+		timeout: timeout,
+	}
+	return w.waitForResources(resources)
+}
+
+// WaitForDelete waits up to the given timeout for the specified resources to be deleted.
+func (c *Client) WaitForDelete(resources ResourceList, timeout time.Duration) error {
+	w := waiter{
+		log:     c.Log,
+		timeout: timeout,
+	}
+	return w.waitForDeletedResources(resources)
+}
+
+func (c *Client) namespace() string {
+	if c.Namespace != "" {
+		return c.Namespace
+	}
+	if ns, _, err := c.Factory.ToRawKubeConfigLoader().Namespace(); err == nil {
+		return ns
+	}
+	return v1.NamespaceDefault
+}
+
+// newBuilder returns a new resource builder for structured api objects.
+func (c *Client) newBuilder() *resource.Builder {
+	return c.Factory.NewBuilder().
+		ContinueOnError().
+		NamespaceParam(c.namespace()).
+		DefaultNamespace().
+		Flatten()
+}
+
+// Build validates Kubernetes objects and returns unstructured infos.
+func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) {
+	validationDirective := metav1.FieldValidationIgnore
+	if validate {
+		validationDirective = metav1.FieldValidationStrict
+	}
+
+	schema, err := c.Factory.Validator(validationDirective)
+	if err != nil {
+		return nil, err
+	}
+	result, err := c.newBuilder().
+		Unstructured().
+		Schema(schema).
+		Stream(reader, "").
+		Do().Infos()
+	return result, scrubValidationError(err)
+}
+
+// BuildTable validates Kubernetes objects and returns unstructured infos.
+// The returned kind is a Table.
+func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, error) {
+	validationDirective := metav1.FieldValidationIgnore
+	if validate {
+		validationDirective = metav1.FieldValidationStrict
+	}
+
+	schema, err := c.Factory.Validator(validationDirective)
+	if err != nil {
+		return nil, err
+	}
+	result, err := c.newBuilder().
+		Unstructured().
+		Schema(schema).
+		Stream(reader, "").
+		TransformRequests(transformRequests).
+		Do().Infos()
+	return result, scrubValidationError(err)
+}
+
+// Update takes the current list of objects and target list of objects and
+// creates resources that don't already exist, updates resources that have been
+// modified in the target configuration, and deletes resources from the current
+// configuration that are not present in the target configuration. If an error
+// occurs, a Result will still be returned with the error, containing all
+// resource updates, creations, and deletions that were attempted. These can be
+// used for cleanup or other logging purposes.
+func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) {
+	updateErrors := []string{}
+	res := &Result{}
+
+	c.Log("checking %d resources for changes", len(target))
+	err := target.Visit(func(info *resource.Info, err error) error {
+		if err != nil {
+			return err
+		}
+
+		helper := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager())
+		if _, err := helper.Get(info.Namespace, info.Name); err != nil {
+			if !apierrors.IsNotFound(err) {
+				return errors.Wrap(err, "could not get information about the resource")
+			}
+
+			// Append the created resource to the results, even if something fails
+			res.Created = append(res.Created, info)
+
+			// Since the resource does not exist, create it.
+			if err := createResource(info); err != nil {
+				return errors.Wrap(err, "failed to create resource")
+			}
+
+			kind := info.Mapping.GroupVersionKind.Kind
+			c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace)
+			return nil
+		}
+
+		originalInfo := original.Get(info)
+		if originalInfo == nil {
+			kind := info.Mapping.GroupVersionKind.Kind
+			return errors.Errorf("no %s with the name %q found", kind, info.Name)
+		}
+
+		if err := updateResource(c, info, originalInfo.Object, force); err != nil {
+			c.Log("error updating the resource %q:\n\t %v", info.Name, err)
+			updateErrors = append(updateErrors, err.Error())
+		}
+		// Because we check for errors later, append the info regardless
+		res.Updated = append(res.Updated, info)
+
+		return nil
+	})
+
+	switch {
+	case err != nil:
+		return res, err
+	case len(updateErrors) != 0:
+		return res, errors.New(strings.Join(updateErrors, " && "))
+	}
+
+	for _, info := range original.Difference(target) {
+		c.Log("Deleting %s %q in namespace %s...", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace)
+
+		if err := info.Get(); err != nil {
+			c.Log("Unable to get obj %q, err: %s", info.Name, err)
+			continue
+		}
+		annotations, err := metadataAccessor.Annotations(info.Object)
+		if err != nil {
+			c.Log("Unable to get annotations on %q, err: %s", info.Name, err)
+		}
+		if annotations != nil && annotations[ResourcePolicyAnno] == KeepPolicy {
+			c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy)
+			continue
+		}
+		if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
+			c.Log("Failed to delete %q, err: %s", info.ObjectName(), err)
+			continue
+		}
+		res.Deleted = append(res.Deleted, info)
+	}
+	return res, nil
+}
+
+// Delete deletes Kubernetes resources specified in the resources list with
+// background cascade deletion. It will attempt to delete all resources even
+// if one or more fail and collect any errors. All successfully deleted items
+// will be returned in the `Deleted` ResourceList that is part of the result.
+func (c *Client) Delete(resources ResourceList) (*Result, []error) {
+	return rdelete(c, resources, metav1.DeletePropagationBackground)
+}
+
+// DeleteWithPropagationPolicy deletes Kubernetes resources specified in the resources list with
+// the given deletion propagation policy. It will attempt to delete all resources even
+// if one or more fail and collect any errors. All successfully deleted items
+// will be returned in the `Deleted` ResourceList that is part of the result.
+func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error) {
+	return rdelete(c, resources, policy)
+}
+
+func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
+	var errs []error
+	res := &Result{}
+	mtx := sync.Mutex{}
+	err := perform(resources, func(info *resource.Info) error {
+		c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
+		err := deleteResource(info, propagation)
+		if err == nil || apierrors.IsNotFound(err) {
+			if err != nil {
+				c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err)
+			}
+			mtx.Lock()
+			defer mtx.Unlock()
+			res.Deleted = append(res.Deleted, info)
+			return nil
+		}
+		mtx.Lock()
+		defer mtx.Unlock()
+		// Collect the error and continue on
+		errs = append(errs, err)
+		return nil
+	})
+	if err != nil {
+		if errors.Is(err, ErrNoObjectsVisited) {
+			err = fmt.Errorf("object not found, skipping delete: %w", err)
+		}
+		errs = append(errs, err)
+	}
+	if errs != nil {
+		return nil, errs
+	}
+	return res, nil
+}
+
+func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error {
+	return func(info *resource.Info) error {
+		return c.watchUntilReady(t, info)
+	}
+}
+
+// WatchUntilReady watches the given resources and waits until they are ready.
+//
+// This method is mainly for hook implementations. It watches for a resource to
+// hit a particular milestone. The milestone depends on the Kind.
+//
+// For most kinds, it checks to see if the resource is marked as Added or Modified
+// by the Kubernetes event stream. For some kinds, it does more:
+//
+// - Jobs: A job is marked "Ready" when it has successfully completed. This is
+//   ascertained by watching the Status fields in a job's output.
+// - Pods: A pod is marked "Ready" when it has successfully completed. This is
+//   ascertained by watching the status.phase field in a pod's output.
+//
+// Handling for other kinds will be added as necessary.
+func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
+	// For jobs, there's also the option to poll c.Jobs(namespace).Get():
+	// https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
+	return perform(resources, c.watchTimeout(timeout))
+}
+
+func perform(infos ResourceList, fn func(*resource.Info) error) error {
+	var result error
+
+	if len(infos) == 0 {
+		return ErrNoObjectsVisited
+	}
+
+	errs := make(chan error)
+	go batchPerform(infos, fn, errs)
+
+	for range infos {
+		err := <-errs
+		if err != nil {
+			result = multierror.Append(result, err)
+		}
+	}
+
+	return result
+}
+
+// getManagedFieldsManager returns the manager string. If one was set it will be returned.
+// Otherwise, one is calculated based on the name of the binary.
+func getManagedFieldsManager() string {
+
+	// When a manager is explicitly set use it
+	if ManagedFieldsManager != "" {
+		return ManagedFieldsManager
+	}
+
+	// When no manager is set and no calling application can be found it is unknown
+	if len(os.Args[0]) == 0 {
+		return "unknown"
+	}
+
+	// When there is an application that can be determined and no set manager
+	// use the base name. This is one of the ways Kubernetes libs handle figuring
+	// names out.
+	return filepath.Base(os.Args[0])
+}
+
+func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- error) {
+	var kind string
+	var wg sync.WaitGroup
+	for _, info := range infos {
+		currentKind := info.Object.GetObjectKind().GroupVersionKind().Kind
+		if kind != currentKind {
+			wg.Wait()
+			kind = currentKind
+		}
+		wg.Add(1)
+		go func(i *resource.Info) {
+			errs <- fn(i)
+			wg.Done()
+		}(info)
+	}
+}
+
+func createResource(info *resource.Info) error {
+	obj, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).Create(info.Namespace, true, info.Object)
+	if err != nil {
+		return err
+	}
+	return info.Refresh(obj, true)
+}
+
+func deleteResource(info *resource.Info, policy metav1.DeletionPropagation) error {
+	opts := &metav1.DeleteOptions{PropagationPolicy: &policy}
+	_, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).DeleteWithOptions(info.Namespace, info.Name, opts)
+	return err
+}
+
+func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.PatchType, error) {
+	oldData, err := json.Marshal(current)
+	if err != nil {
+		return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing current configuration")
+	}
+	newData, err := json.Marshal(target.Object)
+	if err != nil {
+		return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing target configuration")
+	}
+
+	// Fetch the current object for the three-way merge
+	helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+	currentObj, err := helper.Get(target.Namespace, target.Name)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return nil, types.StrategicMergePatchType, errors.Wrapf(err, "unable to get data for current object %s/%s", target.Namespace, target.Name)
+	}
+
+	// Even if currentObj is nil (because it was not found), it will marshal just fine
+	currentData, err := json.Marshal(currentObj)
+	if err != nil {
+		return nil, types.StrategicMergePatchType, errors.Wrap(err, "serializing live configuration")
+	}
+
+	// Get a versioned object
+	versionedObject := AsVersioned(target)
+
+	// Unstructured objects, such as CRDs, may not return a "not registered"
+	// error from ConvertToVersion. Anything that's unstructured should
+	// use jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported
+	// on objects like CRDs.
+	_, isUnstructured := versionedObject.(runtime.Unstructured)
+
+	// On newer K8s versions, CRDs aren't unstructured but have this dedicated type
+	_, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition)
+
+	if isUnstructured || isCRD {
+		// fall back to generic JSON merge patch
+		patch, err := jsonpatch.CreateMergePatch(oldData, newData)
+		return patch, types.MergePatchType, err
+	}
+
+	patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
+	if err != nil {
+		return nil, types.StrategicMergePatchType, errors.Wrap(err, "unable to create patch metadata from object")
+	}
+
+	patch, err := strategicpatch.CreateThreeWayMergePatch(oldData, newData, currentData, patchMeta, true)
+	return patch, types.StrategicMergePatchType, err
+}
+
+func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error {
+	var (
+		obj    runtime.Object
+		helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager())
+		kind   = target.Mapping.GroupVersionKind.Kind
+	)
+
+	// If --force is applied, attempt to replace the existing resource with the new object.
+	if force {
+		var err error
+		obj, err = helper.Replace(target.Namespace, target.Name, true, target.Object)
+		if err != nil {
+			return errors.Wrap(err, "failed to replace object")
+		}
+		c.Log("Replaced %q with kind %s for kind %s", target.Name, currentObj.GetObjectKind().GroupVersionKind().Kind, kind)
+	} else {
+		patch, patchType, err := createPatch(target, currentObj)
+		if err != nil {
+			return errors.Wrap(err, "failed to create patch")
+		}
+
+		if patch == nil || string(patch) == "{}" {
+			c.Log("Looks like there are no changes for %s %q", kind, target.Name)
+			// This needs to happen to make sure that Helm has the latest info from the API.
+			// Otherwise there will be no labels, and other functions that use labels will panic.
+			if err := target.Get(); err != nil {
+				return errors.Wrap(err, "failed to refresh resource information")
+			}
+			return nil
+		}
+		// send patch to server
+		c.Log("Patch %s %q in namespace %s", kind, target.Name, target.Namespace)
+		obj, err = helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
+		if err != nil {
+			return errors.Wrapf(err, "cannot patch %q with kind %s", target.Name, kind)
+		}
+	}
+
+	target.Refresh(obj, true)
+	return nil
+}
+
+func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
+	kind := info.Mapping.GroupVersionKind.Kind
+	switch kind {
+	case "Job", "Pod":
+	default:
+		return nil
+	}
+
+	c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
+
+	// Use a selector on the name of the resource. This should be unique for the
+	// given version and kind
+	selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
+	if err != nil {
+		return err
+	}
+	lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
+
+	// What we watch for depends on the Kind.
+	// - For a Job, we watch for completion.
+	// - For all else, we watch until Ready.
+	// In the future, we might want to add some special logic for types
+	// like Ingress, Volume, etc.
+ + ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) + defer cancel() + _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) { + // Make sure the incoming object is versioned as we use unstructured + // objects when we build manifests + obj := convertWithMapper(e.Object, info.Mapping) + switch e.Type { + case watch.Added, watch.Modified: + // For things like a secret or a config map, this is the best indicator + // we get. We care mostly about jobs, where what we want to see is + // the status go into a good state. For other types, like ReplicaSet + // we don't really do anything to support these as hooks. + c.Log("Add/Modify event for %s: %v", info.Name, e.Type) + switch kind { + case "Job": + return c.waitForJob(obj, info.Name) + case "Pod": + return c.waitForPodSuccess(obj, info.Name) + } + return true, nil + case watch.Deleted: + c.Log("Deleted event for %s", info.Name) + return true, nil + case watch.Error: + // Handle error and return with an error. + c.Log("Error event for %s", info.Name) + return true, errors.Errorf("failed to deploy %s", info.Name) + default: + return false, nil + } + }) + return err +} + +// waitForJob is a helper that waits for a job to complete. +// +// This operates on an event returned from a watcher. +func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) { + o, ok := obj.(*batch.Job) + if !ok { + return true, errors.Errorf("expected %s to be a *batch.Job, got %T", name, obj) + } + + for _, c := range o.Status.Conditions { + if c.Type == batch.JobComplete && c.Status == "True" { + return true, nil + } else if c.Type == batch.JobFailed && c.Status == "True" { + return true, errors.Errorf("job %s failed: %s", name, c.Reason) + } + } + + c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, o.Status.Active, o.Status.Failed, o.Status.Succeeded) + return false, nil +} + +// waitForPodSuccess is a helper that waits for a pod to complete. +// +// This operates on an event returned from a watcher. +func (c *Client) waitForPodSuccess(obj runtime.Object, name string) (bool, error) { + o, ok := obj.(*v1.Pod) + if !ok { + return true, errors.Errorf("expected %s to be a *v1.Pod, got %T", name, obj) + } + + switch o.Status.Phase { + case v1.PodSucceeded: + c.Log("Pod %s succeeded", o.Name) + return true, nil + case v1.PodFailed: + return true, errors.Errorf("pod %s failed", o.Name) + case v1.PodPending: + c.Log("Pod %s pending", o.Name) + case v1.PodRunning: + c.Log("Pod %s running", o.Name) + } + + return false, nil +} + +// scrubValidationError removes kubectl info from the message. +func scrubValidationError(err error) error { + if err == nil { + return nil + } + const stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" + + if strings.Contains(err.Error(), stopValidateMessage) { + return errors.New(strings.ReplaceAll(err.Error(), "; "+stopValidateMessage, "")) + } + return err +} + +// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase +// and returns said phase (PodSucceeded or PodFailed qualify). 
+func (c *Client) WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) { + client, err := c.getKubeClient() + if err != nil { + return v1.PodUnknown, err + } + to := int64(timeout) + watcher, err := client.CoreV1().Pods(c.namespace()).Watch(context.Background(), metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", name), + TimeoutSeconds: &to, + }) + if err != nil { + return v1.PodUnknown, err + } + + for event := range watcher.ResultChan() { + p, ok := event.Object.(*v1.Pod) + if !ok { + return v1.PodUnknown, fmt.Errorf("%s not a pod", name) + } + switch p.Status.Phase { + case v1.PodFailed: + return v1.PodFailed, nil + case v1.PodSucceeded: + return v1.PodSucceeded, nil + } + } + + return v1.PodUnknown, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/config.go b/vendor/helm.sh/helm/v3/pkg/kube/config.go new file mode 100644 index 000000000..e00c9acb1 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/config.go @@ -0,0 +1,30 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import "k8s.io/cli-runtime/pkg/genericclioptions" + +// GetConfig returns a Kubernetes client config. +// +// Deprecated +func GetConfig(kubeconfig, context, namespace string) *genericclioptions.ConfigFlags { + cf := genericclioptions.NewConfigFlags(true) + cf.Namespace = &namespace + cf.Context = &context + cf.KubeConfig = &kubeconfig + return cf +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/converter.go b/vendor/helm.sh/helm/v3/pkg/kube/converter.go new file mode 100644 index 000000000..3bf0e358c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/converter.go @@ -0,0 +1,69 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "sync" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes/scheme" +) + +var k8sNativeScheme *runtime.Scheme +var k8sNativeSchemeOnce sync.Once + +// AsVersioned converts the given info into a runtime.Object with the correct +// group and version set +func AsVersioned(info *resource.Info) runtime.Object { + return convertWithMapper(info.Object, info.Mapping) +} + +// convertWithMapper converts the given object with the optional provided +// RESTMapping. If no mapping is provided, the default schema versioner is used +func convertWithMapper(obj runtime.Object, mapping *meta.RESTMapping) runtime.Object { + s := kubernetesNativeScheme() + var gv = runtime.GroupVersioner(schema.GroupVersions(s.PrioritizedVersionsAllGroups())) + if mapping != nil { + gv = mapping.GroupVersionKind.GroupVersion() + } + if obj, err := runtime.ObjectConvertor(s).ConvertToVersion(obj, gv); err == nil { + return obj + } + return obj +} + +// kubernetesNativeScheme returns a clean *runtime.Scheme with _only_ Kubernetes +// native resources added to it. This is required to break free of custom resources +// that may have been added to scheme.Scheme due to Helm being used as a package in +// combination with e.g. a versioned kube client. If we would not do this, the client +// may attempt to perform e.g. a 3-way-merge strategy patch for custom resources. +func kubernetesNativeScheme() *runtime.Scheme { + k8sNativeSchemeOnce.Do(func() { + k8sNativeScheme = runtime.NewScheme() + scheme.AddToScheme(k8sNativeScheme) + // API extensions are not in the above scheme set, + // and must thus be added separately. + apiextensionsv1beta1.AddToScheme(k8sNativeScheme) + apiextensionsv1.AddToScheme(k8sNativeScheme) + }) + return k8sNativeScheme +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/factory.go b/vendor/helm.sh/helm/v3/pkg/kube/factory.go new file mode 100644 index 000000000..f19d62dc3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/factory.go @@ -0,0 +1,51 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/validation" +) + +// Factory provides abstractions that allow the Kubectl command to be extended across multiple types +// of resources and different API sets. +// This interface is a minimal copy of the kubectl Factory interface containing only the functions +// needed by Helm. 
Since Kubernetes Go APIs, including interfaces, can change in any minor release
+// this interface is not covered by the Helm backwards compatibility guarantee. The reason for the
+// minimal copy is that it does not include the full interface. Changes or additions to functions
+// Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes
+// being exposed.
+type Factory interface {
+	// ToRawKubeConfigLoader returns the kubeconfig loader as-is
+	ToRawKubeConfigLoader() clientcmd.ClientConfig
+
+	// DynamicClient returns a dynamic client ready for use
+	DynamicClient() (dynamic.Interface, error)
+
+	// KubernetesClientSet gives you back an external clientset
+	KubernetesClientSet() (*kubernetes.Clientset, error)
+
+	// NewBuilder returns an object that assists in loading objects from both disk and the server
+	// and which implements the common patterns for CLI interactions with generic resources.
+	NewBuilder() *resource.Builder
+
+	// Validator returns a schema that can validate objects stored on disk.
+	Validator(validationDirective string) (validation.Schema, error)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go
new file mode 100644
index 000000000..267020d57
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go
@@ -0,0 +1,160 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fake implements various fake KubeClients for use in testing
+package fake
+
+import (
+	"io"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/resource"
+
+	"helm.sh/helm/v3/pkg/kube"
+)
+
+// FailingKubeClient implements KubeClient for testing purposes. It also has
+// additional errors you can set to fail different functions; otherwise it
+// delegates all its calls to `PrintingKubeClient`
+type FailingKubeClient struct {
+	PrintingKubeClient
+	CreateError                      error
+	GetError                         error
+	WaitError                        error
+	DeleteError                      error
+	DeleteWithPropagationError       error
+	WatchUntilReadyError             error
+	UpdateError                      error
+	BuildError                       error
+	BuildTableError                  error
+	BuildDummy                       bool
+	BuildUnstructuredError           error
+	WaitAndGetCompletedPodPhaseError error
+	WaitDuration                     time.Duration
+}
+
+// Create returns the configured error if set or prints
+func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
+	if f.CreateError != nil {
+		return nil, f.CreateError
+	}
+	return f.PrintingKubeClient.Create(resources)
+}
+
+// Get returns the configured error if set or prints
+func (f *FailingKubeClient) Get(resources kube.ResourceList, related bool) (map[string][]runtime.Object, error) {
+	if f.GetError != nil {
+		return nil, f.GetError
+	}
+	return f.PrintingKubeClient.Get(resources, related)
+}
+
+// Wait sleeps for the amount of time defined in f.WaitDuration, then returns the configured error if set or prints.
+func (f *FailingKubeClient) Wait(resources kube.ResourceList, d time.Duration) error { + time.Sleep(f.WaitDuration) + if f.WaitError != nil { + return f.WaitError + } + return f.PrintingKubeClient.Wait(resources, d) +} + +// WaitWithJobs returns the configured error if set or prints +func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Duration) error { + if f.WaitError != nil { + return f.WaitError + } + return f.PrintingKubeClient.WaitWithJobs(resources, d) +} + +// WaitForDelete returns the configured error if set or prints +func (f *FailingKubeClient) WaitForDelete(resources kube.ResourceList, d time.Duration) error { + if f.WaitError != nil { + return f.WaitError + } + return f.PrintingKubeClient.WaitForDelete(resources, d) +} + +// Delete returns the configured error if set or prints +func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) { + if f.DeleteError != nil { + return nil, []error{f.DeleteError} + } + return f.PrintingKubeClient.Delete(resources) +} + +// WatchUntilReady returns the configured error if set or prints +func (f *FailingKubeClient) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { + if f.WatchUntilReadyError != nil { + return f.WatchUntilReadyError + } + return f.PrintingKubeClient.WatchUntilReady(resources, d) +} + +// Update returns the configured error if set or prints +func (f *FailingKubeClient) Update(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) { + if f.UpdateError != nil { + return &kube.Result{}, f.UpdateError + } + return f.PrintingKubeClient.Update(r, modified, ignoreMe) +} + +// Build returns the configured error if set or prints +func (f *FailingKubeClient) Build(r io.Reader, _ bool) (kube.ResourceList, error) { + if f.BuildError != nil { + return []*resource.Info{}, f.BuildError + } + if f.BuildDummy { + return createDummyResourceList(), nil + } + return f.PrintingKubeClient.Build(r, false) +} + +// BuildTable returns the configured error if set or prints +func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, error) { + if f.BuildTableError != nil { + return []*resource.Info{}, f.BuildTableError + } + return f.PrintingKubeClient.BuildTable(r, false) +} + +// WaitAndGetCompletedPodPhase returns the configured error if set or prints +func (f *FailingKubeClient) WaitAndGetCompletedPodPhase(s string, d time.Duration) (v1.PodPhase, error) { + if f.WaitAndGetCompletedPodPhaseError != nil { + return v1.PodSucceeded, f.WaitAndGetCompletedPodPhaseError + } + return f.PrintingKubeClient.WaitAndGetCompletedPodPhase(s, d) +} + +// DeleteWithPropagationPolicy returns the configured error if set or prints +func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { + if f.DeleteWithPropagationError != nil { + return nil, []error{f.DeleteWithPropagationError} + } + return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy) +} + +func createDummyResourceList() kube.ResourceList { + var resInfo resource.Info + resInfo.Name = "dummyName" + resInfo.Namespace = "dummyNamespace" + var resourceList kube.ResourceList + resourceList.Append(&resInfo) + return resourceList + +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go new file mode 100644 index 000000000..cc2c84b40 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go @@ -0,0 +1,136 @@ +/* +Copyright The Helm 
Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+	"io"
+	"strings"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/resource"
+
+	"helm.sh/helm/v3/pkg/kube"
+)
+
+// PrintingKubeClient implements KubeClient, but simply prints the reader to
+// the given output.
+type PrintingKubeClient struct {
+	Out io.Writer
+}
+
+// IsReachable checks if the cluster is reachable
+func (p *PrintingKubeClient) IsReachable() error {
+	return nil
+}
+
+// Create prints the values of what would be created with a real KubeClient.
+func (p *PrintingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
+	_, err := io.Copy(p.Out, bufferize(resources))
+	if err != nil {
+		return nil, err
+	}
+	return &kube.Result{Created: resources}, nil
+}
+
+func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[string][]runtime.Object, error) {
+	_, err := io.Copy(p.Out, bufferize(resources))
+	if err != nil {
+		return nil, err
+	}
+	return make(map[string][]runtime.Object), nil
+}
+
+func (p *PrintingKubeClient) Wait(resources kube.ResourceList, _ time.Duration) error {
+	_, err := io.Copy(p.Out, bufferize(resources))
+	return err
+}
+
+func (p *PrintingKubeClient) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error {
+	_, err := io.Copy(p.Out, bufferize(resources))
+	return err
+}
+
+func (p *PrintingKubeClient) WaitForDelete(resources kube.ResourceList, _ time.Duration) error {
+	_, err := io.Copy(p.Out, bufferize(resources))
+	return err
+}
+
+// Delete implements KubeClient delete.
+//
+// It only prints out the content to be deleted.
+func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) {
+	_, err := io.Copy(p.Out, bufferize(resources))
+	if err != nil {
+		return nil, []error{err}
+	}
+	return &kube.Result{Deleted: resources}, nil
+}
+
+// WatchUntilReady implements KubeClient WatchUntilReady.
+func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error {
+	_, err := io.Copy(p.Out, bufferize(resources))
+	return err
+}
+
+// Update implements KubeClient Update.
+func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) {
+	_, err := io.Copy(p.Out, bufferize(modified))
+	if err != nil {
+		return nil, err
+	}
+	// TODO: This doesn't completely mock a real update, where some resources get
+	// created, some updated, and some deleted. I don't think these are used in any
+	// unit tests, but we may want to refactor a way to handle future tests
+	return &kube.Result{Updated: modified}, nil
+}
+
+// Build implements KubeClient Build.
+func (p *PrintingKubeClient) Build(_ io.Reader, _ bool) (kube.ResourceList, error) {
+	return []*resource.Info{}, nil
+}
+
+// BuildTable implements KubeClient BuildTable.
+func (p *PrintingKubeClient) BuildTable(_ io.Reader, _ bool) (kube.ResourceList, error) {
+	return []*resource.Info{}, nil
+}
+
+// WaitAndGetCompletedPodPhase implements KubeClient WaitAndGetCompletedPodPhase.
+func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Duration) (v1.PodPhase, error) {
+	return v1.PodSucceeded, nil
+}
+
+// DeleteWithPropagationPolicy implements KubeClient delete.
+//
+// It only prints out the content to be deleted.
+func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) {
+	_, err := io.Copy(p.Out, bufferize(resources))
+	if err != nil {
+		return nil, []error{err}
+	}
+	return &kube.Result{Deleted: resources}, nil
+}
+
+func bufferize(resources kube.ResourceList) io.Reader {
+	var builder strings.Builder
+	for _, info := range resources {
+		builder.WriteString(info.String() + "\n")
+	}
+	return strings.NewReader(builder.String())
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/interface.go b/vendor/helm.sh/helm/v3/pkg/kube/interface.go
new file mode 100644
index 000000000..ce42ed950
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/interface.go
@@ -0,0 +1,116 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+	"io"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Interface represents a client capable of communicating with the Kubernetes API.
+//
+// A KubernetesClient must be concurrency safe.
+type Interface interface {
+	// Create creates one or more resources.
+	Create(resources ResourceList) (*Result, error)
+
+	// Wait waits up to the given timeout for the specified resources to be ready.
+	Wait(resources ResourceList, timeout time.Duration) error
+
+	// WaitWithJobs waits up to the given timeout for the specified resources to be ready, including jobs.
+	WaitWithJobs(resources ResourceList, timeout time.Duration) error
+
+	// Delete destroys one or more resources.
+	Delete(resources ResourceList) (*Result, []error)
+
+	// WatchUntilReady watches the given resources and waits until they are ready.
+	//
+	// This method is mainly for hook implementations. It watches for a resource to
+	// hit a particular milestone. The milestone depends on the Kind.
+	//
+	// For Jobs, "ready" means the Job ran to completion (exited without error).
+	// For Pods, "ready" means the Pod phase is marked "succeeded".
+	// For all other kinds, it means the kind was created or modified without
+	// error.
+	WatchUntilReady(resources ResourceList, timeout time.Duration) error
+
+	// Update updates one or more resources or creates the resource
+	// if it doesn't exist.
+	Update(original, target ResourceList, force bool) (*Result, error)
+
+	// Build creates a resource list from a Reader.
+	//
+	// Reader must contain a YAML stream (one or more YAML documents separated
+	// by "\n---\n")
+	//
+	// Validates against OpenAPI schema if validate is true.
+	Build(reader io.Reader, validate bool) (ResourceList, error)
+
+	// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase
+	// and returns said phase (PodSucceeded or PodFailed qualify).
+	WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error)
+
+	// IsReachable checks whether the client is able to connect to the cluster.
+	IsReachable() error
+}
+
+// InterfaceExt is introduced to avoid breaking backwards compatibility for Interface implementers.
+//
+// TODO Helm 4: Remove InterfaceExt and integrate its method(s) into the Interface.
+type InterfaceExt interface {
+	// WaitForDelete waits up to the given timeout for the specified resources to be deleted.
+	WaitForDelete(resources ResourceList, timeout time.Duration) error
+}
+
+// InterfaceDeletionPropagation is introduced to avoid breaking backwards compatibility for Interface implementers.
+//
+// TODO Helm 4: Remove InterfaceDeletionPropagation and integrate its method(s) into the Interface.
+type InterfaceDeletionPropagation interface {
+	// DeleteWithPropagationPolicy destroys one or more resources. The deletion propagation is handled as per the given deletion propagation value.
+	DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error)
+}
+
+// InterfaceResources is introduced to avoid breaking backwards compatibility for Interface implementers.
+//
+// TODO Helm 4: Remove InterfaceResources and integrate its method(s) into the Interface.
+type InterfaceResources interface {
+	// Get retrieves details of deployed resources.
+	// The first argument is a list of resources to get. The second argument
+	// specifies if related pods should be fetched. For example, the pods being
+	// managed by a deployment.
+	Get(resources ResourceList, related bool) (map[string][]runtime.Object, error)
+
+	// BuildTable creates a resource list from a Reader. This differs from
+	// Interface.Build() in that a table kind is returned. A table is useful
+	// if you want to use a printer to display the information.
+	//
+	// Reader must contain a YAML stream (one or more YAML documents separated
+	// by "\n---\n")
+	//
+	// Validates against OpenAPI schema if validate is true.
+	// TODO Helm 4: Integrate into Build with an argument
+	BuildTable(reader io.Reader, validate bool) (ResourceList, error)
+}
+
+var _ Interface = (*Client)(nil)
+var _ InterfaceExt = (*Client)(nil)
+var _ InterfaceDeletionPropagation = (*Client)(nil)
+var _ InterfaceResources = (*Client)(nil)
diff --git a/vendor/helm.sh/helm/v3/pkg/kube/ready.go b/vendor/helm.sh/helm/v3/pkg/kube/ready.go
new file mode 100644
index 000000000..b2d26ba76
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/kube/ready.go
@@ -0,0 +1,463 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + + deploymentutil "helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util" +) + +// ReadyCheckerOption is a function that configures a ReadyChecker. +type ReadyCheckerOption func(*ReadyChecker) + +// PausedAsReady returns a ReadyCheckerOption that configures a ReadyChecker +// to consider paused resources to be ready. For example a Deployment +// with spec.paused equal to true would be considered ready. +func PausedAsReady(pausedAsReady bool) ReadyCheckerOption { + return func(c *ReadyChecker) { + c.pausedAsReady = pausedAsReady + } +} + +// CheckJobs returns a ReadyCheckerOption that configures a ReadyChecker +// to consider readiness of Job resources. +func CheckJobs(checkJobs bool) ReadyCheckerOption { + return func(c *ReadyChecker) { + c.checkJobs = checkJobs + } +} + +// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can +// be used to override defaults. +func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker { + c := ReadyChecker{ + client: cl, + log: log, + } + if c.log == nil { + c.log = nopLogger + } + for _, opt := range opts { + opt(&c) + } + return c +} + +// ReadyChecker is a type that can check core Kubernetes types for readiness. +type ReadyChecker struct { + client kubernetes.Interface + log func(string, ...interface{}) + checkJobs bool + pausedAsReady bool +} + +// IsReady checks if v is ready. It supports checking readiness for pods, +// deployments, persistent volume claims, services, daemon sets, custom +// resource definitions, stateful sets, replication controllers, jobs (optional), +// and replica sets. All other resource kinds are always considered ready. +// +// IsReady will fetch the latest state of the object from the server prior to +// performing readiness checks, and it will return any error encountered. 
+func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) { + switch value := AsVersioned(v).(type) { + case *corev1.Pod: + pod, err := c.client.CoreV1().Pods(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil || !c.isPodReady(pod) { + return false, err + } + case *batchv1.Job: + if c.checkJobs { + job, err := c.client.BatchV1().Jobs(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + ready, err := c.jobReady(job) + return ready, err + } + case *appsv1.Deployment, *appsv1beta1.Deployment, *appsv1beta2.Deployment, *extensionsv1beta1.Deployment: + currentDeployment, err := c.client.AppsV1().Deployments(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + // If paused deployment will never be ready + if currentDeployment.Spec.Paused { + return c.pausedAsReady, nil + } + // Find RS associated with deployment + newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, c.client.AppsV1()) + if err != nil || newReplicaSet == nil { + return false, err + } + if !c.deploymentReady(newReplicaSet, currentDeployment) { + return false, nil + } + case *corev1.PersistentVolumeClaim: + claim, err := c.client.CoreV1().PersistentVolumeClaims(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.volumeReady(claim) { + return false, nil + } + case *corev1.Service: + svc, err := c.client.CoreV1().Services(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.serviceReady(svc) { + return false, nil + } + case *extensionsv1beta1.DaemonSet, *appsv1.DaemonSet, *appsv1beta2.DaemonSet: + ds, err := c.client.AppsV1().DaemonSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.daemonSetReady(ds) { + return false, nil + } + case *apiextv1beta1.CustomResourceDefinition: + if err := v.Get(); err != nil { + return false, err + } + crd := &apiextv1beta1.CustomResourceDefinition{} + if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil { + return false, err + } + if !c.crdBetaReady(*crd) { + return false, nil + } + case *apiextv1.CustomResourceDefinition: + if err := v.Get(); err != nil { + return false, err + } + crd := &apiextv1.CustomResourceDefinition{} + if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil { + return false, err + } + if !c.crdReady(*crd) { + return false, nil + } + case *appsv1.StatefulSet, *appsv1beta1.StatefulSet, *appsv1beta2.StatefulSet: + sts, err := c.client.AppsV1().StatefulSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.statefulSetReady(sts) { + return false, nil + } + case *corev1.ReplicationController: + rc, err := c.client.CoreV1().ReplicationControllers(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.replicationControllerReady(rc) { + return false, nil + } + ready, err := c.podsReadyForObject(ctx, v.Namespace, value) + if !ready || err != nil { + return false, err + } + case *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet: + rs, err := c.client.AppsV1().ReplicaSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.replicaSetReady(rs) { + return false, nil + } + ready, err := c.podsReadyForObject(ctx, v.Namespace, value) + if !ready || err != nil { + return false, err + } + } + return true, nil +} + 
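Editorial aside (not part of the patch): a rough sketch of how a caller might drive this checker directly, roughly mirroring what the package's unexported waiter does for Client.Wait. The clientset and resource list are assumed to come from the caller (e.g. via Client.Build), and the polling loop, function name, and interval are illustrative, not vendored code.

	package example

	import (
		"context"
		"log"
		"time"

		"k8s.io/client-go/kubernetes"

		"helm.sh/helm/v3/pkg/kube"
	)

	// waitUntilReady polls IsReady for every resource until all report ready
	// or the context expires. A hypothetical helper for illustration only.
	func waitUntilReady(ctx context.Context, cs kubernetes.Interface, resources kube.ResourceList) error {
		checker := kube.NewReadyChecker(cs, log.Printf, kube.PausedAsReady(true), kube.CheckJobs(true))
		for {
			allReady := true
			for _, info := range resources {
				ready, err := checker.IsReady(ctx, info)
				if err != nil {
					return err // e.g. a failed Job surfaces as an error
				}
				if !ready {
					allReady = false
					break
				}
			}
			if allReady {
				return nil
			}
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(2 * time.Second): // arbitrary poll interval
			}
		}
	}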
+func (c *ReadyChecker) podsReadyForObject(ctx context.Context, namespace string, obj runtime.Object) (bool, error) { + pods, err := c.podsforObject(ctx, namespace, obj) + if err != nil { + return false, err + } + for _, pod := range pods { + if !c.isPodReady(&pod) { + return false, nil + } + } + return true, nil +} + +func (c *ReadyChecker) podsforObject(ctx context.Context, namespace string, obj runtime.Object) ([]corev1.Pod, error) { + selector, err := SelectorsForObject(obj) + if err != nil { + return nil, err + } + list, err := getPods(ctx, c.client, namespace, selector.String()) + return list, err +} + +// isPodReady returns true if a pod is ready; false otherwise. +func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool { + for _, c := range pod.Status.Conditions { + if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { + return true + } + } + c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName()) + return false +} + +func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) { + if job.Status.Failed > *job.Spec.BackoffLimit { + c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName()) + // If a job is failed, it can't recover, so throw an error + return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName()) + } + if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions { + c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName()) + return false, nil + } + return true, nil +} + +func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { + // ExternalName Services are external to cluster so helm shouldn't be checking to see if they're 'ready' (i.e. have an IP Set) + if s.Spec.Type == corev1.ServiceTypeExternalName { + return true + } + + // Ensure that the service cluster IP is not empty + if s.Spec.ClusterIP == "" { + c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName()) + return false + } + + // This checks if the service has a LoadBalancer and that balancer has an Ingress defined + if s.Spec.Type == corev1.ServiceTypeLoadBalancer { + // do not wait when at least 1 external IP is set + if len(s.Spec.ExternalIPs) > 0 { + c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs) + return true + } + + if s.Status.LoadBalancer.Ingress == nil { + c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName()) + return false + } + } + + return true +} + +func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { + if v.Status.Phase != corev1.ClaimBound { + c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName()) + return false + } + return true +} + +func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool { + // Verify the replicaset readiness + if !c.replicaSetReady(rs) { + return false + } + // Verify the generation observed by the deployment controller matches the spec generation + if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { + c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation) + return false + } + + expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) + if !(rs.Status.ReadyReplicas >= expectedReady) { + c.log("Deployment is not ready: %s/%s. 
%d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady) + return false + } + return true +} + +func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { + // Verify the generation observed by the daemonSet controller matches the spec generation + if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation { + c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation) + return false + } + + // If the update strategy is not a rolling update, there will be nothing to wait for + if ds.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { + return true + } + + // Make sure all the updated pods have been scheduled + if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { + c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled) + return false + } + maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) + if err != nil { + // If for some reason the value is invalid, set max unavailable to the + // number of desired replicas. This is the same behavior as the + // `MaxUnavailable` function in deploymentutil + maxUnavailable = int(ds.Status.DesiredNumberScheduled) + } + + expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable + if !(int(ds.Status.NumberReady) >= expectedReady) { + c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady) + return false + } + return true +} + +// Because the v1 extensions API is not available on all supported k8s versions +// yet and because Go doesn't support generics, we need to have a duplicate +// function to support the v1beta1 types +func (c *ReadyChecker) crdBetaReady(crd apiextv1beta1.CustomResourceDefinition) bool { + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextv1beta1.Established: + if cond.Status == apiextv1beta1.ConditionTrue { + return true + } + case apiextv1beta1.NamesAccepted: + if cond.Status == apiextv1beta1.ConditionFalse { + // This indicates a naming conflict, but it's probably not the + // job of this function to fail because of that. Instead, + // we treat it as a success, since the process should be able to + // continue. + return true + } + } + } + return false +} + +func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextv1.Established: + if cond.Status == apiextv1.ConditionTrue { + return true + } + case apiextv1.NamesAccepted: + if cond.Status == apiextv1.ConditionFalse { + // This indicates a naming conflict, but it's probably not the + // job of this function to fail because of that. Instead, + // we treat it as a success, since the process should be able to + // continue. + return true + } + } + } + return false +} + +func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { + // Verify the generation observed by the statefulSet controller matches the spec generation + if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { + c.log("StatefulSet is not ready: %s/%s. 
observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation)
+		return false
+	}
+
+	// If the update strategy is not a rolling update, there will be nothing to wait for
+	if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
+		c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type)
+		return true
+	}
+
+	// Dereference all the pointers because StatefulSets like them
+	var partition int
+	// 1 is the default for replicas if not set
+	replicas := 1
+	// For some reason, even if the update strategy is a rolling update, the
+	// actual rollingUpdate field can be nil. If it is, we can safely assume
+	// there is no partition value
+	if sts.Spec.UpdateStrategy.RollingUpdate != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
+		partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition)
+	}
+	if sts.Spec.Replicas != nil {
+		replicas = int(*sts.Spec.Replicas)
+	}
+
+	// Because an update strategy can use partitioning, we need to calculate the
+	// number of updated replicas we should have. For example, if replicas
+	// is set to 3 and the partition is 2, we'd expect only one pod to be
+	// updated
+	expectedReplicas := replicas - partition
+
+	// Make sure all the updated pods have been scheduled
+	if int(sts.Status.UpdatedReplicas) < expectedReplicas {
+		c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas)
+		return false
+	}
+
+	if int(sts.Status.ReadyReplicas) != replicas {
+		c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
+		return false
+	}
+	// This check only makes sense when all partitions are being upgraded; otherwise, during a
+	// partitioned rolling upgrade, this condition will never evaluate to true, leading to an
+	// error.
+	if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
+		c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision)
+		return false
+	}
+
+	c.log("StatefulSet is ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
+	return true
+}
+
+func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
+	// Verify the generation observed by the replicationController controller matches the spec generation
+	if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation {
+		c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation)
+		return false
+	}
+	return true
+}
+
+func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
+	// Verify the generation observed by the replicaSet controller matches the spec generation
+	if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
+		c.log("ReplicaSet is not ready: %s/%s. 
observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation) + return false + } + return true +} + +func getPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) ([]corev1.Pod, error) { + list, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) + return list.Items, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/resource.go b/vendor/helm.sh/helm/v3/pkg/kube/resource.go new file mode 100644 index 000000000..ee8f83a25 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/resource.go @@ -0,0 +1,85 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import "k8s.io/cli-runtime/pkg/resource" + +// ResourceList provides convenience methods for comparing collections of Infos. +type ResourceList []*resource.Info + +// Append adds an Info to the Result. +func (r *ResourceList) Append(val *resource.Info) { + *r = append(*r, val) +} + +// Visit implements resource.Visitor. +func (r ResourceList) Visit(fn resource.VisitorFunc) error { + for _, i := range r { + if err := fn(i, nil); err != nil { + return err + } + } + return nil +} + +// Filter returns a new Result with Infos that satisfy the predicate fn. +func (r ResourceList) Filter(fn func(*resource.Info) bool) ResourceList { + var result ResourceList + for _, i := range r { + if fn(i) { + result.Append(i) + } + } + return result +} + +// Get returns the Info from the result that matches the name and kind. +func (r ResourceList) Get(info *resource.Info) *resource.Info { + for _, i := range r { + if isMatchingInfo(i, info) { + return i + } + } + return nil +} + +// Contains checks to see if an object exists. +func (r ResourceList) Contains(info *resource.Info) bool { + for _, i := range r { + if isMatchingInfo(i, info) { + return true + } + } + return false +} + +// Difference will return a new Result with objects not contained in rs. +func (r ResourceList) Difference(rs ResourceList) ResourceList { + return r.Filter(func(info *resource.Info) bool { + return !rs.Contains(info) + }) +} + +// Intersect will return a new Result with objects contained in both Results. +func (r ResourceList) Intersect(rs ResourceList) ResourceList { + return r.Filter(rs.Contains) +} + +// isMatchingInfo returns true if infos match on Name and GroupVersionKind. +func isMatchingInfo(a, b *resource.Info) bool { + return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.Kind == b.Mapping.GroupVersionKind.Kind +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go b/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go new file mode 100644 index 000000000..46b8680dd --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/resource_policy.go @@ -0,0 +1,27 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +// ResourcePolicyAnno is the annotation name for a resource policy +const ResourcePolicyAnno = "helm.sh/resource-policy" + +// KeepPolicy is the resource policy type for keep +// +// This resource policy type allows resources to skip being deleted +// +// during an uninstallRelease action. +const KeepPolicy = "keep" diff --git a/vendor/helm.sh/helm/v3/pkg/kube/result.go b/vendor/helm.sh/helm/v3/pkg/kube/result.go new file mode 100644 index 000000000..c3e171c2e --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/result.go @@ -0,0 +1,28 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +// Result contains the information of created, updated, and deleted resources +// for various kube API calls along with helper methods for using those +// resources +type Result struct { + Created ResourceList + Updated ResourceList + Deleted ResourceList +} + +// If needed, we can add methods to the Result type for things like diffing diff --git a/vendor/helm.sh/helm/v3/pkg/kube/wait.go b/vendor/helm.sh/helm/v3/pkg/kube/wait.go new file mode 100644 index 000000000..ecdd38940 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/wait.go @@ -0,0 +1,128 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + + "k8s.io/apimachinery/pkg/util/wait" +) + +type waiter struct { + c ReadyChecker + timeout time.Duration + log func(string, ...interface{}) +} + +// waitForResources polls to get the current status of all pods, PVCs, Services and +// Jobs(optional) until all are ready or a timeout is reached +func (w *waiter) waitForResources(created ResourceList) error { + w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout) + + ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + defer cancel() + + return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { + for _, v := range created { + ready, err := w.c.IsReady(ctx, v) + if !ready || err != nil { + return false, err + } + } + return true, nil + }) +} + +// waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached +func (w *waiter) waitForDeletedResources(deleted ResourceList) error { + w.log("beginning wait for %d resources to be deleted with timeout of %v", len(deleted), w.timeout) + + ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + defer cancel() + + return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) { + for _, v := range deleted { + err := v.Get() + if err == nil || !apierrors.IsNotFound(err) { + return false, err + } + } + return true, nil + }) +} + +// SelectorsForObject returns the pod label selector for a given object +// +// Modified version of https://github.com/kubernetes/kubernetes/blob/v1.14.1/pkg/kubectl/polymorphichelpers/helpers.go#L84 +func SelectorsForObject(object runtime.Object) (selector labels.Selector, err error) { + switch t := object.(type) { + case *extensionsv1beta1.ReplicaSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1.ReplicaSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta2.ReplicaSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *corev1.ReplicationController: + selector = labels.SelectorFromSet(t.Spec.Selector) + case *appsv1.StatefulSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta1.StatefulSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta2.StatefulSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *extensionsv1beta1.DaemonSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1.DaemonSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta2.DaemonSet: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *extensionsv1beta1.Deployment: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1.Deployment: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta1.Deployment: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *appsv1beta2.Deployment: + 
selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *batchv1.Job: + selector, err = metav1.LabelSelectorAsSelector(t.Spec.Selector) + case *corev1.Service: + if t.Spec.Selector == nil || len(t.Spec.Selector) == 0 { + return nil, fmt.Errorf("invalid service '%s': Service is defined without a selector", t.Name) + } + selector = labels.SelectorFromSet(t.Spec.Selector) + + default: + return nil, fmt.Errorf("selector for %T not implemented", object) + } + + return selector, errors.Wrap(err, "invalid label selector") +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/lint.go b/vendor/helm.sh/helm/v3/pkg/lint/lint.go new file mode 100644 index 000000000..c0e79f55b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/lint.go @@ -0,0 +1,43 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lint // import "helm.sh/helm/v3/pkg/lint" + +import ( + "path/filepath" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/lint/rules" + "helm.sh/helm/v3/pkg/lint/support" +) + +// All runs all of the available linters on the given base directory. +func All(basedir string, values map[string]interface{}, namespace string, _ bool) support.Linter { + return AllWithKubeVersion(basedir, values, namespace, nil) +} + +// AllWithKubeVersion runs all the available linters on the given base directory, allowing to specify the kubernetes version. +func AllWithKubeVersion(basedir string, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) support.Linter { + // Using abs path to get directory context + chartDir, _ := filepath.Abs(basedir) + + linter := support.Linter{ChartDir: chartDir} + rules.Chartfile(&linter) + rules.ValuesWithOverrides(&linter, values) + rules.TemplatesWithKubeVersion(&linter, values, namespace, kubeVersion) + rules.Dependencies(&linter) + return linter +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go new file mode 100644 index 000000000..70532ad4f --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go @@ -0,0 +1,209 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Masterminds/semver/v3" + "github.com/asaskevich/govalidator" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Chartfile runs a set of linter rules related to Chart.yaml file +func Chartfile(linter *support.Linter) { + chartFileName := "Chart.yaml" + chartPath := filepath.Join(linter.ChartDir, chartFileName) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlNotDirectory(chartPath)) + + chartFile, err := chartutil.LoadChartfile(chartPath) + validChartFile := linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlFormat(err)) + + // Guard clause. Following linter rules require a parsable ChartFile + if !validChartFile { + return + } + + // type check for Chart.yaml . ignoring error as any parse + // errors would already be caught in the above load function + chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile)) + + // Chart metadata + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile)) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersionType(chartFileForTypeCheck)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAppVersionType(chartFileForTypeCheck)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartSources(chartFile)) + linter.RunLinterRule(support.InfoSev, chartFileName, validateChartIconPresence(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile)) +} + +func validateChartVersionType(data map[string]interface{}) error { + return isStringValue(data, "version") +} + +func validateChartAppVersionType(data map[string]interface{}) error { + return isStringValue(data, "appVersion") +} + +func isStringValue(data map[string]interface{}, key string) error { + value, ok := data[key] + if !ok { + return nil + } + valueType := fmt.Sprintf("%T", value) + if valueType != "string" { + return errors.Errorf("%s should be of type string but it's of type %s", key, valueType) + } + return nil +} + +func validateChartYamlNotDirectory(chartPath string) error { + fi, err := os.Stat(chartPath) + + if err == nil && fi.IsDir() { + return errors.New("should be a file, not a directory") + } + return nil +} + +func validateChartYamlFormat(chartFileError error) error { + if chartFileError != nil { + return errors.Errorf("unable to parse YAML\n\t%s", chartFileError.Error()) + } + return nil +} + +func validateChartName(cf *chart.Metadata) error { + if cf.Name == "" { + return errors.New("name is required") + } + return nil +} + +func validateChartAPIVersion(cf *chart.Metadata) error { + if cf.APIVersion == "" { + return errors.New("apiVersion is required. The value must be either \"v1\" or \"v2\"") + } + + if cf.APIVersion != chart.APIVersionV1 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("apiVersion '%s' is not valid. 
The value must be either \"v1\" or \"v2\"", cf.APIVersion) + } + + return nil +} + +func validateChartVersion(cf *chart.Metadata) error { + if cf.Version == "" { + return errors.New("version is required") + } + + version, err := semver.NewVersion(cf.Version) + + if err != nil { + return errors.Errorf("version '%s' is not a valid SemVer", cf.Version) + } + + c, err := semver.NewConstraint(">0.0.0-0") + if err != nil { + return err + } + valid, msg := c.Validate(version) + + if !valid && len(msg) > 0 { + return errors.Errorf("version %v", msg[0]) + } + + return nil +} + +func validateChartMaintainer(cf *chart.Metadata) error { + for _, maintainer := range cf.Maintainers { + if maintainer.Name == "" { + return errors.New("each maintainer requires a name") + } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) { + return errors.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name) + } else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) { + return errors.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name) + } + } + return nil +} + +func validateChartSources(cf *chart.Metadata) error { + for _, source := range cf.Sources { + if source == "" || !govalidator.IsRequestURL(source) { + return errors.Errorf("invalid source URL '%s'", source) + } + } + return nil +} + +func validateChartIconPresence(cf *chart.Metadata) error { + if cf.Icon == "" { + return errors.New("icon is recommended") + } + return nil +} + +func validateChartIconURL(cf *chart.Metadata) error { + if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) { + return errors.Errorf("invalid icon URL '%s'", cf.Icon) + } + return nil +} + +func validateChartDependencies(cf *chart.Metadata) error { + if len(cf.Dependencies) > 0 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("dependencies are not valid in the Chart file with apiVersion '%s'. They are valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2) + } + return nil +} + +func validateChartType(cf *chart.Metadata) error { + if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2) + } + return nil +} + +// loadChartFileForTypeCheck loads the Chart.yaml +// in a generic form of a map[string]interface{}, so that the type +// of the values can be checked +func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + y := make(map[string]interface{}) + err = yaml.Unmarshal(b, &y) + return y, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go new file mode 100644 index 000000000..f1ab1dcad --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go @@ -0,0 +1,103 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Dependencies runs lints against a chart's dependencies +// +// See https://github.com/helm/helm/issues/7910 +func Dependencies(linter *support.Linter) { + c, err := loader.LoadDir(linter.ChartDir) + if !linter.RunLinterRule(support.ErrorSev, "", validateChartFormat(err)) { + return + } + + linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c)) + linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependenciesUnique(c)) + linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c)) +} + +func validateChartFormat(chartError error) error { + if chartError != nil { + return errors.Errorf("unable to load chart\n\t%s", chartError) + } + return nil +} + +func validateDependencyInChartsDir(c *chart.Chart) (err error) { + dependencies := map[string]struct{}{} + missing := []string{} + for _, dep := range c.Dependencies() { + dependencies[dep.Metadata.Name] = struct{}{} + } + for _, dep := range c.Metadata.Dependencies { + if _, ok := dependencies[dep.Name]; !ok { + missing = append(missing, dep.Name) + } + } + if len(missing) > 0 { + err = fmt.Errorf("chart directory is missing these dependencies: %s", strings.Join(missing, ",")) + } + return err +} + +func validateDependencyInMetadata(c *chart.Chart) (err error) { + dependencies := map[string]struct{}{} + missing := []string{} + for _, dep := range c.Metadata.Dependencies { + dependencies[dep.Name] = struct{}{} + } + for _, dep := range c.Dependencies() { + if _, ok := dependencies[dep.Metadata.Name]; !ok { + missing = append(missing, dep.Metadata.Name) + } + } + if len(missing) > 0 { + err = fmt.Errorf("chart metadata is missing these dependencies: %s", strings.Join(missing, ",")) + } + return err +} + +func validateDependenciesUnique(c *chart.Chart) (err error) { + dependencies := map[string]*chart.Dependency{} + shadowing := []string{} + + for _, dep := range c.Metadata.Dependencies { + key := dep.Name + if dep.Alias != "" { + key = dep.Alias + } + if dependencies[key] != nil { + shadowing = append(shadowing, key) + } + dependencies[key] = dep + } + if len(shadowing) > 0 { + err = fmt.Errorf("multiple dependencies with name or alias: %s", strings.Join(shadowing, ",")) + } + return err +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go new file mode 100644 index 000000000..90e7748a4 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go @@ -0,0 +1,106 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/deprecation" + kscheme "k8s.io/client-go/kubernetes/scheme" + + "helm.sh/helm/v3/pkg/chartutil" +) + +var ( + // This should be set in the Makefile based on the version of client-go being imported. + // These constants will be overwritten with LDFLAGS. The version components must be + // strings in order for LDFLAGS to set them. + k8sVersionMajor = "1" + k8sVersionMinor = "20" +) + +// deprecatedAPIError indicates than an API is deprecated in Kubernetes +type deprecatedAPIError struct { + Deprecated string + Message string +} + +func (e deprecatedAPIError) Error() string { + msg := e.Message + return msg +} + +func validateNoDeprecations(resource *K8sYamlStruct, kubeVersion *chartutil.KubeVersion) error { + // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation + if resource.APIVersion == "" { + return nil + } + if resource.Kind == "" { + return nil + } + + majorVersion := k8sVersionMajor + minorVersion := k8sVersionMinor + + if kubeVersion != nil { + majorVersion = kubeVersion.Major + minorVersion = kubeVersion.Minor + } + + runtimeObject, err := resourceToRuntimeObject(resource) + if err != nil { + // do not error for non-kubernetes resources + if runtime.IsNotRegisteredError(err) { + return nil + } + return err + } + + maj, err := strconv.Atoi(majorVersion) + if err != nil { + return err + } + min, err := strconv.Atoi(minorVersion) + if err != nil { + return err + } + + if !deprecation.IsDeprecated(runtimeObject, maj, min) { + return nil + } + gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind) + return deprecatedAPIError{ + Deprecated: gvk, + Message: deprecation.WarningMessage(runtimeObject), + } +} + +func resourceToRuntimeObject(resource *K8sYamlStruct) (runtime.Object, error) { + scheme := runtime.NewScheme() + kscheme.AddToScheme(scheme) + + gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind) + out, err := scheme.New(gvk) + if err != nil { + return nil, err + } + out.GetObjectKind().SetGroupVersionKind(gvk) + return out, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go new file mode 100644 index 000000000..aa1dbb701 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go @@ -0,0 +1,351 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/validation" + apipath "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" + + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/engine" + "helm.sh/helm/v3/pkg/lint/support" +) + +var ( + crdHookSearch = regexp.MustCompile(`"?helm\.sh/hook"?:\s+crd-install`) + releaseTimeSearch = regexp.MustCompile(`\.Release\.Time`) +) + +// Templates lints the templates in the Linter. +func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) { + TemplatesWithKubeVersion(linter, values, namespace, nil) +} + +// TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version. +func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) { + fpath := "templates/" + templatesPath := filepath.Join(linter.ChartDir, fpath) + + templatesDirExist := linter.RunLinterRule(support.WarningSev, fpath, validateTemplatesDir(templatesPath)) + + // Templates directory is optional for now + if !templatesDirExist { + return + } + + // Load chart and parse templates + chart, err := loader.Load(linter.ChartDir) + + chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err) + + if !chartLoaded { + return + } + + options := chartutil.ReleaseOptions{ + Name: "test-release", + Namespace: namespace, + } + + caps := chartutil.DefaultCapabilities.Copy() + if kubeVersion != nil { + caps.KubeVersion = *kubeVersion + } + + // lint ignores import-values + // See https://github.com/helm/helm/issues/9658 + if err := chartutil.ProcessDependenciesWithMerge(chart, values); err != nil { + return + } + + cvals, err := chartutil.CoalesceValues(chart, values) + if err != nil { + return + } + + valuesToRender, err := chartutil.ToRenderValues(chart, cvals, options, caps) + if err != nil { + linter.RunLinterRule(support.ErrorSev, fpath, err) + return + } + var e engine.Engine + e.LintMode = true + renderedContentMap, err := e.Render(chart, valuesToRender) + + renderOk := linter.RunLinterRule(support.ErrorSev, fpath, err) + + if !renderOk { + return + } + + /* Iterate over all the templates to check: + - It is a .yaml file + - All the values in the template file is defined + - {{}} include | quote + - Generated content is a valid Yaml file + - Metadata.Namespace is not set + */ + for _, template := range chart.Templates { + fileName, data := template.Name, template.Data + fpath = fileName + + linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName)) + // These are v3 specific checks to make sure and warn people if their + // chart is not compatible with v3 + linter.RunLinterRule(support.WarningSev, fpath, validateNoCRDHooks(data)) + linter.RunLinterRule(support.ErrorSev, fpath, validateNoReleaseTime(data)) + + // We only apply the following lint rules to yaml files + if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" { + continue + } + + // NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1463 + // Check that all the templates have a matching value + // linter.RunLinterRule(support.WarningSev, fpath, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate)) + + // NOTE: disabled for now, Refs 
https://github.com/helm/helm/issues/1037 + // linter.RunLinterRule(support.WarningSev, fpath, validateQuotes(string(preExecutedTemplate))) + + renderedContent := renderedContentMap[path.Join(chart.Name(), fileName)] + if strings.TrimSpace(renderedContent) != "" { + linter.RunLinterRule(support.WarningSev, fpath, validateTopIndentLevel(renderedContent)) + + decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(renderedContent), 4096) + + // Lint all resources if the file contains multiple documents separated by --- + for { + // Even though K8sYamlStruct only defines a few fields, an error in any other + // key will be raised as well + var yamlStruct *K8sYamlStruct + + err := decoder.Decode(&yamlStruct) + if err == io.EOF { + break + } + + // If YAML linting fails here, it will always fail in the next block as well, so we should return here. + // fix https://github.com/helm/helm/issues/11391 + if !linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) { + return + } + if yamlStruct != nil { + // NOTE: set to warnings to allow users to support out-of-date kubernetes + // Refs https://github.com/helm/helm/issues/8596 + linter.RunLinterRule(support.WarningSev, fpath, validateMetadataName(yamlStruct)) + linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct, kubeVersion)) + + linter.RunLinterRule(support.ErrorSev, fpath, validateMatchSelector(yamlStruct, renderedContent)) + linter.RunLinterRule(support.ErrorSev, fpath, validateListAnnotations(yamlStruct, renderedContent)) + } + } + } + } +} + +// validateTopIndentLevel checks that the content does not start with an indent level > 0. +// +// This error can occur when a template accidentally inserts space. It can cause +// unpredictable errors depending on whether the text is normalized before being passed +// into the YAML parser. So we trap it here. +// +// See https://github.com/helm/helm/issues/8467 +func validateTopIndentLevel(content string) error { + // Read lines until we get to a non-empty one + scanner := bufio.NewScanner(bytes.NewBufferString(content)) + for scanner.Scan() { + line := scanner.Text() + // If line is empty, skip + if strings.TrimSpace(line) == "" { + continue + } + // If it starts with one or more spaces, this is an error + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + return fmt.Errorf("document starts with an illegal indent: %q, which may cause parsing problems", line) + } + // Any other condition passes. + return nil + } + return scanner.Err() +} + +// Validation functions +func validateTemplatesDir(templatesPath string) error { + if fi, err := os.Stat(templatesPath); err == nil { + if !fi.IsDir() { + return errors.New("not a directory") + } + } + return nil +} + +func validateAllowedExtension(fileName string) error { + ext := filepath.Ext(fileName) + validExtensions := []string{".yaml", ".yml", ".tpl", ".txt"} + + for _, b := range validExtensions { + if b == ext { + return nil + } + } + + return errors.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext) +} + +func validateYamlContent(err error) error { + return errors.Wrap(err, "unable to parse YAML") +} + +// validateMetadataName uses the correct validation function for the object +// Kind, or if not set, defaults to the standard definition of a subdomain in +// DNS (RFC 1123), used by most resources. 
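+//
+// For example (a minimal sketch), most kinds are validated with
+// validation.NameIsDNSSubdomain, so:
+//
+//	validation.NameIsDNSSubdomain("My_App", false) // non-empty: invalid name
+//	validation.NameIsDNSSubdomain("my-app", false) // empty: valid name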
+func validateMetadataName(obj *K8sYamlStruct) error { + fn := validateMetadataNameFunc(obj) + allErrs := field.ErrorList{} + for _, msg := range fn(obj.Metadata.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg)) + } + if len(allErrs) > 0 { + return errors.Wrapf(allErrs.ToAggregate(), "object name does not conform to Kubernetes naming requirements: %q", obj.Metadata.Name) + } + return nil +} + +// validateMetadataNameFunc will return a name validation function for the +// object kind, if defined below. +// +// Rules should match those set in the various api validations: +// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/core/validation/validation.go#L205-L274 +// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/apps/validation/validation.go#L39 +// ... +// +// Implementing here to avoid importing k/k. +// +// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object +// kinds that don't have special requirements, so is the most likely to work if +// new kinds are added. +func validateMetadataNameFunc(obj *K8sYamlStruct) validation.ValidateNameFunc { + switch strings.ToLower(obj.Kind) { + case "pod", "node", "secret", "endpoints", "resourcequota", // core + "controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps + "autoscaler", // autoscaler + "cronjob", "job", // batch + "lease", // coordination + "endpointslice", // discovery + "networkpolicy", "ingress", // networking + "podsecuritypolicy", // policy + "priorityclass", // scheduling + "podpreset", // settings + "storageclass", "volumeattachment", "csinode": // storage + return validation.NameIsDNSSubdomain + case "service": + return validation.NameIsDNS1035Label + case "namespace": + return validation.ValidateNamespaceName + case "serviceaccount": + return validation.ValidateServiceAccountName + case "certificatesigningrequest": + // No validation. + // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/certificates/validation/validation.go#L137-L140 + return func(name string, prefix bool) []string { return nil } + case "role", "clusterrole", "rolebinding", "clusterrolebinding": + // https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34 + return func(name string, prefix bool) []string { + return apipath.IsValidPathSegmentName(name) + } + default: + return validation.NameIsDNSSubdomain + } +} + +func validateNoCRDHooks(manifest []byte) error { + if crdHookSearch.Match(manifest) { + return errors.New("manifest is a crd-install hook. This hook is no longer supported in v3 and all CRDs should also exist the crds/ directory at the top level of the chart") + } + return nil +} + +func validateNoReleaseTime(manifest []byte) error { + if releaseTimeSearch.Match(manifest) { + return errors.New(".Release.Time has been removed in v3, please replace with the `now` function in your templates") + } + return nil +} + +// validateMatchSelector ensures that template specs have a selector declared. 
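+// For example (illustrative), a rendered Deployment whose spec defines
+// neither matchLabels nor matchExpressions fails this rule.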
+// See https://github.com/helm/helm/issues/1990 +func validateMatchSelector(yamlStruct *K8sYamlStruct, manifest string) error { + switch yamlStruct.Kind { + case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet": + // verify that matchLabels or matchExpressions is present + if !(strings.Contains(manifest, "matchLabels") || strings.Contains(manifest, "matchExpressions")) { + return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name) + } + } + return nil +} +func validateListAnnotations(yamlStruct *K8sYamlStruct, manifest string) error { + if yamlStruct.Kind == "List" { + m := struct { + Items []struct { + Metadata struct { + Annotations map[string]string + } + } + }{} + + if err := yaml.Unmarshal([]byte(manifest), &m); err != nil { + return validateYamlContent(err) + } + + for _, i := range m.Items { + if _, ok := i.Metadata.Annotations["helm.sh/resource-policy"]; ok { + return errors.New("Annotation 'helm.sh/resource-policy' within List objects are ignored") + } + } + } + return nil +} + +// K8sYamlStruct stubs a Kubernetes YAML file. +// +// DEPRECATED: In Helm 4, this will be made a private type, as it is for use only within +// the rules package. +type K8sYamlStruct struct { + APIVersion string `json:"apiVersion"` + Kind string + Metadata k8sYamlMetadata +} + +type k8sYamlMetadata struct { + Namespace string + Name string +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go new file mode 100644 index 000000000..538d8381b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go @@ -0,0 +1,86 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "os" + "path/filepath" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Values lints a chart's values.yaml file. +// +// This function is deprecated and will be removed in Helm 4. +func Values(linter *support.Linter) { + ValuesWithOverrides(linter, map[string]interface{}{}) +} + +// ValuesWithOverrides tests the values.yaml file. +// +// If a schema is present in the chart, values are tested against that. Otherwise, +// they are only tested for well-formedness. +// +// If additional values are supplied, they are coalesced into the values in values.yaml. 
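+//
+// Illustrative usage (a minimal sketch; the chart path is an assumption):
+//
+//	linter := support.Linter{ChartDir: "testdata/mychart"}
+//	ValuesWithOverrides(&linter, map[string]interface{}{"replicaCount": 2})
+//	// linter.Messages now holds any findings for values.yaml.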
+func ValuesWithOverrides(linter *support.Linter, values map[string]interface{}) { + file := "values.yaml" + vf := filepath.Join(linter.ChartDir, file) + fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf)) + + if !fileExists { + return + } + + linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, values)) +} + +func validateValuesFileExistence(valuesPath string) error { + _, err := os.Stat(valuesPath) + if err != nil { + return errors.Errorf("file does not exist") + } + return nil +} + +func validateValuesFile(valuesPath string, overrides map[string]interface{}) error { + values, err := chartutil.ReadValuesFile(valuesPath) + if err != nil { + return errors.Wrap(err, "unable to parse YAML") + } + + // Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top + // level values against the top-level expectations. Subchart values are not linted. + // We could change that. For now, though, we retain that strategy, and thus can + // coalesce tables (like reuse-values does) instead of doing the full chart + // CoalesceValues + coalescedValues := chartutil.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides) + coalescedValues = chartutil.CoalesceTables(coalescedValues, values) + + ext := filepath.Ext(valuesPath) + schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json" + schema, err := os.ReadFile(schemaPath) + if len(schema) == 0 { + return nil + } + if err != nil { + return err + } + return chartutil.ValidateAgainstSingleSchema(coalescedValues, schema) +} diff --git a/vendor/k8s.io/helm/pkg/chartutil/transform.go b/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go similarity index 66% rename from vendor/k8s.io/helm/pkg/chartutil/transform.go rename to vendor/helm.sh/helm/v3/pkg/lint/support/doc.go index fbee8e690..bffefe8ff 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/transform.go +++ b/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go @@ -14,12 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package chartutil - -import "strings" +/* +Package support contains tools for linting charts. -// Transform performs a string replacement of the specified source for -// a given key with the replacement string -func Transform(src string, key string, replacement string) []byte { - return []byte(strings.Replace(src, key, replacement, -1)) -} +Linting is the process of testing charts for errors or warnings regarding +formatting, compilation, or standards compliance. +*/ +package support // import "helm.sh/helm/v3/pkg/lint/support" diff --git a/vendor/helm.sh/helm/v3/pkg/lint/support/message.go b/vendor/helm.sh/helm/v3/pkg/lint/support/message.go new file mode 100644 index 000000000..5efbc7a61 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/support/message.go @@ -0,0 +1,76 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package support + +import "fmt" + +// Severity indicates the severity of a Message. 
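+//
+// For example (illustrative), a missing values.yaml is reported at InfoSev,
+// while a template that fails to render is reported at ErrorSev.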
+const ( + // UnknownSev indicates that the severity of the error is unknown, and should not stop processing. + UnknownSev = iota + // InfoSev indicates information, for example missing values.yaml file + InfoSev + // WarningSev indicates that something does not meet code standards, but will likely function. + WarningSev + // ErrorSev indicates that something will not likely function. + ErrorSev +) + +// sev matches the *Sev states. +var sev = []string{"UNKNOWN", "INFO", "WARNING", "ERROR"} + +// Linter encapsulates a linting run of a particular chart. +type Linter struct { + Messages []Message + // The highest severity of all the failing lint rules + HighestSeverity int + ChartDir string +} + +// Message describes an error encountered while linting. +type Message struct { + // Severity is one of the *Sev constants + Severity int + Path string + Err error +} + +func (m Message) Error() string { + return fmt.Sprintf("[%s] %s: %s", sev[m.Severity], m.Path, m.Err.Error()) +} + +// NewMessage creates a new Message struct +func NewMessage(severity int, path string, err error) Message { + return Message{Severity: severity, Path: path, Err: err} +} + +// RunLinterRule returns true if the validation passed +func (l *Linter) RunLinterRule(severity int, path string, err error) bool { + // severity is out of bound + if severity < 0 || severity >= len(sev) { + return false + } + + if err != nil { + l.Messages = append(l.Messages, NewMessage(severity, path, err)) + + if severity > l.HighestSeverity { + l.HighestSeverity = severity + } + } + return err == nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go b/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go new file mode 100644 index 000000000..e3481515f --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go @@ -0,0 +1,29 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugin // import "helm.sh/helm/v3/pkg/plugin" + +// Types of hooks +const ( + // Install is executed after the plugin is added. + Install = "install" + // Delete is executed after the plugin is removed. + Delete = "delete" + // Update is executed after the plugin is updated. + Update = "update" +) + +// Hooks is a map of events to commands. +type Hooks map[string]string diff --git a/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go new file mode 100644 index 000000000..fb3bc5215 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go @@ -0,0 +1,284 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package plugin // import "helm.sh/helm/v3/pkg/plugin" + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "unicode" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/cli" +) + +const PluginFileName = "plugin.yaml" + +// Downloaders represents the plugins capability if it can retrieve +// charts from special sources +type Downloaders struct { + // Protocols are the list of schemes from the charts URL. + Protocols []string `json:"protocols"` + // Command is the executable path with which the plugin performs + // the actual download for the corresponding Protocols + Command string `json:"command"` +} + +// PlatformCommand represents a command for a particular operating system and architecture +type PlatformCommand struct { + OperatingSystem string `json:"os"` + Architecture string `json:"arch"` + Command string `json:"command"` +} + +// Metadata describes a plugin. +// +// This is the plugin equivalent of a chart.Metadata. +type Metadata struct { + // Name is the name of the plugin + Name string `json:"name"` + + // Version is a SemVer 2 version of the plugin. + Version string `json:"version"` + + // Usage is the single-line usage text shown in help + Usage string `json:"usage"` + + // Description is a long description shown in places like `helm help` + Description string `json:"description"` + + // Command is the command, as a single string. + // + // The command will be passed through environment expansion, so env vars can + // be present in this command. Unless IgnoreFlags is set, this will + // also merge the flags passed from Helm. + // + // Note that command is not executed in a shell. To do so, we suggest + // pointing the command to a shell script. + // + // The following rules will apply to processing commands: + // - If platformCommand is present, it will be searched first + // - If both OS and Arch match the current platform, search will stop and the command will be executed + // - If OS matches and there is no more specific match, the command will be executed + // - If no OS/Arch match is found, the default command will be executed + // - If no command is present and no matches are found in platformCommand, Helm will exit with an error + PlatformCommand []PlatformCommand `json:"platformCommand"` + Command string `json:"command"` + + // IgnoreFlags ignores any flags passed in from Helm + // + // For example, if the plugin is invoked as `helm --debug myplugin`, if this + // is false, `--debug` will be appended to `--command`. If this is true, + // the `--debug` flag will be discarded. + IgnoreFlags bool `json:"ignoreFlags"` + + // Hooks are commands that will run on events. + Hooks Hooks + + // Downloaders field is used if the plugin supply downloader mechanism + // for special protocols. + Downloaders []Downloaders `json:"downloaders"` + + // UseTunnelDeprecated indicates that this command needs a tunnel. + // Setting this will cause a number of side effects, such as the + // automatic setting of HELM_HOST. + // DEPRECATED and unused, but retained for backwards compatibility with Helm 2 plugins. Remove in Helm 4 + UseTunnelDeprecated bool `json:"useTunnel,omitempty"` +} + +// Plugin represents a plugin. +type Plugin struct { + // Metadata is a parsed representation of a plugin.yaml + Metadata *Metadata + // Dir is the string path to the directory that holds the plugin. 
+	Dir string
+}
+
+// getPlatformCommand returns the command matching the current platform,
+// preferring an exact OS/Arch match over an OS-only match.
+//
+// The following rules will apply to processing the Plugin.PlatformCommand.Command:
+// - If both OS and Arch match the current platform, search will stop and the command will be prepared for execution
+// - If OS matches and there is no more specific match, the command will be prepared for execution
+// - If no OS/Arch match is found, return nil
+func getPlatformCommand(cmds []PlatformCommand) []string {
+	var command []string
+	eq := strings.EqualFold
+	for _, c := range cmds {
+		if eq(c.OperatingSystem, runtime.GOOS) {
+			command = strings.Split(c.Command, " ")
+		}
+		if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) {
+			return strings.Split(c.Command, " ")
+		}
+	}
+	return command
+}
+
+// PrepareCommand takes a Plugin.PlatformCommand.Command and a Plugin.Command
+// and applies the following processing:
+// - If platformCommand is present, it will be searched first
+// - If both OS and Arch match the current platform, search will stop and the command will be prepared for execution
+// - If OS matches and there is no more specific match, the command will be prepared for execution
+// - If no OS/Arch match is found, the default command will be prepared for execution
+// - If no command is present and no matches are found in platformCommand, an error is returned
+//
+// It merges extraArgs into any arguments supplied in the plugin. It
+// returns the name of the command and an args array.
+//
+// The result is suitable to pass to exec.Command.
+func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) {
+	var parts []string
+	platCmdLen := len(p.Metadata.PlatformCommand)
+	if platCmdLen > 0 {
+		parts = getPlatformCommand(p.Metadata.PlatformCommand)
+	}
+	if platCmdLen == 0 || parts == nil {
+		parts = strings.Split(p.Metadata.Command, " ")
+	}
+	if len(parts) == 0 || parts[0] == "" {
+		return "", nil, fmt.Errorf("no plugin command is applicable")
+	}
+
+	main := os.ExpandEnv(parts[0])
+	baseArgs := []string{}
+	if len(parts) > 1 {
+		for _, cmdpart := range parts[1:] {
+			cmdexp := os.ExpandEnv(cmdpart)
+			baseArgs = append(baseArgs, cmdexp)
+		}
+	}
+	if !p.Metadata.IgnoreFlags {
+		baseArgs = append(baseArgs, extraArgs...)
+	}
+	return main, baseArgs, nil
+}
+
+// validPluginName is a regular expression that validates plugin names.
+//
+// Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, _ and -.
+var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$")
+
+// validatePluginData validates a plugin's YAML data.
+func validatePluginData(plug *Plugin, filepath string) error {
+	if !validPluginName.MatchString(plug.Metadata.Name) {
+		return fmt.Errorf("invalid plugin name at %q", filepath)
+	}
+	plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage)
+
+	// We could also validate SemVer, executable, and other fields should we so choose.
+	return nil
+}
+
+// sanitizeString normalizes spaces and removes non-printable characters.
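+//
+// For example (illustrative values), sanitizeString("a\tb\x07c") returns
+// "a bc": the tab is normalized to a single space and the non-printable
+// BEL byte is dropped.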
+func sanitizeString(str string) string { + return strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return ' ' + } + if unicode.IsPrint(r) { + return r + } + return -1 + }, str) +} + +func detectDuplicates(plugs []*Plugin) error { + names := map[string]string{} + + for _, plug := range plugs { + if oldpath, ok := names[plug.Metadata.Name]; ok { + return fmt.Errorf( + "two plugins claim the name %q at %q and %q", + plug.Metadata.Name, + oldpath, + plug.Dir, + ) + } + names[plug.Metadata.Name] = plug.Dir + } + + return nil +} + +// LoadDir loads a plugin from the given directory. +func LoadDir(dirname string) (*Plugin, error) { + pluginfile := filepath.Join(dirname, PluginFileName) + data, err := os.ReadFile(pluginfile) + if err != nil { + return nil, errors.Wrapf(err, "failed to read plugin at %q", pluginfile) + } + + plug := &Plugin{Dir: dirname} + if err := yaml.UnmarshalStrict(data, &plug.Metadata); err != nil { + return nil, errors.Wrapf(err, "failed to load plugin at %q", pluginfile) + } + return plug, validatePluginData(plug, pluginfile) +} + +// LoadAll loads all plugins found beneath the base directory. +// +// This scans only one directory level. +func LoadAll(basedir string) ([]*Plugin, error) { + plugins := []*Plugin{} + // We want basedir/*/plugin.yaml + scanpath := filepath.Join(basedir, "*", PluginFileName) + matches, err := filepath.Glob(scanpath) + if err != nil { + return plugins, errors.Wrapf(err, "failed to find plugins in %q", scanpath) + } + + if matches == nil { + return plugins, nil + } + + for _, yaml := range matches { + dir := filepath.Dir(yaml) + p, err := LoadDir(dir) + if err != nil { + return plugins, err + } + plugins = append(plugins, p) + } + return plugins, detectDuplicates(plugins) +} + +// FindPlugins returns a list of YAML files that describe plugins. +func FindPlugins(plugdirs string) ([]*Plugin, error) { + found := []*Plugin{} + // Let's get all UNIXy and allow path separators + for _, p := range filepath.SplitList(plugdirs) { + matches, err := LoadAll(p) + if err != nil { + return matches, err + } + found = append(found, matches...) + } + return found, nil +} + +// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because +// the plugin subsystem itself needs access to the environment variables +// created here. +func SetupPluginEnv(settings *cli.EnvSettings, name, base string) { + env := settings.EnvVars() + env["HELM_PLUGIN_NAME"] = name + env["HELM_PLUGIN_DIR"] = base + for key, val := range env { + os.Setenv(key, val) + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/postrender/exec.go b/vendor/helm.sh/helm/v3/pkg/postrender/exec.go new file mode 100644 index 000000000..167e737d6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/postrender/exec.go @@ -0,0 +1,109 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package postrender + +import ( + "bytes" + "io" + "os/exec" + "path/filepath" + + "github.com/pkg/errors" +) + +type execRender struct { + binaryPath string + args []string +} + +// NewExec returns a PostRenderer implementation that calls the provided binary. +// It returns an error if the binary cannot be found. If the path does not +// contain any separators, it will search in $PATH, otherwise it will resolve +// any relative paths to a fully qualified path +func NewExec(binaryPath string, args ...string) (PostRenderer, error) { + fullPath, err := getFullPath(binaryPath) + if err != nil { + return nil, err + } + return &execRender{fullPath, args}, nil +} + +// Run the configured binary for the post render +func (p *execRender) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) { + cmd := exec.Command(p.binaryPath, p.args...) + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + + var postRendered = &bytes.Buffer{} + var stderr = &bytes.Buffer{} + cmd.Stdout = postRendered + cmd.Stderr = stderr + + go func() { + defer stdin.Close() + io.Copy(stdin, renderedManifests) + }() + err = cmd.Run() + if err != nil { + return nil, errors.Wrapf(err, "error while running command %s. error output:\n%s", p.binaryPath, stderr.String()) + } + + return postRendered, nil +} + +// getFullPath returns the full filepath to the binary to execute. If the path +// does not contain any separators, it will search in $PATH, otherwise it will +// resolve any relative paths to a fully qualified path +func getFullPath(binaryPath string) (string, error) { + // NOTE(thomastaylor312): I am leaving this code commented out here. During + // the implementation of post-render, it was brought up that if we are + // relying on plugins, we should actually use the plugin system so it can + // properly handle multiple OSs. This will be a feature add in the future, + // so I left this code for reference. It can be deleted or reused once the + // feature is implemented + + // Manually check the plugin dir first + // if !strings.Contains(binaryPath, string(filepath.Separator)) { + // // First check the plugin dir + // pluginDir := helmpath.DataPath("plugins") // Default location + // // If location for plugins is explicitly set, check there + // if v, ok := os.LookupEnv("HELM_PLUGINS"); ok { + // pluginDir = v + // } + // // The plugins variable can actually contain multiple paths, so loop through those + // for _, p := range filepath.SplitList(pluginDir) { + // _, err := os.Stat(filepath.Join(p, binaryPath)) + // if err != nil && !os.IsNotExist(err) { + // return "", err + // } else if err == nil { + // binaryPath = filepath.Join(p, binaryPath) + // break + // } + // } + // } + + // Now check for the binary using the given path or check if it exists in + // the path and is executable + checkedPath, err := exec.LookPath(binaryPath) + if err != nil { + return "", errors.Wrapf(err, "unable to find binary at %s", binaryPath) + } + + return filepath.Abs(checkedPath) +} diff --git a/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go b/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go new file mode 100644 index 000000000..3af384290 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go @@ -0,0 +1,29 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package postrender contains an interface that can be implemented for custom
+// post-renderers and an exec implementation that can be used for arbitrary
+// binaries and scripts.
+package postrender
+
+import "bytes"
+
+// PostRenderer is the interface implemented by custom post-renderers.
+type PostRenderer interface {
+	// Run expects a single buffer filled with Helm rendered manifests. It
+	// expects the modified results to be returned on a separate buffer or an
+	// error if there was an issue or failure while running the post render step.
+	Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/provenance/doc.go b/vendor/helm.sh/helm/v3/pkg/provenance/doc.go
new file mode 100644
index 000000000..0c7ae0618
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/provenance/doc.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package provenance provides tools for establishing the authenticity of a chart.
+
+In Helm, provenance is established via several factors. The primary factor is the
+cryptographic signature of a chart. Chart authors may sign charts, which in turn
+provide the necessary metadata to ensure the integrity of the chart file, the
+Chart.yaml, and the referenced Docker images.
+
+A provenance file is clear-signed. This provides cryptographic verification that
+a particular block of information (Chart.yaml, archive file, images) has not
+been tampered with or altered. To learn more, read the GnuPG documentation on
+clear signatures:
+https://www.gnupg.org/gph/en/manual/x135.html
+
+The cryptography used by Helm should be compatible with OpenPGP. For example,
+you should be able to verify a signature by importing the desired public key
+and using `gpg --verify`, `keybase pgp verify`, or similar:
+
+	$ gpg --verify some.sig
+	gpg: Signature made Mon Jul 25 17:23:44 2016 MDT using RSA key ID 1FC18762
+	gpg: Good signature from "Helm Testing (This key should only be used for testing. DO NOT TRUST.) " [ultimate]
+*/
+package provenance // import "helm.sh/helm/v3/pkg/provenance"
diff --git a/vendor/helm.sh/helm/v3/pkg/provenance/sign.go b/vendor/helm.sh/helm/v3/pkg/provenance/sign.go
new file mode 100644
index 000000000..7f89ef3f5
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/provenance/sign.go
@@ -0,0 +1,427 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provenance + +import ( + "bytes" + "crypto" + "encoding/hex" + "io" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "golang.org/x/crypto/openpgp" //nolint + "golang.org/x/crypto/openpgp/clearsign" //nolint + "golang.org/x/crypto/openpgp/packet" //nolint + "sigs.k8s.io/yaml" + + hapi "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" +) + +var defaultPGPConfig = packet.Config{ + DefaultHash: crypto.SHA512, +} + +// SumCollection represents a collection of file and image checksums. +// +// Files are of the form: +// +// FILENAME: "sha256:SUM" +// +// Images are of the form: +// +// "IMAGE:TAG": "sha256:SUM" +// +// Docker optionally supports sha512, and if this is the case, the hash marker +// will be 'sha512' instead of 'sha256'. +type SumCollection struct { + Files map[string]string `json:"files"` + Images map[string]string `json:"images,omitempty"` +} + +// Verification contains information about a verification operation. +type Verification struct { + // SignedBy contains the entity that signed a chart. + SignedBy *openpgp.Entity + // FileHash is the hash, prepended with the scheme, for the file that was verified. + FileHash string + // FileName is the name of the file that FileHash verifies. + FileName string +} + +// Signatory signs things. +// +// Signatories can be constructed from a PGP private key file using NewFromFiles +// or they can be constructed manually by setting the Entity to a valid +// PGP entity. +// +// The same Signatory can be used to sign or validate multiple charts. +type Signatory struct { + // The signatory for this instance of Helm. This is used for signing. + Entity *openpgp.Entity + // The keyring for this instance of Helm. This is used for verification. + KeyRing openpgp.EntityList +} + +// NewFromFiles constructs a new Signatory from the PGP key in the given filename. +// +// This will emit an error if it cannot find a valid GPG keyfile (entity) at the +// given location. +// +// Note that the keyfile may have just a public key, just a private key, or +// both. The Signatory methods may have different requirements of the keys. For +// example, ClearSign must have a valid `openpgp.Entity.PrivateKey` before it +// can sign something. +func NewFromFiles(keyfile, keyringfile string) (*Signatory, error) { + e, err := loadKey(keyfile) + if err != nil { + return nil, err + } + + ring, err := loadKeyRing(keyringfile) + if err != nil { + return nil, err + } + + return &Signatory{ + Entity: e, + KeyRing: ring, + }, nil +} + +// NewFromKeyring reads a keyring file and creates a Signatory. +// +// If id is not the empty string, this will also try to find an Entity in the +// keyring whose name matches, and set that as the signing entity. It will return +// an error if the id is not empty and also not found. +func NewFromKeyring(keyringfile, id string) (*Signatory, error) { + ring, err := loadKeyRing(keyringfile) + if err != nil { + return nil, err + } + + s := &Signatory{KeyRing: ring} + + // If the ID is empty, we can return now. 
+ if id == "" { + return s, nil + } + + // We're gonna go all GnuPG on this and look for a string that _contains_. If + // two or more keys contain the string and none are a direct match, we error + // out. + var candidate *openpgp.Entity + vague := false + for _, e := range ring { + for n := range e.Identities { + if n == id { + s.Entity = e + return s, nil + } + if strings.Contains(n, id) { + if candidate != nil { + vague = true + } + candidate = e + } + } + } + if vague { + return s, errors.Errorf("more than one key contain the id %q", id) + } + + s.Entity = candidate + return s, nil +} + +// PassphraseFetcher returns a passphrase for decrypting keys. +// +// This is used as a callback to read a passphrase from some other location. The +// given name is the Name field on the key, typically of the form: +// +// USER_NAME (COMMENT) +type PassphraseFetcher func(name string) ([]byte, error) + +// DecryptKey decrypts a private key in the Signatory. +// +// If the key is not encrypted, this will return without error. +// +// If the key does not exist, this will return an error. +// +// If the key exists, but cannot be unlocked with the passphrase returned by +// the PassphraseFetcher, this will return an error. +// +// If the key is successfully unlocked, it will return nil. +func (s *Signatory) DecryptKey(fn PassphraseFetcher) error { + if s.Entity == nil { + return errors.New("private key not found") + } else if s.Entity.PrivateKey == nil { + return errors.New("provided key is not a private key. Try providing a keyring with secret keys") + } + + // Nothing else to do if key is not encrypted. + if !s.Entity.PrivateKey.Encrypted { + return nil + } + + fname := "Unknown" + for i := range s.Entity.Identities { + if i != "" { + fname = i + break + } + } + + p, err := fn(fname) + if err != nil { + return err + } + + return s.Entity.PrivateKey.Decrypt(p) +} + +// ClearSign signs a chart with the given key. +// +// This takes the path to a chart archive file and a key, and it returns a clear signature. +// +// The Signatory must have a valid Entity.PrivateKey for this to work. If it does +// not, an error will be returned. +func (s *Signatory) ClearSign(chartpath string) (string, error) { + if s.Entity == nil { + return "", errors.New("private key not found") + } else if s.Entity.PrivateKey == nil { + return "", errors.New("provided key is not a private key. Try providing a keyring with secret keys") + } + + if fi, err := os.Stat(chartpath); err != nil { + return "", err + } else if fi.IsDir() { + return "", errors.New("cannot sign a directory") + } + + out := bytes.NewBuffer(nil) + + b, err := messageBlock(chartpath) + if err != nil { + return "", err + } + + // Sign the buffer + w, err := clearsign.Encode(out, s.Entity.PrivateKey, &defaultPGPConfig) + if err != nil { + return "", err + } + + _, err = io.Copy(w, b) + + if err != nil { + // NB: We intentionally don't call `w.Close()` here! `w.Close()` is the method which + // actually does the PGP signing, and therefore is the part which uses the private key. + // In other words, if we call Close here, there's a risk that there's an attempt to use the + // private key to sign garbage data (since we know that io.Copy failed, `w` won't contain + // anything useful). 
+ return "", errors.Wrap(err, "failed to write to clearsign encoder") + } + + err = w.Close() + if err != nil { + return "", errors.Wrap(err, "failed to either sign or armor message block") + } + + return out.String(), nil +} + +// Verify checks a signature and verifies that it is legit for a chart. +func (s *Signatory) Verify(chartpath, sigpath string) (*Verification, error) { + ver := &Verification{} + for _, fname := range []string{chartpath, sigpath} { + if fi, err := os.Stat(fname); err != nil { + return ver, err + } else if fi.IsDir() { + return ver, errors.Errorf("%s cannot be a directory", fname) + } + } + + // First verify the signature + sig, err := s.decodeSignature(sigpath) + if err != nil { + return ver, errors.Wrap(err, "failed to decode signature") + } + + by, err := s.verifySignature(sig) + if err != nil { + return ver, err + } + ver.SignedBy = by + + // Second, verify the hash of the tarball. + sum, err := DigestFile(chartpath) + if err != nil { + return ver, err + } + _, sums, err := parseMessageBlock(sig.Plaintext) + if err != nil { + return ver, err + } + + sum = "sha256:" + sum + basename := filepath.Base(chartpath) + if sha, ok := sums.Files[basename]; !ok { + return ver, errors.Errorf("provenance does not contain a SHA for a file named %q", basename) + } else if sha != sum { + return ver, errors.Errorf("sha256 sum does not match for %s: %q != %q", basename, sha, sum) + } + ver.FileHash = sum + ver.FileName = basename + + // TODO: when image signing is added, verify that here. + + return ver, nil +} + +func (s *Signatory) decodeSignature(filename string) (*clearsign.Block, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + + block, _ := clearsign.Decode(data) + if block == nil { + // There was no sig in the file. + return nil, errors.New("signature block not found") + } + + return block, nil +} + +// verifySignature verifies that the given block is validly signed, and returns the signer. +func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, error) { + return openpgp.CheckDetachedSignature( + s.KeyRing, + bytes.NewBuffer(block.Bytes), + block.ArmoredSignature.Body, + ) +} + +func messageBlock(chartpath string) (*bytes.Buffer, error) { + var b *bytes.Buffer + // Checksum the archive + chash, err := DigestFile(chartpath) + if err != nil { + return b, err + } + + base := filepath.Base(chartpath) + sums := &SumCollection{ + Files: map[string]string{ + base: "sha256:" + chash, + }, + } + + // Load the archive into memory. + chart, err := loader.LoadFile(chartpath) + if err != nil { + return b, err + } + + // Buffer a hash + checksums YAML file + data, err := yaml.Marshal(chart.Metadata) + if err != nil { + return b, err + } + + // FIXME: YAML uses ---\n as a file start indicator, but this is not legal in a PGP + // clearsign block. So we use ...\n, which is the YAML document end marker. + // http://yaml.org/spec/1.2/spec.html#id2800168 + b = bytes.NewBuffer(data) + b.WriteString("\n...\n") + + data, err = yaml.Marshal(sums) + if err != nil { + return b, err + } + b.Write(data) + + return b, nil +} + +// parseMessageBlock +func parseMessageBlock(data []byte) (*hapi.Metadata, *SumCollection, error) { + // This sucks. 
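+	//
+	// For orientation, the plaintext being split is expected to look roughly
+	// like this (values illustrative, not taken from a real chart; metadata
+	// is abbreviated):
+	//
+	//	name: mychart
+	//	version: 0.1.0
+	//	...
+	//	files:
+	//	  mychart-0.1.0.tgz: "sha256:<hex digest>"
+	//
+	// The "..." line is the YAML document-end marker written by messageBlock,
+	// so parts[0] holds the chart metadata and parts[1] the checksum collection.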
+	parts := bytes.Split(data, []byte("\n...\n"))
+	if len(parts) < 2 {
+		return nil, nil, errors.New("message block must have at least two parts")
+	}
+
+	md := &hapi.Metadata{}
+	sc := &SumCollection{}
+
+	if err := yaml.Unmarshal(parts[0], md); err != nil {
+		return md, sc, err
+	}
+	err := yaml.Unmarshal(parts[1], sc)
+	return md, sc, err
+}
+
+// loadKey loads a GPG key found at a particular path.
+func loadKey(keypath string) (*openpgp.Entity, error) {
+	f, err := os.Open(keypath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	pr := packet.NewReader(f)
+	return openpgp.ReadEntity(pr)
+}
+
+func loadKeyRing(ringpath string) (openpgp.EntityList, error) {
+	f, err := os.Open(ringpath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return openpgp.ReadKeyRing(f)
+}
+
+// DigestFile calculates a SHA256 hash (like Docker) for a given file.
+//
+// It takes the path to the archive file, and returns a string representation of
+// the SHA256 sum.
+//
+// The intended use of this function is to generate a sum of a chart TGZ file.
+func DigestFile(filename string) (string, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	return Digest(f)
+}
+
+// Digest hashes a reader and returns a SHA256 digest.
+//
+// Helm uses SHA256 as its default hash for all non-cryptographic applications.
+func Digest(in io.Reader) (string, error) {
+	hash := crypto.SHA256.New()
+	if _, err := io.Copy(hash, in); err != nil {
+		// Propagate the copy error instead of silently returning an empty digest.
+		return "", err
+	}
+	return hex.EncodeToString(hash.Sum(nil)), nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/pusher/doc.go b/vendor/helm.sh/helm/v3/pkg/pusher/doc.go
new file mode 100644
index 000000000..df89ab112
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/pusher/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package pusher provides a generalized tool for uploading data by scheme.
+This provides a method by which the plugin system can load arbitrary protocol
+handlers based upon a URL scheme.
+*/
+package pusher
diff --git a/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go b/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go
new file mode 100644
index 000000000..94154d389
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/pusher/ocipusher.go
@@ -0,0 +1,152 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package pusher + +import ( + "fmt" + "net" + "net/http" + "os" + "path" + "strings" + "time" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/tlsutil" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/registry" +) + +// OCIPusher is the default OCI backend handler +type OCIPusher struct { + opts options +} + +// Push performs a Push from repo.Pusher. +func (pusher *OCIPusher) Push(chartRef, href string, options ...Option) error { + for _, opt := range options { + opt(&pusher.opts) + } + return pusher.push(chartRef, href) +} + +func (pusher *OCIPusher) push(chartRef, href string) error { + stat, err := os.Stat(chartRef) + if err != nil { + if os.IsNotExist(err) { + return errors.Errorf("%s: no such file", chartRef) + } + return err + } + if stat.IsDir() { + return errors.New("cannot push directory, must provide chart archive (.tgz)") + } + + meta, err := loader.Load(chartRef) + if err != nil { + return err + } + + client := pusher.opts.registryClient + if client == nil { + c, err := pusher.newRegistryClient() + if err != nil { + return err + } + client = c + } + + chartBytes, err := os.ReadFile(chartRef) + if err != nil { + return err + } + + var pushOpts []registry.PushOption + provRef := fmt.Sprintf("%s.prov", chartRef) + if _, err := os.Stat(provRef); err == nil { + provBytes, err := os.ReadFile(provRef) + if err != nil { + return err + } + pushOpts = append(pushOpts, registry.PushOptProvData(provBytes)) + } + + ref := fmt.Sprintf("%s:%s", + path.Join(strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)), meta.Metadata.Name), + meta.Metadata.Version) + + _, err = client.Push(chartBytes, ref, pushOpts...) + return err +} + +// NewOCIPusher constructs a valid OCI client as a Pusher +func NewOCIPusher(ops ...Option) (Pusher, error) { + var client OCIPusher + + for _, opt := range ops { + opt(&client.opts) + } + + return &client, nil +} + +func (pusher *OCIPusher) newRegistryClient() (*registry.Client, error) { + if (pusher.opts.certFile != "" && pusher.opts.keyFile != "") || pusher.opts.caFile != "" || pusher.opts.insecureSkipTLSverify { + tlsConf, err := tlsutil.NewClientTLS(pusher.opts.certFile, pusher.opts.keyFile, pusher.opts.caFile, pusher.opts.insecureSkipTLSverify) + if err != nil { + return nil, errors.Wrap(err, "can't create TLS config for client") + } + + registryClient, err := registry.NewClient( + registry.ClientOptHTTPClient(&http.Client{ + // From https://github.com/google/go-containerregistry/blob/31786c6cbb82d6ec4fb8eb79cd9387905130534e/pkg/v1/remote/options.go#L87 + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + // By default we wrap the transport in retries, so reduce the + // default dial timeout to 5s to avoid 5x 30s of connection + // timeouts when doing the "ping" on certain http registries. + Timeout: 5 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: tlsConf, + }, + }), + registry.ClientOptEnableCache(true), + ) + if err != nil { + return nil, err + } + return registryClient, nil + } + + opts := []registry.ClientOption{registry.ClientOptEnableCache(true)} + if pusher.opts.plainHTTP { + opts = append(opts, registry.ClientOptPlainHTTP()) + } + + registryClient, err := registry.NewClient(opts...) 
+	if err != nil {
+		return nil, err
+	}
+	return registryClient, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go b/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go
new file mode 100644
index 000000000..5b8a9160f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pusher
+
+import (
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/pkg/cli"
+	"helm.sh/helm/v3/pkg/registry"
+)
+
+// options are generic parameters to be provided to the pusher during instantiation.
+//
+// Pushers may or may not ignore these parameters as they are passed in.
+type options struct {
+	registryClient        *registry.Client
+	certFile              string
+	keyFile               string
+	caFile                string
+	insecureSkipTLSverify bool
+	plainHTTP             bool
+}
+
+// Option allows specifying various settings configurable by the user for overriding the defaults
+// used when performing Push operations with the Pusher.
+type Option func(*options)
+
+// WithRegistryClient sets the registryClient option.
+func WithRegistryClient(client *registry.Client) Option {
+	return func(opts *options) {
+		opts.registryClient = client
+	}
+}
+
+// WithTLSClientConfig sets the client auth with the provided credentials.
+func WithTLSClientConfig(certFile, keyFile, caFile string) Option {
+	return func(opts *options) {
+		opts.certFile = certFile
+		opts.keyFile = keyFile
+		opts.caFile = caFile
+	}
+}
+
+// WithInsecureSkipTLSVerify determines if a TLS certificate will be checked.
+func WithInsecureSkipTLSVerify(insecureSkipTLSVerify bool) Option {
+	return func(opts *options) {
+		opts.insecureSkipTLSverify = insecureSkipTLSVerify
+	}
+}
+
+// WithPlainHTTP configures the pusher to contact the registry over plain HTTP
+// instead of TLS.
+func WithPlainHTTP(plainHTTP bool) Option {
+	return func(opts *options) {
+		opts.plainHTTP = plainHTTP
+	}
+}
+
+// Pusher is an interface to support upload to the specified URL.
+type Pusher interface {
+	// Push uploads the file content at chartRef to the given url string.
+	Push(chartRef, url string, options ...Option) error
+}
+
+// Constructor is the function every pusher provides to create a specific
+// instance according to the configuration.
+type Constructor func(options ...Option) (Pusher, error)
+
+// Provider represents any pusher and the schemes that it supports.
+type Provider struct {
+	Schemes []string
+	New     Constructor
+}
+
+// Provides returns true if the given scheme is supported by this Provider.
+func (p Provider) Provides(scheme string) bool {
+	for _, i := range p.Schemes {
+		if i == scheme {
+			return true
+		}
+	}
+	return false
+}
+
+// Providers is a collection of Provider objects.
+type Providers []Provider
+
+// ByScheme constructs a Pusher from the Provider that handles the given scheme.
+//
+// If no provider handles this scheme, this will return an error.
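+//
+// A minimal usage sketch (the chart path and registry URL are hypothetical):
+//
+//	pushers := All(settings)
+//	p, err := pushers.ByScheme(registry.OCIScheme)
+//	if err == nil {
+//		err = p.Push("mychart-0.1.0.tgz", "oci://localhost:5000/charts")
+//	}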
+func (p Providers) ByScheme(scheme string) (Pusher, error) { + for _, pp := range p { + if pp.Provides(scheme) { + return pp.New() + } + } + return nil, errors.Errorf("scheme %q not supported", scheme) +} + +var ociProvider = Provider{ + Schemes: []string{registry.OCIScheme}, + New: NewOCIPusher, +} + +// All finds all of the registered pushers as a list of Provider instances. +// Currently, just the built-in pushers are collected. +func All(_ *cli.EnvSettings) Providers { + result := Providers{ociProvider} + return result +} diff --git a/vendor/helm.sh/helm/v3/pkg/registry/client.go b/vendor/helm.sh/helm/v3/pkg/registry/client.go new file mode 100644 index 000000000..7538cf69b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/registry/client.go @@ -0,0 +1,703 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry // import "helm.sh/helm/v3/pkg/registry" + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "sort" + "strings" + + "github.com/Masterminds/semver/v3" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "oras.land/oras-go/pkg/auth" + dockerauth "oras.land/oras-go/pkg/auth/docker" + "oras.land/oras-go/pkg/content" + "oras.land/oras-go/pkg/oras" + "oras.land/oras-go/pkg/registry" + registryremote "oras.land/oras-go/pkg/registry/remote" + registryauth "oras.land/oras-go/pkg/registry/remote/auth" + + "helm.sh/helm/v3/internal/version" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/helmpath" +) + +// See https://github.com/helm/helm/issues/10166 +const registryUnderscoreMessage = ` +OCI artifact references (e.g. tags) do not support the plus sign (+). To support +storing semantic versions, Helm adopts the convention of changing plus (+) to +an underscore (_) in chart version tags when pushing to a registry and back to +a plus (+) when pulling from a registry.` + +type ( + // Client works with OCI-compliant registries + Client struct { + debug bool + enableCache bool + // path to repository config file e.g. 
~/.docker/config.json + credentialsFile string + out io.Writer + authorizer auth.Client + registryAuthorizer *registryauth.Client + resolver func(ref registry.Reference) (remotes.Resolver, error) + httpClient *http.Client + plainHTTP bool + } + + // ClientOption allows specifying various settings configurable by the user for overriding the defaults + // used when creating a new default client + ClientOption func(*Client) +) + +// NewClient returns a new registry client with config +func NewClient(options ...ClientOption) (*Client, error) { + client := &Client{ + out: io.Discard, + } + for _, option := range options { + option(client) + } + if client.credentialsFile == "" { + client.credentialsFile = helmpath.ConfigPath(CredentialsFileBasename) + } + if client.authorizer == nil { + authClient, err := dockerauth.NewClientWithDockerFallback(client.credentialsFile) + if err != nil { + return nil, err + } + client.authorizer = authClient + } + + resolverFn := client.resolver // copy for avoiding recursive call + client.resolver = func(ref registry.Reference) (remotes.Resolver, error) { + if resolverFn != nil { + // validate if the resolverFn returns a valid resolver + if resolver, err := resolverFn(ref); resolver != nil && err == nil { + return resolver, nil + } + } + headers := http.Header{} + headers.Set("User-Agent", version.GetUserAgent()) + opts := []auth.ResolverOption{auth.WithResolverHeaders(headers)} + if client.httpClient != nil { + opts = append(opts, auth.WithResolverClient(client.httpClient)) + } + if client.plainHTTP { + opts = append(opts, auth.WithResolverPlainHTTP()) + } + resolver, err := client.authorizer.ResolverWithOpts(opts...) + if err != nil { + return nil, err + } + return resolver, nil + } + + // allocate a cache if option is set + var cache registryauth.Cache + if client.enableCache { + cache = registryauth.DefaultCache + } + if client.registryAuthorizer == nil { + client.registryAuthorizer = ®istryauth.Client{ + Client: client.httpClient, + Header: http.Header{ + "User-Agent": {version.GetUserAgent()}, + }, + Cache: cache, + Credential: func(ctx context.Context, reg string) (registryauth.Credential, error) { + dockerClient, ok := client.authorizer.(*dockerauth.Client) + if !ok { + return registryauth.EmptyCredential, errors.New("unable to obtain docker client") + } + + username, password, err := dockerClient.Credential(reg) + if err != nil { + return registryauth.EmptyCredential, errors.New("unable to retrieve credentials") + } + + // A blank returned username and password value is a bearer token + if username == "" && password != "" { + return registryauth.Credential{ + RefreshToken: password, + }, nil + } + + return registryauth.Credential{ + Username: username, + Password: password, + }, nil + + }, + } + + } + return client, nil +} + +// ClientOptDebug returns a function that sets the debug setting on client options set +func ClientOptDebug(debug bool) ClientOption { + return func(client *Client) { + client.debug = debug + } +} + +// ClientOptEnableCache returns a function that sets the enableCache setting on a client options set +func ClientOptEnableCache(enableCache bool) ClientOption { + return func(client *Client) { + client.enableCache = enableCache + } +} + +// ClientOptWriter returns a function that sets the writer setting on client options set +func ClientOptWriter(out io.Writer) ClientOption { + return func(client *Client) { + client.out = out + } +} + +// ClientOptCredentialsFile returns a function that sets the credentialsFile setting on a client 
options set +func ClientOptCredentialsFile(credentialsFile string) ClientOption { + return func(client *Client) { + client.credentialsFile = credentialsFile + } +} + +// ClientOptHTTPClient returns a function that sets the httpClient setting on a client options set +func ClientOptHTTPClient(httpClient *http.Client) ClientOption { + return func(client *Client) { + client.httpClient = httpClient + } +} + +func ClientOptPlainHTTP() ClientOption { + return func(c *Client) { + c.plainHTTP = true + } +} + +// ClientOptResolver returns a function that sets the resolver setting on a client options set +func ClientOptResolver(resolver remotes.Resolver) ClientOption { + return func(client *Client) { + client.resolver = func(ref registry.Reference) (remotes.Resolver, error) { + return resolver, nil + } + } +} + +type ( + // LoginOption allows specifying various settings on login + LoginOption func(*loginOperation) + + loginOperation struct { + username string + password string + insecure bool + certFile string + keyFile string + caFile string + } +) + +// Login logs into a registry +func (c *Client) Login(host string, options ...LoginOption) error { + operation := &loginOperation{} + for _, option := range options { + option(operation) + } + authorizerLoginOpts := []auth.LoginOption{ + auth.WithLoginContext(ctx(c.out, c.debug)), + auth.WithLoginHostname(host), + auth.WithLoginUsername(operation.username), + auth.WithLoginSecret(operation.password), + auth.WithLoginUserAgent(version.GetUserAgent()), + auth.WithLoginTLS(operation.certFile, operation.keyFile, operation.caFile), + } + if operation.insecure { + authorizerLoginOpts = append(authorizerLoginOpts, auth.WithLoginInsecure()) + } + if err := c.authorizer.LoginWithOpts(authorizerLoginOpts...); err != nil { + return err + } + fmt.Fprintln(c.out, "Login Succeeded") + return nil +} + +// LoginOptBasicAuth returns a function that sets the username/password settings on login +func LoginOptBasicAuth(username string, password string) LoginOption { + return func(operation *loginOperation) { + operation.username = username + operation.password = password + } +} + +// LoginOptInsecure returns a function that sets the insecure setting on login +func LoginOptInsecure(insecure bool) LoginOption { + return func(operation *loginOperation) { + operation.insecure = insecure + } +} + +// LoginOptTLSClientConfig returns a function that sets the TLS settings on login. +func LoginOptTLSClientConfig(certFile, keyFile, caFile string) LoginOption { + return func(operation *loginOperation) { + operation.certFile = certFile + operation.keyFile = keyFile + operation.caFile = caFile + } +} + +type ( + // LogoutOption allows specifying various settings on logout + LogoutOption func(*logoutOperation) + + logoutOperation struct{} +) + +// Logout logs out of a registry +func (c *Client) Logout(host string, opts ...LogoutOption) error { + operation := &logoutOperation{} + for _, opt := range opts { + opt(operation) + } + if err := c.authorizer.Logout(ctx(c.out, c.debug), host); err != nil { + return err + } + fmt.Fprintf(c.out, "Removing login credentials for %s\n", host) + return nil +} + +type ( + // PullOption allows specifying various settings on pull + PullOption func(*pullOperation) + + // PullResult is the result returned upon successful pull. 
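+	//
+	// An abbreviated, illustrative JSON rendering (all values hypothetical):
+	//
+	//	{
+	//	  "manifest": {"digest": "sha256:aaa…", "size": 316},
+	//	  "config":   {"digest": "sha256:bbb…", "size": 99},
+	//	  "chart":    {"digest": "sha256:ccc…", "size": 973, "meta": {"name": "mychart", "version": "0.1.0"}},
+	//	  "prov":     {"digest": "", "size": 0},
+	//	  "ref":      "localhost:5000/mychart:0.1.0"
+	//	}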
+ PullResult struct { + Manifest *DescriptorPullSummary `json:"manifest"` + Config *DescriptorPullSummary `json:"config"` + Chart *DescriptorPullSummaryWithMeta `json:"chart"` + Prov *DescriptorPullSummary `json:"prov"` + Ref string `json:"ref"` + } + + DescriptorPullSummary struct { + Data []byte `json:"-"` + Digest string `json:"digest"` + Size int64 `json:"size"` + } + + DescriptorPullSummaryWithMeta struct { + DescriptorPullSummary + Meta *chart.Metadata `json:"meta"` + } + + pullOperation struct { + withChart bool + withProv bool + ignoreMissingProv bool + } +) + +// Pull downloads a chart from a registry +func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { + parsedRef, err := parseReference(ref) + if err != nil { + return nil, err + } + + operation := &pullOperation{ + withChart: true, // By default, always download the chart layer + } + for _, option := range options { + option(operation) + } + if !operation.withChart && !operation.withProv { + return nil, errors.New( + "must specify at least one layer to pull (chart/prov)") + } + memoryStore := content.NewMemory() + allowedMediaTypes := []string{ + ConfigMediaType, + } + minNumDescriptors := 1 // 1 for the config + if operation.withChart { + minNumDescriptors++ + allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType) + } + if operation.withProv { + if !operation.ignoreMissingProv { + minNumDescriptors++ + } + allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType) + } + + var descriptors, layers []ocispec.Descriptor + remotesResolver, err := c.resolver(parsedRef) + if err != nil { + return nil, err + } + registryStore := content.Registry{Resolver: remotesResolver} + + manifest, err := oras.Copy(ctx(c.out, c.debug), registryStore, parsedRef.String(), memoryStore, "", + oras.WithPullEmptyNameAllowed(), + oras.WithAllowedMediaTypes(allowedMediaTypes), + oras.WithLayerDescriptors(func(l []ocispec.Descriptor) { + layers = l + })) + if err != nil { + return nil, err + } + + descriptors = append(descriptors, manifest) + descriptors = append(descriptors, layers...) 
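+
+	// At this point descriptors holds the copied manifest plus every layer
+	// descriptor reported by ORAS; the count is validated below against the
+	// minimum implied by the requested layers (config, plus chart and/or prov).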
+ + numDescriptors := len(descriptors) + if numDescriptors < minNumDescriptors { + return nil, fmt.Errorf("manifest does not contain minimum number of descriptors (%d), descriptors found: %d", + minNumDescriptors, numDescriptors) + } + var configDescriptor *ocispec.Descriptor + var chartDescriptor *ocispec.Descriptor + var provDescriptor *ocispec.Descriptor + for _, descriptor := range descriptors { + d := descriptor + switch d.MediaType { + case ConfigMediaType: + configDescriptor = &d + case ChartLayerMediaType: + chartDescriptor = &d + case ProvLayerMediaType: + provDescriptor = &d + case LegacyChartLayerMediaType: + chartDescriptor = &d + fmt.Fprintf(c.out, "Warning: chart media type %s is deprecated\n", LegacyChartLayerMediaType) + } + } + if configDescriptor == nil { + return nil, fmt.Errorf("could not load config with mediatype %s", ConfigMediaType) + } + if operation.withChart && chartDescriptor == nil { + return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s", + ChartLayerMediaType) + } + var provMissing bool + if operation.withProv && provDescriptor == nil { + if operation.ignoreMissingProv { + provMissing = true + } else { + return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s", + ProvLayerMediaType) + } + } + result := &PullResult{ + Manifest: &DescriptorPullSummary{ + Digest: manifest.Digest.String(), + Size: manifest.Size, + }, + Config: &DescriptorPullSummary{ + Digest: configDescriptor.Digest.String(), + Size: configDescriptor.Size, + }, + Chart: &DescriptorPullSummaryWithMeta{}, + Prov: &DescriptorPullSummary{}, + Ref: parsedRef.String(), + } + var getManifestErr error + if _, manifestData, ok := memoryStore.Get(manifest); !ok { + getManifestErr = errors.Errorf("Unable to retrieve blob with digest %s", manifest.Digest) + } else { + result.Manifest.Data = manifestData + } + if getManifestErr != nil { + return nil, getManifestErr + } + var getConfigDescriptorErr error + if _, configData, ok := memoryStore.Get(*configDescriptor); !ok { + getConfigDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", configDescriptor.Digest) + } else { + result.Config.Data = configData + var meta *chart.Metadata + if err := json.Unmarshal(configData, &meta); err != nil { + return nil, err + } + result.Chart.Meta = meta + } + if getConfigDescriptorErr != nil { + return nil, getConfigDescriptorErr + } + if operation.withChart { + var getChartDescriptorErr error + if _, chartData, ok := memoryStore.Get(*chartDescriptor); !ok { + getChartDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", chartDescriptor.Digest) + } else { + result.Chart.Data = chartData + result.Chart.Digest = chartDescriptor.Digest.String() + result.Chart.Size = chartDescriptor.Size + } + if getChartDescriptorErr != nil { + return nil, getChartDescriptorErr + } + } + if operation.withProv && !provMissing { + var getProvDescriptorErr error + if _, provData, ok := memoryStore.Get(*provDescriptor); !ok { + getProvDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", provDescriptor.Digest) + } else { + result.Prov.Data = provData + result.Prov.Digest = provDescriptor.Digest.String() + result.Prov.Size = provDescriptor.Size + } + if getProvDescriptorErr != nil { + return nil, getProvDescriptorErr + } + } + + fmt.Fprintf(c.out, "Pulled: %s\n", result.Ref) + fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest) + + if strings.Contains(result.Ref, "_") { + fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref) + 
fmt.Fprint(c.out, registryUnderscoreMessage+"\n") + } + + return result, nil +} + +// PullOptWithChart returns a function that sets the withChart setting on pull +func PullOptWithChart(withChart bool) PullOption { + return func(operation *pullOperation) { + operation.withChart = withChart + } +} + +// PullOptWithProv returns a function that sets the withProv setting on pull +func PullOptWithProv(withProv bool) PullOption { + return func(operation *pullOperation) { + operation.withProv = withProv + } +} + +// PullOptIgnoreMissingProv returns a function that sets the ignoreMissingProv setting on pull +func PullOptIgnoreMissingProv(ignoreMissingProv bool) PullOption { + return func(operation *pullOperation) { + operation.ignoreMissingProv = ignoreMissingProv + } +} + +type ( + // PushOption allows specifying various settings on push + PushOption func(*pushOperation) + + // PushResult is the result returned upon successful push. + PushResult struct { + Manifest *descriptorPushSummary `json:"manifest"` + Config *descriptorPushSummary `json:"config"` + Chart *descriptorPushSummaryWithMeta `json:"chart"` + Prov *descriptorPushSummary `json:"prov"` + Ref string `json:"ref"` + } + + descriptorPushSummary struct { + Digest string `json:"digest"` + Size int64 `json:"size"` + } + + descriptorPushSummaryWithMeta struct { + descriptorPushSummary + Meta *chart.Metadata `json:"meta"` + } + + pushOperation struct { + provData []byte + strictMode bool + test bool + } +) + +// Push uploads a chart to a registry. +func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResult, error) { + parsedRef, err := parseReference(ref) + if err != nil { + return nil, err + } + + operation := &pushOperation{ + strictMode: true, // By default, enable strict mode + } + for _, option := range options { + option(operation) + } + meta, err := extractChartMeta(data) + if err != nil { + return nil, err + } + if operation.strictMode { + if !strings.HasSuffix(ref, fmt.Sprintf("/%s:%s", meta.Name, meta.Version)) { + return nil, errors.New( + "strict mode enabled, ref basename and tag must match the chart name and version") + } + } + memoryStore := content.NewMemory() + chartDescriptor, err := memoryStore.Add("", ChartLayerMediaType, data) + if err != nil { + return nil, err + } + + configData, err := json.Marshal(meta) + if err != nil { + return nil, err + } + + configDescriptor, err := memoryStore.Add("", ConfigMediaType, configData) + if err != nil { + return nil, err + } + + descriptors := []ocispec.Descriptor{chartDescriptor} + var provDescriptor ocispec.Descriptor + if operation.provData != nil { + provDescriptor, err = memoryStore.Add("", ProvLayerMediaType, operation.provData) + if err != nil { + return nil, err + } + + descriptors = append(descriptors, provDescriptor) + } + + ociAnnotations := generateOCIAnnotations(meta, operation.test) + + manifestData, manifest, err := content.GenerateManifest(&configDescriptor, ociAnnotations, descriptors...) 
+	if err != nil {
+		return nil, err
+	}
+
+	if err := memoryStore.StoreManifest(parsedRef.String(), manifest, manifestData); err != nil {
+		return nil, err
+	}
+
+	remotesResolver, err := c.resolver(parsedRef)
+	if err != nil {
+		return nil, err
+	}
+	registryStore := content.Registry{Resolver: remotesResolver}
+	_, err = oras.Copy(ctx(c.out, c.debug), memoryStore, parsedRef.String(), registryStore, "",
+		oras.WithNameValidation(nil))
+	if err != nil {
+		return nil, err
+	}
+	chartSummary := &descriptorPushSummaryWithMeta{
+		Meta: meta,
+	}
+	chartSummary.Digest = chartDescriptor.Digest.String()
+	chartSummary.Size = chartDescriptor.Size
+	result := &PushResult{
+		Manifest: &descriptorPushSummary{
+			Digest: manifest.Digest.String(),
+			Size:   manifest.Size,
+		},
+		Config: &descriptorPushSummary{
+			Digest: configDescriptor.Digest.String(),
+			Size:   configDescriptor.Size,
+		},
+		Chart: chartSummary,
+		Prov:  &descriptorPushSummary{}, // prevent nil references
+		Ref:   parsedRef.String(),
+	}
+	if operation.provData != nil {
+		result.Prov = &descriptorPushSummary{
+			Digest: provDescriptor.Digest.String(),
+			Size:   provDescriptor.Size,
+		}
+	}
+	fmt.Fprintf(c.out, "Pushed: %s\n", result.Ref)
+	fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest)
+	if strings.Contains(parsedRef.Reference, "_") {
+		fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref)
+		fmt.Fprint(c.out, registryUnderscoreMessage+"\n")
+	}
+
+	return result, err
+}
+
+// PushOptProvData returns a function that sets the prov bytes setting on push
+func PushOptProvData(provData []byte) PushOption {
+	return func(operation *pushOperation) {
+		operation.provData = provData
+	}
+}
+
+// PushOptStrictMode returns a function that sets the strictMode setting on push
+func PushOptStrictMode(strictMode bool) PushOption {
+	return func(operation *pushOperation) {
+		operation.strictMode = strictMode
+	}
+}
+
+// PushOptTest returns a function that sets the test setting on push
+func PushOptTest(test bool) PushOption {
+	return func(operation *pushOperation) {
+		operation.test = test
+	}
+}
+
+// Tags provides a sorted list of all semver-compliant tags for a given repository
+func (c *Client) Tags(ref string) ([]string, error) {
+	parsedReference, err := registry.ParseReference(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	repository := registryremote.Repository{
+		Reference: parsedReference,
+		Client:    c.registryAuthorizer,
+		PlainHTTP: c.plainHTTP,
+	}
+
+	var registryTags []string
+
+	registryTags, err = registry.Tags(ctx(c.out, c.debug), &repository)
+	if err != nil {
+		return nil, err
+	}
+
+	var tagVersions []*semver.Version
+	for _, tag := range registryTags {
+		// Change underscore (_) back to plus (+) for Helm
+		// See https://github.com/helm/helm/issues/10166
+		tagVersion, err := semver.StrictNewVersion(strings.ReplaceAll(tag, "_", "+"))
+		if err == nil {
+			tagVersions = append(tagVersions, tagVersion)
+		}
+	}
+
+	// Sort the collection in descending order
+	sort.Sort(sort.Reverse(semver.Collection(tagVersions)))
+
+	tags := make([]string, len(tagVersions))
+
+	for iTv, tv := range tagVersions {
+		tags[iTv] = tv.String()
+	}
+
+	return tags, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/registry/constants.go b/vendor/helm.sh/helm/v3/pkg/registry/constants.go
new file mode 100644
index 000000000..570b6f0d3
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/registry/constants.go
@@ -0,0 +1,37 @@
+/*
+Copyright The Helm Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry // import "helm.sh/helm/v3/pkg/registry" + +const ( + // OCIScheme is the URL scheme for OCI-based requests + OCIScheme = "oci" + + // CredentialsFileBasename is the filename for auth credentials file + CredentialsFileBasename = "registry/config.json" + + // ConfigMediaType is the reserved media type for the Helm chart manifest config + ConfigMediaType = "application/vnd.cncf.helm.config.v1+json" + + // ChartLayerMediaType is the reserved media type for Helm chart package content + ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip" + + // ProvLayerMediaType is the reserved media type for Helm chart provenance files + ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov" + + // LegacyChartLayerMediaType is the legacy reserved media type for Helm chart package content. + LegacyChartLayerMediaType = "application/tar+gzip" +) diff --git a/vendor/helm.sh/helm/v3/pkg/registry/util.go b/vendor/helm.sh/helm/v3/pkg/registry/util.go new file mode 100644 index 000000000..8baf0852a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/registry/util.go @@ -0,0 +1,247 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package registry // import "helm.sh/helm/v3/pkg/registry"
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	helmtime "helm.sh/helm/v3/pkg/time"
+
+	"github.com/Masterminds/semver/v3"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	orascontext "oras.land/oras-go/pkg/context"
+	"oras.land/oras-go/pkg/registry"
+
+	"helm.sh/helm/v3/internal/tlsutil"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+)
+
+var immutableOciAnnotations = []string{
+	ocispec.AnnotationVersion,
+	ocispec.AnnotationTitle,
+}
+
+// IsOCI determines whether or not a URL is to be treated as an OCI URL
+func IsOCI(url string) bool {
+	return strings.HasPrefix(url, fmt.Sprintf("%s://", OCIScheme))
+}
+
+// ContainsTag determines whether a tag is found in a provided list of tags
+func ContainsTag(tags []string, tag string) bool {
+	for _, t := range tags {
+		if tag == t {
+			return true
+		}
+	}
+	return false
+}
+
+func GetTagMatchingVersionOrConstraint(tags []string, versionString string) (string, error) {
+	var constraint *semver.Constraints
+	if versionString == "" {
+		// If string is empty, set wildcard constraint
+		constraint, _ = semver.NewConstraint("*")
+	} else {
+		// When the user inputs an exact version, check whether there is an
+		// exact match first
+		for _, v := range tags {
+			if versionString == v {
+				return v, nil
+			}
+		}
+
+		// Otherwise set constraint to the string given
+		var err error
+		constraint, err = semver.NewConstraint(versionString)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	// Otherwise try to find the first available version matching the string,
+	// in case it is a constraint
+	for _, v := range tags {
+		test, err := semver.NewVersion(v)
+		if err != nil {
+			continue
+		}
+		if constraint.Check(test) {
+			return v, nil
+		}
+	}
+
+	return "", errors.Errorf("Could not locate a version matching provided version string %s", versionString)
+}
+
+// extractChartMeta is used to extract chart metadata from a byte array
+func extractChartMeta(chartData []byte) (*chart.Metadata, error) {
+	ch, err := loader.LoadArchive(bytes.NewReader(chartData))
+	if err != nil {
+		return nil, err
+	}
+	return ch.Metadata, nil
+}
+
+// ctx retrieves a fresh context.
+// disable verbose logging coming from ORAS (unless debug is enabled)
+func ctx(out io.Writer, debug bool) context.Context {
+	if !debug {
+		return orascontext.Background()
+	}
+	ctx := orascontext.WithLoggerFromWriter(context.Background(), out)
+	orascontext.GetLogger(ctx).Logger.SetLevel(logrus.DebugLevel)
+	return ctx
+}
+
+// parseReference will parse and validate the reference, and clean tags when
+// applicable. Tags are only cleaned when plus (+) signs are present, and are
+// converted to underscores (_) before pushing
+// See https://github.com/helm/helm/issues/10166
+func parseReference(raw string) (registry.Reference, error) {
+	// The sole possible reference modification is replacing plus (+) signs
+	// present in tags with underscores (_).
To do this properly, we first
+	// need to identify a tag, and then pass it on to the reference parser
+	// NOTE: Passing immediately to the reference parser will fail since (+)
+	// signs are an invalid tag character, and simply replacing all plus (+)
+	// occurrences could invalidate other portions of the URI
+	parts := strings.Split(raw, ":")
+	if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") {
+		tag := parts[len(parts)-1]
+
+		if tag != "" {
+			// Replace any plus (+) signs with known underscore (_) conversion
+			newTag := strings.ReplaceAll(tag, "+", "_")
+			raw = strings.ReplaceAll(raw, tag, newTag)
+		}
+	}
+
+	return registry.ParseReference(raw)
+}
+
+// NewRegistryClientWithTLS is a helper function to create a new registry client with TLS enabled.
+func NewRegistryClientWithTLS(out io.Writer, certFile, keyFile, caFile string, insecureSkipTLSverify bool, registryConfig string, debug bool) (*Client, error) {
+	tlsConf, err := tlsutil.NewClientTLS(certFile, keyFile, caFile, insecureSkipTLSverify)
+	if err != nil {
+		return nil, fmt.Errorf("can't create TLS config for client: %s", err)
+	}
+	// Create a new registry client
+	registryClient, err := NewClient(
+		ClientOptDebug(debug),
+		ClientOptEnableCache(true),
+		ClientOptWriter(out),
+		ClientOptCredentialsFile(registryConfig),
+		ClientOptHTTPClient(&http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsConf,
+			},
+		}),
+	)
+	if err != nil {
+		return nil, err
+	}
+	return registryClient, nil
+}
+
+// generateOCIAnnotations will generate OCI annotations to include within the OCI manifest
+func generateOCIAnnotations(meta *chart.Metadata, test bool) map[string]string {
+
+	// Get annotations from Chart attributes
+	ociAnnotations := generateChartOCIAnnotations(meta, test)
+
+	// Copy Chart annotations
+annotations:
+	for chartAnnotationKey, chartAnnotationValue := range meta.Annotations {
+
+		// Avoid overriding key properties
+		for _, immutableOciKey := range immutableOciAnnotations {
+			if immutableOciKey == chartAnnotationKey {
+				continue annotations
+			}
+		}
+
+		// Add chart annotation
+		ociAnnotations[chartAnnotationKey] = chartAnnotationValue
+	}
+
+	return ociAnnotations
+}
+
+// generateChartOCIAnnotations will generate OCI annotations from the provided chart
+func generateChartOCIAnnotations(meta *chart.Metadata, test bool) map[string]string {
+	chartOCIAnnotations := map[string]string{}
+
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationDescription, meta.Description)
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationTitle, meta.Name)
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationVersion, meta.Version)
+	chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationURL, meta.Home)
+
+	if !test {
+		chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationCreated, helmtime.Now().UTC().Format(time.RFC3339))
+	}
+
+	if len(meta.Sources) > 0 {
+		chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationSource, meta.Sources[0])
+	}
+
+	if meta.Maintainers != nil && len(meta.Maintainers) > 0 {
+		var maintainerSb strings.Builder
+
+		for maintainerIdx, maintainer := range meta.Maintainers {
+
+			if len(maintainer.Name) > 0 {
+				maintainerSb.WriteString(maintainer.Name)
+			}
+
+			if len(maintainer.Email) > 0 {
+				maintainerSb.WriteString(" (")
+				maintainerSb.WriteString(maintainer.Email)
+				maintainerSb.WriteString(")")
+			}
+
+			if maintainerIdx < len(meta.Maintainers)-1 {
+				maintainerSb.WriteString(", ")
+			}
+
+		}
+
+		chartOCIAnnotations = addToMap(chartOCIAnnotations, ocispec.AnnotationAuthors, maintainerSb.String())
+
+	}
+
+	return chartOCIAnnotations
+}
+
+// addToMap takes an existing map and adds an item if the value is not empty
+func addToMap(inputMap map[string]string, newKey string, newValue string) map[string]string {
+
+	// Add item to map if its value is not empty
+	if len(strings.TrimSpace(newValue)) > 0 {
+		inputMap[newKey] = newValue
+	}
+
+	return inputMap
+
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/hook.go b/vendor/helm.sh/helm/v3/pkg/release/hook.go
new file mode 100644
index 000000000..cb9955582
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/hook.go
@@ -0,0 +1,106 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+import (
+	"helm.sh/helm/v3/pkg/time"
+)
+
+// HookEvent specifies the hook event
+type HookEvent string
+
+// Hook event types
+const (
+	HookPreInstall   HookEvent = "pre-install"
+	HookPostInstall  HookEvent = "post-install"
+	HookPreDelete    HookEvent = "pre-delete"
+	HookPostDelete   HookEvent = "post-delete"
+	HookPreUpgrade   HookEvent = "pre-upgrade"
+	HookPostUpgrade  HookEvent = "post-upgrade"
+	HookPreRollback  HookEvent = "pre-rollback"
+	HookPostRollback HookEvent = "post-rollback"
+	HookTest         HookEvent = "test"
+)
+
+func (x HookEvent) String() string { return string(x) }
+
+// HookDeletePolicy specifies the hook delete policy
+type HookDeletePolicy string
+
+// Hook delete policy types
+const (
+	HookSucceeded          HookDeletePolicy = "hook-succeeded"
+	HookFailed             HookDeletePolicy = "hook-failed"
+	HookBeforeHookCreation HookDeletePolicy = "before-hook-creation"
+)
+
+func (x HookDeletePolicy) String() string { return string(x) }
+
+// HookAnnotation is the label name for a hook
+const HookAnnotation = "helm.sh/hook"
+
+// HookWeightAnnotation is the label name for a hook weight
+const HookWeightAnnotation = "helm.sh/hook-weight"
+
+// HookDeleteAnnotation is the label name for the delete policy for a hook
+const HookDeleteAnnotation = "helm.sh/hook-delete-policy"
+
+// Hook defines a hook object.
+type Hook struct {
+	Name string `json:"name,omitempty"`
+	// Kind is the Kubernetes kind.
+	Kind string `json:"kind,omitempty"`
+	// Path is the chart-relative path to the template.
+	Path string `json:"path,omitempty"`
+	// Manifest is the manifest contents.
+	Manifest string `json:"manifest,omitempty"`
+	// Events are the events that this hook fires on.
+	Events []HookEvent `json:"events,omitempty"`
+	// LastRun indicates the date/time this was last run.
+	LastRun HookExecution `json:"last_run,omitempty"`
+	// Weight indicates the sort order for execution among similar Hook types
+	Weight int `json:"weight,omitempty"`
+	// DeletePolicies are the policies that indicate when to delete the hook
+	DeletePolicies []HookDeletePolicy `json:"delete_policies,omitempty"`
+}
+
+// A HookExecution records the result for the last execution of a hook for a given release.
+type HookExecution struct { + // StartedAt indicates the date/time this hook was started + StartedAt time.Time `json:"started_at,omitempty"` + // CompletedAt indicates the date/time this hook was completed. + CompletedAt time.Time `json:"completed_at,omitempty"` + // Phase indicates whether the hook completed successfully + Phase HookPhase `json:"phase"` +} + +// A HookPhase indicates the state of a hook execution +type HookPhase string + +const ( + // HookPhaseUnknown indicates that a hook is in an unknown state + HookPhaseUnknown HookPhase = "Unknown" + // HookPhaseRunning indicates that a hook is currently executing + HookPhaseRunning HookPhase = "Running" + // HookPhaseSucceeded indicates that hook execution succeeded + HookPhaseSucceeded HookPhase = "Succeeded" + // HookPhaseFailed indicates that hook execution failed + HookPhaseFailed HookPhase = "Failed" +) + +// String converts a hook phase to a printable string +func (x HookPhase) String() string { return string(x) } diff --git a/vendor/helm.sh/helm/v3/pkg/release/info.go b/vendor/helm.sh/helm/v3/pkg/release/info.go new file mode 100644 index 000000000..b030a8a54 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/release/info.go @@ -0,0 +1,40 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package release + +import ( + "k8s.io/apimachinery/pkg/runtime" + + "helm.sh/helm/v3/pkg/time" +) + +// Info describes release information. +type Info struct { + // FirstDeployed is when the release was first deployed. + FirstDeployed time.Time `json:"first_deployed,omitempty"` + // LastDeployed is when the release was last deployed. + LastDeployed time.Time `json:"last_deployed,omitempty"` + // Deleted tracks when this object was deleted. + Deleted time.Time `json:"deleted"` + // Description is human-friendly "log entry" about this release. + Description string `json:"description,omitempty"` + // Status is the current state of the release + Status Status `json:"status,omitempty"` + // Contains the rendered templates/NOTES.txt if available + Notes string `json:"notes,omitempty"` + // Contains the deployed resources information + Resources map[string][]runtime.Object `json:"resources,omitempty"` +} diff --git a/vendor/helm.sh/helm/v3/pkg/release/mock.go b/vendor/helm.sh/helm/v3/pkg/release/mock.go new file mode 100644 index 000000000..a28e1dc16 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/release/mock.go @@ -0,0 +1,116 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package release + +import ( + "fmt" + "math/rand" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/time" +) + +// MockHookTemplate is the hook template used for all mock release objects. +var MockHookTemplate = `apiVersion: v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-install +` + +// MockManifest is the manifest used for all mock release objects. +var MockManifest = `apiVersion: v1 +kind: Secret +metadata: + name: fixture +` + +// MockReleaseOptions allows for user-configurable options on mock release objects. +type MockReleaseOptions struct { + Name string + Version int + Chart *chart.Chart + Status Status + Namespace string +} + +// Mock creates a mock release object based on options set by MockReleaseOptions. This function should typically not be used outside of testing. +func Mock(opts *MockReleaseOptions) *Release { + date := time.Unix(242085845, 0).UTC() + + name := opts.Name + if name == "" { + name = "testrelease-" + fmt.Sprint(rand.Intn(100)) + } + + version := 1 + if opts.Version != 0 { + version = opts.Version + } + + namespace := opts.Namespace + if namespace == "" { + namespace = "default" + } + + ch := opts.Chart + if opts.Chart == nil { + ch = &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "foo", + Version: "0.1.0-beta.1", + AppVersion: "1.0", + }, + Templates: []*chart.File{ + {Name: "templates/foo.tpl", Data: []byte(MockManifest)}, + }, + } + } + + scode := StatusDeployed + if len(opts.Status) > 0 { + scode = opts.Status + } + + info := &Info{ + FirstDeployed: date, + LastDeployed: date, + Status: scode, + Description: "Release mock", + Notes: "Some mock release notes!", + } + + return &Release{ + Name: name, + Info: info, + Chart: ch, + Config: map[string]interface{}{"name": "value"}, + Version: version, + Namespace: namespace, + Hooks: []*Hook{ + { + Name: "pre-install-hook", + Kind: "Job", + Path: "pre-install-hook.yaml", + Manifest: MockHookTemplate, + LastRun: HookExecution{}, + Events: []HookEvent{HookPreInstall}, + }, + }, + Manifest: MockManifest, + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/release/release.go b/vendor/helm.sh/helm/v3/pkg/release/release.go new file mode 100644 index 000000000..b90612873 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/release/release.go @@ -0,0 +1,49 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package release + +import "helm.sh/helm/v3/pkg/chart" + +// Release describes a deployment of a chart, together with the chart +// and the variables used to deploy that chart. +type Release struct { + // Name is the name of the release + Name string `json:"name,omitempty"` + // Info provides information about a release + Info *Info `json:"info,omitempty"` + // Chart is the chart that was released. + Chart *chart.Chart `json:"chart,omitempty"` + // Config is the set of extra Values added to the chart. + // These values override the default values inside of the chart. 
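+	// Values supplied by the user at install or upgrade time (for example
+	// via -f/--set on the CLI) are recorded here.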
+	Config map[string]interface{} `json:"config,omitempty"`
+	// Manifest is the string representation of the rendered template.
+	Manifest string `json:"manifest,omitempty"`
+	// Hooks are all of the hooks declared for this release.
+	Hooks []*Hook `json:"hooks,omitempty"`
+	// Version is an int which represents the revision of the release.
+	Version int `json:"version,omitempty"`
+	// Namespace is the kubernetes namespace of the release.
+	Namespace string `json:"namespace,omitempty"`
+	// Labels of the release.
+	// Disabled encoding into JSON because labels are stored in the storage driver's metadata field.
+	Labels map[string]string `json:"-"`
+}
+
+// SetStatus is a helper for setting the status on a release.
+func (r *Release) SetStatus(status Status, msg string) {
+	r.Info.Status = status
+	r.Info.Description = msg
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/responses.go b/vendor/helm.sh/helm/v3/pkg/release/responses.go
new file mode 100644
index 000000000..7ee1fc2ee
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/responses.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+// UninstallReleaseResponse represents a successful response to an uninstall request.
+type UninstallReleaseResponse struct {
+	// Release is the release that was marked deleted.
+	Release *Release `json:"release,omitempty"`
+	// Info is an uninstall message
+	Info string `json:"info,omitempty"`
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/release/status.go b/vendor/helm.sh/helm/v3/pkg/release/status.go
new file mode 100644
index 000000000..e0e3ed62a
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/release/status.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release
+
+// Status is the status of a release
+type Status string
+
+// Describe the status of a release
+// NOTE: Make sure to update cmd/helm/status.go when adding or modifying any of these statuses.
+const (
+	// StatusUnknown indicates that a release is in an uncertain state.
+	StatusUnknown Status = "unknown"
+	// StatusDeployed indicates that the release has been pushed to Kubernetes.
+	StatusDeployed Status = "deployed"
+	// StatusUninstalled indicates that a release has been uninstalled from Kubernetes.
+	StatusUninstalled Status = "uninstalled"
+	// StatusSuperseded indicates that this release object is outdated and a newer one exists.
+	StatusSuperseded Status = "superseded"
+	// StatusFailed indicates that the release was not successfully deployed.
+	StatusFailed Status = "failed"
+	// StatusUninstalling indicates that an uninstall operation is underway.
+	StatusUninstalling Status = "uninstalling"
+	// StatusPendingInstall indicates that an install operation is underway.
+	StatusPendingInstall Status = "pending-install"
+	// StatusPendingUpgrade indicates that an upgrade operation is underway.
+	StatusPendingUpgrade Status = "pending-upgrade"
+	// StatusPendingRollback indicates that a rollback operation is underway.
+	StatusPendingRollback Status = "pending-rollback"
+)
+
+func (x Status) String() string { return string(x) }
+
+// IsPending determines if this status is a state or a transition.
+func (x Status) IsPending() bool {
+	return x == StatusPendingInstall || x == StatusPendingUpgrade || x == StatusPendingRollback
+}
diff --git a/vendor/k8s.io/helm/pkg/releaseutil/filter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go
similarity index 89%
rename from vendor/k8s.io/helm/pkg/releaseutil/filter.go
rename to vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go
index 458da705f..dbd0df8e2 100644
--- a/vendor/k8s.io/helm/pkg/releaseutil/filter.go
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go
@@ -14,9 +14,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package releaseutil // import "k8s.io/helm/pkg/releaseutil"
+package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil"
 
-import rspb "k8s.io/helm/pkg/proto/hapi/release"
+import rspb "helm.sh/helm/v3/pkg/release"
 
 // FilterFunc returns true if the release object satisfies
 // the predicate of the underlying filter func.
@@ -68,11 +68,11 @@ func All(filters ...FilterFunc) FilterFunc {
 }
 
 // StatusFilter filters a set of releases by status code.
-func StatusFilter(status rspb.Status_Code) FilterFunc {
+func StatusFilter(status rspb.Status) FilterFunc {
 	return FilterFunc(func(rls *rspb.Release) bool {
 		if rls == nil {
 			return true
 		}
-		return rls.GetInfo().GetStatus().Code == status
+		return rls.Info.Status == status
 	})
 }
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go
new file mode 100644
index 000000000..bb8e84dda
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go
@@ -0,0 +1,160 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil
+
+import (
+	"sort"
+
+	"helm.sh/helm/v3/pkg/release"
+)
+
+// KindSortOrder is an ordering of Kinds.
+type KindSortOrder []string
+
+// InstallOrder is the order in which manifests should be installed (by Kind).
+//
+// Those occurring earlier in the list get installed before those occurring later in the list.
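+// Kinds that are not in this list are installed after everything listed here,
+// sorted alphabetically among themselves (see lessByKind below).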
+var InstallOrder KindSortOrder = []string{ + "PriorityClass", + "Namespace", + "NetworkPolicy", + "ResourceQuota", + "LimitRange", + "PodSecurityPolicy", + "PodDisruptionBudget", + "ServiceAccount", + "Secret", + "SecretList", + "ConfigMap", + "StorageClass", + "PersistentVolume", + "PersistentVolumeClaim", + "CustomResourceDefinition", + "ClusterRole", + "ClusterRoleList", + "ClusterRoleBinding", + "ClusterRoleBindingList", + "Role", + "RoleList", + "RoleBinding", + "RoleBindingList", + "Service", + "DaemonSet", + "Pod", + "ReplicationController", + "ReplicaSet", + "Deployment", + "HorizontalPodAutoscaler", + "StatefulSet", + "Job", + "CronJob", + "IngressClass", + "Ingress", + "APIService", +} + +// UninstallOrder is the order in which manifests should be uninstalled (by Kind). +// +// Those occurring earlier in the list get uninstalled before those occurring later in the list. +var UninstallOrder KindSortOrder = []string{ + "APIService", + "Ingress", + "IngressClass", + "Service", + "CronJob", + "Job", + "StatefulSet", + "HorizontalPodAutoscaler", + "Deployment", + "ReplicaSet", + "ReplicationController", + "Pod", + "DaemonSet", + "RoleBindingList", + "RoleBinding", + "RoleList", + "Role", + "ClusterRoleBindingList", + "ClusterRoleBinding", + "ClusterRoleList", + "ClusterRole", + "CustomResourceDefinition", + "PersistentVolumeClaim", + "PersistentVolume", + "StorageClass", + "ConfigMap", + "SecretList", + "Secret", + "ServiceAccount", + "PodDisruptionBudget", + "PodSecurityPolicy", + "LimitRange", + "ResourceQuota", + "NetworkPolicy", + "Namespace", + "PriorityClass", +} + +// sort manifests by kind. +// +// Results are sorted by 'ordering', keeping order of items with equal kind/priority +func sortManifestsByKind(manifests []Manifest, ordering KindSortOrder) []Manifest { + sort.SliceStable(manifests, func(i, j int) bool { + return lessByKind(manifests[i], manifests[j], manifests[i].Head.Kind, manifests[j].Head.Kind, ordering) + }) + + return manifests +} + +// sort hooks by kind, using an out-of-place sort to preserve the input parameters. 
+// +// Results are sorted by 'ordering', keeping order of items with equal kind/priority +func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.Hook { + h := hooks + sort.SliceStable(h, func(i, j int) bool { + return lessByKind(h[i], h[j], h[i].Kind, h[j].Kind, ordering) + }) + + return h +} + +func lessByKind(_ interface{}, _ interface{}, kindA string, kindB string, o KindSortOrder) bool { + ordering := make(map[string]int, len(o)) + for v, k := range o { + ordering[k] = v + } + + first, aok := ordering[kindA] + second, bok := ordering[kindB] + + if !aok && !bok { + // if both are unknown then sort alphabetically by kind, keep original order if same kind + if kindA != kindB { + return kindA < kindB + } + return first < second + } + // unknown kind is last + if !aok { + return false + } + if !bok { + return true + } + // sort different kinds, keep original order if same priority + return first < second +} diff --git a/vendor/k8s.io/helm/pkg/releaseutil/manifest.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go similarity index 70% rename from vendor/k8s.io/helm/pkg/releaseutil/manifest.go rename to vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go index 78c2979c4..0b04a4599 100644 --- a/vendor/k8s.io/helm/pkg/releaseutil/manifest.go +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go @@ -19,6 +19,7 @@ package releaseutil import ( "fmt" "regexp" + "strconv" "strings" ) @@ -37,8 +38,9 @@ var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*") // SplitManifests takes a string of manifest and returns a map contains individual manifests func SplitManifests(bigFile string) map[string]string { // Basically, we're quickly splitting a stream of YAML documents into an - // array of YAML docs. In the current implementation, the file name is just - // a place holder, and doesn't have any further meaning. + // array of YAML docs. The file name is just a place holder, but should be + // integer-sortable so that manifests get output in the same order as the + // input (see `BySplitManifestsOrder`). tpl := "manifest-%d" res := map[string]string{} // Making sure that any extra whitespace in YAML stream doesn't interfere in splitting documents correctly. @@ -46,7 +48,6 @@ func SplitManifests(bigFile string) map[string]string { docs := sep.Split(bigFileTmp, -1) var count int for _, d := range docs { - if d == "" { continue } @@ -57,3 +58,15 @@ func SplitManifests(bigFile string) map[string]string { } return res } + +// BySplitManifestsOrder sorts by in-file manifest order, as provided in function `SplitManifests` +type BySplitManifestsOrder []string + +func (a BySplitManifestsOrder) Len() int { return len(a) } +func (a BySplitManifestsOrder) Less(i, j int) bool { + // Split `manifest-%d` + anum, _ := strconv.ParseInt(a[i][len("manifest-"):], 10, 0) + bnum, _ := strconv.ParseInt(a[j][len("manifest-"):], 10, 0) + return anum < bnum +} +func (a BySplitManifestsOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go new file mode 100644 index 000000000..413de30e2 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go @@ -0,0 +1,233 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package releaseutil + +import ( + "log" + "path" + "sort" + "strconv" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" +) + +// Manifest represents a manifest file, which has a name and some content. +type Manifest struct { + Name string + Content string + Head *SimpleHead +} + +// manifestFile represents a file that contains a manifest. +type manifestFile struct { + entries map[string]string + path string + apis chartutil.VersionSet +} + +// result is an intermediate structure used during sorting. +type result struct { + hooks []*release.Hook + generic []Manifest +} + +// TODO: Refactor this out. It's here because naming conventions were not followed through. +// So fix the Test hook names and then remove this. +var events = map[string]release.HookEvent{ + release.HookPreInstall.String(): release.HookPreInstall, + release.HookPostInstall.String(): release.HookPostInstall, + release.HookPreDelete.String(): release.HookPreDelete, + release.HookPostDelete.String(): release.HookPostDelete, + release.HookPreUpgrade.String(): release.HookPreUpgrade, + release.HookPostUpgrade.String(): release.HookPostUpgrade, + release.HookPreRollback.String(): release.HookPreRollback, + release.HookPostRollback.String(): release.HookPostRollback, + release.HookTest.String(): release.HookTest, + // Support test-success for backward compatibility with Helm 2 tests + "test-success": release.HookTest, +} + +// SortManifests takes a map of filename/YAML contents, splits the file +// by manifest entries, and sorts the entries into hook types. +// +// The resulting hooks struct will be populated with all of the generated hooks. +// Any file that does not declare one of the hook types will be placed in the +// 'generic' bucket. +// +// Files that do not parse into the expected format are simply placed into a map and +// returned. +func SortManifests(files map[string]string, apis chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) { + result := &result{} + + var sortedFilePaths []string + for filePath := range files { + sortedFilePaths = append(sortedFilePaths, filePath) + } + sort.Strings(sortedFilePaths) + + for _, filePath := range sortedFilePaths { + content := files[filePath] + + // Skip partials. We could return these as a separate map, but there doesn't + // seem to be any need for that at this time. + if strings.HasPrefix(path.Base(filePath), "_") { + continue + } + // Skip empty files and log this. + if strings.TrimSpace(content) == "" { + continue + } + + manifestFile := &manifestFile{ + entries: SplitManifests(content), + path: filePath, + apis: apis, + } + + if err := manifestFile.sort(result); err != nil { + return result.hooks, result.generic, err + } + } + + return sortHooksByKind(result.hooks, ordering), sortManifestsByKind(result.generic, ordering), nil +} + +// sort takes a manifestFile object which may contain multiple resource definition +// entries and sorts each entry by hook types, and saves the resulting hooks and +// generic manifests (or non-hooks) to the result struct. 
+// +// To determine hook type, it looks for a YAML structure like this: +// +// kind: SomeKind +// apiVersion: v1 +// metadata: +// annotations: +// helm.sh/hook: pre-install +// +// To determine the policy to delete the hook, it looks for a YAML structure like this: +// +// kind: SomeKind +// apiVersion: v1 +// metadata: +// annotations: +// helm.sh/hook-delete-policy: hook-succeeded +func (file *manifestFile) sort(result *result) error { + // Go through manifests in order found in file (function `SplitManifests` creates integer-sortable keys) + var sortedEntryKeys []string + for entryKey := range file.entries { + sortedEntryKeys = append(sortedEntryKeys, entryKey) + } + sort.Sort(BySplitManifestsOrder(sortedEntryKeys)) + + for _, entryKey := range sortedEntryKeys { + m := file.entries[entryKey] + + var entry SimpleHead + if err := yaml.Unmarshal([]byte(m), &entry); err != nil { + return errors.Wrapf(err, "YAML parse error on %s", file.path) + } + + if !hasAnyAnnotation(entry) { + result.generic = append(result.generic, Manifest{ + Name: file.path, + Content: m, + Head: &entry, + }) + continue + } + + hookTypes, ok := entry.Metadata.Annotations[release.HookAnnotation] + if !ok { + result.generic = append(result.generic, Manifest{ + Name: file.path, + Content: m, + Head: &entry, + }) + continue + } + + hw := calculateHookWeight(entry) + + h := &release.Hook{ + Name: entry.Metadata.Name, + Kind: entry.Kind, + Path: file.path, + Manifest: m, + Events: []release.HookEvent{}, + Weight: hw, + DeletePolicies: []release.HookDeletePolicy{}, + } + + isUnknownHook := false + for _, hookType := range strings.Split(hookTypes, ",") { + hookType = strings.ToLower(strings.TrimSpace(hookType)) + e, ok := events[hookType] + if !ok { + isUnknownHook = true + break + } + h.Events = append(h.Events, e) + } + + if isUnknownHook { + log.Printf("info: skipping unknown hook: %q", hookTypes) + continue + } + + result.hooks = append(result.hooks, h) + + operateAnnotationValues(entry, release.HookDeleteAnnotation, func(value string) { + h.DeletePolicies = append(h.DeletePolicies, release.HookDeletePolicy(value)) + }) + } + + return nil +} + +// hasAnyAnnotation returns true if the given entry has any annotations at all. +func hasAnyAnnotation(entry SimpleHead) bool { + return entry.Metadata != nil && + entry.Metadata.Annotations != nil && + len(entry.Metadata.Annotations) != 0 +} + +// calculateHookWeight finds the weight in the hook weight annotation. +// +// If no weight is found, the assigned weight is 0 +func calculateHookWeight(entry SimpleHead) int { + hws := entry.Metadata.Annotations[release.HookWeightAnnotation] + hw, err := strconv.Atoi(hws) + if err != nil { + hw = 0 + } + return hw +} + +// operateAnnotationValues finds the given annotation and runs the operate function with the value of that annotation +func operateAnnotationValues(entry SimpleHead, annotation string, operate func(p string)) { + if dps, ok := entry.Metadata.Annotations[annotation]; ok { + for _, dp := range strings.Split(dps, ",") { + dp = strings.ToLower(strings.TrimSpace(dp)) + operate(dp) + } + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go new file mode 100644 index 000000000..1a8aa78a6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go @@ -0,0 +1,78 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil"
+
+import (
+	"sort"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+type list []*rspb.Release
+
+func (s list) Len() int      { return len(s) }
+func (s list) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// ByName sorts releases by name
+type ByName struct{ list }
+
+// Less compares two releases
+func (s ByName) Less(i, j int) bool { return s.list[i].Name < s.list[j].Name }
+
+// ByDate sorts releases by date
+type ByDate struct{ list }
+
+// Less compares two releases
+func (s ByDate) Less(i, j int) bool {
+	ti := s.list[i].Info.LastDeployed.Unix()
+	tj := s.list[j].Info.LastDeployed.Unix()
+	return ti < tj
+}
+
+// ByRevision sorts releases by revision number
+type ByRevision struct{ list }
+
+// Less compares two releases
+func (s ByRevision) Less(i, j int) bool {
+	return s.list[i].Version < s.list[j].Version
+}
+
+// Reverse reverses the list of releases sorted by the sort func.
+func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) {
+	sortFn(list)
+	for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
+		list[i], list[j] = list[j], list[i]
+	}
+}
+
+// SortByName sorts the list of releases
+// in lexicographical order.
+func SortByName(list []*rspb.Release) {
+	sort.Sort(ByName{list})
+}
+
+// SortByDate sorts the list of releases by a
+// release's last deployed time (in seconds).
+func SortByDate(list []*rspb.Release) {
+	sort.Sort(ByDate{list})
+}
+
+// SortByRevision sorts the list of releases by a
+// release's revision number (release.Version).
+func SortByRevision(list []*rspb.Release) {
+	sort.Sort(ByRevision{list})
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go
new file mode 100644
index 000000000..d9022ee6e
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go
@@ -0,0 +1,317 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package repo // import "helm.sh/helm/v3/pkg/repo" + +import ( + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/provenance" +) + +// Entry represents a collection of parameters for chart repository +type Entry struct { + Name string `json:"name"` + URL string `json:"url"` + Username string `json:"username"` + Password string `json:"password"` + CertFile string `json:"certFile"` + KeyFile string `json:"keyFile"` + CAFile string `json:"caFile"` + InsecureSkipTLSverify bool `json:"insecure_skip_tls_verify"` + PassCredentialsAll bool `json:"pass_credentials_all"` +} + +// ChartRepository represents a chart repository +type ChartRepository struct { + Config *Entry + ChartPaths []string + IndexFile *IndexFile + Client getter.Getter + CachePath string +} + +// NewChartRepository constructs ChartRepository +func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository, error) { + u, err := url.Parse(cfg.URL) + if err != nil { + return nil, errors.Errorf("invalid chart URL format: %s", cfg.URL) + } + + client, err := getters.ByScheme(u.Scheme) + if err != nil { + return nil, errors.Errorf("could not find protocol handler for: %s", u.Scheme) + } + + return &ChartRepository{ + Config: cfg, + IndexFile: NewIndexFile(), + Client: client, + CachePath: helmpath.CachePath("repository"), + }, nil +} + +// Load loads a directory of charts as if it were a repository. +// +// It requires the presence of an index.yaml file in the directory. +// +// Deprecated: remove in Helm 4. +func (r *ChartRepository) Load() error { + dirInfo, err := os.Stat(r.Config.Name) + if err != nil { + return err + } + if !dirInfo.IsDir() { + return errors.Errorf("%q is not a directory", r.Config.Name) + } + + // FIXME: Why are we recursively walking directories? + // FIXME: Why are we not reading the repositories.yaml to figure out + // what repos to use? + filepath.Walk(r.Config.Name, func(path string, f os.FileInfo, err error) error { + if !f.IsDir() { + if strings.Contains(f.Name(), "-index.yaml") { + i, err := LoadIndexFile(path) + if err != nil { + return err + } + r.IndexFile = i + } else if strings.HasSuffix(f.Name(), ".tgz") { + r.ChartPaths = append(r.ChartPaths, path) + } + } + return nil + }) + return nil +} + +// DownloadIndexFile fetches the index from a repository. 
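+// As a side effect, it caches two files under CachePath: a plain list of the
+// chart names found in the index and the downloaded index itself; the
+// returned string is the path of the cached index file.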
+func (r *ChartRepository) DownloadIndexFile() (string, error) { + indexURL, err := ResolveReferenceURL(r.Config.URL, "index.yaml") + if err != nil { + return "", err + } + + resp, err := r.Client.Get(indexURL, + getter.WithURL(r.Config.URL), + getter.WithInsecureSkipVerifyTLS(r.Config.InsecureSkipTLSverify), + getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile), + getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), + ) + if err != nil { + return "", err + } + + index, err := io.ReadAll(resp) + if err != nil { + return "", err + } + + indexFile, err := loadIndex(index, r.Config.URL) + if err != nil { + return "", err + } + + // Create the chart list file in the cache directory + var charts strings.Builder + for name := range indexFile.Entries { + fmt.Fprintln(&charts, name) + } + chartsFile := filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name)) + os.MkdirAll(filepath.Dir(chartsFile), 0755) + os.WriteFile(chartsFile, []byte(charts.String()), 0644) + + // Create the index file in the cache directory + fname := filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name)) + os.MkdirAll(filepath.Dir(fname), 0755) + return fname, os.WriteFile(fname, index, 0644) +} + +// Index generates an index for the chart repository and writes an index.yaml file. +func (r *ChartRepository) Index() error { + err := r.generateIndex() + if err != nil { + return err + } + return r.saveIndexFile() +} + +func (r *ChartRepository) saveIndexFile() error { + index, err := yaml.Marshal(r.IndexFile) + if err != nil { + return err + } + return os.WriteFile(filepath.Join(r.Config.Name, indexPath), index, 0644) +} + +func (r *ChartRepository) generateIndex() error { + for _, path := range r.ChartPaths { + ch, err := loader.Load(path) + if err != nil { + return err + } + + digest, err := provenance.DigestFile(path) + if err != nil { + return err + } + + if !r.IndexFile.Has(ch.Name(), ch.Metadata.Version) { + if err := r.IndexFile.MustAdd(ch.Metadata, path, r.Config.URL, digest); err != nil { + return errors.Wrapf(err, "failed adding to %s to index", path) + } + } + // TODO: If a chart exists, but has a different Digest, should we error? + } + r.IndexFile.SortEntries() + return nil +} + +// FindChartInRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories +func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { + return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters) +} + +// FindChartInAuthRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials for the chart repository. +func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { + return FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, false, getters) +} + +// FindChartInAuthAndTLSRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials and TLS verify flag for the chart repository. +// TODO Helm 4, FindChartInAuthAndTLSRepoURL should be integrated into FindChartInAuthRepoURL. 
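+//
+// An illustrative call (the repository URL, credentials, and "settings" are
+// assumptions for this example, not values from the Helm codebase):
+//
+//	chartURL, err := FindChartInAuthAndTLSRepoURL(
+//		"https://charts.example.com", "user", "secret",
+//		"nginx", "1.2.3", "", "", "", true, getter.All(settings))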
+func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify bool, getters getter.Providers) (string, error) { + return FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, insecureSkipTLSverify, false, getters) +} + +// FindChartInAuthAndTLSAndPassRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials, TLS verify flag, and if credentials should +// be passed on to other domains. +// TODO Helm 4, FindChartInAuthAndTLSAndPassRepoURL should be integrated into FindChartInAuthRepoURL. +func FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify, passCredentialsAll bool, getters getter.Providers) (string, error) { + + // Download and write the index file to a temporary location + buf := make([]byte, 20) + rand.Read(buf) + name := strings.ReplaceAll(base64.StdEncoding.EncodeToString(buf), "/", "-") + + c := Entry{ + URL: repoURL, + Username: username, + Password: password, + PassCredentialsAll: passCredentialsAll, + CertFile: certFile, + KeyFile: keyFile, + CAFile: caFile, + Name: name, + InsecureSkipTLSverify: insecureSkipTLSverify, + } + r, err := NewChartRepository(&c, getters) + if err != nil { + return "", err + } + idx, err := r.DownloadIndexFile() + if err != nil { + return "", errors.Wrapf(err, "looks like %q is not a valid chart repository or cannot be reached", repoURL) + } + defer func() { + os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name))) + os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name))) + }() + + // Read the index file for the repository to get chart information and return chart URL + repoIndex, err := LoadIndexFile(idx) + if err != nil { + return "", err + } + + errMsg := fmt.Sprintf("chart %q", chartName) + if chartVersion != "" { + errMsg = fmt.Sprintf("%s version %q", errMsg, chartVersion) + } + cv, err := repoIndex.Get(chartName, chartVersion) + if err != nil { + return "", errors.Errorf("%s not found in %s repository", errMsg, repoURL) + } + + if len(cv.URLs) == 0 { + return "", errors.Errorf("%s has no downloadable URLs", errMsg) + } + + chartURL := cv.URLs[0] + + absoluteChartURL, err := ResolveReferenceURL(repoURL, chartURL) + if err != nil { + return "", errors.Wrap(err, "failed to make chart URL absolute") + } + + return absoluteChartURL, nil +} + +// ResolveReferenceURL resolves refURL relative to baseURL. +// If refURL is absolute, it simply returns refURL. 
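+//
+// Illustrative behaviour (example values only):
+//
+//	ResolveReferenceURL("https://charts.example.com/stable", "nginx-1.2.3.tgz")
+//	// -> "https://charts.example.com/stable/nginx-1.2.3.tgz", nil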
+func ResolveReferenceURL(baseURL, refURL string) (string, error) { + parsedRefURL, err := url.Parse(refURL) + if err != nil { + return "", errors.Wrapf(err, "failed to parse %s as URL", refURL) + } + + if parsedRefURL.IsAbs() { + return refURL, nil + } + + parsedBaseURL, err := url.Parse(baseURL) + if err != nil { + return "", errors.Wrapf(err, "failed to parse %s as URL", baseURL) + } + + // We need a trailing slash for ResolveReference to work, but make sure there isn't already one + parsedBaseURL.RawPath = strings.TrimSuffix(parsedBaseURL.RawPath, "/") + "/" + parsedBaseURL.Path = strings.TrimSuffix(parsedBaseURL.Path, "/") + "/" + + resolvedURL := parsedBaseURL.ResolveReference(parsedRefURL) + resolvedURL.RawQuery = parsedBaseURL.RawQuery + return resolvedURL.String(), nil +} + +func (e *Entry) String() string { + buf, err := json.Marshal(e) + if err != nil { + log.Panic(err) + } + return string(buf) +} diff --git a/vendor/helm.sh/helm/v3/pkg/repo/doc.go b/vendor/helm.sh/helm/v3/pkg/repo/doc.go new file mode 100644 index 000000000..fc54bbf7a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/repo/doc.go @@ -0,0 +1,94 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package repo implements the Helm Chart Repository. + +A chart repository is an HTTP server that provides information on charts. A local +repository cache is an on-disk representation of a chart repository. + +There are two important file formats for chart repositories. + +The first is the 'index.yaml' format, which is expressed like this: + + apiVersion: v1 + entries: + frobnitz: + - created: 2016-09-29T12:14:34.830161306-06:00 + description: This is a frobnitz. + digest: 587bd19a9bd9d2bc4a6d25ab91c8c8e7042c47b4ac246e37bf8e1e74386190f4 + home: http://example.com + keywords: + - frobnitz + - sprocket + - dodad + maintainers: + - email: helm@example.com + name: The Helm Team + - email: nobody@example.com + name: Someone Else + name: frobnitz + urls: + - http://example-charts.com/testdata/repository/frobnitz-1.2.3.tgz + version: 1.2.3 + sprocket: + - created: 2016-09-29T12:14:34.830507606-06:00 + description: This is a sprocket" + digest: 8505ff813c39502cc849a38e1e4a8ac24b8e6e1dcea88f4c34ad9b7439685ae6 + home: http://example.com + keywords: + - frobnitz + - sprocket + - dodad + maintainers: + - email: helm@example.com + name: The Helm Team + - email: nobody@example.com + name: Someone Else + name: sprocket + urls: + - http://example-charts.com/testdata/repository/sprocket-1.2.0.tgz + version: 1.2.0 + generated: 2016-09-29T12:14:34.829721375-06:00 + +An index.yaml file contains the necessary descriptive information about what +charts are available in a repository, and how to get them. + +The second file format is the repositories.yaml file format. This file is for +facilitating local cached copies of one or more chart repositories. 
+ +The format of a repository.yaml file is: + + apiVersion: v1 + generated: TIMESTAMP + repositories: + - name: stable + url: http://example.com/charts + cache: stable-index.yaml + - name: incubator + url: http://example.com/incubator + cache: incubator-index.yaml + +This file maps three bits of information about a repository: + + - The name the user uses to refer to it + - The fully qualified URL to the repository (index.yaml will be appended) + - The name of the local cachefile + +The format for both files was changed after Helm v2.0.0-Alpha.4. Helm is not +backwards compatible with those earlier versions. +*/ +package repo diff --git a/vendor/helm.sh/helm/v3/pkg/repo/index.go b/vendor/helm.sh/helm/v3/pkg/repo/index.go new file mode 100644 index 000000000..8a23ba060 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/repo/index.go @@ -0,0 +1,390 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo + +import ( + "bytes" + "encoding/json" + "log" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/internal/fileutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/provenance" +) + +var indexPath = "index.yaml" + +// APIVersionV1 is the v1 API version for index and repository files. +const APIVersionV1 = "v1" + +var ( + // ErrNoAPIVersion indicates that an API version was not specified. + ErrNoAPIVersion = errors.New("no API version specified") + // ErrNoChartVersion indicates that a chart with the given version is not found. + ErrNoChartVersion = errors.New("no chart version found") + // ErrNoChartName indicates that a chart with the given name is not found. + ErrNoChartName = errors.New("no chart name found") + // ErrEmptyIndexYaml indicates that the content of index.yaml is empty. + ErrEmptyIndexYaml = errors.New("empty index.yaml file") +) + +// ChartVersions is a list of versioned chart references. +// Implements a sorter on Version. +type ChartVersions []*ChartVersion + +// Len returns the length. +func (c ChartVersions) Len() int { return len(c) } + +// Swap swaps the position of two items in the versions slice. +func (c ChartVersions) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// Less returns true if the version of entry a is less than the version of entry b. +func (c ChartVersions) Less(a, b int) bool { + // Failed parse pushes to the back. + i, err := semver.NewVersion(c[a].Version) + if err != nil { + return true + } + j, err := semver.NewVersion(c[b].Version) + if err != nil { + return false + } + return i.LessThan(j) +} + +// IndexFile represents the index file in a chart repository +type IndexFile struct { + // This is used ONLY for validation against chartmuseum's index files and is discarded after validation. 
+ ServerInfo map[string]interface{} `json:"serverInfo,omitempty"` + APIVersion string `json:"apiVersion"` + Generated time.Time `json:"generated"` + Entries map[string]ChartVersions `json:"entries"` + PublicKeys []string `json:"publicKeys,omitempty"` + + // Annotations are additional mappings uninterpreted by Helm. They are made available for + // other applications to add information to the index file. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// NewIndexFile initializes an index. +func NewIndexFile() *IndexFile { + return &IndexFile{ + APIVersion: APIVersionV1, + Generated: time.Now(), + Entries: map[string]ChartVersions{}, + PublicKeys: []string{}, + } +} + +// LoadIndexFile takes a file at the given path and returns an IndexFile object +func LoadIndexFile(path string) (*IndexFile, error) { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + i, err := loadIndex(b, path) + if err != nil { + return nil, errors.Wrapf(err, "error loading %s", path) + } + return i, nil +} + +// MustAdd adds a file to the index +// This can leave the index in an unsorted state +func (i IndexFile) MustAdd(md *chart.Metadata, filename, baseURL, digest string) error { + if i.Entries == nil { + return errors.New("entries not initialized") + } + + if md.APIVersion == "" { + md.APIVersion = chart.APIVersionV1 + } + if err := md.Validate(); err != nil { + return errors.Wrapf(err, "validate failed for %s", filename) + } + + u := filename + if baseURL != "" { + _, file := filepath.Split(filename) + var err error + u, err = urlutil.URLJoin(baseURL, file) + if err != nil { + u = path.Join(baseURL, file) + } + } + cr := &ChartVersion{ + URLs: []string{u}, + Metadata: md, + Digest: digest, + Created: time.Now(), + } + ee := i.Entries[md.Name] + i.Entries[md.Name] = append(ee, cr) + return nil +} + +// Add adds a file to the index and logs an error. +// +// Deprecated: Use index.MustAdd instead. +func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) { + if err := i.MustAdd(md, filename, baseURL, digest); err != nil { + log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", md.Name, md.Version, filename, err) + } +} + +// Has returns true if the index has an entry for a chart with the given name and exact version. +func (i IndexFile) Has(name, version string) bool { + _, err := i.Get(name, version) + return err == nil +} + +// SortEntries sorts the entries by version in descending order. +// +// In canonical form, the individual version records should be sorted so that +// the most recent release for every version is in the 0th slot in the +// Entries.ChartVersions array. That way, tooling can predict the newest +// version without needing to parse SemVers. +func (i IndexFile) SortEntries() { + for _, versions := range i.Entries { + sort.Sort(sort.Reverse(versions)) + } +} + +// Get returns the ChartVersion for the given name. +// +// If version is empty, this will return the chart with the latest stable version, +// prerelease versions will be skipped. 
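+//
+// Illustrative usage (the receiver "idx" and the arguments are assumptions
+// for the example):
+//
+//	cv, err := idx.Get("nginx", ">=1.2.0 <2.0.0")
+//	// With entries sorted (see SortEntries), cv is the newest version
+//	// satisfying the constraint.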
+func (i IndexFile) Get(name, version string) (*ChartVersion, error) {
+	vs, ok := i.Entries[name]
+	if !ok {
+		return nil, ErrNoChartName
+	}
+	if len(vs) == 0 {
+		return nil, ErrNoChartVersion
+	}
+
+	var constraint *semver.Constraints
+	if version == "" {
+		constraint, _ = semver.NewConstraint("*")
+	} else {
+		var err error
+		constraint, err = semver.NewConstraint(version)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// If the user supplied an exact version, check for an exact match first.
+	if len(version) != 0 {
+		for _, ver := range vs {
+			if version == ver.Version {
+				return ver, nil
+			}
+		}
+	}
+
+	for _, ver := range vs {
+		test, err := semver.NewVersion(ver.Version)
+		if err != nil {
+			continue
+		}
+
+		if constraint.Check(test) {
+			return ver, nil
+		}
+	}
+	return nil, errors.Errorf("no chart version found for %s-%s", name, version)
+}
+
+// WriteFile writes an index file to the given destination path.
+//
+// The mode on the file is set to 'mode'.
+func (i IndexFile) WriteFile(dest string, mode os.FileMode) error {
+	b, err := yaml.Marshal(i)
+	if err != nil {
+		return err
+	}
+	return fileutil.AtomicWriteFile(dest, bytes.NewReader(b), mode)
+}
+
+// WriteJSONFile writes an index file in JSON format to the given destination
+// path.
+//
+// The mode on the file is set to 'mode'.
+func (i IndexFile) WriteJSONFile(dest string, mode os.FileMode) error {
+	b, err := json.MarshalIndent(i, "", "  ")
+	if err != nil {
+		return err
+	}
+	return fileutil.AtomicWriteFile(dest, bytes.NewReader(b), mode)
+}
+
+// Merge merges the given index file into this index.
+//
+// This merges by name and version.
+//
+// If one of the entries in the given index does _not_ already exist, it is added.
+// In all other cases, the existing record is preserved.
+//
+// This can leave the index in an unsorted state
+func (i *IndexFile) Merge(f *IndexFile) {
+	for _, cvs := range f.Entries {
+		for _, cv := range cvs {
+			if !i.Has(cv.Name, cv.Version) {
+				e := i.Entries[cv.Name]
+				i.Entries[cv.Name] = append(e, cv)
+			}
+		}
+	}
+}
+
+// ChartVersion represents a chart entry in the IndexFile
+type ChartVersion struct {
+	*chart.Metadata
+	URLs    []string  `json:"urls"`
+	Created time.Time `json:"created,omitempty"`
+	Removed bool      `json:"removed,omitempty"`
+	Digest  string    `json:"digest,omitempty"`
+
+	// ChecksumDeprecated is deprecated in Helm 3, and therefore ignored. Helm 3 replaced
+	// this with Digest. However, with a strict YAML parser enabled, a field must be
+	// present on the struct for backwards compatibility.
+	ChecksumDeprecated string `json:"checksum,omitempty"`
+
+	// EngineDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict
+	// YAML parser enabled, this field must be present.
+	EngineDeprecated string `json:"engine,omitempty"`
+
+	// TillerVersionDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict
+	// YAML parser enabled, this field must be present.
+	TillerVersionDeprecated string `json:"tillerVersion,omitempty"`
+
+	// URLDeprecated is deprecated in Helm 3, superseded by URLs. It is ignored. However,
+	// with a strict YAML parser enabled, this must be present on the struct.
+	URLDeprecated string `json:"url,omitempty"`
+}
+
+// IndexDirectory reads a (flat) directory and generates an index.
+//
+// It indexes only charts that have been packaged (*.tgz).
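+//
+// A usage sketch (editor's note; the directory and base URL are illustrative).
+// SortEntries is called explicitly because, as noted below, the returned index
+// is unsorted:
+//
+//	idx, err := repo.IndexDirectory("./charts", "https://charts.example.com")
+//	if err == nil {
+//		idx.SortEntries()
+//		err = idx.WriteFile("./charts/index.yaml", 0644)
+//	}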
+//
+// The index returned will be in an unsorted state
+func IndexDirectory(dir, baseURL string) (*IndexFile, error) {
+	archives, err := filepath.Glob(filepath.Join(dir, "*.tgz"))
+	if err != nil {
+		return nil, err
+	}
+	moreArchives, err := filepath.Glob(filepath.Join(dir, "**/*.tgz"))
+	if err != nil {
+		return nil, err
+	}
+	archives = append(archives, moreArchives...)
+
+	index := NewIndexFile()
+	for _, arch := range archives {
+		fname, err := filepath.Rel(dir, arch)
+		if err != nil {
+			return index, err
+		}
+
+		var parentDir string
+		parentDir, fname = filepath.Split(fname)
+		// filepath.Split appends an extra slash to the end of parentDir. We want to strip that out.
+		parentDir = strings.TrimSuffix(parentDir, string(os.PathSeparator))
+		parentURL, err := urlutil.URLJoin(baseURL, parentDir)
+		if err != nil {
+			parentURL = path.Join(baseURL, parentDir)
+		}
+
+		c, err := loader.Load(arch)
+		if err != nil {
+			// Assume this is not a chart.
+			continue
+		}
+		hash, err := provenance.DigestFile(arch)
+		if err != nil {
+			return index, err
+		}
+		if err := index.MustAdd(c.Metadata, fname, parentURL, hash); err != nil {
+			return index, errors.Wrapf(err, "failed adding %s to index", fname)
+		}
+	}
+	return index, nil
+}
+
+// loadIndex loads an index file and does minimal validity checking.
+//
+// The source parameter is only used for logging.
+// This will fail if API Version is not set (ErrNoAPIVersion) or if the unmarshal fails.
+func loadIndex(data []byte, source string) (*IndexFile, error) {
+	i := &IndexFile{}
+
+	if len(data) == 0 {
+		return i, ErrEmptyIndexYaml
+	}
+
+	if err := jsonOrYamlUnmarshal(data, i); err != nil {
+		return i, err
+	}
+
+	for name, cvs := range i.Entries {
+		for idx := len(cvs) - 1; idx >= 0; idx-- {
+			if cvs[idx] == nil {
+				log.Printf("skipping loading invalid entry for chart %q from %s: empty entry", name, source)
+				continue
+			}
+			if cvs[idx].APIVersion == "" {
+				cvs[idx].APIVersion = chart.APIVersionV1
+			}
+			if err := cvs[idx].Validate(); err != nil {
+				log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err)
+				cvs = append(cvs[:idx], cvs[idx+1:]...)
+			}
+		}
+	}
+	i.SortEntries()
+	if i.APIVersion == "" {
+		return i, ErrNoAPIVersion
+	}
+	return i, nil
+}
+
+// jsonOrYamlUnmarshal unmarshals the given byte slice containing JSON or YAML
+// into the provided interface.
+//
+// It automatically detects whether the data is in JSON or YAML format by
+// checking its validity as JSON. If the data is valid JSON, it will use the
+// `encoding/json` package to unmarshal it. Otherwise, it will use the
+// `sigs.k8s.io/yaml` package to unmarshal the YAML data.
+func jsonOrYamlUnmarshal(b []byte, i interface{}) error {
+	if json.Valid(b) {
+		return json.Unmarshal(b, i)
+	}
+	return yaml.UnmarshalStrict(b, i)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/repo/repo.go b/vendor/helm.sh/helm/v3/pkg/repo/repo.go
new file mode 100644
index 000000000..834d554bd
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/repo/repo.go
@@ -0,0 +1,125 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package repo // import "helm.sh/helm/v3/pkg/repo" + +import ( + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +// File represents the repositories.yaml file +type File struct { + APIVersion string `json:"apiVersion"` + Generated time.Time `json:"generated"` + Repositories []*Entry `json:"repositories"` +} + +// NewFile generates an empty repositories file. +// +// Generated and APIVersion are automatically set. +func NewFile() *File { + return &File{ + APIVersion: APIVersionV1, + Generated: time.Now(), + Repositories: []*Entry{}, + } +} + +// LoadFile takes a file at the given path and returns a File object +func LoadFile(path string) (*File, error) { + r := new(File) + b, err := os.ReadFile(path) + if err != nil { + return r, errors.Wrapf(err, "couldn't load repositories file (%s)", path) + } + + err = yaml.Unmarshal(b, r) + return r, err +} + +// Add adds one or more repo entries to a repo file. +func (r *File) Add(re ...*Entry) { + r.Repositories = append(r.Repositories, re...) +} + +// Update attempts to replace one or more repo entries in a repo file. If an +// entry with the same name doesn't exist in the repo file it will add it. +func (r *File) Update(re ...*Entry) { + for _, target := range re { + r.update(target) + } +} + +func (r *File) update(e *Entry) { + for j, repo := range r.Repositories { + if repo.Name == e.Name { + r.Repositories[j] = e + return + } + } + r.Add(e) +} + +// Has returns true if the given name is already a repository name. +func (r *File) Has(name string) bool { + entry := r.Get(name) + return entry != nil +} + +// Get returns an entry with the given name if it exists, otherwise returns nil +func (r *File) Get(name string) *Entry { + for _, entry := range r.Repositories { + if entry.Name == name { + return entry + } + } + return nil +} + +// Remove removes the entry from the list of repositories. +func (r *File) Remove(name string) bool { + cp := []*Entry{} + found := false + for _, rf := range r.Repositories { + if rf == nil { + continue + } + if rf.Name == name { + found = true + continue + } + cp = append(cp, rf) + } + r.Repositories = cp + return found +} + +// WriteFile writes a repositories file to the given path. +func (r *File) WriteFile(path string, perm os.FileMode) error { + data, err := yaml.Marshal(r) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return os.WriteFile(path, data, perm) +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go new file mode 100644 index 000000000..5fd64ea59 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go @@ -0,0 +1,263 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kblabels "k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/validation"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*ConfigMaps)(nil)
+
+// ConfigMapsDriverName is the string name of the driver.
+const ConfigMapsDriverName = "ConfigMap"
+
+// ConfigMaps is a wrapper around an implementation of a kubernetes
+// ConfigMapsInterface.
+type ConfigMaps struct {
+	impl corev1.ConfigMapInterface
+	Log  func(string, ...interface{})
+}
+
+// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
+// the kubernetes ConfigMapsInterface.
+func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps {
+	return &ConfigMaps{
+		impl: impl,
+		Log:  func(_ string, _ ...interface{}) {},
+	}
+}
+
+// Name returns the name of the driver.
+func (cfgmaps *ConfigMaps) Name() string {
+	return ConfigMapsDriverName
+}
+
+// Get fetches the release named by key. The corresponding release is returned
+// or error if not found.
+func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) {
+	// fetch the configmap holding the release named by key
+	obj, err := cfgmaps.impl.Get(context.Background(), key, metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, ErrReleaseNotFound
+		}
+
+		cfgmaps.Log("get: failed to get %q: %s", key, err)
+		return nil, err
+	}
+	// found the configmap, decode the base64 data string
+	r, err := decodeRelease(obj.Data["release"])
+	if err != nil {
+		cfgmaps.Log("get: failed to decode data %q: %s", key, err)
+		return nil, err
+	}
+	r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
+	// return the release object
+	return r, nil
+}
+
+// List fetches all releases and returns the list of releases such
+// that filter(release) == true. An error is returned if the
+// configmap fails to retrieve the releases.
+func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
+	lsel := kblabels.Set{"owner": "helm"}.AsSelector()
+	opts := metav1.ListOptions{LabelSelector: lsel.String()}
+
+	list, err := cfgmaps.impl.List(context.Background(), opts)
+	if err != nil {
+		cfgmaps.Log("list: failed to list: %s", err)
+		return nil, err
+	}
+
+	var results []*rspb.Release
+
+	// iterate over the configmaps object list
+	// and decode each release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(item.Data["release"])
+		if err != nil {
+			cfgmaps.Log("list: failed to decode release: %v: %s", item, err)
+			continue
+		}
+
+		rls.Labels = item.ObjectMeta.Labels
+
+		if filter(rls) {
+			results = append(results, rls)
+		}
+	}
+	return results, nil
+}
+
+// Query fetches all releases that match the provided map of labels.
+// An error is returned if the configmap fails to retrieve the releases.
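+//
+// For example (editor's sketch; the label values are illustrative):
+//
+//	rels, err := cfgmaps.Query(map[string]string{"name": "my-release", "owner": "helm"})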
+func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, error) { + ls := kblabels.Set{} + for k, v := range labels { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + } + ls[k] = v + } + + opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()} + + list, err := cfgmaps.impl.List(context.Background(), opts) + if err != nil { + cfgmaps.Log("query: failed to query with labels: %s", err) + return nil, err + } + + if len(list.Items) == 0 { + return nil, ErrReleaseNotFound + } + + var results []*rspb.Release + for _, item := range list.Items { + rls, err := decodeRelease(item.Data["release"]) + if err != nil { + cfgmaps.Log("query: failed to decode release: %s", err) + continue + } + rls.Labels = item.ObjectMeta.Labels + results = append(results, rls) + } + return results, nil +} + +// Create creates a new ConfigMap holding the release. If the +// ConfigMap already exists, ErrReleaseExists is returned. +func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error { + // set labels for configmaps object meta data + var lbs labels + + lbs.init() + lbs.fromMap(rls.Labels) + lbs.set("createdAt", strconv.Itoa(int(time.Now().Unix()))) + + // create a new configmap to hold the release + obj, err := newConfigMapsObject(key, rls, lbs) + if err != nil { + cfgmaps.Log("create: failed to encode release %q: %s", rls.Name, err) + return err + } + // push the configmap object out into the kubiverse + if _, err := cfgmaps.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil { + if apierrors.IsAlreadyExists(err) { + return ErrReleaseExists + } + + cfgmaps.Log("create: failed to create: %s", err) + return err + } + return nil +} + +// Update updates the ConfigMap holding the release. If not found +// the ConfigMap is created to hold the release. +func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error { + // set labels for configmaps object meta data + var lbs labels + + lbs.init() + lbs.fromMap(rls.Labels) + lbs.set("modifiedAt", strconv.Itoa(int(time.Now().Unix()))) + + // create a new configmap object to hold the release + obj, err := newConfigMapsObject(key, rls, lbs) + if err != nil { + cfgmaps.Log("update: failed to encode release %q: %s", rls.Name, err) + return err + } + // push the configmap object out into the kubiverse + _, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{}) + if err != nil { + cfgmaps.Log("update: failed to update: %s", err) + return err + } + return nil +} + +// Delete deletes the ConfigMap holding the release named by key. +func (cfgmaps *ConfigMaps) Delete(key string) (rls *rspb.Release, err error) { + // fetch the release to check existence + if rls, err = cfgmaps.Get(key); err != nil { + return nil, err + } + // delete the release + if err = cfgmaps.impl.Delete(context.Background(), key, metav1.DeleteOptions{}); err != nil { + return rls, err + } + return rls, nil +} + +// newConfigMapsObject constructs a kubernetes ConfigMap object +// to store a release. Each configmap data entry is the base64 +// encoded gzipped string of a release. +// +// The following labels are used within each configmap: +// +// "modifiedAt" - timestamp indicating when this configmap was last modified. (set in Update) +// "createdAt" - timestamp indicating when this configmap was created. (set in Create) +// "version" - version of the release. 
+// "status" - status of the release (see pkg/release/status.go for variants) +// "owner" - owner of the configmap, currently "helm". +// "name" - name of the release. +func newConfigMapsObject(key string, rls *rspb.Release, lbs labels) (*v1.ConfigMap, error) { + const owner = "helm" + + // encode the release + s, err := encodeRelease(rls) + if err != nil { + return nil, err + } + + if lbs == nil { + lbs.init() + } + + // apply custom labels + lbs.fromMap(rls.Labels) + + // apply labels + lbs.set("name", rls.Name) + lbs.set("owner", owner) + lbs.set("status", rls.Info.Status.String()) + lbs.set("version", strconv.Itoa(rls.Version)) + + // create and return configmap object + return &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: key, + Labels: lbs.toMap(), + }, + Data: map[string]string{"release": s}, + }, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go new file mode 100644 index 000000000..9c01f3766 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go @@ -0,0 +1,105 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver // import "helm.sh/helm/v3/pkg/storage/driver" + +import ( + "fmt" + + "github.com/pkg/errors" + + rspb "helm.sh/helm/v3/pkg/release" +) + +var ( + // ErrReleaseNotFound indicates that a release is not found. + ErrReleaseNotFound = errors.New("release: not found") + // ErrReleaseExists indicates that a release already exists. + ErrReleaseExists = errors.New("release: already exists") + // ErrInvalidKey indicates that a release key could not be parsed. + ErrInvalidKey = errors.New("release: invalid key") + // ErrNoDeployedReleases indicates that there are no releases with the given key in the deployed state + ErrNoDeployedReleases = errors.New("has no deployed releases") +) + +// StorageDriverError records an error and the release name that caused it +type StorageDriverError struct { + ReleaseName string + Err error +} + +func (e *StorageDriverError) Error() string { + return fmt.Sprintf("%q %s", e.ReleaseName, e.Err.Error()) +} + +func (e *StorageDriverError) Unwrap() error { return e.Err } + +func NewErrNoDeployedReleases(releaseName string) error { + return &StorageDriverError{ + ReleaseName: releaseName, + Err: ErrNoDeployedReleases, + } +} + +// Creator is the interface that wraps the Create method. +// +// Create stores the release or returns ErrReleaseExists +// if an identical release already exists. +type Creator interface { + Create(key string, rls *rspb.Release) error +} + +// Updator is the interface that wraps the Update method. +// +// Update updates an existing release or returns +// ErrReleaseNotFound if the release does not exist. +type Updator interface { + Update(key string, rls *rspb.Release) error +} + +// Deletor is the interface that wraps the Delete method. +// +// Delete deletes the release named by key or returns +// ErrReleaseNotFound if the release does not exist. 
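+//
+// For example (editor's sketch; the key follows the
+// "sh.helm.release.v1.<name>.v<version>" convention used elsewhere in this package):
+//
+//	rls, err := d.Delete("sh.helm.release.v1.my-release.v1")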
+type Deletor interface {
+	Delete(key string) (*rspb.Release, error)
+}
+
+// Queryor is the interface that wraps the Get and List methods.
+//
+// Get returns the release named by key or returns ErrReleaseNotFound
+// if the release does not exist.
+//
+// List returns the set of all releases that satisfy the filter predicate.
+//
+// Query returns the set of all releases that match the provided label set.
+type Queryor interface {
+	Get(key string) (*rspb.Release, error)
+	List(filter func(*rspb.Release) bool) ([]*rspb.Release, error)
+	Query(labels map[string]string) ([]*rspb.Release, error)
+}
+
+// Driver is the interface composed of Creator, Updator, Deletor, and Queryor
+// interfaces. It defines the behavior for storing, updating, deleting,
+// and retrieving Helm releases from some underlying storage mechanism,
+// e.g. memory, configmaps.
+type Driver interface {
+	Creator
+	Updator
+	Deletor
+	Queryor
+	Name() string
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go
new file mode 100644
index 000000000..eb7118fe5
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+// labels is a map of key value pairs to be included as metadata in a configmap object.
+type labels map[string]string
+
+func (lbs *labels) init()                { *lbs = labels(make(map[string]string)) }
+func (lbs labels) get(key string) string { return lbs[key] }
+func (lbs labels) set(key, val string)   { lbs[key] = val }
+
+func (lbs labels) keys() (ls []string) {
+	for key := range lbs {
+		ls = append(ls, key)
+	}
+	return
+}
+
+func (lbs labels) match(set labels) bool {
+	for _, key := range set.keys() {
+		if lbs.get(key) != set.get(key) {
+			return false
+		}
+	}
+	return true
+}
+
+func (lbs labels) toMap() map[string]string { return lbs }
+
+func (lbs *labels) fromMap(kvs map[string]string) {
+	for k, v := range kvs {
+		lbs.set(k, v)
+	}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go
new file mode 100644
index 000000000..91378f588
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go
@@ -0,0 +1,240 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package driver + +import ( + "strconv" + "strings" + "sync" + + rspb "helm.sh/helm/v3/pkg/release" +) + +var _ Driver = (*Memory)(nil) + +const ( + // MemoryDriverName is the string name of this driver. + MemoryDriverName = "Memory" + + defaultNamespace = "default" +) + +// A map of release names to list of release records +type memReleases map[string]records + +// Memory is the in-memory storage driver implementation. +type Memory struct { + sync.RWMutex + namespace string + // A map of namespaces to releases + cache map[string]memReleases +} + +// NewMemory initializes a new memory driver. +func NewMemory() *Memory { + return &Memory{cache: map[string]memReleases{}, namespace: "default"} +} + +// SetNamespace sets a specific namespace in which releases will be accessed. +// An empty string indicates all namespaces (for the list operation) +func (mem *Memory) SetNamespace(ns string) { + mem.namespace = ns +} + +// Name returns the name of the driver. +func (mem *Memory) Name() string { + return MemoryDriverName +} + +// Get returns the release named by key or returns ErrReleaseNotFound. +func (mem *Memory) Get(key string) (*rspb.Release, error) { + defer unlock(mem.rlock()) + + keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.") + switch elems := strings.Split(keyWithoutPrefix, ".v"); len(elems) { + case 2: + name, ver := elems[0], elems[1] + if _, err := strconv.Atoi(ver); err != nil { + return nil, ErrInvalidKey + } + if recs, ok := mem.cache[mem.namespace][name]; ok { + if r := recs.Get(key); r != nil { + return r.rls, nil + } + } + return nil, ErrReleaseNotFound + default: + return nil, ErrInvalidKey + } +} + +// List returns the list of all releases such that filter(release) == true +func (mem *Memory) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { + defer unlock(mem.rlock()) + + var ls []*rspb.Release + for namespace := range mem.cache { + if mem.namespace != "" { + // Should only list releases of this namespace + namespace = mem.namespace + } + for _, recs := range mem.cache[namespace] { + recs.Iter(func(_ int, rec *record) bool { + if filter(rec.rls) { + ls = append(ls, rec.rls) + } + return true + }) + } + if mem.namespace != "" { + // Should only list releases of this namespace + break + } + } + return ls, nil +} + +// Query returns the set of releases that match the provided set of labels +func (mem *Memory) Query(keyvals map[string]string) ([]*rspb.Release, error) { + defer unlock(mem.rlock()) + + var lbs labels + + lbs.init() + lbs.fromMap(keyvals) + + var ls []*rspb.Release + for namespace := range mem.cache { + if mem.namespace != "" { + // Should only query releases of this namespace + namespace = mem.namespace + } + for _, recs := range mem.cache[namespace] { + recs.Iter(func(_ int, rec *record) bool { + // A query for a release name that doesn't exist (has been deleted) + // can cause rec to be nil. + if rec == nil { + return false + } + if rec.lbs.match(lbs) { + ls = append(ls, rec.rls) + } + return true + }) + } + if mem.namespace != "" { + // Should only query releases of this namespace + break + } + } + + if len(ls) == 0 { + return nil, ErrReleaseNotFound + } + + return ls, nil +} + +// Create creates a new release or returns ErrReleaseExists. 
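+//
+// A usage sketch (editor's note; the key and release fields are illustrative,
+// and rspb.Info / rspb.StatusDeployed are assumed from the release package):
+//
+//	mem := NewMemory()
+//	err := mem.Create("sh.helm.release.v1.demo.v1", &rspb.Release{
+//		Name:      "demo",
+//		Namespace: "default",
+//		Version:   1,
+//		Info:      &rspb.Info{Status: rspb.StatusDeployed},
+//	})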
+func (mem *Memory) Create(key string, rls *rspb.Release) error { + defer unlock(mem.wlock()) + + // For backwards compatibility, we protect against an unset namespace + namespace := rls.Namespace + if namespace == "" { + namespace = defaultNamespace + } + mem.SetNamespace(namespace) + + if _, ok := mem.cache[namespace]; !ok { + mem.cache[namespace] = memReleases{} + } + + if recs, ok := mem.cache[namespace][rls.Name]; ok { + if err := recs.Add(newRecord(key, rls)); err != nil { + return err + } + mem.cache[namespace][rls.Name] = recs + return nil + } + mem.cache[namespace][rls.Name] = records{newRecord(key, rls)} + return nil +} + +// Update updates a release or returns ErrReleaseNotFound. +func (mem *Memory) Update(key string, rls *rspb.Release) error { + defer unlock(mem.wlock()) + + // For backwards compatibility, we protect against an unset namespace + namespace := rls.Namespace + if namespace == "" { + namespace = defaultNamespace + } + mem.SetNamespace(namespace) + + if _, ok := mem.cache[namespace]; ok { + if rs, ok := mem.cache[namespace][rls.Name]; ok && rs.Exists(key) { + rs.Replace(key, newRecord(key, rls)) + return nil + } + } + return ErrReleaseNotFound +} + +// Delete deletes a release or returns ErrReleaseNotFound. +func (mem *Memory) Delete(key string) (*rspb.Release, error) { + defer unlock(mem.wlock()) + + keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.") + elems := strings.Split(keyWithoutPrefix, ".v") + + if len(elems) != 2 { + return nil, ErrInvalidKey + } + + name, ver := elems[0], elems[1] + if _, err := strconv.Atoi(ver); err != nil { + return nil, ErrInvalidKey + } + if _, ok := mem.cache[mem.namespace]; ok { + if recs, ok := mem.cache[mem.namespace][name]; ok { + if r := recs.Remove(key); r != nil { + // recs.Remove changes the slice reference, so we have to re-assign it. + mem.cache[mem.namespace][name] = recs + return r.rls, nil + } + } + } + return nil, ErrReleaseNotFound +} + +// wlock locks mem for writing +func (mem *Memory) wlock() func() { + mem.Lock() + return func() { mem.Unlock() } +} + +// rlock locks mem for reading +func (mem *Memory) rlock() func() { + mem.RLock() + return func() { mem.RUnlock() } +} + +// unlock calls fn which reverses a mem.rlock or mem.wlock. e.g: +// ```defer unlock(mem.rlock())```, locks mem for reading at the +// call point of defer and unlocks upon exiting the block. +func unlock(fn func()) { fn() } diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go new file mode 100644 index 000000000..9df173384 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go @@ -0,0 +1,124 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package driver // import "helm.sh/helm/v3/pkg/storage/driver" + +import ( + "sort" + "strconv" + + rspb "helm.sh/helm/v3/pkg/release" +) + +// records holds a list of in-memory release records +type records []*record + +func (rs records) Len() int { return len(rs) } +func (rs records) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] } +func (rs records) Less(i, j int) bool { return rs[i].rls.Version < rs[j].rls.Version } + +func (rs *records) Add(r *record) error { + if r == nil { + return nil + } + + if rs.Exists(r.key) { + return ErrReleaseExists + } + + *rs = append(*rs, r) + sort.Sort(*rs) + + return nil +} + +func (rs records) Get(key string) *record { + if i, ok := rs.Index(key); ok { + return rs[i] + } + return nil +} + +func (rs *records) Iter(fn func(int, *record) bool) { + cp := make([]*record, len(*rs)) + copy(cp, *rs) + + for i, r := range cp { + if !fn(i, r) { + return + } + } +} + +func (rs *records) Index(key string) (int, bool) { + for i, r := range *rs { + if r.key == key { + return i, true + } + } + return -1, false +} + +func (rs records) Exists(key string) bool { + _, ok := rs.Index(key) + return ok +} + +func (rs *records) Remove(key string) (r *record) { + if i, ok := rs.Index(key); ok { + return rs.removeAt(i) + } + return nil +} + +func (rs *records) Replace(key string, rec *record) *record { + if i, ok := rs.Index(key); ok { + old := (*rs)[i] + (*rs)[i] = rec + return old + } + return nil +} + +func (rs *records) removeAt(index int) *record { + r := (*rs)[index] + (*rs)[index] = nil + copy((*rs)[index:], (*rs)[index+1:]) + *rs = (*rs)[:len(*rs)-1] + return r +} + +// record is the data structure used to cache releases +// for the in-memory storage driver +type record struct { + key string + lbs labels + rls *rspb.Release +} + +// newRecord creates a new in-memory release record +func newRecord(key string, rls *rspb.Release) *record { + var lbs labels + + lbs.init() + lbs.set("name", rls.Name) + lbs.set("owner", "helm") + lbs.set("status", rls.Info.Status.String()) + lbs.set("version", strconv.Itoa(rls.Version)) + + // return &record{key: key, lbs: lbs, rls: proto.Clone(rls).(*rspb.Release)} + return &record{key: key, lbs: lbs, rls: rls} +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go new file mode 100644 index 000000000..9c2f805f2 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go @@ -0,0 +1,256 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kblabels "k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/validation"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*Secrets)(nil)
+
+// SecretsDriverName is the string name of the driver.
+const SecretsDriverName = "Secret"
+
+// Secrets is a wrapper around an implementation of a kubernetes
+// SecretsInterface.
+type Secrets struct {
+	impl corev1.SecretInterface
+	Log  func(string, ...interface{})
+}
+
+// NewSecrets initializes a new Secrets wrapping an implementation of
+// the kubernetes SecretsInterface.
+func NewSecrets(impl corev1.SecretInterface) *Secrets {
+	return &Secrets{
+		impl: impl,
+		Log:  func(_ string, _ ...interface{}) {},
+	}
+}
+
+// Name returns the name of the driver.
+func (secrets *Secrets) Name() string {
+	return SecretsDriverName
+}
+
+// Get fetches the release named by key. The corresponding release is returned
+// or error if not found.
+func (secrets *Secrets) Get(key string) (*rspb.Release, error) {
+	// fetch the secret holding the release named by key
+	obj, err := secrets.impl.Get(context.Background(), key, metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, ErrReleaseNotFound
+		}
+		return nil, errors.Wrapf(err, "get: failed to get %q", key)
+	}
+	// found the secret, decode the base64 data string.
+	// Check the decode error before touching the release to avoid a nil
+	// dereference, mirroring the ConfigMaps driver above.
+	r, err := decodeRelease(string(obj.Data["release"]))
+	if err != nil {
+		return nil, errors.Wrapf(err, "get: failed to decode data %q", key)
+	}
+	r.Labels = filterSystemLabels(obj.ObjectMeta.Labels)
+	return r, nil
+}
+
+// List fetches all releases and returns the list of releases such
+// that filter(release) == true. An error is returned if the
+// secret fails to retrieve the releases.
+func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
+	lsel := kblabels.Set{"owner": "helm"}.AsSelector()
+	opts := metav1.ListOptions{LabelSelector: lsel.String()}
+
+	list, err := secrets.impl.List(context.Background(), opts)
+	if err != nil {
+		return nil, errors.Wrap(err, "list: failed to list")
+	}
+
+	var results []*rspb.Release
+
+	// iterate over the secrets object list
+	// and decode each release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(string(item.Data["release"]))
+		if err != nil {
+			secrets.Log("list: failed to decode release: %v: %s", item, err)
+			continue
+		}
+
+		rls.Labels = item.ObjectMeta.Labels
+
+		if filter(rls) {
+			results = append(results, rls)
+		}
+	}
+	return results, nil
+}
+
+// Query fetches all releases that match the provided map of labels.
+// An error is returned if the secret fails to retrieve the releases.
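+//
+// For example (editor's sketch; the clientset and namespace are illustrative):
+//
+//	d := NewSecrets(clientset.CoreV1().Secrets("default"))
+//	rels, err := d.Query(map[string]string{"owner": "helm"})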
+func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) { + ls := kblabels.Set{} + for k, v := range labels { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + } + ls[k] = v + } + + opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()} + + list, err := secrets.impl.List(context.Background(), opts) + if err != nil { + return nil, errors.Wrap(err, "query: failed to query with labels") + } + + if len(list.Items) == 0 { + return nil, ErrReleaseNotFound + } + + var results []*rspb.Release + for _, item := range list.Items { + rls, err := decodeRelease(string(item.Data["release"])) + if err != nil { + secrets.Log("query: failed to decode release: %s", err) + continue + } + rls.Labels = item.ObjectMeta.Labels + results = append(results, rls) + } + return results, nil +} + +// Create creates a new Secret holding the release. If the +// Secret already exists, ErrReleaseExists is returned. +func (secrets *Secrets) Create(key string, rls *rspb.Release) error { + // set labels for secrets object meta data + var lbs labels + + lbs.init() + lbs.fromMap(rls.Labels) + lbs.set("createdAt", strconv.Itoa(int(time.Now().Unix()))) + + // create a new secret to hold the release + obj, err := newSecretsObject(key, rls, lbs) + if err != nil { + return errors.Wrapf(err, "create: failed to encode release %q", rls.Name) + } + // push the secret object out into the kubiverse + if _, err := secrets.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil { + if apierrors.IsAlreadyExists(err) { + return ErrReleaseExists + } + + return errors.Wrap(err, "create: failed to create") + } + return nil +} + +// Update updates the Secret holding the release. If not found +// the Secret is created to hold the release. +func (secrets *Secrets) Update(key string, rls *rspb.Release) error { + // set labels for secrets object meta data + var lbs labels + + lbs.init() + lbs.fromMap(rls.Labels) + lbs.set("modifiedAt", strconv.Itoa(int(time.Now().Unix()))) + + // create a new secret object to hold the release + obj, err := newSecretsObject(key, rls, lbs) + if err != nil { + return errors.Wrapf(err, "update: failed to encode release %q", rls.Name) + } + // push the secret object out into the kubiverse + _, err = secrets.impl.Update(context.Background(), obj, metav1.UpdateOptions{}) + return errors.Wrap(err, "update: failed to update") +} + +// Delete deletes the Secret holding the release named by key. +func (secrets *Secrets) Delete(key string) (rls *rspb.Release, err error) { + // fetch the release to check existence + if rls, err = secrets.Get(key); err != nil { + return nil, err + } + // delete the release + err = secrets.impl.Delete(context.Background(), key, metav1.DeleteOptions{}) + return rls, err +} + +// newSecretsObject constructs a kubernetes Secret object +// to store a release. Each secret data entry is the base64 +// encoded gzipped string of a release. +// +// The following labels are used within each secret: +// +// "modifiedAt" - timestamp indicating when this secret was last modified. (set in Update) +// "createdAt" - timestamp indicating when this secret was created. (set in Create) +// "version" - version of the release. +// "status" - status of the release (see pkg/release/status.go for variants) +// "owner" - owner of the secret, currently "helm". +// "name" - name of the release. 
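+//
+// (Editor's note) The release payload itself is stored under the "release"
+// data key, and the object is given the Helm 3 storage type
+// "helm.sh/release.v1", as set in the function body below.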
+func newSecretsObject(key string, rls *rspb.Release, lbs labels) (*v1.Secret, error) {
+	const owner = "helm"
+
+	// encode the release
+	s, err := encodeRelease(rls)
+	if err != nil {
+		return nil, err
+	}
+
+	if lbs == nil {
+		lbs.init()
+	}
+
+	// apply custom labels
+	lbs.fromMap(rls.Labels)
+
+	// apply labels
+	lbs.set("name", rls.Name)
+	lbs.set("owner", owner)
+	lbs.set("status", rls.Info.Status.String())
+	lbs.set("version", strconv.Itoa(rls.Version))
+
+	// create and return secret object.
+	// Helm 3 introduced setting the 'Type' field
+	// in the Kubernetes storage object.
+	// Helm defines the field content as follows:
+	// <helm_domain>/<helm_object>.v<helm_object_version>
+	// Type field for Helm 3: helm.sh/release.v1
+	// Note: Version starts at 'v1' for Helm 3 and
+	// should be incremented if the release object
+	// metadata is modified.
+	// This would potentially be a breaking change
+	// and should only happen between major versions.
+	return &v1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   key,
+			Labels: lbs.toMap(),
+		},
+		Type: "helm.sh/release.v1",
+		Data: map[string][]byte{"release": []byte(s)},
+	}, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
new file mode 100644
index 000000000..8f5714f15
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
@@ -0,0 +1,697 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/jmoiron/sqlx"
+	migrate "github.com/rubenv/sql-migrate"
+
+	sq "github.com/Masterminds/squirrel"
+
+	// Import pq for postgres dialect
+	_ "github.com/lib/pq"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*SQL)(nil)
+
+var labelMap = map[string]struct{}{
+	"modifiedAt": {},
+	"createdAt":  {},
+	"version":    {},
+	"status":     {},
+	"owner":      {},
+	"name":       {},
+}
+
+const postgreSQLDialect = "postgres"
+
+// SQLDriverName is the string name of this driver.
+const SQLDriverName = "SQL"
+
+const sqlReleaseTableName = "releases_v1"
+const sqlCustomLabelsTableName = "custom_labels_v1"
+
+const (
+	sqlReleaseTableKeyColumn        = "key"
+	sqlReleaseTableTypeColumn       = "type"
+	sqlReleaseTableBodyColumn       = "body"
+	sqlReleaseTableNameColumn       = "name"
+	sqlReleaseTableNamespaceColumn  = "namespace"
+	sqlReleaseTableVersionColumn    = "version"
+	sqlReleaseTableStatusColumn     = "status"
+	sqlReleaseTableOwnerColumn      = "owner"
+	sqlReleaseTableCreatedAtColumn  = "createdAt"
+	sqlReleaseTableModifiedAtColumn = "modifiedAt"
+
+	sqlCustomLabelsTableReleaseKeyColumn       = "releaseKey"
+	sqlCustomLabelsTableReleaseNamespaceColumn = "releaseNamespace"
+	sqlCustomLabelsTableKeyColumn              = "key"
+	sqlCustomLabelsTableValueColumn            = "value"
+)
+
+// Following limits based on k8s labels limits - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
+const (
+	sqlCustomLabelsTableKeyMaxLength   = 253 + 1 + 63
+	sqlCustomLabelsTableValueMaxLength = 63
+)
+
+const (
+	sqlReleaseDefaultOwner = "helm"
+	sqlReleaseDefaultType  = "helm.sh/release.v1"
+)
+
+// SQL is the sql storage driver implementation.
+type SQL struct {
+	db               *sqlx.DB
+	namespace        string
+	statementBuilder sq.StatementBuilderType
+
+	Log func(string, ...interface{})
+}
+
+// Name returns the name of the driver.
+func (s *SQL) Name() string {
+	return SQLDriverName
+}
+
+// checkAlreadyApplied reports whether all given migrations have already been applied.
+func (s *SQL) checkAlreadyApplied(migrations []*migrate.Migration) bool {
+	// make map (set) of ids for fast search
+	migrationsIds := make(map[string]struct{})
+	for _, migration := range migrations {
+		migrationsIds[migration.Id] = struct{}{}
+	}
+
+	// get list of applied migrations
+	migrate.SetDisableCreateTable(true)
+	records, err := migrate.GetMigrationRecords(s.db.DB, postgreSQLDialect)
+	migrate.SetDisableCreateTable(false)
+	if err != nil {
+		s.Log("checkAlreadyApplied: failed to get migration records: %v", err)
+		return false
+	}
+
+	for _, record := range records {
+		if _, ok := migrationsIds[record.Id]; ok {
+			s.Log("checkAlreadyApplied: found previous migration (Id: %v) applied at %v", record.Id, record.AppliedAt)
+			delete(migrationsIds, record.Id)
+		}
+	}
+
+	// check if all migrations have been applied
+	if len(migrationsIds) != 0 {
+		for id := range migrationsIds {
+			s.Log("checkAlreadyApplied: found unapplied migration (id: %v)", id)
+		}
+		return false
+	}
+	return true
+}
+
+func (s *SQL) ensureDBSetup() error {
+
+	migrations := &migrate.MemoryMigrationSource{
+		Migrations: []*migrate.Migration{
+			{
+				Id: "init",
+				Up: []string{
+					fmt.Sprintf(`
+						CREATE TABLE %s (
+							%s VARCHAR(90),
+							%s VARCHAR(64) NOT NULL,
+							%s TEXT NOT NULL,
+							%s VARCHAR(64) NOT NULL,
+							%s VARCHAR(64) NOT NULL,
+							%s INTEGER NOT NULL,
+							%s TEXT NOT NULL,
+							%s TEXT NOT NULL,
+							%s INTEGER NOT NULL,
+							%s INTEGER NOT NULL DEFAULT 0,
+							PRIMARY KEY(%s, %s)
+						);
+						CREATE INDEX ON %s (%s, %s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+
+						GRANT ALL ON %s TO PUBLIC;
+
+						ALTER TABLE %s ENABLE ROW LEVEL SECURITY;
+					`,
+						sqlReleaseTableName,
+						sqlReleaseTableKeyColumn,
+						sqlReleaseTableTypeColumn,
+						sqlReleaseTableBodyColumn,
+						sqlReleaseTableNameColumn,
+						sqlReleaseTableNamespaceColumn,
+						sqlReleaseTableVersionColumn,
+						sqlReleaseTableStatusColumn,
+						sqlReleaseTableOwnerColumn,
+						sqlReleaseTableCreatedAtColumn,
+						sqlReleaseTableModifiedAtColumn,
+						sqlReleaseTableKeyColumn,
+						sqlReleaseTableNamespaceColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableKeyColumn,
+						sqlReleaseTableNamespaceColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableVersionColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableStatusColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableOwnerColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableCreatedAtColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableModifiedAtColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableName,
+					),
+				},
+				Down: []string{
+					fmt.Sprintf(`
+						DROP TABLE %s;
+					`, sqlReleaseTableName),
+				},
+			},
+			{
+				Id: "custom_labels",
+				Up: []string{
+					fmt.Sprintf(`
+						CREATE TABLE %s (
+							%s VARCHAR(64),
+							%s VARCHAR(67),
+							%s VARCHAR(%d),
+							%s VARCHAR(%d)
+						);
+						CREATE INDEX ON %s (%s, %s);
+
+						GRANT ALL ON %s TO PUBLIC;
+						ALTER TABLE %s ENABLE ROW LEVEL SECURITY;
+					`,
+						sqlCustomLabelsTableName,
+						sqlCustomLabelsTableReleaseKeyColumn,
+						sqlCustomLabelsTableReleaseNamespaceColumn,
+						sqlCustomLabelsTableKeyColumn,
+						sqlCustomLabelsTableKeyMaxLength,
+						sqlCustomLabelsTableValueColumn,
+						sqlCustomLabelsTableValueMaxLength,
+						sqlCustomLabelsTableName,
+						sqlCustomLabelsTableReleaseKeyColumn,
+						sqlCustomLabelsTableReleaseNamespaceColumn,
+						sqlCustomLabelsTableName,
+						sqlCustomLabelsTableName,
+					),
+				},
+				Down: []string{
+					fmt.Sprintf(`
+						DROP TABLE %s;
+					`, sqlCustomLabelsTableName),
+				},
+			},
+		},
+	}
+
+	// Check that the init migration has already been applied
+	if s.checkAlreadyApplied(migrations.Migrations) {
+		return nil
+	}
+
+	// Populate the database with the relations we need if they don't exist yet
+	_, err := migrate.Exec(s.db.DB, postgreSQLDialect, migrations, migrate.Up)
+	return err
+}
+
+// SQLReleaseWrapper describes how Helm releases are stored in an SQL database
+type SQLReleaseWrapper struct {
+	// The primary key, made of {release-name}.{release-version}
+	Key string `db:"key"`
+
+	// See https://github.com/helm/helm/blob/c9fe3d118caec699eb2565df9838673af379ce12/pkg/storage/driver/secrets.go#L231
+	Type string `db:"type"`
+
+	// The rspb.Release body, as a base64-encoded string
+	Body string `db:"body"`
+
+	// Release "labels" that can be used as filters in the storage.Query(labels map[string]string)
+	// we implemented. Note that allowing Helm users to filter against new dimensions will require a
+	// new migration to be added, and the Create and/or update functions to be updated accordingly.
+	Name       string `db:"name"`
+	Namespace  string `db:"namespace"`
+	Version    int    `db:"version"`
+	Status     string `db:"status"`
+	Owner      string `db:"owner"`
+	CreatedAt  int    `db:"createdAt"`
+	ModifiedAt int    `db:"modifiedAt"`
+}
+
+type SQLReleaseCustomLabelWrapper struct {
+	ReleaseKey       string `db:"release_key"`
+	ReleaseNamespace string `db:"release_namespace"`
+	Key              string `db:"key"`
+	Value            string `db:"value"`
+}
+
+// NewSQL initializes a new sql driver.
+func NewSQL(connectionString string, logger func(string, ...interface{}), namespace string) (*SQL, error) {
+	db, err := sqlx.Connect(postgreSQLDialect, connectionString)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &SQL{
+		db:               db,
+		Log:              logger,
+		statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
+	}
+
+	if err := driver.ensureDBSetup(); err != nil {
+		return nil, err
+	}
+
+	driver.namespace = namespace
+
+	return driver, nil
+}
+
+// Get returns the release named by key.
+func (s *SQL) Get(key string) (*rspb.Release, error) {
+	var record SQLReleaseWrapper
+
+	qb := s.statementBuilder.
+		Select(sqlReleaseTableBodyColumn).
+		From(sqlReleaseTableName).
+		Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+ Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}) + + query, args, err := qb.ToSql() + if err != nil { + s.Log("failed to build query: %v", err) + return nil, err + } + + // Get will return an error if the result is empty + if err := s.db.Get(&record, query, args...); err != nil { + s.Log("got SQL error when getting release %s: %v", key, err) + return nil, ErrReleaseNotFound + } + + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("get: failed to decode data %q: %v", key, err) + return nil, err + } + + if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil { + s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err) + return nil, err + } + + return release, nil +} + +// List returns the list of all releases such that filter(release) == true +func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { + sb := s.statementBuilder. + Select(sqlReleaseTableKeyColumn, sqlReleaseTableNamespaceColumn, sqlReleaseTableBodyColumn). + From(sqlReleaseTableName). + Where(sq.Eq{sqlReleaseTableOwnerColumn: sqlReleaseDefaultOwner}) + + // If a namespace was specified, we only list releases from that namespace + if s.namespace != "" { + sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}) + } + + query, args, err := sb.ToSql() + if err != nil { + s.Log("failed to build query: %v", err) + return nil, err + } + + var records = []SQLReleaseWrapper{} + if err := s.db.Select(&records, query, args...); err != nil { + s.Log("list: failed to list: %v", err) + return nil, err + } + + var releases []*rspb.Release + for _, record := range records { + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("list: failed to decode release: %v: %v", record, err) + continue + } + + if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil { + s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err) + return nil, err + } + for k, v := range getReleaseSystemLabels(release) { + release.Labels[k] = v + } + + if filter(release) { + releases = append(releases, release) + } + } + + return releases, nil +} + +// Query returns the set of releases that match the provided set of labels. +func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { + sb := s.statementBuilder. + Select(sqlReleaseTableKeyColumn, sqlReleaseTableNamespaceColumn, sqlReleaseTableBodyColumn). 
+		From(sqlReleaseTableName)
+
+	keys := make([]string, 0, len(labels))
+	for key := range labels {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	for _, key := range keys {
+		if _, ok := labelMap[key]; ok {
+			sb = sb.Where(sq.Eq{key: labels[key]})
+		} else {
+			s.Log("unknown label %s", key)
+			return nil, fmt.Errorf("unknown label %s", key)
+		}
+	}
+
+	// If a namespace was specified, we only list releases from that namespace
+	if s.namespace != "" {
+		sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace})
+	}
+
+	// Build our query
+	query, args, err := sb.ToSql()
+	if err != nil {
+		s.Log("failed to build query: %v", err)
+		return nil, err
+	}
+
+	var records = []SQLReleaseWrapper{}
+	if err := s.db.Select(&records, query, args...); err != nil {
+		s.Log("list: failed to query with labels: %v", err)
+		return nil, err
+	}
+
+	if len(records) == 0 {
+		return nil, ErrReleaseNotFound
+	}
+
+	var releases []*rspb.Release
+	for _, record := range records {
+		release, err := decodeRelease(record.Body)
+		if err != nil {
+			s.Log("list: failed to decode release: %v: %v", record, err)
+			continue
+		}
+
+		if release.Labels, err = s.getReleaseCustomLabels(record.Key, record.Namespace); err != nil {
+			s.Log("failed to get release %s/%s custom labels: %v", record.Namespace, record.Key, err)
+			return nil, err
+		}
+
+		releases = append(releases, release)
+	}
+
+	if len(releases) == 0 {
+		return nil, ErrReleaseNotFound
+	}
+
+	return releases, nil
+}
+
+// Create creates a new release.
+func (s *SQL) Create(key string, rls *rspb.Release) error {
+	namespace := rls.Namespace
+	if namespace == "" {
+		namespace = defaultNamespace
+	}
+	s.namespace = namespace
+
+	body, err := encodeRelease(rls)
+	if err != nil {
+		s.Log("failed to encode release: %v", err)
+		return err
+	}
+
+	transaction, err := s.db.Beginx()
+	if err != nil {
+		s.Log("failed to start SQL transaction: %v", err)
+		return fmt.Errorf("error beginning transaction: %v", err)
+	}
+
+	insertQuery, args, err := s.statementBuilder.
+		Insert(sqlReleaseTableName).
+		Columns(
+			sqlReleaseTableKeyColumn,
+			sqlReleaseTableTypeColumn,
+			sqlReleaseTableBodyColumn,
+			sqlReleaseTableNameColumn,
+			sqlReleaseTableNamespaceColumn,
+			sqlReleaseTableVersionColumn,
+			sqlReleaseTableStatusColumn,
+			sqlReleaseTableOwnerColumn,
+			sqlReleaseTableCreatedAtColumn,
+		).
+		Values(
+			key,
+			sqlReleaseDefaultType,
+			body,
+			rls.Name,
+			namespace,
+			int(rls.Version),
+			rls.Info.Status.String(),
+			sqlReleaseDefaultOwner,
+			int(time.Now().Unix()),
+		).ToSql()
+	if err != nil {
+		s.Log("failed to build insert query: %v", err)
+		return err
+	}
+
+	if _, err := transaction.Exec(insertQuery, args...); err != nil {
+		defer transaction.Rollback()
+
+		selectQuery, args, buildErr := s.statementBuilder.
+			Select(sqlReleaseTableKeyColumn).
+			From(sqlReleaseTableName).
+			Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+			Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+			ToSql()
+		if buildErr != nil {
+			s.Log("failed to build select query: %v", buildErr)
+			return err
+		}
+
+		var record SQLReleaseWrapper
+		if err := transaction.Get(&record, selectQuery, args...); err == nil {
+			s.Log("release %s already exists", key)
+			return ErrReleaseExists
+		}
+
+		s.Log("failed to store release %s in SQL database: %v", key, err)
+		return err
+	}
+
+	// Filter out system labels before the insert: in the SQL storage driver,
+	// system labels are stored in dedicated columns of the release table.
+	for k, v := range filterSystemLabels(rls.Labels) {
+		insertLabelsQuery, args, err := s.statementBuilder.
+			Insert(sqlCustomLabelsTableName).
+			Columns(
+				sqlCustomLabelsTableReleaseKeyColumn,
+				sqlCustomLabelsTableReleaseNamespaceColumn,
+				sqlCustomLabelsTableKeyColumn,
+				sqlCustomLabelsTableValueColumn,
+			).
+			Values(
+				key,
+				namespace,
+				k,
+				v,
+			).ToSql()
+
+		if err != nil {
+			defer transaction.Rollback()
+			s.Log("failed to build insert query: %v", err)
+			return err
+		}
+
+		if _, err := transaction.Exec(insertLabelsQuery, args...); err != nil {
+			defer transaction.Rollback()
+			s.Log("failed to write Labels: %v", err)
+			return err
+		}
+	}
+	defer transaction.Commit()
+
+	return nil
+}
+
+// Update updates a release.
+func (s *SQL) Update(key string, rls *rspb.Release) error {
+	namespace := rls.Namespace
+	if namespace == "" {
+		namespace = defaultNamespace
+	}
+	s.namespace = namespace
+
+	body, err := encodeRelease(rls)
+	if err != nil {
+		s.Log("failed to encode release: %v", err)
+		return err
+	}
+
+	query, args, err := s.statementBuilder.
+		Update(sqlReleaseTableName).
+		Set(sqlReleaseTableBodyColumn, body).
+		Set(sqlReleaseTableNameColumn, rls.Name).
+		Set(sqlReleaseTableVersionColumn, int(rls.Version)).
+		Set(sqlReleaseTableStatusColumn, rls.Info.Status.String()).
+		Set(sqlReleaseTableOwnerColumn, sqlReleaseDefaultOwner).
+		Set(sqlReleaseTableModifiedAtColumn, int(time.Now().Unix())).
+		Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+		Where(sq.Eq{sqlReleaseTableNamespaceColumn: namespace}).
+		ToSql()
+
+	if err != nil {
+		s.Log("failed to build update query: %v", err)
+		return err
+	}
+
+	if _, err := s.db.Exec(query, args...); err != nil {
+		s.Log("failed to update release %s in SQL database: %v", key, err)
+		return err
+	}
+
+	return nil
+}
+
+// Delete deletes a release or returns ErrReleaseNotFound.
+func (s *SQL) Delete(key string) (*rspb.Release, error) {
+	transaction, err := s.db.Beginx()
+	if err != nil {
+		s.Log("failed to start SQL transaction: %v", err)
+		return nil, fmt.Errorf("error beginning transaction: %v", err)
+	}
+
+	selectQuery, args, err := s.statementBuilder.
+		Select(sqlReleaseTableBodyColumn).
+		From(sqlReleaseTableName).
+		Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+		Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+		ToSql()
+	if err != nil {
+		s.Log("failed to build select query: %v", err)
+		return nil, err
+	}
+
+	var record SQLReleaseWrapper
+	err = transaction.Get(&record, selectQuery, args...)
+	if err != nil {
+		s.Log("release %s not found: %v", key, err)
+		return nil, ErrReleaseNotFound
+	}
+
+	release, err := decodeRelease(record.Body)
+	if err != nil {
+		s.Log("failed to decode release %s: %v", key, err)
+		transaction.Rollback()
+		return nil, err
+	}
+	defer transaction.Commit()
+
+	deleteQuery, args, err := s.statementBuilder.
+		Delete(sqlReleaseTableName).
+		Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+		Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+		ToSql()
+	if err != nil {
+		s.Log("failed to build delete query: %v", err)
+		return nil, err
+	}
+
+	_, err = transaction.Exec(deleteQuery, args...)
+	if err != nil {
+		s.Log("failed to perform delete query: %v", err)
+		return release, err
+	}
+
+	if release.Labels, err = s.getReleaseCustomLabels(key, s.namespace); err != nil {
+		s.Log("failed to get release %s/%s custom labels: %v", s.namespace, key, err)
+		return nil, err
+	}
+
+	deleteCustomLabelsQuery, args, err := s.statementBuilder.
+		Delete(sqlCustomLabelsTableName).
+		Where(sq.Eq{sqlCustomLabelsTableReleaseKeyColumn: key}).
+		Where(sq.Eq{sqlCustomLabelsTableReleaseNamespaceColumn: s.namespace}).
+ ToSql() + + if err != nil { + s.Log("failed to build delete Labels query: %v", err) + return nil, err + } + _, err = transaction.Exec(deleteCustomLabelsQuery, args...) + return release, err +} + +// Get release custom labels from database +func (s *SQL) getReleaseCustomLabels(key string, _ string) (map[string]string, error) { + query, args, err := s.statementBuilder. + Select(sqlCustomLabelsTableKeyColumn, sqlCustomLabelsTableValueColumn). + From(sqlCustomLabelsTableName). + Where(sq.Eq{sqlCustomLabelsTableReleaseKeyColumn: key, + sqlCustomLabelsTableReleaseNamespaceColumn: s.namespace}). + ToSql() + if err != nil { + return nil, err + } + + var labelsList = []SQLReleaseCustomLabelWrapper{} + if err := s.db.Select(&labelsList, query, args...); err != nil { + return nil, err + } + + labelsMap := make(map[string]string) + for _, i := range labelsList { + labelsMap[i.Key] = i.Value + } + + return filterSystemLabels(labelsMap), nil +} + +// Rebuild system labels from release object +func getReleaseSystemLabels(rls *rspb.Release) map[string]string { + return map[string]string{ + "name": rls.Name, + "owner": sqlReleaseDefaultOwner, + "status": rls.Info.Status.String(), + "version": strconv.Itoa(rls.Version), + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go new file mode 100644 index 000000000..7bda5ec96 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go @@ -0,0 +1,122 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver // import "helm.sh/helm/v3/pkg/storage/driver" + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" + "io" + + rspb "helm.sh/helm/v3/pkg/release" +) + +var b64 = base64.StdEncoding + +var magicGzip = []byte{0x1f, 0x8b, 0x08} + +var systemLabels = []string{"name", "owner", "status", "version", "createdAt", "modifiedAt"} + +// encodeRelease encodes a release returning a base64 encoded +// gzipped string representation, or error. +func encodeRelease(rls *rspb.Release) (string, error) { + b, err := json.Marshal(rls) + if err != nil { + return "", err + } + var buf bytes.Buffer + w, err := gzip.NewWriterLevel(&buf, gzip.BestCompression) + if err != nil { + return "", err + } + if _, err = w.Write(b); err != nil { + return "", err + } + w.Close() + + return b64.EncodeToString(buf.Bytes()), nil +} + +// decodeRelease decodes the bytes of data into a release +// type. Data must contain a base64 encoded gzipped string of a +// valid release, otherwise an error is returned. 
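+//
+// For illustration, a round trip through these two helpers (rls being a
+// hypothetical *rspb.Release value) recovers an equivalent release:
+//
+//	enc, _ := encodeRelease(rls)  // JSON-marshal, gzip, base64-encode
+//	out, _ := decodeRelease(enc)  // base64-decode, gunzip, JSON-unmarshal
+//	// out now holds the same release data as rls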
+func decodeRelease(data string) (*rspb.Release, error) { + // base64 decode string + b, err := b64.DecodeString(data) + if err != nil { + return nil, err + } + + // For backwards compatibility with releases that were stored before + // compression was introduced we skip decompression if the + // gzip magic header is not found + if len(b) > 3 && bytes.Equal(b[0:3], magicGzip) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, err + } + defer r.Close() + b2, err := io.ReadAll(r) + if err != nil { + return nil, err + } + b = b2 + } + + var rls rspb.Release + // unmarshal release object bytes + if err := json.Unmarshal(b, &rls); err != nil { + return nil, err + } + return &rls, nil +} + +// Checks if label is system +func isSystemLabel(key string) bool { + for _, v := range GetSystemLabels() { + if key == v { + return true + } + } + return false +} + +// Removes system labels from labels map +func filterSystemLabels(lbs map[string]string) map[string]string { + result := make(map[string]string) + for k, v := range lbs { + if !isSystemLabel(k) { + result[k] = v + } + } + return result +} + +// Checks if labels array contains system labels +func ContainsSystemLabels(lbs map[string]string) bool { + for k := range lbs { + if isSystemLabel(k) { + return true + } + } + return false +} + +func GetSystemLabels() []string { + return systemLabels +} diff --git a/vendor/helm.sh/helm/v3/pkg/storage/storage.go b/vendor/helm.sh/helm/v3/pkg/storage/storage.go new file mode 100644 index 000000000..0a18b34a0 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/storage/storage.go @@ -0,0 +1,266 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage // import "helm.sh/helm/v3/pkg/storage" + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + rspb "helm.sh/helm/v3/pkg/release" + relutil "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/storage/driver" +) + +// HelmStorageType is the type field of the Kubernetes storage object which stores the Helm release +// version. It is modified slightly replacing the '/': sh.helm/release.v1 +// Note: The version 'v1' is incremented if the release object metadata is +// modified between major releases. +// This constant is used as a prefix for the Kubernetes storage object name. +const HelmStorageType = "sh.helm.release.v1" + +// Storage represents a storage engine for a Release. +type Storage struct { + driver.Driver + + // MaxHistory specifies the maximum number of historical releases that will + // be retained, including the most recent release. Values of 0 or less are + // ignored (meaning no limits are imposed). + MaxHistory int + + Log func(string, ...interface{}) +} + +// Get retrieves the release from storage. An error is returned +// if the storage driver failed to fetch the release, or the +// release identified by the key, version pair does not exist. 
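+//
+// For example, fetching a hypothetical release "wordpress" at revision 3
+// looks up the storage key "sh.helm.release.v1.wordpress.v3" built by
+// makeKey below:
+//
+//	rel, err := s.Get("wordpress", 3)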
+func (s *Storage) Get(name string, version int) (*rspb.Release, error) { + s.Log("getting release %q", makeKey(name, version)) + return s.Driver.Get(makeKey(name, version)) +} + +// Create creates a new storage entry holding the release. An +// error is returned if the storage driver fails to store the +// release, or a release with an identical key already exists. +func (s *Storage) Create(rls *rspb.Release) error { + s.Log("creating release %q", makeKey(rls.Name, rls.Version)) + if s.MaxHistory > 0 { + // Want to make space for one more release. + if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil && + !errors.Is(err, driver.ErrReleaseNotFound) { + return err + } + } + return s.Driver.Create(makeKey(rls.Name, rls.Version), rls) +} + +// Update updates the release in storage. An error is returned if the +// storage backend fails to update the release or if the release +// does not exist. +func (s *Storage) Update(rls *rspb.Release) error { + s.Log("updating release %q", makeKey(rls.Name, rls.Version)) + return s.Driver.Update(makeKey(rls.Name, rls.Version), rls) +} + +// Delete deletes the release from storage. An error is returned if +// the storage backend fails to delete the release or if the release +// does not exist. +func (s *Storage) Delete(name string, version int) (*rspb.Release, error) { + s.Log("deleting release %q", makeKey(name, version)) + return s.Driver.Delete(makeKey(name, version)) +} + +// ListReleases returns all releases from storage. An error is returned if the +// storage backend fails to retrieve the releases. +func (s *Storage) ListReleases() ([]*rspb.Release, error) { + s.Log("listing all releases in storage") + return s.Driver.List(func(_ *rspb.Release) bool { return true }) +} + +// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned +// if the storage backend fails to retrieve the releases. +func (s *Storage) ListUninstalled() ([]*rspb.Release, error) { + s.Log("listing uninstalled releases in storage") + return s.Driver.List(func(rls *rspb.Release) bool { + return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls) + }) +} + +// ListDeployed returns all releases with Status == DEPLOYED. An error is returned +// if the storage backend fails to retrieve the releases. +func (s *Storage) ListDeployed() ([]*rspb.Release, error) { + s.Log("listing all deployed releases in storage") + return s.Driver.List(func(rls *rspb.Release) bool { + return relutil.StatusFilter(rspb.StatusDeployed).Check(rls) + }) +} + +// Deployed returns the last deployed release with the provided release name, or +// returns ErrReleaseNotFound if not found. +func (s *Storage) Deployed(name string) (*rspb.Release, error) { + ls, err := s.DeployedAll(name) + if err != nil { + return nil, err + } + + if len(ls) == 0 { + return nil, driver.NewErrNoDeployedReleases(name) + } + + // If executed concurrently, Helm's database gets corrupted + // and multiple releases are DEPLOYED. Take the latest. + relutil.Reverse(ls, relutil.SortByRevision) + + return ls[0], nil +} + +// DeployedAll returns all deployed releases with the provided name, or +// returns ErrReleaseNotFound if not found. 
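+//
+// Internally this issues a driver query on the system labels
+// {"name": <name>, "owner": "helm", "status": "deployed"}; for example,
+// with a hypothetical release name:
+//
+//	releases, err := s.DeployedAll("wordpress")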
+func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) {
+	s.Log("getting deployed releases from %q history", name)
+
+	ls, err := s.Driver.Query(map[string]string{
+		"name":   name,
+		"owner":  "helm",
+		"status": "deployed",
+	})
+	if err == nil {
+		return ls, nil
+	}
+	if strings.Contains(err.Error(), "not found") {
+		return nil, driver.NewErrNoDeployedReleases(name)
+	}
+	return nil, err
+}
+
+// History returns the revision history for the release with the provided name, or
+// returns ErrReleaseNotFound if no such release name exists.
+func (s *Storage) History(name string) ([]*rspb.Release, error) {
+	s.Log("getting release history for %q", name)
+
+	return s.Driver.Query(map[string]string{"name": name, "owner": "helm"})
+}
+
+// removeLeastRecent removes items from history until the length number of releases
+// does not exceed max.
+//
+// We allow max to be set explicitly so that calling functions can "make space"
+// for the new records they are going to write.
+func (s *Storage) removeLeastRecent(name string, max int) error {
+	if max < 0 {
+		return nil
+	}
+	h, err := s.History(name)
+	if err != nil {
+		return err
+	}
+	if len(h) <= max {
+		return nil
+	}
+
+	// We want oldest to newest
+	relutil.SortByRevision(h)
+
+	lastDeployed, err := s.Deployed(name)
+	if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) {
+		return err
+	}
+
+	var toDelete []*rspb.Release
+	for _, rel := range h {
+		// once we have enough releases to delete to reach the max, stop
+		if len(h)-len(toDelete) == max {
+			break
+		}
+		if lastDeployed != nil {
+			if rel.Version != lastDeployed.Version {
+				toDelete = append(toDelete, rel)
+			}
+		} else {
+			toDelete = append(toDelete, rel)
+		}
+	}
+
+	// Delete as many as possible. In the case of API throughput limitations,
+	// multiple invocations of this function will eventually delete them all.
+	errs := []error{}
+	for _, rel := range toDelete {
+		err = s.deleteReleaseVersion(name, rel.Version)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs))
+	switch c := len(errs); c {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errors.Errorf("encountered %d deletion errors. First is: %s", c, errs[0])
+	}
+}
+
+func (s *Storage) deleteReleaseVersion(name string, version int) error {
+	key := makeKey(name, version)
+	_, err := s.Delete(name, version)
+	if err != nil {
+		s.Log("error pruning %s from release history: %s", key, err)
+		return err
+	}
+	return nil
+}
+
+// Last fetches the last revision of the named release.
+func (s *Storage) Last(name string) (*rspb.Release, error) {
+	s.Log("getting last revision of %q", name)
+	h, err := s.History(name)
+	if err != nil {
+		return nil, err
+	}
+	if len(h) == 0 {
+		return nil, errors.Errorf("no revision for release %q", name)
+	}
+
+	relutil.Reverse(h, relutil.SortByRevision)
+	return h[0], nil
+}
+
+// makeKey concatenates the Kubernetes storage object type, a release name and version
+// into a string with format:```<helm_storage_type>.<release_name>.v<release_version>```.
+// The storage type is prepended to keep name uniqueness between different
+// release storage types. An example of clash when not using the type:
+// https://github.com/helm/helm/issues/6435.
+// This key is used to uniquely identify storage objects.
+func makeKey(rlsname string, version int) string {
+	return fmt.Sprintf("%s.%s.v%d", HelmStorageType, rlsname, version)
+}
+
+// Init initializes a new storage backend with the driver d.
+// If d is nil, the default in-memory driver is used. +func Init(d driver.Driver) *Storage { + // default driver is in memory + if d == nil { + d = driver.NewMemory() + } + return &Storage{ + Driver: d, + Log: func(_ string, _ ...interface{}) {}, + } +} diff --git a/vendor/helm.sh/helm/v3/pkg/time/time.go b/vendor/helm.sh/helm/v3/pkg/time/time.go new file mode 100644 index 000000000..44f3fedfb --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/time/time.go @@ -0,0 +1,91 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package time contains a wrapper for time.Time in the standard library and +// associated methods. This package mainly exists to workaround an issue in Go +// where the serializer doesn't omit an empty value for time: +// https://github.com/golang/go/issues/11939. As such, this can be removed if a +// proposal is ever accepted for Go +package time + +import ( + "bytes" + "time" +) + +// emptyString contains an empty JSON string value to be used as output +var emptyString = `""` + +// Time is a convenience wrapper around stdlib time, but with different +// marshalling and unmarshaling for zero values +type Time struct { + time.Time +} + +// Now returns the current time. It is a convenience wrapper around time.Now() +func Now() Time { + return Time{time.Now()} +} + +func (t Time) MarshalJSON() ([]byte, error) { + if t.Time.IsZero() { + return []byte(emptyString), nil + } + + return t.Time.MarshalJSON() +} + +func (t *Time) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, []byte("null")) { + return nil + } + // If it is empty, we don't have to set anything since time.Time is not a + // pointer and will be set to the zero value + if bytes.Equal([]byte(emptyString), b) { + return nil + } + + return t.Time.UnmarshalJSON(b) +} + +func Parse(layout, value string) (Time, error) { + t, err := time.Parse(layout, value) + return Time{Time: t}, err +} +func ParseInLocation(layout, value string, loc *time.Location) (Time, error) { + t, err := time.ParseInLocation(layout, value, loc) + return Time{Time: t}, err +} + +func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { + return Time{Time: time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +func Unix(sec int64, nsec int64) Time { return Time{Time: time.Unix(sec, nsec)} } + +func (t Time) Add(d time.Duration) Time { return Time{Time: t.Time.Add(d)} } +func (t Time) AddDate(years int, months int, days int) Time { + return Time{Time: t.Time.AddDate(years, months, days)} +} +func (t Time) After(u Time) bool { return t.Time.After(u.Time) } +func (t Time) Before(u Time) bool { return t.Time.Before(u.Time) } +func (t Time) Equal(u Time) bool { return t.Time.Equal(u.Time) } +func (t Time) In(loc *time.Location) Time { return Time{Time: t.Time.In(loc)} } +func (t Time) Local() Time { return Time{Time: t.Time.Local()} } +func (t Time) Round(d time.Duration) Time { return Time{Time: t.Time.Round(d)} } +func (t Time) Sub(u Time) time.Duration { return t.Time.Sub(u.Time) } +func (t 
Time) Truncate(d time.Duration) Time { return Time{Time: t.Time.Truncate(d)} } +func (t Time) UTC() Time { return Time{Time: t.Time.UTC()} } diff --git a/vendor/helm.sh/helm/v3/pkg/uploader/chart_uploader.go b/vendor/helm.sh/helm/v3/pkg/uploader/chart_uploader.go new file mode 100644 index 000000000..d7e940406 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/uploader/chart_uploader.go @@ -0,0 +1,58 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uploader + +import ( + "fmt" + "io" + "net/url" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/pusher" + "helm.sh/helm/v3/pkg/registry" +) + +// ChartUploader handles uploading a chart. +type ChartUploader struct { + // Out is the location to write warning and info messages. + Out io.Writer + // Pusher collection for the operation + Pushers pusher.Providers + // Options provide parameters to be passed along to the Pusher being initialized. + Options []pusher.Option + // RegistryClient is a client for interacting with registries. + RegistryClient *registry.Client +} + +// UploadTo uploads a chart. Depending on the settings, it may also upload a provenance file. +func (c *ChartUploader) UploadTo(ref, remote string) error { + u, err := url.Parse(remote) + if err != nil { + return errors.Errorf("invalid chart URL format: %s", remote) + } + + if u.Scheme == "" { + return fmt.Errorf("scheme prefix missing from remote (e.g. \"%s://\")", registry.OCIScheme) + } + + p, err := c.Pushers.ByScheme(u.Scheme) + if err != nil { + return err + } + + return p.Push(ref, u.String(), c.Options...) +} diff --git a/vendor/helm.sh/helm/v3/pkg/uploader/doc.go b/vendor/helm.sh/helm/v3/pkg/uploader/doc.go new file mode 100644 index 000000000..112ddbf2c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/uploader/doc.go @@ -0,0 +1,21 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package uploader provides a library for uploading charts. + +This package contains tools for uploading charts to registries. +*/ +package uploader diff --git a/vendor/k8s.io/api/admission/v1/doc.go b/vendor/k8s.io/api/admission/v1/doc.go new file mode 100644 index 000000000..cbc6bb59d --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=false + +// +groupName=admission.k8s.io + +package v1 // import "k8s.io/api/admission/v1" diff --git a/vendor/k8s.io/api/admission/v1/generated.pb.go b/vendor/k8s.io/api/admission/v1/generated.pb.go new file mode 100644 index 000000000..a2d8ff5dd --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/generated.pb.go @@ -0,0 +1,1783 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo + +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1.AdmissionRequest") + proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1.AdmissionResponse.AuditAnnotationsEntry") + proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1.AdmissionReview") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto", fileDescriptor_4b73421fd5edef9f) +} + +var fileDescriptor_4b73421fd5edef9f = []byte{ + // 921 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xba, 0xf2, 0x61, 0x6d, 0x72, 0x40, + 0x2e, 0x6a, 0x77, 0x49, 0x04, 0x55, 
0x54, 0x81, 0xd4, 0x2c, 0xa9, 0x50, 0x40, 0x6a, 0xa2, 0x69, + 0x03, 0x15, 0x07, 0xa4, 0xb1, 0x3d, 0xb5, 0x07, 0xdb, 0x33, 0xcb, 0xce, 0xac, 0x83, 0x6f, 0x9c, + 0x38, 0xf3, 0x0d, 0x38, 0xf2, 0x19, 0xf8, 0x06, 0x39, 0xf6, 0xd8, 0x93, 0x45, 0xcc, 0xb7, 0xc8, + 0x09, 0xcd, 0xec, 0xec, 0x9f, 0x26, 0xb1, 0x08, 0x0d, 0xa7, 0xec, 0xfb, 0xf3, 0xfb, 0xbd, 0x97, + 0xdf, 0xdb, 0xf7, 0xd6, 0xe0, 0xc9, 0x64, 0x57, 0x78, 0x94, 0xfb, 0x93, 0xb8, 0x4f, 0x22, 0x46, + 0x24, 0x11, 0xfe, 0x9c, 0xb0, 0x21, 0x8f, 0x7c, 0x13, 0xc0, 0x21, 0xf5, 0xf1, 0x70, 0x46, 0x85, + 0xa0, 0x9c, 0xf9, 0xf3, 0x6d, 0x7f, 0x44, 0x18, 0x89, 0xb0, 0x24, 0x43, 0x2f, 0x8c, 0xb8, 0xe4, + 0xf0, 0x5e, 0x92, 0xe8, 0xe1, 0x90, 0x7a, 0x59, 0xa2, 0x37, 0xdf, 0x6e, 0x3f, 0x1c, 0x51, 0x39, + 0x8e, 0xfb, 0xde, 0x80, 0xcf, 0xfc, 0x11, 0x1f, 0x71, 0x5f, 0xe7, 0xf7, 0xe3, 0x57, 0xda, 0xd2, + 0x86, 0x7e, 0x4a, 0x78, 0xda, 0x0f, 0x8a, 0x05, 0x63, 0x39, 0x26, 0x4c, 0xd2, 0x01, 0x96, 0x57, + 0x57, 0x6d, 0x7f, 0x9a, 0x67, 0xcf, 0xf0, 0x60, 0x4c, 0x19, 0x89, 0x16, 0x7e, 0x38, 0x19, 0x29, + 0x87, 0xf0, 0x67, 0x44, 0xe2, 0xab, 0x50, 0xfe, 0x3a, 0x54, 0x14, 0x33, 0x49, 0x67, 0xe4, 0x12, + 0xe0, 0xd1, 0xbf, 0x01, 0xc4, 0x60, 0x4c, 0x66, 0xf8, 0x22, 0x6e, 0xeb, 0x77, 0x1b, 0xb4, 0xf6, + 0x52, 0x31, 0x10, 0xf9, 0x29, 0x26, 0x42, 0xc2, 0x00, 0x94, 0x63, 0x3a, 0x74, 0xac, 0xae, 0xd5, + 0xb3, 0x83, 0x4f, 0x4e, 0x97, 0x9d, 0xd2, 0x6a, 0xd9, 0x29, 0x1f, 0x1f, 0xec, 0x9f, 0x2f, 0x3b, + 0x1f, 0xae, 0x2b, 0x24, 0x17, 0x21, 0x11, 0xde, 0xf1, 0xc1, 0x3e, 0x52, 0x60, 0xf8, 0x12, 0x54, + 0x26, 0x94, 0x0d, 0x9d, 0x5b, 0x5d, 0xab, 0xd7, 0xd8, 0x79, 0xe4, 0xe5, 0xe2, 0x67, 0x30, 0x2f, + 0x9c, 0x8c, 0x94, 0x43, 0x78, 0x4a, 0x06, 0x6f, 0xbe, 0xed, 0x7d, 0x15, 0xf1, 0x38, 0xfc, 0x96, + 0x44, 0xaa, 0x99, 0x6f, 0x28, 0x1b, 0x06, 0x9b, 0xa6, 0x78, 0x45, 0x59, 0x48, 0x33, 0xc2, 0x31, + 0xa8, 0x47, 0x44, 0xf0, 0x38, 0x1a, 0x10, 0xa7, 0xac, 0xd9, 0x1f, 0xff, 0x77, 0x76, 0x64, 0x18, + 0x82, 0x96, 0xa9, 0x50, 0x4f, 0x3d, 0x28, 0x63, 0x87, 0x9f, 0x81, 0x86, 0x88, 0xfb, 0x69, 0xc0, + 0xa9, 0x68, 0x3d, 0xee, 0x1a, 0x40, 0xe3, 0x79, 0x1e, 0x42, 0xc5, 0x3c, 0x48, 0x41, 0x23, 0x4a, + 0x94, 0x54, 0x5d, 0x3b, 0xef, 0xdd, 0x48, 0x81, 0xa6, 0x2a, 0x85, 0x72, 0x3a, 0x54, 0xe4, 0x86, + 0x0b, 0xd0, 0x34, 0x66, 0xd6, 0xe5, 0xed, 0x1b, 0x4b, 0x72, 0x77, 0xb5, 0xec, 0x34, 0xd1, 0xdb, + 0xb4, 0xe8, 0x62, 0x1d, 0xf8, 0x35, 0x80, 0xc6, 0x55, 0x10, 0xc2, 0x69, 0x6a, 0x8d, 0xda, 0x46, + 0x23, 0x88, 0x2e, 0x65, 0xa0, 0x2b, 0x50, 0xb0, 0x0b, 0x2a, 0x0c, 0xcf, 0x88, 0xb3, 0xa1, 0xd1, + 0xd9, 0xd0, 0x9f, 0xe1, 0x19, 0x41, 0x3a, 0x02, 0x7d, 0x60, 0xab, 0xbf, 0x22, 0xc4, 0x03, 0xe2, + 0x54, 0x75, 0xda, 0x1d, 0x93, 0x66, 0x3f, 0x4b, 0x03, 0x28, 0xcf, 0x81, 0x9f, 0x03, 0x9b, 0x87, + 0xea, 0x55, 0xa7, 0x9c, 0x39, 0x35, 0x0d, 0x70, 0x53, 0xc0, 0x61, 0x1a, 0x38, 0x2f, 0x1a, 0x28, + 0x07, 0xc0, 0x17, 0xa0, 0x1e, 0x0b, 0x12, 0x1d, 0xb0, 0x57, 0xdc, 0xa9, 0x6b, 0x41, 0x3f, 0xf2, + 0x8a, 0xe7, 0xe3, 0xad, 0xb5, 0x57, 0x42, 0x1e, 0x9b, 0xec, 0xfc, 0x7d, 0x4a, 0x3d, 0x28, 0x63, + 0x82, 0xc7, 0xa0, 0xca, 0xfb, 0x3f, 0x92, 0x81, 0x74, 0x6c, 0xcd, 0xf9, 0x70, 0xed, 0x90, 0xcc, + 0xd6, 0x7a, 0x08, 0x9f, 0x3c, 0xfd, 0x59, 0x12, 0xa6, 0xe6, 0x13, 0xdc, 0x36, 0xd4, 0xd5, 0x43, + 0x4d, 0x82, 0x0c, 0x19, 0xfc, 0x01, 0xd8, 0x7c, 0x3a, 0x4c, 0x9c, 0x0e, 0x78, 0x17, 0xe6, 0x4c, + 0xca, 0xc3, 0x94, 0x07, 0xe5, 0x94, 0x70, 0x0b, 0x54, 0x87, 0xd1, 0x02, 0xc5, 0xcc, 0x69, 0x74, + 0xad, 0x5e, 0x3d, 0x00, 0xaa, 0x87, 0x7d, 0xed, 0x41, 0x26, 0x02, 0x5f, 0x82, 0x1a, 0x0f, 0x95, + 0x18, 0xc2, 0xd9, 0x7c, 0x97, 0x0e, 0x9a, 0xa6, 0x83, 0xda, 
0x61, 0xc2, 0x82, 0x52, 0xba, 0xad, + 0x3f, 0x2a, 0xe0, 0x4e, 0xe1, 0x42, 0x89, 0x90, 0x33, 0x41, 0xfe, 0x97, 0x13, 0x75, 0x1f, 0xd4, + 0xf0, 0x74, 0xca, 0x4f, 0x48, 0x72, 0xa5, 0xea, 0x79, 0x13, 0x7b, 0x89, 0x1b, 0xa5, 0x71, 0x78, + 0x04, 0xaa, 0x42, 0x62, 0x19, 0x0b, 0x73, 0x71, 0x1e, 0x5c, 0x6f, 0xbd, 0x9e, 0x6b, 0x4c, 0x22, + 0x18, 0x22, 0x22, 0x9e, 0x4a, 0x64, 0x78, 0x60, 0x07, 0x6c, 0x84, 0x58, 0x0e, 0xc6, 0xfa, 0xaa, + 0x6c, 0x06, 0xf6, 0x6a, 0xd9, 0xd9, 0x38, 0x52, 0x0e, 0x94, 0xf8, 0xe1, 0x2e, 0xb0, 0xf5, 0xc3, + 0x8b, 0x45, 0x98, 0x2e, 0x46, 0x5b, 0x8d, 0xe8, 0x28, 0x75, 0x9e, 0x17, 0x0d, 0x94, 0x27, 0xc3, + 0x5f, 0x2d, 0xd0, 0xc2, 0xf1, 0x90, 0xca, 0x3d, 0xc6, 0xb8, 0xc4, 0xc9, 0x54, 0xaa, 0xdd, 0x72, + 0xaf, 0xb1, 0xf3, 0xc4, 0x5b, 0xf3, 0x11, 0xf4, 0x2e, 0x49, 0xec, 0xed, 0x5d, 0xa0, 0x78, 0xca, + 0x64, 0xb4, 0x08, 0x1c, 0xa3, 0x51, 0xeb, 0x62, 0x18, 0x5d, 0xaa, 0x09, 0x7b, 0xa0, 0x7e, 0x82, + 0x23, 0x46, 0xd9, 0x48, 0x38, 0xb5, 0x6e, 0x59, 0xad, 0xb6, 0xda, 0x8c, 0xef, 0x8c, 0x0f, 0x65, + 0xd1, 0xf6, 0x97, 0xe0, 0x83, 0x2b, 0xcb, 0xc1, 0x16, 0x28, 0x4f, 0xc8, 0x22, 0x99, 0x33, 0x52, + 0x8f, 0xf0, 0x7d, 0xb0, 0x31, 0xc7, 0xd3, 0x98, 0xe8, 0x99, 0xd9, 0x28, 0x31, 0x1e, 0xdf, 0xda, + 0xb5, 0xb6, 0xfe, 0xb4, 0x40, 0xb3, 0xf0, 0x6f, 0xcc, 0x29, 0x39, 0x81, 0x47, 0xa0, 0x66, 0xee, + 0x8d, 0xe6, 0x68, 0xec, 0xdc, 0xbf, 0x8e, 0x02, 0x1a, 0x10, 0x34, 0xd4, 0xab, 0x90, 0xde, 0xc1, + 0x94, 0x46, 0x9d, 0x86, 0xc8, 0x48, 0x64, 0x3e, 0x6e, 0x1f, 0x5f, 0x5f, 0xd4, 0x44, 0x80, 0xd4, + 0x42, 0x19, 0x53, 0xf0, 0xc5, 0xe9, 0x99, 0x5b, 0x7a, 0x7d, 0xe6, 0x96, 0xde, 0x9c, 0xb9, 0xa5, + 0x5f, 0x56, 0xae, 0x75, 0xba, 0x72, 0xad, 0xd7, 0x2b, 0xd7, 0x7a, 0xb3, 0x72, 0xad, 0xbf, 0x56, + 0xae, 0xf5, 0xdb, 0xdf, 0x6e, 0xe9, 0xfb, 0x7b, 0x6b, 0x7e, 0xeb, 0xfc, 0x13, 0x00, 0x00, 0xff, + 0xff, 0x5e, 0xe0, 0xad, 0x0d, 0x1e, 0x09, 0x00, 0x00, +} + +func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestKind != nil { + { + size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + if m.DryRun != nil { + i-- + if *m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.PatchType != nil { + i -= len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + 
dAtA[i] = 0x10 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AdmissionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Kind.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubResource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.OldObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DryRun != nil { + n += 2 + } + l = m.Options.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequestKind != nil { + l = m.RequestKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestResource != nil { + l = m.RequestResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.RequestSubResource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AdmissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Patch != nil { + l = len(m.Patch) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PatchType != nil { + l = len(*m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AdmissionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
} + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AdmissionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, + `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionResponse) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&AdmissionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, + `Patch:` + valueToStringGenerated(this.Patch) + `,`, + `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionReview{`, + `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", 
pv) +} +func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = Operation(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DryRun = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestKind == nil { + m.RequestKind = &v1.GroupVersionKind{} + } + if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestResource == nil { + m.RequestResource = &v1.GroupVersionResource{} + } + if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestSubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &v1.Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) + if m.Patch == nil { + m.Patch = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PatchType(dAtA[iNdEx:postIndex]) + m.PatchType = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + 
} + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &AdmissionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &AdmissionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/admission/v1/generated.proto b/vendor/k8s.io/api/admission/v1/generated.proto new file mode 100644 index 000000000..941deb4fb --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/generated.proto @@ -0,0 +1,167 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.admission.v1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/admission/v1"; + +// AdmissionRequest describes the admission.Attributes for the admission request. +message AdmissionRequest { + // UID is an identifier for the individual request/response. 
It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + + // Resource is the fully-qualified resource being requested (for example, v1.pods) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + optional string subResource = 4; + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional string requestSubResource = 15; + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. 
+ // +optional + optional string name = 5; + + // Namespace is the namespace associated with the request (if any). + // +optional + optional string namespace = 6; + + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + optional string operation = 7; + + // UserInfo is information about the requesting user + optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + + // Object is the object from the incoming request. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + optional bool dryRun = 11; + + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will be a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; +} + +// AdmissionResponse describes an admission response. +message AdmissionResponse { + // UID is an identifier for the individual request/response. + // This must be copied over from the corresponding AdmissionRequest. + optional string uid = 1; + + // Allowed indicates whether or not the admission request was permitted. + optional bool allowed = 2; + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + optional bytes patch = 4; + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + optional string patchType = 5; + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + map<string, string> auditAnnotations = 6; + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + repeated string warnings = 7; +} + +// AdmissionReview describes an admission review request/response. +message AdmissionReview { + // Request describes the attributes for the admission request. + // +optional + optional AdmissionRequest request = 1; + + // Response describes the attributes for the admission response.
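The two messages above form the envelope that the kube-apiserver exchanges with an admission webhook as JSON. As a quick orientation, here is a minimal sketch of the decode/echo cycle in Go, using the vendored `k8s.io/api/admission/v1` types that this patch adds; the handler name, route, and plain `net/http` wiring are illustrative assumptions (a real webhook must serve TLS), not part of the vendored code:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	admissionv1 "k8s.io/api/admission/v1"
)

// handleAdmission decodes an AdmissionReview envelope, inspects the request
// half, and echoes the request UID back in a trivial "allow" response.
func handleAdmission(w http.ResponseWriter, r *http.Request) {
	var review admissionv1.AdmissionReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil || review.Request == nil {
		http.Error(w, "malformed AdmissionReview", http.StatusBadRequest)
		return
	}
	req := review.Request
	// Kind, Operation, and DryRun are the fields a webhook typically branches on.
	fmt.Printf("uid=%s kind=%s op=%s dryRun=%v\n",
		req.UID, req.Kind.Kind, req.Operation, req.DryRun != nil && *req.DryRun)

	// Per the uid comment above, the response UID must be copied from the request.
	review.Response = &admissionv1.AdmissionResponse{UID: req.UID, Allowed: true}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(review)
}

func main() {
	http.HandleFunc("/validate", handleAdmission) // path is an arbitrary example
	_ = http.ListenAndServe(":8443", nil)         // real webhooks must serve TLS
}
```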
+ // +optional + optional AdmissionResponse response = 2; +} + diff --git a/vendor/k8s.io/api/admission/v1/register.go b/vendor/k8s.io/api/admission/v1/register.go new file mode 100644 index 000000000..79000535c --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "admission.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. +// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admission/v1/types.go b/vendor/k8s.io/api/admission/v1/types.go new file mode 100644 index 000000000..556fd1ad5 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/types.go @@ -0,0 +1,169 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionReview describes an admission review request/response. +type AdmissionReview struct { + metav1.TypeMeta `json:",inline"` + // Request describes the attributes for the admission request. 
+ // +optional + Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // Response describes the attributes for the admission response. + // +optional + Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// AdmissionRequest describes the admission.Attributes for the admission request. +type AdmissionRequest struct { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Resource is the fully-qualified resource being requested (for example, v1.pods) + Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. 
+ // +optional + RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` + // Namespace is the namespace associated with the request (if any). + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` + // UserInfo is information about the requesting user + UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` + // Object is the object from the incoming request. + // +optional + Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will be a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` +} + +// AdmissionResponse describes an admission response. +type AdmissionResponse struct { + // UID is an identifier for the individual request/response. + // This must be copied over from the corresponding AdmissionRequest. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + + // Allowed indicates whether or not the admission request was permitted. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` + + // The type of Patch. Currently we only allow "JSONPatch".
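Patch and PatchType travel as a pair: the patch body is raw RFC 6902 JSON Patch bytes, and `PatchTypeJSONPatch` is the only discriminator the API server accepts. A short sketch of building a mutating response with these fields; the function name, patched path, and label value are illustrative assumptions:

```go
package webhook

import (
	"encoding/json"

	admissionv1 "k8s.io/api/admission/v1"
)

// mutateResponse builds an AdmissionResponse that adds a label through an
// RFC 6902 JSON Patch. The "add" op assumes metadata.labels already exists
// on the object; a production webhook would check and create it if needed.
func mutateResponse(req *admissionv1.AdmissionRequest) (*admissionv1.AdmissionResponse, error) {
	patch := []map[string]interface{}{
		{"op": "add", "path": "/metadata/labels/reviewed", "value": "true"},
	}
	raw, err := json.Marshal(patch)
	if err != nil {
		return nil, err
	}
	pt := admissionv1.PatchTypeJSONPatch // the only PatchType the API server accepts
	return &admissionv1.AdmissionResponse{
		UID:       req.UID, // must match the request UID
		Allowed:   true,
		Patch:     raw,
		PatchType: &pt,
		Warnings:  []string{"example webhook added the 'reviewed' label"},
	}, nil
}
```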
+ // +optional + PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` + + // warnings is a list of warning messages to return to the requesting API client. + // Warning messages describe a problem the client making the API request should correct or be aware of. + // Limit warnings to 120 characters if possible. + // Warnings over 256 characters and large numbers of warnings may be truncated. + // +optional + Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"` +} + +// PatchType is the type of patch being used to represent the mutated object +type PatchType string + +// PatchType constants. +const ( + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) diff --git a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..1395a7e10 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go @@ -0,0 +1,78 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AdmissionRequest = map[string]string{ + "": "AdmissionRequest describes the admission.Attributes for the admission request.", + "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. 
It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", + "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", + "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", + "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", + "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", + "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", + "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.", + "namespace": "Namespace is the namespace associated with the request (if any).", + "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", + "userInfo": "UserInfo is information about the requesting user", + "object": "Object is the object from the incoming request.", + "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", + "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "options": "Options is the operation option structure of the operation being performed. e.g. 
`meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will be a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", +} + +func (AdmissionRequest) SwaggerDoc() map[string]string { + return map_AdmissionRequest +} + +var map_AdmissionResponse = map[string]string{ + "": "AdmissionResponse describes an admission response.", + "uid": "UID is an identifier for the individual request/response. This must be copied over from the corresponding AdmissionRequest.", + "allowed": "Allowed indicates whether or not the admission request was permitted.", + "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", + "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", + "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", + "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", + "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.", +} + +func (AdmissionResponse) SwaggerDoc() map[string]string { + return map_AdmissionResponse +} + +var map_AdmissionReview = map[string]string{ + "": "AdmissionReview describes an admission review request/response.", + "request": "Request describes the attributes for the admission request.", + "response": "Response describes the attributes for the admission response.", +} + +func (AdmissionReview) SwaggerDoc() map[string]string { + return map_AdmissionReview +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..d35688285 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go @@ -0,0 +1,142 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { + *out = *in + out.Kind = in.Kind + out.Resource = in.Resource + if in.RequestKind != nil { + in, out := &in.RequestKind, &out.RequestKind + *out = new(metav1.GroupVersionKind) + **out = **in + } + if in.RequestResource != nil { + in, out := &in.RequestResource, &out.RequestResource + *out = new(metav1.GroupVersionResource) + **out = **in + } + in.UserInfo.DeepCopyInto(&out.UserInfo) + in.Object.DeepCopyInto(&out.Object) + in.OldObject.DeepCopyInto(&out.OldObject) + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = new(bool) + **out = **in + } + in.Options.DeepCopyInto(&out.Options) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { + if in == nil { + return nil + } + out := new(AdmissionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { + *out = *in + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + *out = new(PatchType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Warnings != nil { + in, out := &in.Warnings, &out.Warnings + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. +func (in *AdmissionReview) DeepCopy() *AdmissionReview { + if in == nil { + return nil + } + out := new(AdmissionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go new file mode 100644 index 000000000..a5669022a --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=false +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=admission.k8s.io + +package v1beta1 // import "k8s.io/api/admission/v1beta1" diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go new file mode 100644 index 000000000..c88391814 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go @@ -0,0 +1,1783 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo + +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest") + proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry") + proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_b87c2352de86eab9) +} + +var fileDescriptor_b87c2352de86eab9 = []byte{ + // 928 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcb, 0x6e, 0xdb, 0x46, + 0x17, 0x16, 0x23, 0x59, 0x12, 0x47, 0xfe, 0x23, 0x65, 0xf2, 0x17, 0x20, 0x84, 0x80, 0x52, 0xbd, + 0x28, 
0x54, 0x20, 0x19, 0xd6, 0x46, 0x1b, 0x18, 0x41, 0x37, 0x66, 0x6d, 0x14, 0x6e, 0x81, 0xd8, + 0x98, 0x44, 0x6d, 0xda, 0x45, 0x81, 0x91, 0x34, 0x91, 0x58, 0x49, 0x33, 0x2c, 0x67, 0x28, 0x57, + 0xbb, 0xee, 0xbb, 0xe9, 0x1b, 0xf4, 0x05, 0xfa, 0x16, 0xdd, 0x78, 0x99, 0x65, 0x56, 0x42, 0xad, + 0xbe, 0x85, 0x57, 0xc5, 0x0c, 0x87, 0x97, 0xc8, 0x76, 0x9a, 0x4b, 0x57, 0xe6, 0xb9, 0x7c, 0xdf, + 0x39, 0xfe, 0x0e, 0xcf, 0xa1, 0xc0, 0xd1, 0x74, 0x5f, 0xa0, 0x80, 0x7b, 0xd3, 0x78, 0x40, 0x23, + 0x46, 0x25, 0x15, 0xde, 0x82, 0xb2, 0x11, 0x8f, 0x3c, 0x13, 0x20, 0x61, 0xe0, 0x91, 0xd1, 0x3c, + 0x10, 0x22, 0xe0, 0xcc, 0x5b, 0xec, 0x0e, 0xa8, 0x24, 0xbb, 0xde, 0x98, 0x32, 0x1a, 0x11, 0x49, + 0x47, 0x28, 0x8c, 0xb8, 0xe4, 0xf0, 0x5e, 0x92, 0x8d, 0x48, 0x18, 0xa0, 0x2c, 0x1b, 0x99, 0xec, + 0xf6, 0x83, 0x71, 0x20, 0x27, 0xf1, 0x00, 0x0d, 0xf9, 0xdc, 0x1b, 0xf3, 0x31, 0xf7, 0x34, 0x68, + 0x10, 0x3f, 0xd7, 0x96, 0x36, 0xf4, 0x53, 0x42, 0xd6, 0xbe, 0x5f, 0x2c, 0x1d, 0xcb, 0x09, 0x65, + 0x32, 0x18, 0x12, 0x99, 0xd4, 0xdf, 0x2c, 0xdd, 0xfe, 0x34, 0xcf, 0x9e, 0x93, 0xe1, 0x24, 0x60, + 0x34, 0x5a, 0x7a, 0xe1, 0x74, 0xac, 0x1c, 0xc2, 0x9b, 0x53, 0x49, 0xae, 0x43, 0x79, 0x37, 0xa1, + 0xa2, 0x98, 0xc9, 0x60, 0x4e, 0xaf, 0x00, 0x1e, 0xfe, 0x1b, 0x40, 0x0c, 0x27, 0x74, 0x4e, 0x36, + 0x71, 0x3b, 0xbf, 0xdb, 0xa0, 0x75, 0x90, 0x2a, 0x82, 0xe9, 0x4f, 0x31, 0x15, 0x12, 0xfa, 0xa0, + 0x1c, 0x07, 0x23, 0xc7, 0xea, 0x5a, 0x3d, 0xdb, 0xff, 0xe4, 0x7c, 0xd5, 0x29, 0xad, 0x57, 0x9d, + 0x72, 0xff, 0xf8, 0xf0, 0x72, 0xd5, 0xf9, 0xf0, 0xa6, 0x42, 0x72, 0x19, 0x52, 0x81, 0xfa, 0xc7, + 0x87, 0x58, 0x81, 0xe1, 0x33, 0x50, 0x99, 0x06, 0x6c, 0xe4, 0xdc, 0xea, 0x5a, 0xbd, 0xc6, 0xde, + 0x43, 0x94, 0x4f, 0x20, 0x83, 0xa1, 0x70, 0x3a, 0x56, 0x0e, 0x81, 0x94, 0x0c, 0x68, 0xb1, 0x8b, + 0xbe, 0x8c, 0x78, 0x1c, 0x7e, 0x43, 0x23, 0xd5, 0xcc, 0xd7, 0x01, 0x1b, 0xf9, 0xdb, 0xa6, 0x78, + 0x45, 0x59, 0x58, 0x33, 0xc2, 0x09, 0xa8, 0x47, 0x54, 0xf0, 0x38, 0x1a, 0x52, 0xa7, 0xac, 0xd9, + 0x1f, 0xbd, 0x3d, 0x3b, 0x36, 0x0c, 0x7e, 0xcb, 0x54, 0xa8, 0xa7, 0x1e, 0x9c, 0xb1, 0xc3, 0xcf, + 0x40, 0x43, 0xc4, 0x83, 0x34, 0xe0, 0x54, 0xb4, 0x1e, 0x77, 0x0d, 0xa0, 0xf1, 0x24, 0x0f, 0xe1, + 0x62, 0x1e, 0x0c, 0x40, 0x23, 0x4a, 0x94, 0x54, 0x5d, 0x3b, 0xff, 0x7b, 0x2f, 0x05, 0x9a, 0xaa, + 0x14, 0xce, 0xe9, 0x70, 0x91, 0x1b, 0x2e, 0x41, 0xd3, 0x98, 0x59, 0x97, 0xb7, 0xdf, 0x5b, 0x92, + 0xbb, 0xeb, 0x55, 0xa7, 0x89, 0x5f, 0xa5, 0xc5, 0x9b, 0x75, 0xe0, 0x57, 0x00, 0x1a, 0x57, 0x41, + 0x08, 0xa7, 0xa9, 0x35, 0x6a, 0x1b, 0x8d, 0x20, 0xbe, 0x92, 0x81, 0xaf, 0x41, 0xc1, 0x2e, 0xa8, + 0x30, 0x32, 0xa7, 0xce, 0x96, 0x46, 0x67, 0x43, 0x7f, 0x4c, 0xe6, 0x14, 0xeb, 0x08, 0xf4, 0x80, + 0xad, 0xfe, 0x8a, 0x90, 0x0c, 0xa9, 0x53, 0xd5, 0x69, 0x77, 0x4c, 0x9a, 0xfd, 0x38, 0x0d, 0xe0, + 0x3c, 0x07, 0x7e, 0x0e, 0x6c, 0x1e, 0xaa, 0x57, 0x3d, 0xe0, 0xcc, 0xa9, 0x69, 0x80, 0x9b, 0x02, + 0x4e, 0xd2, 0xc0, 0x65, 0xd1, 0xc0, 0x39, 0x00, 0x3e, 0x05, 0xf5, 0x58, 0xd0, 0xe8, 0x98, 0x3d, + 0xe7, 0x4e, 0x5d, 0x0b, 0xfa, 0x11, 0x2a, 0xde, 0x90, 0x57, 0xd6, 0x5e, 0x09, 0xd9, 0x37, 0xd9, + 0xf9, 0xfb, 0x94, 0x7a, 0x70, 0xc6, 0x04, 0xfb, 0xa0, 0xca, 0x07, 0x3f, 0xd2, 0xa1, 0x74, 0x6c, + 0xcd, 0xf9, 0xe0, 0xc6, 0x21, 0x99, 0xad, 0x45, 0x98, 0x9c, 0x1d, 0xfd, 0x2c, 0x29, 0x53, 0xf3, + 0xf1, 0x6f, 0x1b, 0xea, 0xea, 0x89, 0x26, 0xc1, 0x86, 0x0c, 0xfe, 0x00, 0x6c, 0x3e, 0x1b, 0x25, + 0x4e, 0x07, 0xbc, 0x0b, 0x73, 0x26, 0xe5, 0x49, 0xca, 0x83, 0x73, 0x4a, 0xb8, 0x03, 0xaa, 0xa3, + 0x68, 0x89, 0x63, 0xe6, 0x34, 0xba, 0x56, 0xaf, 0xee, 0x03, 0xd5, 0xc3, 0xa1, 0xf6, 0x60, 0x13, + 0x81, 0xcf, 0x40, 0x8d, 0x87, 
0x4a, 0x0c, 0xe1, 0x6c, 0xbf, 0x4b, 0x07, 0x4d, 0xd3, 0x41, 0xed, + 0x24, 0x61, 0xc1, 0x29, 0xdd, 0xce, 0x1f, 0x15, 0x70, 0xa7, 0x70, 0xa1, 0x44, 0xc8, 0x99, 0xa0, + 0xff, 0xc9, 0x89, 0xfa, 0x18, 0xd4, 0xc8, 0x6c, 0xc6, 0xcf, 0x68, 0x72, 0xa5, 0xea, 0x79, 0x13, + 0x07, 0x89, 0x1b, 0xa7, 0x71, 0x78, 0x0a, 0xaa, 0x42, 0x12, 0x19, 0x0b, 0x73, 0x71, 0xee, 0xbf, + 0xd9, 0x7a, 0x3d, 0xd1, 0x98, 0x44, 0x30, 0x4c, 0x45, 0x3c, 0x93, 0xd8, 0xf0, 0xc0, 0x0e, 0xd8, + 0x0a, 0x89, 0x1c, 0x4e, 0xf4, 0x55, 0xd9, 0xf6, 0xed, 0xf5, 0xaa, 0xb3, 0x75, 0xaa, 0x1c, 0x38, + 0xf1, 0xc3, 0x7d, 0x60, 0xeb, 0x87, 0xa7, 0xcb, 0x30, 0x5d, 0x8c, 0xb6, 0x1a, 0xd1, 0x69, 0xea, + 0xbc, 0x2c, 0x1a, 0x38, 0x4f, 0x86, 0xbf, 0x5a, 0xa0, 0x45, 0xe2, 0x51, 0x20, 0x0f, 0x18, 0xe3, + 0x92, 0x24, 0x53, 0xa9, 0x76, 0xcb, 0xbd, 0xc6, 0xde, 0x11, 0x7a, 0xdd, 0x97, 0x10, 0x5d, 0xd1, + 0x19, 0x1d, 0x6c, 0xf0, 0x1c, 0x31, 0x19, 0x2d, 0x7d, 0xc7, 0x08, 0xd5, 0xda, 0x0c, 0xe3, 0x2b, + 0x85, 0x61, 0x0f, 0xd4, 0xcf, 0x48, 0xc4, 0x02, 0x36, 0x16, 0x4e, 0xad, 0x5b, 0x56, 0xfb, 0xad, + 0xd6, 0xe3, 0x5b, 0xe3, 0xc3, 0x59, 0xb4, 0xfd, 0x05, 0xf8, 0xe0, 0xda, 0x72, 0xb0, 0x05, 0xca, + 0x53, 0xba, 0x4c, 0x86, 0x8d, 0xd5, 0x23, 0xfc, 0x3f, 0xd8, 0x5a, 0x90, 0x59, 0x4c, 0xf5, 0xe0, + 0x6c, 0x9c, 0x18, 0x8f, 0x6e, 0xed, 0x5b, 0x3b, 0x7f, 0x5a, 0xa0, 0x59, 0xf8, 0x37, 0x16, 0x01, + 0x3d, 0x83, 0x7d, 0x50, 0x33, 0x47, 0x47, 0x73, 0x34, 0xf6, 0xd0, 0x1b, 0xcb, 0xa0, 0x51, 0x7e, + 0x43, 0xbd, 0x14, 0xe9, 0x45, 0x4c, 0xb9, 0xe0, 0x77, 0xfa, 0x43, 0xa4, 0x75, 0x32, 0x9f, 0x39, + 0xef, 0x2d, 0xe5, 0x4d, 0xa4, 0x48, 0x2d, 0x9c, 0xd1, 0xf9, 0xfe, 0xf9, 0x85, 0x5b, 0x7a, 0x71, + 0xe1, 0x96, 0x5e, 0x5e, 0xb8, 0xa5, 0x5f, 0xd6, 0xae, 0x75, 0xbe, 0x76, 0xad, 0x17, 0x6b, 0xd7, + 0x7a, 0xb9, 0x76, 0xad, 0xbf, 0xd6, 0xae, 0xf5, 0xdb, 0xdf, 0x6e, 0xe9, 0xfb, 0x7b, 0xaf, 0xfb, + 0x11, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x69, 0x3c, 0x61, 0x0b, 0x3c, 0x09, 0x00, 0x00, +} + +func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestKind != nil { + { + size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + if m.DryRun != nil { + i-- + if *m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size, err := 
m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.PatchType != nil { + i -= len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + 
+	if m.Allowed {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AdmissionReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Response != nil {
+		{
+			size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Request != nil {
+		{
+			size, err := m.Request.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *AdmissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Kind.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Resource.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SubResource)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operation)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.UserInfo.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Object.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.OldObject.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.DryRun != nil {
+		n += 2
+	}
+	l = m.Options.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.RequestKind != nil {
+		l = m.RequestKind.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RequestResource != nil {
+		l = m.RequestResource.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.RequestSubResource)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *AdmissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.Result != nil {
+		l = m.Result.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Patch != nil {
+		l = len(m.Patch)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.PatchType != nil {
+		l = len(*m.PatchType)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.AuditAnnotations) > 0 {
+		for k, v := range m.AuditAnnotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Warnings) > 0 {
+		for _, s := range m.Warnings {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *AdmissionReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Request != nil {
+		l = m.Request.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Response != nil {
+		l = m.Response.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AdmissionRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AdmissionRequest{`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`,
+		`Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`,
+		`SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Operation:` + fmt.Sprintf("%v", this.Operation) + `,`,
+		`UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`,
+		`Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`DryRun:` + valueToStringGenerated(this.DryRun) + `,`,
+		`Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`,
+		`RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`,
+		`RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AdmissionResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations))
+	for k := range this.AuditAnnotations {
+		keysForAuditAnnotations = append(keysForAuditAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+	mapStringForAuditAnnotations := "map[string]string{"
+	for _, k := range keysForAuditAnnotations {
+		mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
+	}
+	mapStringForAuditAnnotations += "}"
+	s := strings.Join([]string{`&AdmissionResponse{`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`,
+		`Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`,
+		`Patch:` + valueToStringGenerated(this.Patch) + `,`,
+		`PatchType:` + valueToStringGenerated(this.PatchType) + `,`,
+		`AuditAnnotations:` + mapStringForAuditAnnotations + `,`,
+		`Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AdmissionReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AdmissionReview{`,
+		`Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`,
+		`Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *AdmissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SubResource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operation = Operation(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.DryRun = &b
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequestKind == nil {
+				m.RequestKind = &v1.GroupVersionKind{}
+			}
+			if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequestResource == nil {
+				m.RequestResource = &v1.GroupVersionResource{}
+			}
+			if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RequestSubResource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AdmissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Allowed = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Result == nil {
+				m.Result = &v1.Status{}
+			}
+			if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...)
+			if m.Patch == nil {
+				m.Patch = []byte{}
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PatchType(dAtA[iNdEx:postIndex])
+			m.PatchType = &s
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuditAnnotations == nil {
+				m.AuditAnnotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.AuditAnnotations[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AdmissionReview) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Request == nil {
+				m.Request = &AdmissionRequest{}
+			}
+			if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Response == nil {
+				m.Response = &AdmissionResponse{}
+			}
+			if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto
new file mode 100644
index 000000000..ff0fa46d2
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto
@@ -0,0 +1,167 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.api.admission.v1beta1;
+
+import "k8s.io/api/authentication/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/admission/v1beta1";
+
+// AdmissionRequest describes the admission.Attributes for the admission request.
+message AdmissionRequest {
+  // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+  // otherwise identical (parallel requests, requests when earlier requests did not modify etc)
+  // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
+  // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+  optional string uid = 1;
+
+  // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
+
+  // Resource is the fully-qualified resource being requested (for example, v1.pods)
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
+
+  // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+  // +optional
+  optional string subResource = 4;
+
+  // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+  // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
+  //
+  // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+  // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+  // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+  // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
+  // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
+  //
+  // See documentation for the "matchPolicy" field in the webhook configuration type for more details.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
+
+  // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+  // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+  //
+  // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+  // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+  // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+  // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+  // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+  //
+  // See documentation for the "matchPolicy" field in the webhook configuration type.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
+
+  // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+  // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
+  // See documentation for the "matchPolicy" field in the webhook configuration type.
+  // +optional
+  optional string requestSubResource = 15;
+
+  // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
+  // rely on the server to generate the name. If that is the case, this field will contain an empty string.
+  // +optional
+  optional string name = 5;
+
+  // Namespace is the namespace associated with the request (if any).
+  // +optional
+  optional string namespace = 6;
+
+  // Operation is the operation being performed. This may be different than the operation
+  // requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
+  optional string operation = 7;
+
+  // UserInfo is information about the requesting user
+  optional k8s.io.api.authentication.v1.UserInfo userInfo = 8;
+
+  // Object is the object from the incoming request.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
+
+  // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
+
+  // DryRun indicates that modifications will definitely not be persisted for this request.
+  // Defaults to false.
+  // +optional
+  optional bool dryRun = 11;
+
+  // Options is the operation option structure of the operation being performed.
+  // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
+  // different than the options the caller provided. e.g. for a patch request the performed
+  // Operation might be a CREATE, in which case the Options will a
+  // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
+}
+
+// AdmissionResponse describes an admission response.
+message AdmissionResponse {
+  // UID is an identifier for the individual request/response.
+  // This should be copied over from the corresponding AdmissionRequest.
+  optional string uid = 1;
+
+  // Allowed indicates whether or not the admission request was permitted.
+  optional bool allowed = 2;
+
+  // Result contains extra details into why an admission request was denied.
+  // This field IS NOT consulted in any way if "Allowed" is "true".
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
+
+  // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+  // +optional
+  optional bytes patch = 4;
+
+  // The type of Patch. Currently we only allow "JSONPatch".
+  // +optional
+  optional string patchType = 5;
+
+  // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+  // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
+  // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
+  // the admission webhook to add additional context to the audit log for this request.
+  // +optional
+  map<string, string> auditAnnotations = 6;
+
+  // warnings is a list of warning messages to return to the requesting API client.
+  // Warning messages describe a problem the client making the API request should correct or be aware of.
+  // Limit warnings to 120 characters if possible.
+  // Warnings over 256 characters and large numbers of warnings may be truncated.
+  // +optional
+  repeated string warnings = 7;
+}
+
+// AdmissionReview describes an admission review request/response.
+message AdmissionReview {
+  // Request describes the attributes for the admission request.
+  // +optional
+  optional AdmissionRequest request = 1;
+
+  // Response describes the attributes for the admission response.
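The AdmissionResponse message above is the half of the contract a webhook fills in: `uid` must echo the request, `allowed` gates the operation, and a mutating webhook hands back an RFC 6902 JSON patch via `patch`/`patchType`. A minimal sketch in Go of composing such responses with the types this patch vendors; the label key, annotation key, and helper names here are illustrative assumptions, not part of the vendored API:

    package webhook

    import (
    	admissionv1beta1 "k8s.io/api/admission/v1beta1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // allowWithPatch is a hypothetical helper: it permits the request and,
    // purely for illustration, attaches a JSONPatch adding one made-up label.
    func allowWithPatch(req *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
    	patchType := admissionv1beta1.PatchTypeJSONPatch
    	return &admissionv1beta1.AdmissionResponse{
    		UID:       req.UID, // must be copied from the matching AdmissionRequest
    		Allowed:   true,
    		Patch:     []byte(`[{"op":"add","path":"/metadata/labels/example","value":"true"}]`),
    		PatchType: &patchType,
    		AuditAnnotations: map[string]string{
    			"example.com/reason": "mutated by sample webhook", // hypothetical key
    		},
    	}
    }

    // deny shows the rejection path: Allowed is false and Result explains why.
    func deny(req *admissionv1beta1.AdmissionRequest, msg string) *admissionv1beta1.AdmissionResponse {
    	return &admissionv1beta1.AdmissionResponse{
    		UID:     req.UID,
    		Allowed: false,
    		Result:  &metav1.Status{Message: msg, Reason: metav1.StatusReasonForbidden},
    	}
    }

Note that `Result` is only consulted when `Allowed` is false, as the field comment above states.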
+  // +optional
+  optional AdmissionResponse response = 2;
+}
+
diff --git a/vendor/k8s.io/api/admission/v1beta1/register.go b/vendor/k8s.io/api/admission/v1beta1/register.go
new file mode 100644
index 000000000..1c53e755d
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "admission.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme.
+	AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&AdmissionReview{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/admission/v1beta1/types.go b/vendor/k8s.io/api/admission/v1beta1/types.go
new file mode 100644
index 000000000..00c619d99
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/types.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	authenticationv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.9
+// +k8s:prerelease-lifecycle-gen:deprecated=1.19
+// This API is never server served. It is used for outbound requests from apiservers. This will ensure it never gets served accidentally
+// and having the generator against this group will protect future APIs which may be served.
+// +k8s:prerelease-lifecycle-gen:replacement=admission.k8s.io,v1,AdmissionReview
+
+// AdmissionReview describes an admission review request/response.
+type AdmissionReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// Request describes the attributes for the admission request.
+	// +optional
+	Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"`
+	// Response describes the attributes for the admission response.
+	// +optional
+	Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"`
+}
+
+// AdmissionRequest describes the admission.Attributes for the admission request.
+type AdmissionRequest struct {
+	// UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+	// otherwise identical (parallel requests, requests when earlier requests did not modify etc)
+	// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
+	// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+	UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+	// Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+	Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Resource is the fully-qualified resource being requested (for example, v1.pods)
+	Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"`
+	// SubResource is the subresource being requested, if any (for example, "status" or "scale")
+	// +optional
+	SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"`
+
+	// RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+	// If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
+	//
+	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+	// with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
+	// and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
+	//
+	// See documentation for the "matchPolicy" field in the webhook configuration type for more details.
+	// +optional
+	RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"`
+	// RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+	// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+	//
+	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+	// with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+	// and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+	//
+	// See documentation for the "matchPolicy" field in the webhook configuration type.
+	// +optional
+	RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"`
+	// RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+	// If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
+	// See documentation for the "matchPolicy" field in the webhook configuration type.
+	// +optional
+	RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"`
+
+	// Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
+	// rely on the server to generate the name. If that is the case, this field will contain an empty string.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
+	// Namespace is the namespace associated with the request (if any).
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
+	// Operation is the operation being performed. This may be different than the operation
+	// requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
+	Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"`
+	// UserInfo is information about the requesting user
+	UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"`
+	// Object is the object from the incoming request.
+	// +optional
+	Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"`
+	// OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+	// +optional
+	OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"`
+	// DryRun indicates that modifications will definitely not be persisted for this request.
+	// Defaults to false.
+	// +optional
+	DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"`
+	// Options is the operation option structure of the operation being performed.
+	// e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
+	// different than the options the caller provided. e.g. for a patch request the performed
+	// Operation might be a CREATE, in which case the Options will a
+	// `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
+	// +optional
+	Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"`
+}
+
+// AdmissionResponse describes an admission response.
+type AdmissionResponse struct {
+	// UID is an identifier for the individual request/response.
+	// This should be copied over from the corresponding AdmissionRequest.
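The AdmissionRequest struct above, together with the response type that follows, is the Go contract a webhook server decodes off the wire. A hedged sketch of the HTTP plumbing, assuming a JSON-serialized v1beta1 AdmissionReview body and an illustrative policy; the handler name is an assumption:

    package webhook

    import (
    	"encoding/json"
    	"net/http"

    	admissionv1beta1 "k8s.io/api/admission/v1beta1"
    )

    // serveReview decodes the incoming AdmissionReview, acts on .Request, and
    // echoes the request UID into .Response, as the UID field comment requires.
    func serveReview(w http.ResponseWriter, r *http.Request) {
    	var review admissionv1beta1.AdmissionReview
    	if err := json.NewDecoder(r.Body).Decode(&review); err != nil || review.Request == nil {
    		http.Error(w, "malformed AdmissionReview", http.StatusBadRequest)
    		return
    	}
    	review.Response = &admissionv1beta1.AdmissionResponse{
    		UID:     review.Request.UID,
    		Allowed: review.Request.Operation != admissionv1beta1.Delete, // illustrative policy only
    	}
    	w.Header().Set("Content-Type", "application/json")
    	_ = json.NewEncoder(w).Encode(&review)
    }

The same AdmissionReview value carries both halves of the exchange, which is why the struct has optional Request and Response pointers.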
+	UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+
+	// Allowed indicates whether or not the admission request was permitted.
+	Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
+
+	// Result contains extra details into why an admission request was denied.
+	// This field IS NOT consulted in any way if "Allowed" is "true".
+	// +optional
+	Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+
+	// The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+	// +optional
+	Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"`
+
+	// The type of Patch. Currently we only allow "JSONPatch".
+	// +optional
+	PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"`
+
+	// AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+	// MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
+	// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
+	// the admission webhook to add additional context to the audit log for this request.
+	// +optional
+	AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"`
+
+	// warnings is a list of warning messages to return to the requesting API client.
+	// Warning messages describe a problem the client making the API request should correct or be aware of.
+	// Limit warnings to 120 characters if possible.
+	// Warnings over 256 characters and large numbers of warnings may be truncated.
+	// +optional
+	Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"`
+}
+
+// PatchType is the type of patch being used to represent the mutated object
+type PatchType string
+
+// PatchType constants.
+const (
+	PatchTypeJSONPatch PatchType = "JSONPatch"
+)
+
+// Operation is the type of resource operation being checked for admission control
+type Operation string
+
+// Operation constants
+const (
+	Create  Operation = "CREATE"
+	Update  Operation = "UPDATE"
+	Delete  Operation = "DELETE"
+	Connect Operation = "CONNECT"
+)
diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 000000000..82598ed57
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-codegen.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AdmissionRequest = map[string]string{
+	"":                   "AdmissionRequest describes the admission.Attributes for the admission request.",
+	"uid":                "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
+	"kind":               "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)",
+	"resource":           "Resource is the fully-qualified resource being requested (for example, v1.pods)",
+	"subResource":        "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")",
+	"requestKind":        "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.",
+	"requestResource":    "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.",
+	"requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.",
+	"name":               "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.",
+	"namespace":          "Namespace is the namespace associated with the request (if any).",
+	"operation":          "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.",
+	"userInfo":           "UserInfo is information about the requesting user",
+	"object":             "Object is the object from the incoming request.",
+	"oldObject":          "OldObject is the existing object. Only populated for DELETE and UPDATE requests.",
+	"dryRun":             "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
+	"options":            "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.",
+}
+
+func (AdmissionRequest) SwaggerDoc() map[string]string {
+	return map_AdmissionRequest
+}
+
+var map_AdmissionResponse = map[string]string{
+	"":                 "AdmissionResponse describes an admission response.",
+	"uid":              "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.",
+	"allowed":          "Allowed indicates whether or not the admission request was permitted.",
+	"status":           "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
+	"patch":            "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
+	"patchType":        "The type of Patch. Currently we only allow \"JSONPatch\".",
+	"auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
+	"warnings":         "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.",
+}
+
+func (AdmissionResponse) SwaggerDoc() map[string]string {
+	return map_AdmissionResponse
+}
+
+var map_AdmissionReview = map[string]string{
+	"":         "AdmissionReview describes an admission review request/response.",
+	"request":  "Request describes the attributes for the admission request.",
+	"response": "Response describes the attributes for the admission response.",
+}
+
+func (AdmissionReview) SwaggerDoc() map[string]string {
+	return map_AdmissionReview
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..8234b322f
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,142 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { + *out = *in + out.Kind = in.Kind + out.Resource = in.Resource + if in.RequestKind != nil { + in, out := &in.RequestKind, &out.RequestKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.RequestResource != nil { + in, out := &in.RequestResource, &out.RequestResource + *out = new(v1.GroupVersionResource) + **out = **in + } + in.UserInfo.DeepCopyInto(&out.UserInfo) + in.Object.DeepCopyInto(&out.Object) + in.OldObject.DeepCopyInto(&out.OldObject) + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = new(bool) + **out = **in + } + in.Options.DeepCopyInto(&out.Options) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { + if in == nil { + return nil + } + out := new(AdmissionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { + *out = *in + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(v1.Status) + (*in).DeepCopyInto(*out) + } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + *out = new(PatchType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Warnings != nil { + in, out := &in.Warnings, &out.Warnings + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. 
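+// All of the generated deepcopy helpers in this file follow one pattern:
+// value fields are copied by assignment, while every pointer, slice, and map
+// is re-allocated so the clone shares no memory with the receiver. A minimal
+// usage sketch (illustrative only, not generated code):
+//
+//	review := &AdmissionReview{Request: &AdmissionRequest{Name: "demo"}}
+//	clone := review.DeepCopy()
+//	clone.Request.Name = "changed" // review.Request.Name still reads "demo"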
+func (in *AdmissionReview) DeepCopy() *AdmissionReview { + if in == nil { + return nil + } + out := new(AdmissionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..f96e8a443 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,50 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *AdmissionReview) APILifecycleDeprecated() (major, minor int) { + return 1, 19 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *AdmissionReview) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "admission.k8s.io", Version: "v1", Kind: "AdmissionReview"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *AdmissionReview) APILifecycleRemoved() (major, minor int) { + return 1, 22 +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go new file mode 100644 index 000000000..5db6d52d4 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=imagepolicy.k8s.io + +package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1" diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go new file mode 100644 index 000000000..990f11c04 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go @@ -0,0 +1,1375 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
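+// The reference works because GoGoProtoPackageIsVersion3 is only declared by
+// gogo/protobuf runtimes at least as new as the generator that produced this
+// file, so an out-of-date vendored runtime fails to compile here rather than
+// misbehaving at runtime.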
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ImageReview) Reset() { *m = ImageReview{} } +func (*ImageReview) ProtoMessage() {} +func (*ImageReview) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{0} +} +func (m *ImageReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReview.Merge(m, src) +} +func (m *ImageReview) XXX_Size() int { + return m.Size() +} +func (m *ImageReview) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReview.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReview proto.InternalMessageInfo + +func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} } +func (*ImageReviewContainerSpec) ProtoMessage() {} +func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{1} +} +func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src) +} +func (m *ImageReviewContainerSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewContainerSpec proto.InternalMessageInfo + +func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} } +func (*ImageReviewSpec) ProtoMessage() {} +func (*ImageReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{2} +} +func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewSpec.Merge(m, src) +} +func (m *ImageReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo + +func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} } +func (*ImageReviewStatus) ProtoMessage() {} +func (*ImageReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{3} +} +func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewStatus.Merge(m, src) +} +func (m *ImageReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewStatus 
proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview") + proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec") + proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry") + proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_834793af728657a5) +} + +var fileDescriptor_834793af728657a5 = []byte{ + // 609 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xcf, 0x6e, 0xd3, 0x4c, + 0x14, 0xc5, 0xe3, 0xa4, 0xff, 0x32, 0xf9, 0x3e, 0x9a, 0x0e, 0x20, 0x59, 0x59, 0x38, 0x55, 0x90, + 0x50, 0x59, 0x30, 0x43, 0x2b, 0x84, 0x0a, 0x0b, 0x50, 0x5c, 0x55, 0x2a, 0x0b, 0x40, 0x1a, 0x76, + 0x5d, 0x31, 0x71, 0x2e, 0x8e, 0x49, 0x3c, 0x63, 0x79, 0xc6, 0x29, 0xd9, 0xf1, 0x04, 0x88, 0x37, + 0xe0, 0x45, 0x78, 0x80, 0x2e, 0xbb, 0xec, 0xaa, 0xa2, 0x61, 0xc9, 0x4b, 0x20, 0x8f, 0x9d, 0xd8, + 0x24, 0x45, 0x55, 0x76, 0xbe, 0xf7, 0xce, 0xf9, 0xdd, 0xe3, 0xe3, 0x91, 0xd1, 0xc9, 0xf0, 0x50, + 0x91, 0x40, 0xd2, 0x61, 0xd2, 0x83, 0x58, 0x80, 0x06, 0x45, 0xc7, 0x20, 0xfa, 0x32, 0xa6, 0xf9, + 0x80, 0x47, 0x01, 0x0d, 0x42, 0xee, 0x43, 0x24, 0x47, 0x81, 0x37, 0xa1, 0xe3, 0x7d, 0x3e, 0x8a, + 0x06, 0x7c, 0x9f, 0xfa, 0x20, 0x20, 0xe6, 0x1a, 0xfa, 0x24, 0x8a, 0xa5, 0x96, 0xb8, 0x9d, 0x09, + 0x08, 0x8f, 0x02, 0x52, 0x12, 0x90, 0x99, 0xa0, 0xf5, 0xd8, 0x0f, 0xf4, 0x20, 0xe9, 0x11, 0x4f, + 0x86, 0xd4, 0x97, 0xbe, 0xa4, 0x46, 0xd7, 0x4b, 0x3e, 0x9a, 0xca, 0x14, 0xe6, 0x29, 0xe3, 0xb5, + 0x9e, 0x16, 0x06, 0x42, 0xee, 0x0d, 0x02, 0x01, 0xf1, 0x84, 0x46, 0x43, 0x3f, 0x6d, 0x28, 0x1a, + 0x82, 0xe6, 0x74, 0xbc, 0xe4, 0xa2, 0x45, 0xff, 0xa5, 0x8a, 0x13, 0xa1, 0x83, 0x10, 0x96, 0x04, + 0xcf, 0x6e, 0x13, 0x28, 0x6f, 0x00, 0x21, 0x5f, 0xd4, 0x75, 0xbe, 0x57, 0x51, 0xe3, 0x75, 0xfa, + 0x9a, 0x0c, 0xc6, 0x01, 0x9c, 0xe1, 0x0f, 0x68, 0x2b, 0xf5, 0xd4, 0xe7, 0x9a, 0xdb, 0xd6, 0xae, + 0xb5, 0xd7, 0x38, 0x78, 0x42, 0x8a, 0x44, 0xe6, 0x68, 0x12, 0x0d, 0xfd, 0xb4, 0xa1, 0x48, 0x7a, + 0x9a, 0x8c, 0xf7, 0xc9, 0xbb, 0xde, 0x27, 0xf0, 0xf4, 0x1b, 0xd0, 0xdc, 0xc5, 0xe7, 0x57, 0xed, + 0xca, 0xf4, 0xaa, 0x8d, 0x8a, 0x1e, 0x9b, 0x53, 0x31, 0x43, 0x6b, 0x2a, 0x02, 0xcf, 0xae, 0x2e, + 0xd1, 0x6f, 0xcc, 0x9b, 0x94, 0xdc, 0xbd, 0x8f, 0xc0, 0x73, 0xff, 0xcb, 0xe9, 0x6b, 0x69, 0xc5, + 0x0c, 0x0b, 0x9f, 0xa2, 0x0d, 0xa5, 0xb9, 0x4e, 0x94, 0x5d, 0x33, 0xd4, 0x83, 0x95, 0xa8, 0x46, + 0xe9, 0xde, 0xc9, 0xb9, 0x1b, 0x59, 0xcd, 0x72, 0x62, 0xe7, 0x15, 0xb2, 0x4b, 0x87, 0x8f, 0xa4, + 0xd0, 0x3c, 0x8d, 0x20, 0xdd, 0x8e, 0x1f, 0xa0, 0x75, 0x43, 0x37, 0x51, 0xd5, 0xdd, 0xff, 0x73, + 0xc4, 0x7a, 0x26, 0xc8, 0x66, 0x9d, 0xdf, 0x55, 0xb4, 0xbd, 0xf0, 0x12, 0x38, 0x44, 0xc8, 0x9b, + 0x91, 0x94, 0x6d, 0xed, 0xd6, 0xf6, 0x1a, 0x07, 0xcf, 0x57, 0x31, 0xfd, 0x97, 0x8f, 0x22, 0xf1, + 0x79, 0x5b, 0xb1, 0xd2, 0x02, 0xfc, 0x19, 0x35, 0xb8, 0x10, 0x52, 0x73, 0x1d, 0x48, 0xa1, 0xec, + 0xaa, 0xd9, 0xd7, 0x5d, 0x35, 0x7a, 0xd2, 0x2d, 0x18, 0xc7, 0x42, 0xc7, 0x13, 0xf7, 0x6e, 0xbe, + 0xb7, 0x51, 0x9a, 0xb0, 0xf2, 0x2a, 0x4c, 0x51, 0x5d, 0xf0, 0x10, 
0x54, 0xc4, 0x3d, 0x30, 0x1f, + 0xa7, 0xee, 0xee, 0xe4, 0xa2, 0xfa, 0xdb, 0xd9, 0x80, 0x15, 0x67, 0x5a, 0x2f, 0x51, 0x73, 0x71, + 0x0d, 0x6e, 0xa2, 0xda, 0x10, 0x26, 0x59, 0xc8, 0x2c, 0x7d, 0xc4, 0xf7, 0xd0, 0xfa, 0x98, 0x8f, + 0x12, 0x30, 0xb7, 0xa8, 0xce, 0xb2, 0xe2, 0x45, 0xf5, 0xd0, 0xea, 0xfc, 0xa8, 0xa2, 0x9d, 0xa5, + 0x8f, 0x8b, 0x1f, 0xa1, 0x4d, 0x3e, 0x1a, 0xc9, 0x33, 0xe8, 0x1b, 0xca, 0x96, 0xbb, 0x9d, 0x9b, + 0xd8, 0xec, 0x66, 0x6d, 0x36, 0x9b, 0xe3, 0x87, 0x68, 0x23, 0x06, 0xae, 0xa4, 0xc8, 0xd8, 0xc5, + 0xbd, 0x60, 0xa6, 0xcb, 0xf2, 0x29, 0xfe, 0x6a, 0xa1, 0x26, 0x4f, 0xfa, 0x81, 0x2e, 0xd9, 0xb5, + 0x6b, 0x26, 0xd9, 0x93, 0xd5, 0xaf, 0x1f, 0xe9, 0x2e, 0xa0, 0xb2, 0x80, 0xed, 0x7c, 0x79, 0x73, + 0x71, 0xcc, 0x96, 0x76, 0xb7, 0x8e, 0xd0, 0xfd, 0x1b, 0x21, 0xab, 0xc4, 0xe7, 0x1e, 0x9f, 0x5f, + 0x3b, 0x95, 0x8b, 0x6b, 0xa7, 0x72, 0x79, 0xed, 0x54, 0xbe, 0x4c, 0x1d, 0xeb, 0x7c, 0xea, 0x58, + 0x17, 0x53, 0xc7, 0xba, 0x9c, 0x3a, 0xd6, 0xcf, 0xa9, 0x63, 0x7d, 0xfb, 0xe5, 0x54, 0x4e, 0xdb, + 0xb7, 0xfc, 0x55, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x2c, 0xa6, 0xdf, 0x90, 0x05, 0x00, + 0x00, +} + +func (m *ImageReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewContainerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewContainerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, 
len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ImageReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewContainerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Annotations) > 0 { + for 
k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ImageReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewContainerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageReviewContainerSpec{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ImageReviewContainerSpec{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ImageReviewSpec{`, + `Containers:` + repeatedStringForContainers + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewStatus) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&ImageReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations 
+ `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ImageReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewContainerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ImageReviewContainerSpec{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + 
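+ // Allocate the destination map lazily. Each protobuf map entry arrives as
+ // a tiny message of its own: field 1 carries the key and field 2 the
+ // value, and the loop below decodes one entry per iteration.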
m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
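+ // Standard varint decode: each byte contributes its low seven bits, and a
+ // clear high bit (b < 0x80) marks the final byte of the value.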
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + 
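+ // A negative result means the declared length overflowed the platform's
+ // int when narrowed from uint64; reject it before it can slice the buffer.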
if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto new file mode 100644 index 000000000..51328dde2 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.imagepolicy.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/imagepolicy/v1alpha1"; + +// ImageReview checks if the set of images in a pod are allowed. +message ImageReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the pod being evaluated + optional ImageReviewSpec spec = 2; + + // Status is filled in by the backend and indicates whether the pod should be allowed. + // +optional + optional ImageReviewStatus status = 3; +} + +// ImageReviewContainerSpec is a description of a container within the pod creation request. +message ImageReviewContainerSpec { + // This can be in the form image:tag or image@SHA:012345679abcdef. + // +optional + optional string image = 1; +} + +// ImageReviewSpec is a description of the pod creation request. +message ImageReviewSpec { + // Containers is a list of a subset of the information in each container of the Pod being created. + // +optional + repeated ImageReviewContainerSpec containers = 1; + + // Annotations is a list of key-value pairs extracted from the Pod's annotations. + // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. + // It is up to each webhook backend to determine how to interpret these annotations, if at all. + // +optional + map<string, string> annotations = 2; + + // Namespace is the namespace the pod is being created in. + // +optional + optional string namespace = 3; +} + +// ImageReviewStatus is the result of the review for the pod creation request. +message ImageReviewStatus { + // Allowed indicates that all images were allowed to be run. + optional bool allowed = 1; + + // Reason should be empty unless Allowed is false in which case it + // may contain a short description of what is wrong. Kubernetes + // may truncate excessively long errors when displaying to the user. + // +optional + optional string reason = 2; + + // AuditAnnotations will be added to the attributes object of the + // admission controller request using 'AddAnnotation'. The keys should + // be prefix-less (i.e., the admission controller will add an + // appropriate prefix). + // +optional + map<string, string> auditAnnotations = 3; +} + diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go new file mode 100644 index 000000000..477571bbb --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "imagepolicy.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ImageReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go new file mode 100644 index 000000000..151ffb1e9 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go @@ -0,0 +1,82 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageReview checks if the set of images in a pod are allowed. +type ImageReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the pod being evaluated + Spec ImageReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the backend and indicates whether the pod should be allowed. + // +optional + Status ImageReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ImageReviewSpec is a description of the pod creation request. +type ImageReviewSpec struct { + // Containers is a list of a subset of the information in each container of the Pod being created. + // +optional + Containers []ImageReviewContainerSpec `json:"containers,omitempty" protobuf:"bytes,1,rep,name=containers"` + // Annotations is a list of key-value pairs extracted from the Pod's annotations. + // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. + // It is up to each webhook backend to determine how to interpret these annotations, if at all. 
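+ // For example, a pod annotation such as "trusted.image-policy.k8s.io/break-glass": "true"
+ // would match the pattern and be forwarded here (the key is illustrative,
+ // not one defined by this API).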
+ // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"` + // Namespace is the namespace the pod is being created in. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` +} + +// ImageReviewContainerSpec is a description of a container within the pod creation request. +type ImageReviewContainerSpec struct { + // This can be in the form image:tag or image@SHA:012345679abcdef. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` + // In future, we may add command line overrides, exec health check command lines, and so on. +} + +// ImageReviewStatus is the result of the review for the pod creation request. +type ImageReviewStatus struct { + // Allowed indicates that all images were allowed to be run. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Reason should be empty unless Allowed is false in which case it + // may contain a short description of what is wrong. Kubernetes + // may truncate excessively long errors when displaying to the user. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // AuditAnnotations will be added to the attributes object of the + // admission controller request using 'AddAnnotation'. The keys should + // be prefix-less (i.e., the admission controller will add an + // appropriate prefix). + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,3,rep,name=auditAnnotations"` +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..dadf95e1d --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ImageReview = map[string]string{ + "": "ImageReview checks if the set of images in a pod are allowed.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the pod being evaluated", + "status": "Status is filled in by the backend and indicates whether the pod should be allowed.", +} + +func (ImageReview) SwaggerDoc() map[string]string { + return map_ImageReview +} + +var map_ImageReviewContainerSpec = map[string]string{ + "": "ImageReviewContainerSpec is a description of a container within the pod creation request.", + "image": "This can be in the form image:tag or image@SHA:012345679abcdef.", +} + +func (ImageReviewContainerSpec) SwaggerDoc() map[string]string { + return map_ImageReviewContainerSpec +} + +var map_ImageReviewSpec = map[string]string{ + "": "ImageReviewSpec is a description of the pod creation request.", + "containers": "Containers is a list of a subset of the information in each container of the Pod being created.", + "annotations": "Annotations is a list of key-value pairs extracted from the Pod's annotations. It only includes keys which match the pattern `*.image-policy.k8s.io/*`. It is up to each webhook backend to determine how to interpret these annotations, if at all.", + "namespace": "Namespace is the namespace the pod is being created in.", +} + +func (ImageReviewSpec) SwaggerDoc() map[string]string { + return map_ImageReviewSpec +} + +var map_ImageReviewStatus = map[string]string{ + "": "ImageReviewStatus is the result of the review for the pod creation request.", + "allowed": "Allowed indicates that all images were allowed to be run.", + "reason": "Reason should be empty unless Allowed is false in which case it may contain a short description of what is wrong. Kubernetes may truncate excessively long errors when displaying to the user.", + "auditAnnotations": "AuditAnnotations will be added to the attributes object of the admission controller request using 'AddAnnotation'. The keys should be prefix-less (i.e., the admission controller will add an appropriate prefix).", +} + +func (ImageReviewStatus) SwaggerDoc() map[string]string { + return map_ImageReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..f230656f3 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,121 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
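+// As with the admission deepcopy helpers earlier in this patch, these
+// functions re-allocate every slice and map so the copy is fully independent.
+// A minimal usage sketch (illustrative only, not generated code):
+//
+//	in := &ImageReview{Spec: ImageReviewSpec{Annotations: map[string]string{"k": "v"}}}
+//	out := in.DeepCopy()
+//	out.Spec.Annotations["k"] = "w" // in.Spec.Annotations["k"] is still "v"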
+func (in *ImageReview) DeepCopyInto(out *ImageReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReview. +func (in *ImageReview) DeepCopy() *ImageReview { + if in == nil { + return nil + } + out := new(ImageReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewContainerSpec) DeepCopyInto(out *ImageReviewContainerSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewContainerSpec. +func (in *ImageReviewContainerSpec) DeepCopy() *ImageReviewContainerSpec { + if in == nil { + return nil + } + out := new(ImageReviewContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewSpec) DeepCopyInto(out *ImageReviewSpec) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ImageReviewContainerSpec, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewSpec. +func (in *ImageReviewSpec) DeepCopy() *ImageReviewSpec { + if in == nil { + return nil + } + out := new(ImageReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewStatus) DeepCopyInto(out *ImageReviewStatus) { + *out = *in + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewStatus. +func (in *ImageReviewStatus) DeepCopy() *ImageReviewStatus { + if in == nil { + return nil + } + out := new(ImageReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/LICENSE b/vendor/k8s.io/apiextensions-apiserver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
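[Editor's note] The imagepolicy/v1alpha1 types vendored above form the request/response payload of the ImagePolicyWebhook admission plugin: the apiserver POSTs an ImageReview and expects it echoed back with Status filled in. A minimal sketch of such a backend follows; the "/review" path, the untrusted.example.com registry prefix, the listen port, and the TLS file names are assumptions for illustration, not part of this patch:

package main

import (
	"encoding/json"
	"log"
	"net/http"
	"strings"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
)

// reviewHandler decodes the ImageReview sent by the kube-apiserver, denies any
// container image from an (assumed) untrusted registry, and echoes the review
// back with the Status stanza populated.
func reviewHandler(w http.ResponseWriter, r *http.Request) {
	var review imagepolicy.ImageReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	review.Status = imagepolicy.ImageReviewStatus{Allowed: true}
	for _, c := range review.Spec.Containers {
		if strings.HasPrefix(c.Image, "untrusted.example.com/") { // hypothetical policy
			review.Status.Allowed = false
			review.Status.Reason = "images from untrusted.example.com are not permitted"
			break
		}
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(review)
}

func main() {
	http.HandleFunc("/review", reviewHandler)
	log.Fatal(http.ListenAndServeTLS(":8443", "tls.crt", "tls.key", nil))
}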
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go new file mode 100644 index 000000000..52c65ccbe --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go @@ -0,0 +1,302 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiextensions + +import "k8s.io/apimachinery/pkg/runtime" + +// TODO: Update this after a tag is created for interface fields in DeepCopy +func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { + if in == nil { + return nil + } + out := new(JSONSchemaProps) + + *out = *in + + if in.Default != nil { + defaultJSON := JSON(runtime.DeepCopyJSONValue(*(in.Default))) + out.Default = &(defaultJSON) + } else { + out.Default = nil + } + + if in.Example != nil { + exampleJSON := JSON(runtime.DeepCopyJSONValue(*(in.Example))) + out.Example = &(exampleJSON) + } else { + out.Example = nil + } + + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.MaxItems != nil { + in, out := &in.MaxItems, &out.MaxItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinItems != nil { + in, out := &in.MinItems, &out.MinItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MultipleOf != nil { + in, out := &in.MultipleOf, &out.MultipleOf + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.Enum != nil { + out.Enum = make([]JSON, len(in.Enum)) + for i := range in.Enum { + out.Enum[i] = runtime.DeepCopyJSONValue(in.Enum[i]) + } + } + + if in.MaxProperties != nil { + in, out := &in.MaxProperties, &out.MaxProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinProperties != nil { + in, out := &in.MinProperties, &out.MinProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.Items != nil { + in, out := &in.Items, &out.Items + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrArray) + (*in).DeepCopyInto(*out) + } + } + + if in.AllOf != nil { + in, out 
:= &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.Not != nil { + in, out := &in.Not, &out.Not + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaProps) + (*in).DeepCopyInto(*out) + } + } + + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.ExternalDocs != nil { + in, out := &in.ExternalDocs, &out.ExternalDocs + if *in == nil { + *out = nil + } else { + *out = new(ExternalDocumentation) + (*in).DeepCopyInto(*out) + } + } + + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.XMapType != nil { + in, out := &in.XMapType, &out.XMapType + *out = new(string) + **out = **in + } + + if in.XValidations != nil { + inValidations, outValidations := &in.XValidations, &out.XValidations + *outValidations = make([]ValidationRule, len(*inValidations)) + for i := range *inValidations { + in.XValidations[i].DeepCopyInto(&out.XValidations[i]) + } + } + + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go new file mode 100644 index 000000000..2a6b02dcc --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=apiextensions.k8s.io + +// Package apiextensions is the internal version of the API. +package apiextensions // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go new file mode 100644 index 000000000..52d6ea866 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go @@ -0,0 +1,257 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiextensions + +import ( + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() + +// SetCRDCondition sets the status condition. It either overwrites the existing one or creates a new one. +func SetCRDCondition(crd *CustomResourceDefinition, newCondition CustomResourceDefinitionCondition) { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + + existingCondition := FindCRDCondition(crd, newCondition.Type) + if existingCondition == nil { + crd.Status.Conditions = append(crd.Status.Conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status || existingCondition.LastTransitionTime.IsZero() { + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } + + existingCondition.Status = newCondition.Status + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// RemoveCRDCondition removes the status condition. +func RemoveCRDCondition(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) { + newConditions := []CustomResourceDefinitionCondition{} + for _, condition := range crd.Status.Conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + crd.Status.Conditions = newConditions +} + +// FindCRDCondition returns the condition you're looking for or nil. +func FindCRDCondition(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) *CustomResourceDefinitionCondition { + for i := range crd.Status.Conditions { + if crd.Status.Conditions[i].Type == conditionType { + return &crd.Status.Conditions[i] + } + } + + return nil +} + +// IsCRDConditionTrue indicates if the condition is present and strictly true. 
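+// Editor's note (illustrative sketch, not upstream godoc): a typical readiness
+// gate combines the condition types declared in types.go:
+//
+//	if IsCRDConditionTrue(crd, Established) && IsCRDConditionTrue(crd, NamesAccepted) {
+//		// the CRD's names were accepted and it is being served
+//	}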
+func IsCRDConditionTrue(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) bool { + return IsCRDConditionPresentAndEqual(crd, conditionType, ConditionTrue) +} + +// IsCRDConditionFalse indicates if the condition is present and false. +func IsCRDConditionFalse(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) bool { + return IsCRDConditionPresentAndEqual(crd, conditionType, ConditionFalse) +} + +// IsCRDConditionPresentAndEqual indicates if the condition is present and equal to the given status. +func IsCRDConditionPresentAndEqual(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType, status ConditionStatus) bool { + for _, condition := range crd.Status.Conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} + +// IsCRDConditionEquivalent returns true if the lhs and rhs are equivalent except for times. +func IsCRDConditionEquivalent(lhs, rhs *CustomResourceDefinitionCondition) bool { + if lhs == nil && rhs == nil { + return true + } + if lhs == nil || rhs == nil { + return false + } + + return lhs.Message == rhs.Message && lhs.Reason == rhs.Reason && lhs.Status == rhs.Status && lhs.Type == rhs.Type +} + +// CRDHasFinalizer returns true if the finalizer is in the list. +func CRDHasFinalizer(crd *CustomResourceDefinition, needle string) bool { + for _, finalizer := range crd.Finalizers { + if finalizer == needle { + return true + } + } + + return false +} + +// CRDRemoveFinalizer removes the finalizer if present. +func CRDRemoveFinalizer(crd *CustomResourceDefinition, needle string) { + newFinalizers := []string{} + for _, finalizer := range crd.Finalizers { + if finalizer != needle { + newFinalizers = append(newFinalizers, finalizer) + } + } + crd.Finalizers = newFinalizers +} + +// HasServedCRDVersion returns true if the given version is in the list of CRD's versions and the Served flag is set. +func HasServedCRDVersion(crd *CustomResourceDefinition, version string) bool { + for _, v := range crd.Spec.Versions { + if v.Name == version { + return v.Served + } + } + return false +} + +// GetCRDStorageVersion returns the storage version for given CRD. +func GetCRDStorageVersion(crd *CustomResourceDefinition) (string, error) { + for _, v := range crd.Spec.Versions { + if v.Storage { + return v.Name, nil + } + } + // This should not happened if crd is valid + return "", fmt.Errorf("invalid CustomResourceDefinition, no storage version") +} + +// IsStoredVersion returns whether the given version is the storage version of the CRD. +func IsStoredVersion(crd *CustomResourceDefinition, version string) bool { + for _, v := range crd.Status.StoredVersions { + if version == v { + return true + } + } + return false +} + +// GetSchemaForVersion returns the validation schema for the given version or nil. 
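+// Editor's note (illustrative sketch, not upstream godoc): because the helper
+// rejects CRDs that mix top-level and per-version schemas, callers need only a
+// single error check to obtain the effective schema for a version:
+//
+//	validation, err := GetSchemaForVersion(crd, "v1beta1")
+//	if err == nil && validation != nil {
+//		_ = validation.OpenAPIV3Schema // *JSONSchemaProps, may still be nil
+//	}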
+func GetSchemaForVersion(crd *CustomResourceDefinition, version string) (*CustomResourceValidation, error) { + if !HasPerVersionSchema(crd.Spec.Versions) { + return crd.Spec.Validation, nil + } + if crd.Spec.Validation != nil { + return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version schemas must be mutual exclusive", crd.Name, version) + } + for _, v := range crd.Spec.Versions { + if version == v.Name { + return v.Schema, nil + } + } + return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) +} + +// GetSubresourcesForVersion returns the subresources for given version or nil. +func GetSubresourcesForVersion(crd *CustomResourceDefinition, version string) (*CustomResourceSubresources, error) { + if !HasPerVersionSubresources(crd.Spec.Versions) { + return crd.Spec.Subresources, nil + } + if crd.Spec.Subresources != nil { + return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version subresources must be mutual exclusive", crd.Name, version) + } + for _, v := range crd.Spec.Versions { + if version == v.Name { + return v.Subresources, nil + } + } + return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) +} + +// GetColumnsForVersion returns the columns for given version or nil. +// NOTE: the newly logically-defaulted columns is not pointing to the original CRD object. +// One cannot mutate the original CRD columns using the logically-defaulted columns. Please iterate through +// the original CRD object instead. +func GetColumnsForVersion(crd *CustomResourceDefinition, version string) ([]CustomResourceColumnDefinition, error) { + if !HasPerVersionColumns(crd.Spec.Versions) { + return serveDefaultColumnsIfEmpty(crd.Spec.AdditionalPrinterColumns), nil + } + if len(crd.Spec.AdditionalPrinterColumns) > 0 { + return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version additionalPrinterColumns must be mutual exclusive", crd.Name, version) + } + for _, v := range crd.Spec.Versions { + if version == v.Name { + return serveDefaultColumnsIfEmpty(v.AdditionalPrinterColumns), nil + } + } + return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) +} + +// HasPerVersionSchema returns true if a CRD uses per-version schema. +func HasPerVersionSchema(versions []CustomResourceDefinitionVersion) bool { + for _, v := range versions { + if v.Schema != nil { + return true + } + } + return false +} + +// HasPerVersionSubresources returns true if a CRD uses per-version subresources. +func HasPerVersionSubresources(versions []CustomResourceDefinitionVersion) bool { + for _, v := range versions { + if v.Subresources != nil { + return true + } + } + return false +} + +// HasPerVersionColumns returns true if a CRD uses per-version columns. +func HasPerVersionColumns(versions []CustomResourceDefinitionVersion) bool { + for _, v := range versions { + if len(v.AdditionalPrinterColumns) > 0 { + return true + } + } + return false +} + +// serveDefaultColumnsIfEmpty applies logically defaulting to columns, if the input columns is empty. +// NOTE: in this way, the newly logically-defaulted columns is not pointing to the original CRD object. +// One cannot mutate the original CRD columns using the logically-defaulted columns. Please iterate through +// the original CRD object instead. 
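+// Editor's note (illustrative sketch, not upstream godoc): callers normally
+// reach this through GetColumnsForVersion above; a CRD that declares no printer
+// columns at all ends up with the single default Age column:
+//
+//	cols, _ := GetColumnsForVersion(crd, "v1")
+//	// cols[0].Name == "Age", cols[0].JSONPath == ".metadata.creationTimestamp"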
+func serveDefaultColumnsIfEmpty(columns []CustomResourceColumnDefinition) []CustomResourceColumnDefinition { + if len(columns) > 0 { + return columns + } + return []CustomResourceColumnDefinition{ + {Name: "Age", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"], JSONPath: ".metadata.creationTimestamp"}, + } +} + +// HasVersionServed returns true if given CRD has given version served. +func HasVersionServed(crd *CustomResourceDefinition, version string) bool { + for _, v := range crd.Spec.Versions { + if !v.Served || v.Name != version { + continue + } + return true + } + return false +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go new file mode 100644 index 000000000..273f7f123 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiextensions + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiextensions.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CustomResourceDefinition{}, + &CustomResourceDefinitionList{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go new file mode 100644 index 000000000..b1c5f6f4c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -0,0 +1,422 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apiextensions + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ConversionStrategyType describes different conversion types. +type ConversionStrategyType string + +const ( + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. + NoneConverter ConversionStrategyType = "None" + // WebhookConverter is a converter that calls to an external webhook to convert the CR. + WebhookConverter ConversionStrategyType = "Webhook" +) + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +type CustomResourceDefinitionSpec struct { + // Group is the group this resource belongs in + Group string + // Version is the version this resource belongs in + // Should be always first item in Versions field if provided. + // Optional, but at least one of Version or Versions must be set. + // Deprecated: Please use `Versions`. + Version string + // Names are the names used to describe this custom resource + Names CustomResourceDefinitionNames + // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced + Scope ResourceScope + // Validation describes the validation methods for CustomResources + // Optional, the global validation schema for all versions. + // Top-level and per-version schemas are mutually exclusive. + // +optional + Validation *CustomResourceValidation + // Subresources describes the subresources for CustomResource + // Optional, the global subresources for all versions. + // Top-level and per-version subresources are mutually exclusive. + // +optional + Subresources *CustomResourceSubresources + // Versions is the list of all supported versions for this resource. + // If Version field is provided, this field is optional. + // Validation: All versions must use the same validation schema for now. i.e., top + // level Validation field is applied to all of these versions. + // Order: The version name will be used to compute the order. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + Versions []CustomResourceDefinitionVersion + // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // Optional, the global columns for all versions. + // Top-level and per-version columns are mutually exclusive. + // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition + + // `conversion` defines conversion settings for the CRD. + Conversion *CustomResourceConversion + + // preserveUnknownFields disables pruning of object fields which are not + // specified in the OpenAPI schema. apiVersion, kind, metadata and known + // fields inside metadata are always preserved. + // Defaults to true in v1beta and will default to false in v1. + PreserveUnknownFields *bool +} + +// CustomResourceConversion describes how to convert different versions of a CR. +type CustomResourceConversion struct { + // `strategy` specifies the conversion strategy. 
Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false. + Strategy ConversionStrategyType + + // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. + WebhookClientConfig *WebhookClientConfig + + // ConversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, conversion will fail for this object. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + // +optional + ConversionReviewVersions []string +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook. It has the same field as admissionregistration.internal.WebhookClientConfig. +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string + // `name` is the name of the service. + // Required + Name string + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string + + // If specified, the port on the service that hosting webhook. + // `port` should be a valid port number (1-65535, inclusive). 
+ // +optional + Port int32 +} + +// CustomResourceDefinitionVersion describes a version for CRD. +type CustomResourceDefinitionVersion struct { + // Name is the version name, e.g. “v1”, “v2beta1”, etc. + Name string + // Served is a flag enabling/disabling this version from being served via REST APIs + Served bool + // Storage flags the version as storage version. There must be exactly one flagged + // as storage version. + Storage bool + // deprecated indicates this version of the custom resource API is deprecated. + // When set to true, API requests to this version receive a warning header in the server response. + // Defaults to false. + Deprecated bool + // deprecationWarning overrides the default warning returned to API clients. + // May only be set when `deprecated` is true. + // The default warning indicates this version is deprecated and recommends use + // of the newest served version of equal or greater stability, if one exists. + DeprecationWarning *string + // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. + // Top-level and per-version schemas are mutually exclusive. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // +optional + Schema *CustomResourceValidation + // Subresources describes the subresources for CustomResource + // Top-level and per-version subresources are mutually exclusive. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // +optional + Subresources *CustomResourceSubresources + // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // Top-level and per-version columns are mutually exclusive. + // Per-version columns must not all be set to identical values (top-level columns should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an + // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must + // be explicitly set to null + // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition +} + +// CustomResourceColumnDefinition specifies a column for server side printing. +type CustomResourceColumnDefinition struct { + // name is a human readable name for the column. + Name string + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string + // description is a human readable description of this column. + Description string + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority.
Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 + + // JSONPath is a simple JSON path, i.e. without array notation. + JSONPath string +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +type CustomResourceDefinitionNames struct { + // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration + // too: plural.group and it must be all lowercase. + Plural string + // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + Singular string + // ShortNames are short names for the resource. It must be all lowercase. + ShortNames []string + // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + Kind string + // ListKind is the serialized kind of the list for this resource. Defaults to List. + ListKind string + // Categories is a list of grouped resources custom resources belong to (e.g. 'all') + // +optional + Categories []string +} + +// ResourceScope is an enum defining the different scopes available to a custom resource +type ResourceScope string + +const ( + ClusterScoped ResourceScope = "Cluster" + NamespaceScoped ResourceScope = "Namespaced" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type +type CustomResourceDefinitionConditionType string + +const ( + // Established means that the resource has become active. A resource is established when all names are + // accepted without a conflict for the first time. A resource stays established until deleted, even during + // a later NamesAccepted due to changed names. Note that not all names can be changed. + Established CustomResourceDefinitionConditionType = "Established" + // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in + // the group and are therefore accepted. + NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" + // NonStructuralSchema means that one or more OpenAPI schema is not structural. + // + // A schema is structural if it specifies types for all values, with the only exceptions of those with + // - x-kubernetes-int-or-string: true — for fields which can be integer or string + // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values + // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions + // specified under allOf, anyOf, oneOf or not. + // + // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be + // available for non-structural CRDs: + // - pruning + // - defaulting + // - read-only + // - OpenAPI publishing + // - webhook conversion + NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" + // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. 
+ Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. + KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" +) + +// CustomResourceDefinitionCondition contains details for the current condition of this pod. +type CustomResourceDefinitionCondition struct { + // Type is the type of the condition. Types include Established, NamesAccepted and Terminating. + Type CustomResourceDefinitionConditionType + // Status is the status of the condition. + // Can be True, False, Unknown. + Status ConditionStatus + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string + // Human-readable message indicating details about last transition. + // +optional + Message string +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +type CustomResourceDefinitionStatus struct { + // Conditions indicate state for particular aspects of a CustomResourceDefinition + // +listType=map + // +listMapKey=type + Conditions []CustomResourceDefinitionCondition + + // AcceptedNames are the names that are actually being used to serve discovery + // They may be different than the names in spec. + AcceptedNames CustomResourceDefinitionNames + + // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so the migration controller can first finish a migration to another version (i.e. + // that no old objects are left in the storage), and then remove the rest of the + // versions from this list. + // None of the versions in this list can be removed from the spec.Versions field. + StoredVersions []string +} + +// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of +// a CustomResourceDefinition +const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +type CustomResourceDefinition struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Spec describes how the user wants the resources to appear + Spec CustomResourceDefinitionSpec + // Status indicates the actual state of the CustomResourceDefinition + Status CustomResourceDefinitionStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +type CustomResourceDefinitionList struct { + metav1.TypeMeta + metav1.ListMeta + + // Items individual CustomResourceDefinitions + Items []CustomResourceDefinition +} + +// CustomResourceValidation is a list of validation methods for CustomResources. 
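+// Editor's note (illustrative sketch, not upstream godoc): a minimal validation
+// requiring a non-negative spec.replicas, built from the internal JSONSchemaProps
+// defined in types_jsonschema.go:
+//
+//	min := float64(0)
+//	v := CustomResourceValidation{OpenAPIV3Schema: &JSONSchemaProps{
+//		Type: "object",
+//		Properties: map[string]JSONSchemaProps{
+//			"spec": {Type: "object", Properties: map[string]JSONSchemaProps{
+//				"replicas": {Type: "integer", Minimum: &min},
+//			}},
+//		},
+//	}}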
+type CustomResourceValidation struct { + // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. + OpenAPIV3Schema *JSONSchemaProps +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +type CustomResourceSubresources struct { + // Status denotes the status subresource for CustomResources + Status *CustomResourceSubresourceStatus + // Scale denotes the scale subresource for CustomResources + Scale *CustomResourceSubresourceScale +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +type CustomResourceSubresourceStatus struct{} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +type CustomResourceSubresourceScale struct { + // SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under .spec. + // If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. + SpecReplicasPath string + // StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under .status. + // If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource + // will default to 0. + StatusReplicasPath string + // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under .status or .spec. + // Must be set to work with HPA. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the CustomResource, the status label selector value in the /scale + // subresource will default to the empty string. + // +optional + LabelSelectorPath *string +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go new file mode 100644 index 000000000..8c4e147f0 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -0,0 +1,319 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiextensions + +// FieldValueErrorReason is a machine-readable value providing more detail about why a field failed the validation. +// +enum +type FieldValueErrorReason string + +const ( + // FieldValueRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). + FieldValueRequired FieldValueErrorReason = "FieldValueRequired" + // FieldValueDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). + FieldValueDuplicate FieldValueErrorReason = "FieldValueDuplicate" + // FieldValueInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). + FieldValueInvalid FieldValueErrorReason = "FieldValueInvalid" + // FieldValueForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). + FieldValueForbidden FieldValueErrorReason = "FieldValueForbidden" +) + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +type JSONSchemaProps struct { + ID string + Schema JSONSchemaURL + Ref *string + Description string + Type string + Nullable bool + Format string + Title string + Default *JSON + Maximum *float64 + ExclusiveMaximum bool + Minimum *float64 + ExclusiveMinimum bool + MaxLength *int64 + MinLength *int64 + Pattern string + MaxItems *int64 + MinItems *int64 + UniqueItems bool + MultipleOf *float64 + Enum []JSON + MaxProperties *int64 + MinProperties *int64 + Required []string + Items *JSONSchemaPropsOrArray + AllOf []JSONSchemaProps + OneOf []JSONSchemaProps + AnyOf []JSONSchemaProps + Not *JSONSchemaProps + Properties map[string]JSONSchemaProps + AdditionalProperties *JSONSchemaPropsOrBool + PatternProperties map[string]JSONSchemaProps + Dependencies JSONSchemaDependencies + AdditionalItems *JSONSchemaPropsOrBool + Definitions JSONSchemaDefinitions + ExternalDocs *ExternalDocumentation + Example *JSON + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + XPreserveUnknownFields *bool + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. Both ObjectMeta and TypeMeta + // are validated automatically. x-kubernetes-preserve-unknown-fields + // must be true. + XEmbeddedResource bool + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. 
+	//
+	// This tag MUST only be used on lists that have the "x-kubernetes-list-type"
+	// extension set to "map". Also, the values specified for this attribute must
+	// be a scalar typed field of the child structure (no nesting is supported).
+	XListMapKeys []string
+
+	// x-kubernetes-list-type annotates an array to further describe its topology.
+	// This extension must only be used on lists and may have 3 possible values:
+	//
+	// 1) `atomic`: the list is treated as a single entity, like a scalar.
+	//    Atomic lists will be entirely replaced when updated. This extension
+	//    may be used on any type of list (struct, scalar, ...).
+	// 2) `set`:
+	//    Sets are lists that must not have multiple items with the same value. Each
+	//    value must be a scalar, an object with x-kubernetes-map-type `atomic` or an
+	//    array with x-kubernetes-list-type `atomic`.
+	// 3) `map`:
+	//    These lists are like maps in that their elements have a non-index key
+	//    used to identify them. Order is preserved upon merge. The map tag
+	//    must only be used on a list with elements of type object.
+	XListType *string
+
+	// x-kubernetes-map-type annotates an object to further describe its topology.
+	// This extension must only be used when type is object and may have 2 possible values:
+	//
+	// 1) `granular`:
+	//    These maps are actual maps (key-value pairs) and each field is independent
+	//    of the others (they can each be manipulated by separate actors). This is
+	//    the default behaviour for all maps.
+	// 2) `atomic`: the map is treated as a single entity, like a scalar.
+	//    Atomic maps will be entirely replaced when updated.
+	// +optional
+	XMapType *string
+
+	// x-kubernetes-validations describes a list of validation rules written in the CEL expression language.
+	// This field is alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.
+	// +patchMergeKey=rule
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=rule
+	XValidations ValidationRules
+}
+
+// ValidationRules describes a list of validation rules written in the CEL expression language.
+type ValidationRules []ValidationRule
+
+// ValidationRule describes a validation rule written in the CEL expression language.
+type ValidationRule struct {
+	// Rule represents the expression which will be evaluated by CEL.
+	// ref: https://github.com/google/cel-spec
+	// The Rule is scoped to the location of the x-kubernetes-validations extension in the schema.
+	// The `self` variable in the CEL expression is bound to the scoped value.
+	// Example:
+	// - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"}
+	//
+	// If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable
+	// via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as
+	// absent fields in CEL expressions.
+	// If the Rule is scoped to an object with additionalProperties (i.e. a map), the values of the map
+	// are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map
+	// are accessible via CEL macros and functions such as `self.all(...)`.
+	// If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and
+	// functions.
+	// If the Rule is scoped to a scalar, `self` is bound to the scalar value.
+	// Examples:
+	// - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"}
+	// - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"}
+	// - Rule scoped to a string value: {"rule": "self.startsWith('kube')"}
+	//
+	// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+	// object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.
+	//
+	// Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL
+	// expressions. This includes:
+	// - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields.
+	// - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as:
+	//   - A schema with no type and x-kubernetes-preserve-unknown-fields set to true
+	//   - An array where the items schema is of an "unknown type"
+	//   - An object where the additionalProperties schema is of an "unknown type"
+	//
+	// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+	// Accessible property names are escaped according to the following rules when accessed in the expression:
+	// - '__' escapes to '__underscores__'
+	// - '.' escapes to '__dot__'
+	// - '-' escapes to '__dash__'
+	// - '/' escapes to '__slash__'
+	// - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
+	//   "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
+	//   "import", "let", "loop", "package", "namespace", "return".
+	// Examples:
+	// - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"}
+	// - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"}
+	// - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"}
+	//
+	// Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
+	// Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type:
+	// - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+	//   non-intersecting elements in `Y` are appended, retaining their partial order.
+	// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+	//   are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+	//   non-intersecting keys are appended, retaining their partial order.
+	//
+	// If `rule` makes use of the `oldSelf` variable it is implicitly a
+	// `transition rule`.
+	//
+	// By default, the `oldSelf` variable is the same type as `self`.
+	// When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional
+	// variable whose value() is the same type as `self`.
+	// See the documentation for the `optionalOldSelf` field for details.
+	//
+	// Transition rules by default are applied only on UPDATE requests and are
+	// skipped if an old value could not be found. You can opt a transition
+	// rule into unconditional evaluation by setting `optionalOldSelf` to true.
+	//
+	Rule string
+	// Message represents the message displayed when validation fails. The message is required if the Rule contains
+	// line breaks. The message must not contain line breaks.
+	// If unset, the message is "failed rule: {Rule}".
+	// e.g. "must be a URL with the host matching spec.host"
+	Message string
+	// MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
+	// Since messageExpression is used as a failure message, it must evaluate to a string.
+	// If both message and messageExpression are present on a rule, then messageExpression will be used if validation
+	// fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
+	// as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
+	// that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
+	// the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
+	// messageExpression has access to all the same variables as the rule; the only difference is the return type.
+	// Example:
+	// "x must be less than max ("+string(self.max)+")"
+	// +optional
+	MessageExpression string
+	// reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule.
+	// The HTTP status code returned to the caller will match the reason of the first failed validation rule.
+	// The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate".
+	// If not set, defaults to "FieldValueInvalid".
+	// All future added reasons must be accepted by clients when reading this value, and unknown reasons should be treated as FieldValueInvalid.
+	// +optional
+	Reason *FieldValueErrorReason
+	// fieldPath represents the field path returned when the validation fails.
+	// It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field.
+	// e.g. when the validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`.
+	// If the validation checks that two lists must have unique attributes, the fieldPath could be set to either of the lists, e.g. `.testList`.
+	// Numeric indexes into arrays are not supported.
+	// It currently supports only child operations that refer to an existing field. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info.
+	// For a field name that contains special characters, use `['specialName']` to refer to the field name,
+	// e.g. for an attribute `foo.34$` that appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`.
+	// +optional
+	FieldPath string
+
+	// optionalOldSelf is used to opt a transition rule into evaluation
+	// even when the object is first created, or if the old object is
+	// missing the value.
+	//
+	// When enabled `oldSelf` will be a CEL optional whose value will be
+	// `None` if there is no old value, or when the object is initially created.
+	//
+	// You may check for presence of oldSelf using `oldSelf.hasValue()` and
+	// unwrap it after checking using `oldSelf.value()`.
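+	//
+	// A minimal sketch of such a transition rule (the validated field is
+	// illustrative, not taken from any real CRD):
+	//
+	//   {"rule": "!oldSelf.hasValue() || self >= oldSelf.value()", "optionalOldSelf": true}
+	//
+	// This permits the initial create (oldSelf has no value) and otherwise requires
+	// the new value to be non-decreasing.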
Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + OptionalOldSelf *bool +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +type JSON interface{} + +// JSONSchemaURL represents a schema url. +type JSONSchemaURL string + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +type JSONSchemaPropsOrArray struct { + Schema *JSONSchemaProps + JSONSchemas []JSONSchemaProps +} + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +type JSONSchemaPropsOrBool struct { + Allows bool + Schema *JSONSchemaProps +} + +// JSONSchemaDependencies represent a dependencies property. +type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +type JSONSchemaPropsOrStringArray struct { + Schema *JSONSchemaProps + Property []string +} + +// JSONSchemaDefinitions contains the models explicitly defined in this spec. +type JSONSchemaDefinitions map[string]JSONSchemaProps + +// ExternalDocumentation allows referencing an external resource for extended documentation. +type ExternalDocumentation struct { + Description string + URL string +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions new file mode 100644 index 000000000..7408dd121 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions @@ -0,0 +1,5 @@ +inverseRules: + # Allow use of this package in all k8s.io packages. + - selectorRegexp: k8s[.]io + allowedPrefixes: + - '' diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go new file mode 100644 index 000000000..4d29ff823 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go @@ -0,0 +1,215 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "bytes" + unsafe "unsafe" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/util/json" +) + +func Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + if err := autoConvert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in, out, s); err != nil { + return err + } + if in.Default != nil && *(in.Default) == nil { + out.Default = nil + } + if in.Example != nil && *(in.Example) == nil { + out.Example = nil + } + return nil +} + +var nullLiteral = []byte(`null`) + +func Convert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + raw, err := json.Marshal(*in) + if err != nil { + return err + } + if len(raw) == 0 || bytes.Equal(raw, nullLiteral) { + // match JSON#UnmarshalJSON treatment of literal nulls + out.Raw = nil + } else { + out.Raw = raw + } + return nil +} + +func Convert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + if in != nil { + var i interface{} + if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) { + if err := json.Unmarshal(in.Raw, &i); err != nil { + return err + } + } + *out = i + } else { + out = nil + } + return nil +} + +func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { + if err := autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in, out, s); err != nil { + return err + } + + if len(out.Versions) == 0 && len(in.Version) > 0 { + // no versions were specified, and a version name was specified + out.Versions = []CustomResourceDefinitionVersion{{Name: in.Version, Served: true, Storage: true}} + } + + // If spec.{subresources,validation,additionalPrinterColumns} exists, move to versions + if in.Subresources != nil { + subresources := &CustomResourceSubresources{} + if err := Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in.Subresources, subresources, s); err != nil { + return err + } + for i := range out.Versions { + out.Versions[i].Subresources = subresources + } + } + if in.Validation != nil { + schema := &CustomResourceValidation{} + if err := Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in.Validation, schema, s); err != nil { + return err + } + for i := range out.Versions { + out.Versions[i].Schema = schema + } + } + if in.AdditionalPrinterColumns != nil { + additionalPrinterColumns := make([]CustomResourceColumnDefinition, len(in.AdditionalPrinterColumns)) + for i := range in.AdditionalPrinterColumns { + if err := Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(&in.AdditionalPrinterColumns[i], &additionalPrinterColumns[i], s); err != nil { + return err + } + } + for i := range out.Versions { + out.Versions[i].AdditionalPrinterColumns = additionalPrinterColumns + } + } + return nil +} + +func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { + if err := autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s); err != nil { + return err + } 
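+
+	// The remainder of this function reconstructs the deprecated top-level spec
+	// fields (Version, Subresources, Validation, AdditionalPrinterColumns) from the
+	// per-version data, so that round-tripping a v1 object through the internal
+	// type is lossless.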
+ + if len(out.Versions) == 0 { + return nil + } + + // Copy versions[0] to version + out.Version = out.Versions[0].Name + + // If versions[*].{subresources,schema,additionalPrinterColumns} are identical, move to spec + subresources := out.Versions[0].Subresources + subresourcesIdentical := true + validation := out.Versions[0].Schema + validationIdentical := true + additionalPrinterColumns := out.Versions[0].AdditionalPrinterColumns + additionalPrinterColumnsIdentical := true + + // Detect if per-version fields are identical + for _, v := range out.Versions { + if subresourcesIdentical && !apiequality.Semantic.DeepEqual(v.Subresources, subresources) { + subresourcesIdentical = false + } + if validationIdentical && !apiequality.Semantic.DeepEqual(v.Schema, validation) { + validationIdentical = false + } + if additionalPrinterColumnsIdentical && !apiequality.Semantic.DeepEqual(v.AdditionalPrinterColumns, additionalPrinterColumns) { + additionalPrinterColumnsIdentical = false + } + } + + // If they are, set the top-level fields and clear the per-version fields + if subresourcesIdentical { + out.Subresources = subresources + } + if validationIdentical { + out.Validation = validation + } + if additionalPrinterColumnsIdentical { + out.AdditionalPrinterColumns = additionalPrinterColumns + } + for i := range out.Versions { + if subresourcesIdentical { + out.Versions[i].Subresources = nil + } + if validationIdentical { + out.Versions[i].Schema = nil + } + if additionalPrinterColumnsIdentical { + out.Versions[i].AdditionalPrinterColumns = nil + } + } + + return nil +} + +func Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + if err := autoConvert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in, out, s); err != nil { + return err + } + + out.WebhookClientConfig = nil + out.ConversionReviewVersions = nil + if in.Webhook != nil { + out.ConversionReviewVersions = in.Webhook.ConversionReviewVersions + if in.Webhook.ClientConfig != nil { + out.WebhookClientConfig = &apiextensions.WebhookClientConfig{} + if err := Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in.Webhook.ClientConfig, out.WebhookClientConfig, s); err != nil { + return err + } + } + } + return nil +} + +func Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + if err := autoConvert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in, out, s); err != nil { + return err + } + + out.Webhook = nil + if in.WebhookClientConfig != nil || in.ConversionReviewVersions != nil { + out.Webhook = &WebhookConversion{} + out.Webhook.ConversionReviewVersions = in.ConversionReviewVersions + if in.WebhookClientConfig != nil { + out.Webhook.ClientConfig = &WebhookClientConfig{} + if err := Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in.WebhookClientConfig, out.Webhook.ClientConfig, s); err != nil { + return err + } + } + } + return nil +} + +func Convert_apiextensions_ValidationRules_To_v1_ValidationRules(in *apiextensions.ValidationRules, out *ValidationRules, s conversion.Scope) error { + *out = *(*ValidationRules)(unsafe.Pointer(in)) + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go 
b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go new file mode 100644 index 000000000..c548e642d --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go @@ -0,0 +1,262 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// TODO: Update this after a tag is created for interface fields in DeepCopy +func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { + if in == nil { + return nil + } + out := new(JSONSchemaProps) + *out = *in + + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.MaxItems != nil { + in, out := &in.MaxItems, &out.MaxItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinItems != nil { + in, out := &in.MinItems, &out.MinItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MultipleOf != nil { + in, out := &in.MultipleOf, &out.MultipleOf + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxProperties != nil { + in, out := &in.MaxProperties, &out.MaxProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinProperties != nil { + in, out := &in.MinProperties, &out.MinProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.Items != nil { + in, out := &in.Items, &out.Items + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrArray) + (*in).DeepCopyInto(*out) + } + } + + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.Not != nil { + in, out := &in.Not, &out.Not + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaProps) + 
(*in).DeepCopyInto(*out) + } + } + + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.ExternalDocs != nil { + in, out := &in.ExternalDocs, &out.ExternalDocs + if *in == nil { + *out = nil + } else { + *out = new(ExternalDocumentation) + (*in).DeepCopyInto(*out) + } + } + + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + if in.XMapType != nil { + in, out := &in.XMapType, &out.XMapType + *out = new(string) + **out = **in + } + + if in.XValidations != nil { + inValidations, outValidations := &in.XValidations, &out.XValidations + *outValidations = make([]ValidationRule, len(*inValidations)) + for i := range *inValidations { + in.XValidations[i].DeepCopyInto(&out.XValidations[i]) + } + } + + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go new file mode 100644 index 000000000..5cebec927 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinitionSpec(&obj.Spec) + if len(obj.Status.StoredVersions) == 0 { + for _, v := range obj.Spec.Versions { + if v.Storage { + obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) + break + } + } + } +} + +func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) { + if len(obj.Names.Singular) == 0 { + obj.Names.Singular = strings.ToLower(obj.Names.Kind) + } + if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { + obj.Names.ListKind = obj.Names.Kind + "List" + } + if obj.Conversion == nil { + obj.Conversion = &CustomResourceConversion{ + Strategy: NoneConverter, + } + } +} + +// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference +func SetDefaults_ServiceReference(obj *ServiceReference) { + if obj.Port == nil { + obj.Port = utilpointer.Int32Ptr(443) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go new file mode 100644 index 000000000..09d4872f8 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +groupName=apiextensions.k8s.io + +// Package v1 is the v1 version of the API. +package v1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go new file mode 100644 index 000000000..6c22a5169 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -0,0 +1,9423 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto + +package v1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{0} +} +func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionRequest.Merge(m, src) +} +func (m *ConversionRequest) XXX_Size() int { + return m.Size() +} +func (m *ConversionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{1} +} +func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionResponse.Merge(m, src) +} +func (m *ConversionResponse) XXX_Size() int { + return m.Size() +} +func (m *ConversionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo + +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{2} +} +func (m *ConversionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionReview.Merge(m, src) +} +func (m *ConversionReview) XXX_Size() int { + return m.Size() +} +func (m *ConversionReview) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ConversionReview proto.InternalMessageInfo + +func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } +func (*CustomResourceColumnDefinition) ProtoMessage() {} +func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{3} +} +func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) +} +func (m *CustomResourceColumnDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo + +func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } +func (*CustomResourceConversion) ProtoMessage() {} +func (*CustomResourceConversion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{4} +} +func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceConversion.Merge(m, src) +} +func (m *CustomResourceConversion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceConversion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo + +func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } +func (*CustomResourceDefinition) ProtoMessage() {} +func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{5} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } +func (*CustomResourceDefinitionCondition) ProtoMessage() {} +func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{6} +} +func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) +} +func (m *CustomResourceDefinitionCondition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } +func (*CustomResourceDefinitionList) ProtoMessage() {} +func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{7} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo + +func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } +func (*CustomResourceDefinitionNames) ProtoMessage() {} +func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{8} +} +func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) +} +func (m *CustomResourceDefinitionNames) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo + +func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } +func (*CustomResourceDefinitionSpec) ProtoMessage() {} +func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{9} +} +func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) +} +func (m *CustomResourceDefinitionSpec) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionSpec 
proto.InternalMessageInfo + +func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } +func (*CustomResourceDefinitionStatus) ProtoMessage() {} +func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{10} +} +func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) +} +func (m *CustomResourceDefinitionStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo + +func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } +func (*CustomResourceDefinitionVersion) ProtoMessage() {} +func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{11} +} +func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) +} +func (m *CustomResourceDefinitionVersion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo + +func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } +func (*CustomResourceSubresourceScale) ProtoMessage() {} +func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{12} +} +func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) +} +func (m *CustomResourceSubresourceScale) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo + +func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } +func (*CustomResourceSubresourceStatus) ProtoMessage() {} +func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{13} +} +func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) +} +func (m *CustomResourceSubresourceStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo + +func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } +func (*CustomResourceSubresources) ProtoMessage() {} +func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{14} +} +func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresources.Merge(m, src) +} +func (m *CustomResourceSubresources) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresources) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo + +func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } +func (*CustomResourceValidation) ProtoMessage() {} +func (*CustomResourceValidation) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{15} +} +func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceValidation.Merge(m, src) +} +func (m *CustomResourceValidation) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceValidation) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{16} +} +func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocumentation.Merge(m, src) +} +func (m *ExternalDocumentation) XXX_Size() int { + return m.Size() +} +func (m *ExternalDocumentation) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo + +func (m *JSON) Reset() { *m 
= JSON{} } +func (*JSON) ProtoMessage() {} +func (*JSON) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{17} +} +func (m *JSON) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSON) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSON.Merge(m, src) +} +func (m *JSON) XXX_Size() int { + return m.Size() +} +func (m *JSON) XXX_DiscardUnknown() { + xxx_messageInfo_JSON.DiscardUnknown(m) +} + +var xxx_messageInfo_JSON proto.InternalMessageInfo + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{18} +} +func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaProps.Merge(m, src) +} +func (m *JSONSchemaProps) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaProps) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{19} +} +func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{20} +} +func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) +} +func (m *JSONSchemaPropsOrBool) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } +func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} 
+func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{21} +} +func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{22} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidationRule) Reset() { *m = ValidationRule{} } +func (*ValidationRule) ProtoMessage() {} +func (*ValidationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{23} +} +func (m *ValidationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidationRule.Merge(m, src) +} +func (m *ValidationRule) XXX_Size() int { + return m.Size() +} +func (m *ValidationRule) XXX_DiscardUnknown() { + xxx_messageInfo_ValidationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidationRule proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{24} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func (m *WebhookConversion) Reset() { *m = WebhookConversion{} } +func 
(*WebhookConversion) ProtoMessage() {} +func (*WebhookConversion) Descriptor() ([]byte, []int) { + return fileDescriptor_f5a35c9667703937, []int{25} +} +func (m *WebhookConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookConversion.Merge(m, src) +} +func (m *WebhookConversion) XXX_Size() int { + return m.Size() +} +func (m *WebhookConversion) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookConversion proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ConversionRequest)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionRequest") + proto.RegisterType((*ConversionResponse)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionResponse") + proto.RegisterType((*ConversionReview)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionReview") + proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition") + proto.RegisterType((*CustomResourceConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceConversion") + proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition") + proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionCondition") + proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionList") + proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames") + proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionSpec") + proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatus") + proto.RegisterType((*CustomResourceDefinitionVersion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion") + proto.RegisterType((*CustomResourceSubresourceScale)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale") + proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus") + proto.RegisterType((*CustomResourceSubresources)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources") + proto.RegisterType((*CustomResourceValidation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation") + proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation") + proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSON") + proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps") + proto.RegisterMapType((JSONSchemaDefinitions)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.DefinitionsEntry") + proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.DependenciesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.PatternPropertiesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.PropertiesEntry") + proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray") + proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool") + proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ServiceReference") + proto.RegisterType((*ValidationRule)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ValidationRule") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig") + proto.RegisterType((*WebhookConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookConversion") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto", fileDescriptor_f5a35c9667703937) +} + +var fileDescriptor_f5a35c9667703937 = []byte{ + // 3137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdf, 0x6f, 0x5c, 0x47, + 0xf5, 0xcf, 0x5d, 0x7b, 0xed, 0xf5, 0xd8, 0x89, 0xed, 0x49, 0xec, 0xef, 0x8d, 0x9b, 0x78, 0x9d, + 0xcd, 0xb7, 0xc1, 0x6d, 0xd3, 0x75, 0x1b, 0x5a, 0x1a, 0xca, 0x2f, 0x79, 0x6d, 0xa7, 0x75, 0x13, + 0xc7, 0xd6, 0x6c, 0x92, 0xba, 0x2d, 0xa2, 0xbd, 0xde, 0x3b, 0xbb, 0xbe, 0xf5, 0xfd, 0x95, 0x99, + 0x7b, 0xfd, 0x43, 0x02, 0xa9, 0x02, 0x55, 0x40, 0x25, 0x28, 0x0f, 0xa8, 0x3c, 0x21, 0x84, 0x50, + 0x1f, 0xe0, 0x01, 0xde, 0xe0, 0x5f, 0xe8, 0x0b, 0x52, 0x25, 0x24, 0x54, 0x09, 0x69, 0x45, 0x97, + 0x7f, 0x00, 0x09, 0x10, 0xc2, 0x0f, 0x08, 0xcd, 0x8f, 0x3b, 0x77, 0xf6, 0xee, 0x6e, 0x12, 0xd9, + 0xeb, 0xf6, 0x6d, 0xf7, 0x9c, 0x33, 0xe7, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x67, 0xce, 0x1d, 0x60, + 0xed, 0x5c, 0xa7, 0x65, 0x27, 0x58, 0xd8, 0x89, 0xb7, 0x30, 0xf1, 0x71, 0x84, 0xe9, 0xc2, 0x2e, + 0xf6, 0xed, 0x80, 0x2c, 0x48, 0x86, 0x15, 0x3a, 0x78, 0x3f, 0xc2, 0x3e, 0x75, 0x02, 0x9f, 0x3e, + 0x6d, 0x85, 0x0e, 0xc5, 0x64, 0x17, 0x93, 0x85, 0x70, 0xa7, 0xc1, 0x78, 0xb4, 0x5d, 0x60, 0x61, + 0xf7, 0xd9, 0x85, 0x06, 0xf6, 0x31, 0xb1, 0x22, 0x6c, 0x97, 0x43, 0x12, 0x44, 0x01, 0xbc, 0x2e, + 0x34, 0x95, 0xdb, 0x04, 0xdf, 0x54, 0x9a, 0xca, 0xe1, 0x4e, 0x83, 0xf1, 0x68, 0xbb, 0x40, 0x79, + 0xf7, 0xd9, 0x99, 0xa7, 0x1b, 0x4e, 0xb4, 0x1d, 0x6f, 0x95, 0x6b, 0x81, 0xb7, 0xd0, 0x08, 0x1a, + 0xc1, 0x02, 0x57, 0xb8, 0x15, 0xd7, 0xf9, 0x3f, 0xfe, 0x87, 0xff, 0x12, 0x40, 0x33, 0xcf, 0xa5, + 0x26, 0x7b, 0x56, 0x6d, 0xdb, 0xf1, 0x31, 0x39, 0x48, 0xed, 0xf4, 0x70, 0x64, 0x75, 0x31, 0x6f, + 0x66, 0xa1, 0xd7, 0x28, 0x12, 0xfb, 0x91, 0xe3, 0xe1, 0x8e, 0x01, 0x5f, 0x7a, 0xd8, 0x00, 0x5a, + 0xdb, 0xc6, 0x9e, 0x95, 0x1d, 0x57, 0x3a, 0x34, 0xc0, 0xe4, 0x52, 0xe0, 0xef, 0x62, 0xc2, 0x26, + 0x88, 0xf0, 0xfd, 0x18, 0xd3, 0x08, 0x56, 
0xc0, 0x40, 0xec, 0xd8, 0xa6, 0x31, 0x67, 0xcc, 0x8f, + 0x54, 0x9e, 0xf9, 0xa8, 0x59, 0x3c, 0xd5, 0x6a, 0x16, 0x07, 0xee, 0xae, 0x2e, 0x1f, 0x36, 0x8b, + 0x97, 0x7a, 0x21, 0x45, 0x07, 0x21, 0xa6, 0xe5, 0xbb, 0xab, 0xcb, 0x88, 0x0d, 0x86, 0x2f, 0x81, + 0x49, 0x1b, 0x53, 0x87, 0x60, 0x7b, 0x71, 0x63, 0xf5, 0x9e, 0xd0, 0x6f, 0xe6, 0xb8, 0xc6, 0xf3, + 0x52, 0xe3, 0xe4, 0x72, 0x56, 0x00, 0x75, 0x8e, 0x81, 0x9b, 0x60, 0x38, 0xd8, 0x7a, 0x1b, 0xd7, + 0x22, 0x6a, 0x0e, 0xcc, 0x0d, 0xcc, 0x8f, 0x5e, 0x7b, 0xba, 0x9c, 0x2e, 0x9e, 0x32, 0x81, 0xaf, + 0x98, 0x9c, 0x6c, 0x19, 0x59, 0x7b, 0x2b, 0xc9, 0xa2, 0x55, 0xc6, 0x25, 0xda, 0xf0, 0xba, 0xd0, + 0x82, 0x12, 0x75, 0xa5, 0x5f, 0xe5, 0x00, 0xd4, 0x27, 0x4f, 0xc3, 0xc0, 0xa7, 0xb8, 0x2f, 0xb3, + 0xa7, 0x60, 0xa2, 0xc6, 0x35, 0x47, 0xd8, 0x96, 0xb8, 0x66, 0xee, 0x28, 0xd6, 0x9b, 0x12, 0x7f, + 0x62, 0x29, 0xa3, 0x0e, 0x75, 0x00, 0xc0, 0x3b, 0x60, 0x88, 0x60, 0x1a, 0xbb, 0x91, 0x39, 0x30, + 0x67, 0xcc, 0x8f, 0x5e, 0xbb, 0xda, 0x13, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0xf2, 0xee, 0xb3, 0xe5, + 0x6a, 0x64, 0x45, 0x31, 0xad, 0x9c, 0x91, 0x48, 0x43, 0x88, 0xeb, 0x40, 0x52, 0x57, 0xe9, 0xbf, + 0x06, 0x98, 0xd0, 0xbd, 0xb4, 0xeb, 0xe0, 0x3d, 0x48, 0xc0, 0x30, 0x11, 0xc1, 0xc2, 0xfd, 0x34, + 0x7a, 0xed, 0x66, 0xf9, 0xa8, 0x3b, 0xaa, 0xdc, 0x11, 0x7f, 0x95, 0x51, 0xb6, 0x5c, 0xf2, 0x0f, + 0x4a, 0x80, 0xe0, 0x2e, 0x28, 0x10, 0xb9, 0x46, 0x3c, 0x90, 0x46, 0xaf, 0xdd, 0xea, 0x0f, 0xa8, + 0xd0, 0x59, 0x19, 0x6b, 0x35, 0x8b, 0x85, 0xe4, 0x1f, 0x52, 0x58, 0xa5, 0x5f, 0xe4, 0xc0, 0xec, + 0x52, 0x4c, 0xa3, 0xc0, 0x43, 0x98, 0x06, 0x31, 0xa9, 0xe1, 0xa5, 0xc0, 0x8d, 0x3d, 0x7f, 0x19, + 0xd7, 0x1d, 0xdf, 0x89, 0x58, 0x8c, 0xce, 0x81, 0x41, 0xdf, 0xf2, 0xb0, 0x8c, 0x99, 0x31, 0xe9, + 0xc9, 0xc1, 0xdb, 0x96, 0x87, 0x11, 0xe7, 0x30, 0x09, 0x16, 0x22, 0x72, 0x07, 0x28, 0x89, 0x3b, + 0x07, 0x21, 0x46, 0x9c, 0x03, 0xaf, 0x80, 0xa1, 0x7a, 0x40, 0x3c, 0x4b, 0xac, 0xde, 0x48, 0xba, + 0x1e, 0x37, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x1e, 0x8c, 0xda, 0x98, 0xd6, 0x88, 0x13, 0x32, 0x68, + 0x73, 0x90, 0x0b, 0x9f, 0x95, 0xc2, 0xa3, 0xcb, 0x29, 0x0b, 0xe9, 0x72, 0xf0, 0x2a, 0x28, 0x84, + 0xc4, 0x09, 0x88, 0x13, 0x1d, 0x98, 0xf9, 0x39, 0x63, 0x3e, 0x5f, 0x99, 0x90, 0x63, 0x0a, 0x1b, + 0x92, 0x8e, 0x94, 0x04, 0x93, 0x7e, 0x9b, 0x06, 0xfe, 0x86, 0x15, 0x6d, 0x9b, 0x43, 0x1c, 0x41, + 0x49, 0xbf, 0x52, 0x5d, 0xbf, 0xcd, 0xe8, 0x48, 0x49, 0x94, 0xfe, 0x6c, 0x00, 0x33, 0xeb, 0xa1, + 0xc4, 0xbd, 0xf0, 0x06, 0x28, 0xd0, 0x88, 0xe5, 0x9c, 0xc6, 0x81, 0xf4, 0xcf, 0x93, 0x89, 0xaa, + 0xaa, 0xa4, 0x1f, 0x36, 0x8b, 0xd3, 0xe9, 0x88, 0x84, 0xca, 0x7d, 0xa3, 0xc6, 0xb2, 0x90, 0xdb, + 0xc3, 0x5b, 0xdb, 0x41, 0xb0, 0x23, 0x57, 0xff, 0x18, 0x21, 0xf7, 0xaa, 0x50, 0x94, 0x62, 0x8a, + 0x90, 0x93, 0x64, 0x94, 0x00, 0x95, 0xfe, 0x93, 0xcb, 0x4e, 0x4c, 0x5b, 0xf4, 0xb7, 0x40, 0x81, + 0x6d, 0x21, 0xdb, 0x8a, 0x2c, 0xb9, 0x09, 0x9e, 0x79, 0xb4, 0x0d, 0x27, 0xf6, 0xeb, 0x1a, 0x8e, + 0xac, 0x0a, 0x94, 0xae, 0x00, 0x29, 0x0d, 0x29, 0xad, 0x70, 0x1f, 0x0c, 0xd2, 0x10, 0xd7, 0xe4, + 0x7c, 0xef, 0x1d, 0x23, 0xda, 0x7b, 0xcc, 0xa1, 0x1a, 0xe2, 0x5a, 0x1a, 0x8c, 0xec, 0x1f, 0xe2, + 0x88, 0xf0, 0x1d, 0x03, 0x0c, 0x51, 0x9e, 0x17, 0x64, 0x2e, 0xd9, 0x3c, 0x01, 0xf0, 0x4c, 0xde, + 0x11, 0xff, 0x91, 0xc4, 0x2d, 0xfd, 0x33, 0x07, 0x2e, 0xf5, 0x1a, 0xba, 0x14, 0xf8, 0xb6, 0x58, + 0x84, 0x55, 0xb9, 0xaf, 0x44, 0x64, 0x3d, 0xaf, 0xef, 0xab, 0xc3, 0x66, 0xf1, 0xf1, 0x87, 0x2a, + 0xd0, 0x36, 0xe0, 0x97, 0xd5, 0x94, 0xc5, 0x26, 0xbd, 0xd4, 0x6e, 0xd8, 0x61, 0xb3, 0x38, 0xae, + 0x86, 0xb5, 0xdb, 0x0a, 0x77, 0x01, 0x74, 0x2d, 0x1a, 0xdd, 0x21, 
0x96, 0x4f, 0x85, 0x5a, 0xc7, + 0xc3, 0xd2, 0x73, 0x4f, 0x3e, 0x5a, 0x50, 0xb0, 0x11, 0x95, 0x19, 0x09, 0x09, 0x6f, 0x75, 0x68, + 0x43, 0x5d, 0x10, 0x58, 0xce, 0x20, 0xd8, 0xa2, 0x2a, 0x0d, 0x68, 0x39, 0x9c, 0x51, 0x91, 0xe4, + 0xc2, 0x27, 0xc0, 0xb0, 0x87, 0x29, 0xb5, 0x1a, 0x98, 0xef, 0xfd, 0x91, 0xf4, 0x50, 0x5c, 0x13, + 0x64, 0x94, 0xf0, 0x4b, 0xff, 0x32, 0xc0, 0x85, 0x5e, 0x5e, 0xbb, 0xe5, 0xd0, 0x08, 0x7e, 0xb3, + 0x23, 0xec, 0xcb, 0x8f, 0x36, 0x43, 0x36, 0x9a, 0x07, 0xbd, 0x4a, 0x25, 0x09, 0x45, 0x0b, 0xf9, + 0x3d, 0x90, 0x77, 0x22, 0xec, 0x25, 0xa7, 0x25, 0xea, 0x7f, 0xd8, 0x55, 0x4e, 0x4b, 0xf8, 0xfc, + 0x2a, 0x03, 0x42, 0x02, 0xaf, 0xf4, 0x61, 0x0e, 0x5c, 0xec, 0x35, 0x84, 0xe5, 0x71, 0xca, 0x9c, + 0x1d, 0xba, 0x31, 0xb1, 0x5c, 0x19, 0x6c, 0xca, 0xd9, 0x1b, 0x9c, 0x8a, 0x24, 0x97, 0xe5, 0x4e, + 0xea, 0xf8, 0x8d, 0xd8, 0xb5, 0x88, 0x8c, 0x24, 0x35, 0xe1, 0xaa, 0xa4, 0x23, 0x25, 0x01, 0xcb, + 0x00, 0xd0, 0xed, 0x80, 0x44, 0x1c, 0x83, 0x57, 0x38, 0x23, 0x95, 0x33, 0x2c, 0x23, 0x54, 0x15, + 0x15, 0x69, 0x12, 0xec, 0x20, 0xd9, 0x71, 0x7c, 0x5b, 0x2e, 0xb8, 0xda, 0xbb, 0x37, 0x1d, 0xdf, + 0x46, 0x9c, 0xc3, 0xf0, 0x5d, 0x87, 0x46, 0x8c, 0x22, 0x57, 0xbb, 0xcd, 0xe1, 0x5c, 0x52, 0x49, + 0x30, 0xfc, 0x1a, 0x4b, 0xb0, 0x01, 0x71, 0x30, 0x35, 0x87, 0x52, 0xfc, 0x25, 0x45, 0x45, 0x9a, + 0x44, 0xe9, 0x2f, 0x83, 0xbd, 0xe3, 0x83, 0x25, 0x10, 0x78, 0x19, 0xe4, 0x1b, 0x24, 0x88, 0x43, + 0xe9, 0x25, 0xe5, 0xed, 0x97, 0x18, 0x11, 0x09, 0x1e, 0xfc, 0x36, 0xc8, 0xfb, 0x72, 0xc2, 0x2c, + 0x82, 0x5e, 0xed, 0xff, 0x32, 0x73, 0x6f, 0xa5, 0xe8, 0xc2, 0x91, 0x02, 0x14, 0x3e, 0x07, 0xf2, + 0xb4, 0x16, 0x84, 0x58, 0x3a, 0x71, 0x36, 0x11, 0xaa, 0x32, 0xe2, 0x61, 0xb3, 0x78, 0x3a, 0x51, + 0xc7, 0x09, 0x48, 0x08, 0xc3, 0xef, 0x1b, 0xa0, 0x20, 0x8f, 0x0b, 0x6a, 0x0e, 0xf3, 0xf0, 0x7c, + 0xad, 0xff, 0x76, 0xcb, 0xb2, 0x37, 0x5d, 0x33, 0x49, 0xa0, 0x48, 0x81, 0xc3, 0xef, 0x1a, 0x00, + 0xd4, 0xd4, 0xd9, 0x65, 0x8e, 0x70, 0x1f, 0xf6, 0x6d, 0xab, 0x68, 0xa7, 0xa2, 0x08, 0x84, 0xb4, + 0x54, 0xd2, 0x50, 0x61, 0x15, 0x4c, 0x85, 0x04, 0x73, 0xdd, 0x77, 0xfd, 0x1d, 0x3f, 0xd8, 0xf3, + 0x6f, 0x38, 0xd8, 0xb5, 0xa9, 0x09, 0xe6, 0x8c, 0xf9, 0x42, 0xe5, 0xa2, 0xb4, 0x7f, 0x6a, 0xa3, + 0x9b, 0x10, 0xea, 0x3e, 0xb6, 0xf4, 0xee, 0x40, 0xb6, 0xd6, 0xca, 0x9e, 0x17, 0xf0, 0x7d, 0x31, + 0x79, 0x91, 0x87, 0xa9, 0x69, 0xf0, 0x85, 0x78, 0xa3, 0xff, 0x0b, 0xa1, 0x72, 0x7d, 0x7a, 0x48, + 0x2b, 0x12, 0x45, 0x9a, 0x09, 0xf0, 0xa7, 0x06, 0x38, 0x6d, 0xd5, 0x6a, 0x38, 0x8c, 0xb0, 0x2d, + 0xb6, 0x71, 0xee, 0x64, 0xa3, 0x7a, 0x4a, 0x1a, 0x74, 0x7a, 0x51, 0x47, 0x45, 0xed, 0x46, 0xc0, + 0x17, 0xc1, 0x19, 0x1a, 0x05, 0x04, 0xdb, 0x49, 0x04, 0xc9, 0xec, 0x02, 0x5b, 0xcd, 0xe2, 0x99, + 0x6a, 0x1b, 0x07, 0x65, 0x24, 0x4b, 0x1f, 0xe7, 0x41, 0xf1, 0x21, 0x11, 0xfa, 0x08, 0x45, 0xef, + 0x15, 0x30, 0xc4, 0x67, 0x6a, 0x73, 0x87, 0x14, 0xb4, 0xa3, 0x9e, 0x53, 0x91, 0xe4, 0xb2, 0xe3, + 0x89, 0xe1, 0xb3, 0xe3, 0x69, 0x80, 0x0b, 0xaa, 0xe3, 0xa9, 0x2a, 0xc8, 0x28, 0xe1, 0xc3, 0x6b, + 0x00, 0xd8, 0x38, 0x24, 0x98, 0x65, 0x24, 0xdb, 0x1c, 0xe6, 0xd2, 0x6a, 0x7d, 0x96, 0x15, 0x07, + 0x69, 0x52, 0xf0, 0x06, 0x80, 0xc9, 0x3f, 0x27, 0xf0, 0x5f, 0xb5, 0x88, 0xef, 0xf8, 0x0d, 0xb3, + 0xc0, 0xcd, 0x9e, 0x66, 0xa7, 0xed, 0x72, 0x07, 0x17, 0x75, 0x19, 0x01, 0x77, 0xc1, 0x90, 0xb8, + 0x46, 0xf3, 0xbc, 0xd1, 0xc7, 0x1d, 0x77, 0xcf, 0x72, 0x1d, 0x9b, 0x43, 0x55, 0x00, 0x77, 0x0f, + 0x47, 0x41, 0x12, 0x0d, 0xbe, 0x67, 0x80, 0x31, 0x1a, 0x6f, 0x11, 0x29, 0x4d, 0x79, 0x56, 0x1f, + 0xbd, 0x76, 0xa7, 0x5f, 0xf0, 0x55, 0x4d, 0x77, 0x65, 0xa2, 0xd5, 0x2c, 0x8e, 0xe9, 0x14, 
0xd4, + 0x86, 0x0d, 0x7f, 0x6f, 0x00, 0xd3, 0xb2, 0x45, 0xe8, 0x5b, 0xee, 0x06, 0x71, 0xfc, 0x08, 0x13, + 0x71, 0x21, 0x12, 0xc7, 0x47, 0x1f, 0x6b, 0xc5, 0xec, 0x3d, 0xab, 0x32, 0x27, 0x57, 0xda, 0x5c, + 0xec, 0x61, 0x01, 0xea, 0x69, 0x5b, 0xe9, 0xdf, 0x46, 0x36, 0xb5, 0x68, 0xb3, 0xac, 0xd6, 0x2c, + 0x17, 0xc3, 0x65, 0x30, 0xc1, 0xaa, 0x5f, 0x84, 0x43, 0xd7, 0xa9, 0x59, 0x94, 0xdf, 0x7e, 0x44, + 0x74, 0xab, 0x6b, 0x78, 0x35, 0xc3, 0x47, 0x1d, 0x23, 0xe0, 0x2b, 0x00, 0x8a, 0xb2, 0xb0, 0x4d, + 0x8f, 0xa8, 0x04, 0x54, 0x81, 0x57, 0xed, 0x90, 0x40, 0x5d, 0x46, 0xc1, 0x25, 0x30, 0xe9, 0x5a, + 0x5b, 0xd8, 0xad, 0x62, 0x17, 0xd7, 0xa2, 0x80, 0x70, 0x55, 0xe2, 0x7e, 0x38, 0xd5, 0x6a, 0x16, + 0x27, 0x6f, 0x65, 0x99, 0xa8, 0x53, 0xbe, 0x74, 0x29, 0xbb, 0x97, 0xf5, 0x89, 0x8b, 0x62, 0xfb, + 0x83, 0x1c, 0x98, 0xe9, 0x1d, 0x14, 0xf0, 0x3b, 0xaa, 0x34, 0x16, 0x15, 0xdf, 0x6b, 0x27, 0x10, + 0x7a, 0xf2, 0x3a, 0x00, 0x3a, 0xaf, 0x02, 0xf0, 0x80, 0x9d, 0xd7, 0x96, 0x9b, 0x5c, 0xfb, 0x37, + 0x4f, 0x02, 0x9d, 0xe9, 0xaf, 0x8c, 0x88, 0x2a, 0xc0, 0x72, 0xf9, 0xa1, 0x6f, 0xb9, 0xb8, 0xf4, + 0x61, 0xc7, 0xd5, 0x36, 0xdd, 0xac, 0xf0, 0x07, 0x06, 0x18, 0x0f, 0x42, 0xec, 0x2f, 0x6e, 0xac, + 0xde, 0xfb, 0xa2, 0xd8, 0xb4, 0xd2, 0x41, 0xab, 0x47, 0x37, 0x91, 0xdd, 0xaf, 0x85, 0xae, 0x0d, + 0x12, 0x84, 0xb4, 0x72, 0xb6, 0xd5, 0x2c, 0x8e, 0xaf, 0xb7, 0xa3, 0xa0, 0x2c, 0x6c, 0xc9, 0x03, + 0x53, 0x2b, 0xfb, 0x11, 0x26, 0xbe, 0xe5, 0x2e, 0x07, 0xb5, 0xd8, 0xc3, 0x7e, 0x24, 0x6c, 0xcc, + 0xb4, 0x0b, 0x8c, 0x47, 0x6c, 0x17, 0x5c, 0x04, 0x03, 0x31, 0x71, 0x65, 0xd4, 0x8e, 0xaa, 0x26, + 0x18, 0xba, 0x85, 0x18, 0xbd, 0x74, 0x09, 0x0c, 0x32, 0x3b, 0xe1, 0x79, 0x30, 0x40, 0xac, 0x3d, + 0xae, 0x75, 0xac, 0x32, 0xcc, 0x44, 0x90, 0xb5, 0x87, 0x18, 0xad, 0xf4, 0xf7, 0x39, 0x30, 0x9e, + 0x99, 0x0b, 0x9c, 0x01, 0x39, 0xd5, 0x59, 0x03, 0x52, 0x69, 0x6e, 0x75, 0x19, 0xe5, 0x1c, 0x1b, + 0xbe, 0xa0, 0xb2, 0xab, 0x00, 0x2d, 0xaa, 0xc3, 0x82, 0x53, 0x59, 0x59, 0x96, 0xaa, 0x63, 0x86, + 0x24, 0xe9, 0x91, 0xd9, 0x80, 0xeb, 0x72, 0x57, 0x08, 0x1b, 0x70, 0x1d, 0x31, 0xda, 0x51, 0x7b, + 0x25, 0x49, 0xb3, 0x26, 0xff, 0x08, 0xcd, 0x9a, 0xa1, 0x07, 0x36, 0x6b, 0x2e, 0x83, 0x7c, 0xe4, + 0x44, 0x2e, 0xe6, 0x27, 0x95, 0x56, 0x0c, 0xdf, 0x61, 0x44, 0x24, 0x78, 0x10, 0x83, 0x61, 0x1b, + 0xd7, 0xad, 0xd8, 0x8d, 0xf8, 0xa1, 0x34, 0x7a, 0xed, 0xeb, 0xc7, 0x8b, 0x1e, 0xd1, 0xcc, 0x58, + 0x16, 0x2a, 0x51, 0xa2, 0x1b, 0x3e, 0x0e, 0x86, 0x3d, 0x6b, 0xdf, 0xf1, 0x62, 0x8f, 0x57, 0x8c, + 0x86, 0x10, 0x5b, 0x13, 0x24, 0x94, 0xf0, 0x58, 0x12, 0xc4, 0xfb, 0x35, 0x37, 0xa6, 0xce, 0x2e, + 0x96, 0x4c, 0x59, 0xd2, 0xa9, 0x24, 0xb8, 0x92, 0xe1, 0xa3, 0x8e, 0x11, 0x1c, 0xcc, 0xf1, 0xf9, + 0xe0, 0x51, 0x0d, 0x4c, 0x90, 0x50, 0xc2, 0x6b, 0x07, 0x93, 0xf2, 0x63, 0xbd, 0xc0, 0xe4, 0xe0, + 0x8e, 0x11, 0xf0, 0x29, 0x30, 0xe2, 0x59, 0xfb, 0xb7, 0xb0, 0xdf, 0x88, 0xb6, 0xcd, 0xd3, 0x73, + 0xc6, 0xfc, 0x40, 0xe5, 0x74, 0xab, 0x59, 0x1c, 0x59, 0x4b, 0x88, 0x28, 0xe5, 0x73, 0x61, 0xc7, + 0x97, 0xc2, 0x67, 0x34, 0xe1, 0x84, 0x88, 0x52, 0x3e, 0xab, 0x4c, 0x42, 0x2b, 0x62, 0xfb, 0xca, + 0x1c, 0x6f, 0xbf, 0x38, 0x6f, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x1e, 0x14, 0x3c, 0x6b, 0x9f, 0xdf, + 0x29, 0xcd, 0x09, 0xae, 0x96, 0x37, 0x14, 0xd7, 0x24, 0x0d, 0x29, 0x2e, 0x97, 0x74, 0x7c, 0x21, + 0x39, 0xa9, 0x49, 0x4a, 0x1a, 0x52, 0x5c, 0x16, 0xbf, 0xb1, 0xef, 0xdc, 0x8f, 0xb1, 0x10, 0x86, + 0xdc, 0x33, 0x2a, 0x7e, 0xef, 0xa6, 0x2c, 0xa4, 0xcb, 0xb1, 0x3b, 0x9d, 0x17, 0xbb, 0x91, 0x13, + 0xba, 0x78, 0xbd, 0x6e, 0x9e, 0xe5, 0xfe, 0xe7, 0xa5, 0xfc, 0x9a, 0xa2, 0x22, 0x4d, 0x02, 0xbe, + 0x05, 0x06, 0xb1, 
0x1f, 0x7b, 0xe6, 0x39, 0x7e, 0x7c, 0x1f, 0x37, 0xfa, 0xd4, 0x7e, 0x59, 0xf1, + 0x63, 0x0f, 0x71, 0xcd, 0xf0, 0x05, 0x70, 0xda, 0xb3, 0xf6, 0x59, 0x12, 0xc0, 0x24, 0x62, 0x17, + 0xcd, 0x29, 0x3e, 0xef, 0x49, 0x56, 0xc4, 0xae, 0xe9, 0x0c, 0xd4, 0x2e, 0xc7, 0x07, 0x3a, 0xbe, + 0x36, 0x70, 0x5a, 0x1b, 0xa8, 0x33, 0x50, 0xbb, 0x1c, 0x73, 0x32, 0xc1, 0xf7, 0x63, 0x87, 0x60, + 0xdb, 0xfc, 0x3f, 0x5e, 0xf7, 0xca, 0xfe, 0xae, 0xa0, 0x21, 0xc5, 0x85, 0xf7, 0x93, 0x96, 0x83, + 0xc9, 0x37, 0xdf, 0x46, 0xdf, 0x52, 0xf7, 0x3a, 0x59, 0x24, 0xc4, 0x3a, 0x10, 0xa7, 0x8a, 0xde, + 0x6c, 0x80, 0x3e, 0xc8, 0x5b, 0xae, 0xbb, 0x5e, 0x37, 0xcf, 0x73, 0x8f, 0xf7, 0xf1, 0xb4, 0x50, + 0x19, 0x66, 0x91, 0xe9, 0x47, 0x02, 0x86, 0xe1, 0x05, 0x3e, 0x8b, 0x85, 0x99, 0x13, 0xc3, 0x5b, + 0x67, 0xfa, 0x91, 0x80, 0xe1, 0xf3, 0xf3, 0x0f, 0xd6, 0xeb, 0xe6, 0x63, 0x27, 0x37, 0x3f, 0xa6, + 0x1f, 0x09, 0x18, 0x68, 0x83, 0x01, 0x3f, 0x88, 0xcc, 0x0b, 0xfd, 0x3e, 0x7b, 0xf9, 0x69, 0x72, + 0x3b, 0x88, 0x10, 0x53, 0x0f, 0x7f, 0x64, 0x00, 0x10, 0xa6, 0x91, 0x78, 0xf1, 0xb8, 0x2d, 0x80, + 0x0c, 0x5a, 0x39, 0x8d, 0xde, 0x15, 0x3f, 0x22, 0x07, 0xe9, 0xbd, 0x46, 0x8b, 0x72, 0xcd, 0x00, + 0xf8, 0x73, 0x03, 0x9c, 0xd3, 0xcb, 0x5d, 0x65, 0xd9, 0x2c, 0xf7, 0xc3, 0x7a, 0x1f, 0x03, 0xb9, + 0x12, 0x04, 0x6e, 0xc5, 0x6c, 0x35, 0x8b, 0xe7, 0x16, 0xbb, 0x00, 0xa2, 0xae, 0x66, 0xc0, 0x5f, + 0x1b, 0x60, 0x52, 0x66, 0x47, 0xcd, 0xb8, 0x22, 0x77, 0xdb, 0x5b, 0x7d, 0x74, 0x5b, 0x16, 0x42, + 0x78, 0x4f, 0x7d, 0x65, 0xec, 0xe0, 0xa3, 0x4e, 0xab, 0xe0, 0xef, 0x0c, 0x30, 0x66, 0xe3, 0x10, + 0xfb, 0x36, 0xf6, 0x6b, 0xcc, 0xcc, 0xb9, 0xe3, 0xf6, 0x15, 0xb2, 0x66, 0x2e, 0x6b, 0xda, 0x85, + 0x85, 0x65, 0x69, 0xe1, 0x98, 0xce, 0x3a, 0x6c, 0x16, 0xa7, 0xd3, 0xa1, 0x3a, 0x07, 0xb5, 0x19, + 0x08, 0x7f, 0x6c, 0x80, 0xf1, 0xd4, 0xed, 0xe2, 0x80, 0xb8, 0x74, 0x32, 0x0b, 0xcf, 0x4b, 0xd0, + 0xc5, 0x76, 0x2c, 0x94, 0x05, 0x87, 0xbf, 0x31, 0x58, 0xb5, 0x95, 0xdc, 0xd5, 0xa8, 0x59, 0xe2, + 0x1e, 0x7c, 0xbd, 0x9f, 0x1e, 0x54, 0xca, 0x85, 0x03, 0xaf, 0xa6, 0x95, 0x9c, 0xe2, 0x1c, 0x36, + 0x8b, 0x53, 0xba, 0xff, 0x14, 0x03, 0xe9, 0xc6, 0xc1, 0x77, 0x0d, 0x30, 0x86, 0xd3, 0x82, 0x99, + 0x9a, 0x97, 0x8f, 0xeb, 0xba, 0xae, 0xe5, 0xb7, 0xb8, 0x4e, 0x6b, 0x2c, 0x8a, 0xda, 0x60, 0x59, + 0xed, 0x87, 0xf7, 0x2d, 0x2f, 0x74, 0xb1, 0xf9, 0xff, 0xfd, 0xab, 0xfd, 0x56, 0x84, 0x4a, 0x94, + 0xe8, 0x86, 0x57, 0x41, 0xc1, 0x8f, 0x5d, 0xd7, 0xda, 0x72, 0xb1, 0xf9, 0x38, 0xaf, 0x22, 0x54, + 0x7f, 0xf1, 0xb6, 0xa4, 0x23, 0x25, 0x01, 0xeb, 0x60, 0x6e, 0xff, 0xa6, 0x7a, 0x7c, 0xd1, 0xb5, + 0x81, 0x67, 0x5e, 0xe1, 0x5a, 0x66, 0x5a, 0xcd, 0xe2, 0xf4, 0x66, 0xf7, 0x16, 0xdf, 0x43, 0x75, + 0xc0, 0x37, 0xc0, 0x63, 0x9a, 0xcc, 0x8a, 0xb7, 0x85, 0x6d, 0x1b, 0xdb, 0xc9, 0x45, 0xcb, 0xfc, + 0x02, 0x87, 0x50, 0xfb, 0x78, 0x33, 0x2b, 0x80, 0x1e, 0x34, 0x1a, 0xde, 0x02, 0xd3, 0x1a, 0x7b, + 0xd5, 0x8f, 0xd6, 0x49, 0x35, 0x22, 0x8e, 0xdf, 0x30, 0xe7, 0xb9, 0xde, 0x73, 0xc9, 0xee, 0xdb, + 0xd4, 0x78, 0xa8, 0xc7, 0x18, 0xf8, 0x72, 0x9b, 0x36, 0xfe, 0xe1, 0xc2, 0x0a, 0x6f, 0xe2, 0x03, + 0x6a, 0x3e, 0xc1, 0x8b, 0x0b, 0xbe, 0xce, 0x9b, 0x1a, 0x1d, 0xf5, 0x90, 0x87, 0xdf, 0x00, 0x67, + 0x33, 0x1c, 0x76, 0xaf, 0x30, 0x9f, 0x14, 0x17, 0x04, 0x56, 0x89, 0x6e, 0x26, 0x44, 0xd4, 0x4d, + 0x12, 0x7e, 0x15, 0x40, 0x8d, 0xbc, 0x66, 0x85, 0x7c, 0xfc, 0x53, 0xe2, 0xae, 0xc2, 0x56, 0x74, + 0x53, 0xd2, 0x50, 0x17, 0x39, 0xf8, 0x81, 0xd1, 0x36, 0x93, 0xf4, 0x36, 0x4b, 0xcd, 0xab, 0x7c, + 0xc3, 0xbe, 0x7c, 0xf4, 0x00, 0x4c, 0x95, 0xa1, 0xd8, 0xc5, 0x9a, 0x87, 0x35, 0x14, 0xd4, 0x03, + 0x7d, 0x86, 0x5d, 0xa6, 0x33, 0x39, 0x1c, 
0x4e, 0x80, 0x81, 0x1d, 0x2c, 0x3f, 0x1b, 0x23, 0xf6, + 0x13, 0xbe, 0x09, 0xf2, 0xbb, 0x96, 0x1b, 0x27, 0xad, 0x80, 0xfe, 0x9d, 0xf5, 0x48, 0xe8, 0x7d, + 0x31, 0x77, 0xdd, 0x98, 0x79, 0xdf, 0x00, 0xd3, 0xdd, 0x4f, 0x95, 0xcf, 0xcb, 0xa2, 0x9f, 0x19, + 0x60, 0xb2, 0xe3, 0x00, 0xe9, 0x62, 0x8c, 0xdb, 0x6e, 0xcc, 0xbd, 0x3e, 0x9e, 0x04, 0x62, 0x23, + 0xf0, 0x8a, 0x56, 0xb7, 0xec, 0x87, 0x06, 0x98, 0xc8, 0x26, 0xe6, 0xcf, 0xc9, 0x4b, 0xa5, 0xf7, + 0x72, 0x60, 0xba, 0x7b, 0x0d, 0x0e, 0x3d, 0xd5, 0x5d, 0xe8, 0x7b, 0x83, 0xa6, 0x5b, 0xcb, 0xf6, + 0x1d, 0x03, 0x8c, 0xbe, 0xad, 0xe4, 0x92, 0xaf, 0x99, 0xfd, 0xec, 0x0a, 0x25, 0x47, 0x5f, 0xca, + 0xa0, 0x48, 0x87, 0x2c, 0xfd, 0xd6, 0x00, 0x53, 0x5d, 0x8f, 0x73, 0x78, 0x05, 0x0c, 0x59, 0xae, + 0x1b, 0xec, 0x89, 0x6e, 0x9e, 0xd6, 0x96, 0x5f, 0xe4, 0x54, 0x24, 0xb9, 0x9a, 0xcf, 0x72, 0x9f, + 0x81, 0xcf, 0x4a, 0x7f, 0x30, 0xc0, 0x85, 0x07, 0x45, 0xdd, 0x67, 0xbd, 0x86, 0xf3, 0xa0, 0x20, + 0x8b, 0xed, 0x03, 0xbe, 0x7e, 0x32, 0xbb, 0xca, 0x8c, 0xc0, 0x5f, 0xcb, 0x88, 0x5f, 0xa5, 0x5f, + 0x1a, 0x60, 0xa2, 0x8a, 0xc9, 0xae, 0x53, 0xc3, 0x08, 0xd7, 0x31, 0xc1, 0x7e, 0x0d, 0xc3, 0x05, + 0x30, 0xc2, 0xbf, 0x36, 0x86, 0x56, 0x2d, 0xf9, 0x46, 0x32, 0x29, 0x1d, 0x3d, 0x72, 0x3b, 0x61, + 0xa0, 0x54, 0x46, 0x7d, 0x4f, 0xc9, 0xf5, 0xfc, 0x9e, 0x72, 0x01, 0x0c, 0x86, 0x69, 0x03, 0xb8, + 0xc0, 0xb8, 0xbc, 0xe7, 0xcb, 0xa9, 0x9c, 0x1b, 0x90, 0x88, 0x77, 0xb9, 0xf2, 0x92, 0x1b, 0x90, + 0x08, 0x71, 0x6a, 0xe9, 0x4f, 0x39, 0x70, 0xa6, 0x3d, 0x3f, 0x33, 0x40, 0x12, 0xbb, 0x1d, 0x1f, + 0x70, 0x18, 0x0f, 0x71, 0x8e, 0xfe, 0x6e, 0x20, 0xf7, 0xe0, 0x77, 0x03, 0xf0, 0x25, 0x30, 0x29, + 0x7f, 0xae, 0xec, 0x87, 0x04, 0x53, 0xfe, 0x65, 0x72, 0xa0, 0xfd, 0xbd, 0xdf, 0x5a, 0x56, 0x00, + 0x75, 0x8e, 0x81, 0x5f, 0xc9, 0xbc, 0x69, 0xb8, 0x9c, 0xbe, 0x67, 0x60, 0xb5, 0x1d, 0x2f, 0x1d, + 0xee, 0xb1, 0x2d, 0xbf, 0x42, 0x48, 0x40, 0x32, 0x0f, 0x1d, 0x16, 0xc0, 0x48, 0x9d, 0x09, 0xf0, + 0x3e, 0x79, 0xbe, 0xdd, 0xe9, 0x37, 0x12, 0x06, 0x4a, 0x65, 0xe0, 0xd7, 0xc0, 0x78, 0x10, 0x8a, + 0x2a, 0x76, 0xdd, 0xb5, 0xab, 0xd8, 0xad, 0xf3, 0x8e, 0x5e, 0x21, 0x69, 0xbb, 0xb6, 0xb1, 0x50, + 0x56, 0xb6, 0xf4, 0x47, 0x03, 0x9c, 0x4d, 0x1e, 0x13, 0xb9, 0x0e, 0xf6, 0xa3, 0xa5, 0xc0, 0xaf, + 0x3b, 0x0d, 0x78, 0x5e, 0xb4, 0x4f, 0xb5, 0x9e, 0x64, 0xd2, 0x3a, 0x85, 0xf7, 0xc1, 0x30, 0x15, + 0xb1, 0x22, 0xc3, 0xf8, 0x95, 0xa3, 0x87, 0x71, 0x36, 0xe8, 0x44, 0xf5, 0x97, 0x50, 0x13, 0x1c, + 0x16, 0xc9, 0x35, 0xab, 0x12, 0xfb, 0xb6, 0x6c, 0xa1, 0x8f, 0x89, 0x48, 0x5e, 0x5a, 0x14, 0x34, + 0xa4, 0xb8, 0xa5, 0x7f, 0x18, 0x60, 0xb2, 0xe3, 0x71, 0x14, 0xfc, 0x9e, 0x01, 0xc6, 0x6a, 0xda, + 0xf4, 0x64, 0x3e, 0x58, 0x3b, 0xfe, 0x03, 0x2c, 0x4d, 0xa9, 0x28, 0xa1, 0x74, 0x0a, 0x6a, 0x03, + 0x85, 0x9b, 0xc0, 0xac, 0x65, 0xde, 0x21, 0x66, 0xbe, 0x6c, 0x5e, 0x68, 0x35, 0x8b, 0xe6, 0x52, + 0x0f, 0x19, 0xd4, 0x73, 0x74, 0xe5, 0x5b, 0x1f, 0x7d, 0x3a, 0x7b, 0xea, 0xe3, 0x4f, 0x67, 0x4f, + 0x7d, 0xf2, 0xe9, 0xec, 0xa9, 0x77, 0x5a, 0xb3, 0xc6, 0x47, 0xad, 0x59, 0xe3, 0xe3, 0xd6, 0xac, + 0xf1, 0x49, 0x6b, 0xd6, 0xf8, 0x6b, 0x6b, 0xd6, 0xf8, 0xc9, 0xdf, 0x66, 0x4f, 0xbd, 0x7e, 0xfd, + 0xa8, 0xaf, 0x8f, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x28, 0x77, 0xf5, 0x22, 0xd1, 0x2c, 0x00, + 0x00, +} + +func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DesiredAPIVersion) + copy(dAtA[i:], m.DesiredAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ConvertedObjects) > 0 { + for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i-- + dAtA[i] = 0x32 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.ListKind) + copy(dAtA[i:], m.ListKind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) + i-- + dAtA[i] = 0x2a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x22 + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Singular) + copy(dAtA[i:], m.Singular) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) + i-- + dAtA[i] = 0x12 + i -= len(m.Plural) + copy(dAtA[i:], m.Plural) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.PreserveUnknownFields { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + if m.Conversion != nil { + { + size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StoredVersions) > 0 { + for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StoredVersions[iNdEx]) + copy(dAtA[i:], m.StoredVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DeprecationWarning != nil { + i -= len(*m.DeprecationWarning) + copy(dAtA[i:], *m.DeprecationWarning) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeprecationWarning))) + i-- + dAtA[i] = 0x42 + } + i-- + if m.Deprecated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if 
m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LabelSelectorPath != nil { + i -= len(*m.LabelSelectorPath) + copy(dAtA[i:], *m.LabelSelectorPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.StatusReplicasPath) + copy(dAtA[i:], m.StatusReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.SpecReplicasPath) + copy(dAtA[i:], m.SpecReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scale != nil { + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OpenAPIV3Schema != nil { + { + size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSON) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSON) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.XValidations) > 0 { + for iNdEx := len(m.XValidations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.XValidations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + } + if m.XMapType != nil { + i -= len(*m.XMapType) + copy(dAtA[i:], *m.XMapType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XMapType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + if m.XListType != nil { + i -= len(*m.XListType) + copy(dAtA[i:], *m.XListType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.XListMapKeys) > 0 { + for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.XListMapKeys[iNdEx]) + copy(dAtA[i:], m.XListMapKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + } + 
i-- + if m.XIntOrString { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + i-- + if m.XEmbeddedResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + if m.XPreserveUnknownFields != nil { + i-- + if *m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + i-- + if m.Nullable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + if m.Example != nil { + { + size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.ExternalDocs != nil { + { + size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if len(m.Definitions) > 0 { + keysForDefinitions := make([]string, 0, len(m.Definitions)) + for k := range m.Definitions { + keysForDefinitions = append(keysForDefinitions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Definitions[string(keysForDefinitions[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefinitions[iNdEx]) + copy(dAtA[i:], keysForDefinitions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + } + if m.AdditionalItems != nil { + { + size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.Dependencies) > 0 { + keysForDependencies := make([]string, 0, len(m.Dependencies)) + for k := range m.Dependencies { + keysForDependencies = append(keysForDependencies, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { + v := m.Dependencies[string(keysForDependencies[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDependencies[iNdEx]) + copy(dAtA[i:], keysForDependencies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if len(m.PatternProperties) > 0 { + keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) + for k := range m.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPatternProperties[iNdEx]) + copy(dAtA[i:], keysForPatternProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + } + if m.AdditionalProperties != nil { + { + size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if len(m.Properties) > 0 { + keysForProperties := make([]string, 0, len(m.Properties)) + for k := range m.Properties { + keysForProperties = append(keysForProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.Properties[string(keysForProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForProperties[iNdEx]) + copy(dAtA[i:], keysForProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + } + if m.Not != nil { + { + size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + } + if len(m.OneOf) > 0 { + for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.Items != nil { + { + size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.Required) > 0 { + for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Required[iNdEx]) + copy(dAtA[i:], m.Required[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if m.MinProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.MaxProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.Enum) > 0 { + for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if m.MultipleOf != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x99 + } + i-- + if m.UniqueItems { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + if m.MinItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.MaxItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0x7a + if m.MinLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) + i-- + dAtA[i] = 0x70 + } + if m.MaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) + i-- + dAtA[i] = 0x68 + } + i-- + if m.ExclusiveMinimum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + if m.Minimum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i-- + dAtA[i] = 0x59 + } + i-- + if m.ExclusiveMaximum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + if m.Maximum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i-- + dAtA[i] = 0x49 + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0x3a + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x32 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + if m.Ref != nil { + i -= len(*m.Ref) + copy(dAtA[i:], *m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.JSONSchemas) > 0 { + for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Allows { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Property) > 0 { + for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Property[iNdEx]) + copy(dAtA[i:], m.Property[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidationRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OptionalOldSelf != nil { + i-- + if *m.OptionalOldSelf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- + dAtA[i] = 0x2a + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x22 + } + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WebhookConversion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookConversion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConversionReviewVersions) > 0 { + for iNdEx := len(m.ConversionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConversionReviewVersions[iNdEx]) + copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.ClientConfig != nil { + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ConversionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.DesiredAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConversionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ConvertedObjects) > 0 { + for _, e := range m.ConvertedObjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConversionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceColumnDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Priority)) + l = len(m.JSONPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceConversion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Plural) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Singular) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ListKind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Names.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scope) + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Conversion != nil { + l = m.Conversion.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *CustomResourceDefinitionStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AcceptedNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StoredVersions) > 0 { + for _, s := range m.StoredVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Subresources != nil { + l = m.Subresources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AdditionalPrinterColumns) > 0 { + for _, e := range m.AdditionalPrinterColumns { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.DeprecationWarning != nil { + l = len(*m.DeprecationWarning) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceSubresourceScale) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SpecReplicasPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StatusReplicasPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelectorPath != nil { + l = len(*m.LabelSelectorPath) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceSubresourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *CustomResourceSubresources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scale != nil { + l = m.Scale.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceValidation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OpenAPIV3Schema != nil { + l = m.OpenAPIV3Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExternalDocumentation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JSON) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaProps) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Schema) + n += 1 + l + sovGenerated(uint64(l)) + if m.Ref != nil { + l = len(*m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Title) + n += 1 + l + sovGenerated(uint64(l)) + if m.Default != nil { + l = m.Default.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Maximum != nil { + n += 9 + } + n += 2 + if m.Minimum != nil { + n += 9 + } + n += 2 + if m.MaxLength != 
nil { + n += 1 + sovGenerated(uint64(*m.MaxLength)) + } + if m.MinLength != nil { + n += 1 + sovGenerated(uint64(*m.MinLength)) + } + l = len(m.Pattern) + n += 1 + l + sovGenerated(uint64(l)) + if m.MaxItems != nil { + n += 2 + sovGenerated(uint64(*m.MaxItems)) + } + if m.MinItems != nil { + n += 2 + sovGenerated(uint64(*m.MinItems)) + } + n += 3 + if m.MultipleOf != nil { + n += 10 + } + if len(m.Enum) > 0 { + for _, e := range m.Enum { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.MaxProperties != nil { + n += 2 + sovGenerated(uint64(*m.MaxProperties)) + } + if m.MinProperties != nil { + n += 2 + sovGenerated(uint64(*m.MinProperties)) + } + if len(m.Required) > 0 { + for _, s := range m.Required { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Items != nil { + l = m.Items.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.AllOf) > 0 { + for _, e := range m.AllOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.OneOf) > 0 { + for _, e := range m.OneOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AnyOf) > 0 { + for _, e := range m.AnyOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Not != nil { + l = m.Not.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Properties) > 0 { + for k, v := range m.Properties { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalProperties != nil { + l = m.AdditionalProperties.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.PatternProperties) > 0 { + for k, v := range m.PatternProperties { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Dependencies) > 0 { + for k, v := range m.Dependencies { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalItems != nil { + l = m.AdditionalItems.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Definitions) > 0 { + for k, v := range m.Definitions { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ExternalDocs != nil { + l = m.ExternalDocs.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Example != nil { + l = m.Example.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + n += 3 + if m.XPreserveUnknownFields != nil { + n += 3 + } + n += 3 + n += 3 + if len(m.XListMapKeys) > 0 { + for _, s := range m.XListMapKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.XListType != nil { + l = len(*m.XListType) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.XMapType != nil { + l = len(*m.XMapType) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.XValidations) > 0 { + for _, e := range m.XValidations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JSONSchemaPropsOrArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.JSONSchemas) > 0 { + for _, e := range 
m.JSONSchemas { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JSONSchemaPropsOrBool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaPropsOrStringArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Property) > 0 { + for _, s := range m.Property { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *ValidationRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.OptionalOldSelf != nil { + n += 2 + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookConversion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientConfig != nil { + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ConversionReviewVersions) > 0 { + for _, s := range m.ConversionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ConversionRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" + s := strings.Join([]string{`&ConversionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, + `Objects:` + repeatedStringForObjects + `,`, + `}`, + }, "") + return s +} +func (this *ConversionResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForConvertedObjects := "[]RawExtension{" + for _, f := range this.ConvertedObjects { + repeatedStringForConvertedObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConvertedObjects += "}" + s := strings.Join([]string{`&ConversionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ConvertedObjects:` + repeatedStringForConvertedObjects + `,`, + `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + 
`}`, + }, "") + return s +} +func (this *ConversionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConversionReview{`, + `Request:` + strings.Replace(this.Request.String(), "ConversionRequest", "ConversionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "ConversionResponse", "ConversionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceColumnDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceColumnDefinition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Priority:` + fmt.Sprintf("%v", this.Priority) + `,`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceConversion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceConversion{`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookConversion", "WebhookConversion", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinition{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CustomResourceDefinitionSpec", "CustomResourceDefinitionSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CustomResourceDefinitionStatus", "CustomResourceDefinitionStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CustomResourceDefinition{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CustomResourceDefinitionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionNames) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionNames{`, + `Plural:` + fmt.Sprintf("%v", this.Plural) + `,`, + `Singular:` + fmt.Sprintf("%v", this.Singular) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `ListKind:` + fmt.Sprintf("%v", this.ListKind) + `,`, + 
`Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]CustomResourceDefinitionVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Names:` + strings.Replace(strings.Replace(this.Names.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `PreserveUnknownFields:` + fmt.Sprintf("%v", this.PreserveUnknownFields) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]CustomResourceDefinitionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + `StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionVersion) String() string { + if this == nil { + return "nil" + } + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" + s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Served:` + fmt.Sprintf("%v", this.Served) + `,`, + `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, + `Deprecated:` + fmt.Sprintf("%v", this.Deprecated) + `,`, + `DeprecationWarning:` + valueToStringGenerated(this.DeprecationWarning) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresourceScale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresourceScale{`, + `SpecReplicasPath:` + fmt.Sprintf("%v", this.SpecReplicasPath) + `,`, + `StatusReplicasPath:` + fmt.Sprintf("%v", this.StatusReplicasPath) + `,`, + `LabelSelectorPath:` + valueToStringGenerated(this.LabelSelectorPath) + `,`, + `}`, + }, "") + return s +} +func (this 
*CustomResourceSubresourceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresourceStatus{`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresources{`, + `Status:` + strings.Replace(this.Status.String(), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, + `Scale:` + strings.Replace(this.Scale.String(), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceValidation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceValidation{`, + `OpenAPIV3Schema:` + strings.Replace(this.OpenAPIV3Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalDocumentation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalDocumentation{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *JSON) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSON{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaProps) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnum := "[]JSON{" + for _, f := range this.Enum { + repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + "," + } + repeatedStringForEnum += "}" + repeatedStringForAllOf := "[]JSONSchemaProps{" + for _, f := range this.AllOf { + repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAllOf += "}" + repeatedStringForOneOf := "[]JSONSchemaProps{" + for _, f := range this.OneOf { + repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForOneOf += "}" + repeatedStringForAnyOf := "[]JSONSchemaProps{" + for _, f := range this.AnyOf { + repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAnyOf += "}" + repeatedStringForXValidations := "[]ValidationRule{" + for _, f := range this.XValidations { + repeatedStringForXValidations += strings.Replace(strings.Replace(f.String(), "ValidationRule", "ValidationRule", 1), `&`, ``, 1) + "," + } + repeatedStringForXValidations += "}" + keysForProperties := make([]string, 0, len(this.Properties)) + for k := range this.Properties { + keysForProperties = append(keysForProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + mapStringForProperties := "map[string]JSONSchemaProps{" + for _, k := range keysForProperties { + mapStringForProperties += fmt.Sprintf("%v: %v,", k, this.Properties[k]) + } + mapStringForProperties += "}" + keysForPatternProperties := make([]string, 0, len(this.PatternProperties)) + for k := range this.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + mapStringForPatternProperties := "map[string]JSONSchemaProps{" + for _, k := range keysForPatternProperties { + 
mapStringForPatternProperties += fmt.Sprintf("%v: %v,", k, this.PatternProperties[k]) + } + mapStringForPatternProperties += "}" + keysForDependencies := make([]string, 0, len(this.Dependencies)) + for k := range this.Dependencies { + keysForDependencies = append(keysForDependencies, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + mapStringForDependencies := "JSONSchemaDependencies{" + for _, k := range keysForDependencies { + mapStringForDependencies += fmt.Sprintf("%v: %v,", k, this.Dependencies[k]) + } + mapStringForDependencies += "}" + keysForDefinitions := make([]string, 0, len(this.Definitions)) + for k := range this.Definitions { + keysForDefinitions = append(keysForDefinitions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + mapStringForDefinitions := "JSONSchemaDefinitions{" + for _, k := range keysForDefinitions { + mapStringForDefinitions += fmt.Sprintf("%v: %v,", k, this.Definitions[k]) + } + mapStringForDefinitions += "}" + s := strings.Join([]string{`&JSONSchemaProps{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Schema:` + fmt.Sprintf("%v", this.Schema) + `,`, + `Ref:` + valueToStringGenerated(this.Ref) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Title:` + fmt.Sprintf("%v", this.Title) + `,`, + `Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`, + `Maximum:` + valueToStringGenerated(this.Maximum) + `,`, + `ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`, + `Minimum:` + valueToStringGenerated(this.Minimum) + `,`, + `ExclusiveMinimum:` + fmt.Sprintf("%v", this.ExclusiveMinimum) + `,`, + `MaxLength:` + valueToStringGenerated(this.MaxLength) + `,`, + `MinLength:` + valueToStringGenerated(this.MinLength) + `,`, + `Pattern:` + fmt.Sprintf("%v", this.Pattern) + `,`, + `MaxItems:` + valueToStringGenerated(this.MaxItems) + `,`, + `MinItems:` + valueToStringGenerated(this.MinItems) + `,`, + `UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`, + `MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`, + `Enum:` + repeatedStringForEnum + `,`, + `MaxProperties:` + valueToStringGenerated(this.MaxProperties) + `,`, + `MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`, + `Required:` + fmt.Sprintf("%v", this.Required) + `,`, + `Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, + `AllOf:` + repeatedStringForAllOf + `,`, + `OneOf:` + repeatedStringForOneOf + `,`, + `AnyOf:` + repeatedStringForAnyOf + `,`, + `Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Properties:` + mapStringForProperties + `,`, + `AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `PatternProperties:` + mapStringForPatternProperties + `,`, + `Dependencies:` + mapStringForDependencies + `,`, + `AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `Definitions:` + mapStringForDefinitions + `,`, + `ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, + `Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`, + `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, + `XPreserveUnknownFields:` + 
valueToStringGenerated(this.XPreserveUnknownFields) + `,`, + `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, + `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, + `XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`, + `XListType:` + valueToStringGenerated(this.XListType) + `,`, + `XMapType:` + valueToStringGenerated(this.XMapType) + `,`, + `XValidations:` + repeatedStringForXValidations + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrArray) String() string { + if this == nil { + return "nil" + } + repeatedStringForJSONSchemas := "[]JSONSchemaProps{" + for _, f := range this.JSONSchemas { + repeatedStringForJSONSchemas += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForJSONSchemas += "}" + s := strings.Join([]string{`&JSONSchemaPropsOrArray{`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `JSONSchemas:` + repeatedStringForJSONSchemas + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrBool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONSchemaPropsOrBool{`, + `Allows:` + fmt.Sprintf("%v", this.Allows) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrStringArray) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONSchemaPropsOrStringArray{`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Property:` + fmt.Sprintf("%v", this.Property) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *ValidationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidationRule{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `OptionalOldSelf:` + valueToStringGenerated(this.OptionalOldSelf) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookConversion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookConversion{`, + `ClientConfig:` + strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } 
+ pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ConversionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredAPIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredAPIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, runtime.RawExtension{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConversionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConvertedObjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConvertedObjects = append(m.ConvertedObjects, runtime.RawExtension{}) + if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConversionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ConversionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &ConversionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &ConversionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceConversion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceConversion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = ConversionStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Webhook == nil { + m.Webhook = &WebhookConversion{} + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CustomResourceDefinitionConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
[Flattened diff hunk: machine-generated gogo/protobuf decoders from the vendored file
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go. The added lines are the
Unmarshal methods for CustomResourceDefinitionList, CustomResourceDefinitionNames,
CustomResourceDefinitionSpec, CustomResourceDefinitionStatus, CustomResourceDefinitionVersion,
CustomResourceSubresourceScale, CustomResourceSubresourceStatus, CustomResourceSubresources,
CustomResourceValidation, ExternalDocumentation, JSON, and JSONSchemaProps (the last continuing
past this excerpt, through fields such as Properties, Dependencies, Nullable, XListMapKeys, and
XValidations). Every method follows the same template: read a varint tag, split it into
fieldNum (tag >> 3) and wireType (tag & 0x7), bounds-check the payload length, then decode the
field — varints for bools and integers, 8-byte little-endian for float64, length-delimited slices
for strings, bytes, nested messages, and map entries — and skip unknown fields via skipGenerated.
This code is not edited by hand; `go mod vendor` reproduces it verbatim from the module cache.]
m.XValidations[len(m.XValidations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) + if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allows = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FieldValueErrorReason(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OptionalOldSelf", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalOldSelf = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookConversion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookConversion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookConversion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientConfig == nil { + m.ClientConfig = &WebhookClientConfig{} + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConversionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConversionReviewVersions = append(m.ConversionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto new file mode 100644 index 000000000..3c39d63a5 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -0,0 +1,792 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
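For orientation: every Unmarshal method above repeats the same base-128 varint idiom (accumulate seven payload bits per byte via `shift += 7` and `b&0x7F`, stop when a byte's high bit is clear, and guard against 64-bit overflow and buffer overrun). A minimal standalone Go sketch of that loop follows; it is illustrative only, and the helper name `decodeVarint` is hypothetical, not part of this patch:

package main

import (
	"errors"
	"fmt"
)

var (
	errOverflow = errors.New("varint overflows 64 bits")  // same guard as ErrIntOverflowGenerated
	errShortBuf = errors.New("unexpected end of buffer")  // where the generated code returns io.ErrUnexpectedEOF
)

// decodeVarint reads a protobuf base-128 varint from data starting at offset
// and returns the value together with the offset of the first byte after it.
func decodeVarint(data []byte, offset int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		if offset >= len(data) {
			return 0, 0, errShortBuf
		}
		b := data[offset]
		offset++
		v |= uint64(b&0x7F) << shift // low 7 bits carry payload
		if b < 0x80 {                // high bit clear: final byte
			return v, offset, nil
		}
	}
}

func main() {
	// 0xAC 0x02 decodes to 300: 0x2C | (0x02 << 7) = 44 + 256.
	v, next, err := decodeVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}

The decoded value doubles as the field tag at the top of each generated loop iteration: the code above in this patch splits it as `fieldNum := int32(wire >> 3)` and `wireType := int(wire & 0x7)` before dispatching on the field number.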
+ +syntax = "proto2"; + +package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"; + +// ConversionRequest describes the conversion request parameters. +message ConversionRequest { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" + optional string desiredAPIVersion = 2; + + // objects is the list of custom resource objects to be converted. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; +} + +// ConversionResponse describes a conversion response. +message ConversionResponse { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + optional string uid = 1; + + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` + // will be used to construct an error message for the end user. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; +} + +// ConversionReview describes a conversion request/response. +message ConversionReview { + // request describes the attributes for the conversion request. + // +optional + optional ConversionRequest request = 1; + + // response describes the attributes for the conversion response. + // +optional + optional ConversionResponse response = 2; +} + +// CustomResourceColumnDefinition specifies a column for server side printing. +message CustomResourceColumnDefinition { + // name is a human readable name for the column. + optional string name = 1; + + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + optional string type = 2; + + // format is an optional OpenAPI type definition for this column. 
The 'name' format is applied + // to the primary identifier column to assist clients in identifying the column that is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + // +optional + optional string format = 3; + + // description is a human readable description of this column. + // +optional + optional string description = 4; + + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a priority greater than 0. + // +optional + optional int32 priority = 5; + + // jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. + optional string jsonPath = 6; +} + +// CustomResourceConversion describes how to convert different versions of a CR. +message CustomResourceConversion { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `"None"`: The converter only changes the apiVersion and does not touch any other field in the custom resource. + // - `"Webhook"`: API Server will call an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. + optional string strategy = 1; + + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `"Webhook"`. + // +optional + optional WebhookConversion webhook = 2; +} + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +message CustomResourceDefinition { + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes how the user wants the resources to appear + optional CustomResourceDefinitionSpec spec = 2; + + // status indicates the actual state of the CustomResourceDefinition + // +optional + optional CustomResourceDefinitionStatus status = 3; +} + +// CustomResourceDefinitionCondition contains details for the current condition of this CustomResourceDefinition. +message CustomResourceDefinitionCondition { + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. + optional string type = 1; + + // status is the status of the condition. + // Can be True, False, Unknown. + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable message indicating details about the last transition. + // +optional + optional string message = 5; +} + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. 
+message CustomResourceDefinitionList { + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items lists individual CustomResourceDefinition objects + repeated CustomResourceDefinition items = 2; +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +message CustomResourceDefinitionNames { + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis/<group>/<version>/.../<plural>`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). + // Must be all lowercase. + optional string plural = 1; + + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. + // +optional + optional string singular = 2; + + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get <shortname>`. + // It must be all lowercase. + // +optional + repeated string shortNames = 3; + + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. + optional string kind = 4; + + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". + // +optional + optional string listKind = 5; + + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. + // +optional + repeated string categories = 6; +} + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +message CustomResourceDefinitionSpec { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis/<group>/...`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). + optional string group = 1; + + // names specify the resource and kind names for the custom resource. + optional CustomResourceDefinitionNames names = 3; + + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. + optional string scope = 4; + + // versions is the list of all API versions of the defined custom resource. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + repeated CustomResourceDefinitionVersion versions = 7; + + // conversion defines conversion settings for the CRD. 
+ // +optional + optional CustomResourceConversion conversion = 9; + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // This field is deprecated in favor of setting `x-kubernetes-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. + // See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details. + // +optional + optional bool preserveUnknownFields = 10; +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +message CustomResourceDefinitionStatus { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + // +listType=map + // +listMapKey=type + repeated CustomResourceDefinitionCondition conditions = 1; + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different than the names in spec. + // +optional + optional CustomResourceDefinitionNames acceptedNames = 2; + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list. + // +optional + repeated string storedVersions = 3; +} + +// CustomResourceDefinitionVersion describes a version of a CRD. +message CustomResourceDefinitionVersion { + // name is the version name, e.g. "v1", "v2beta1", etc. + // The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. + optional string name = 1; + + // served is a flag enabling/disabling this version from being served via REST APIs + optional bool served = 2; + + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + optional bool storage = 3; + + // deprecated indicates this version of the custom resource API is deprecated. + // When set to true, API requests to this version receive a warning header in the server response. + // Defaults to false. + // +optional + optional bool deprecated = 7; + + // deprecationWarning overrides the default warning returned to API clients. + // May only be set when `deprecated` is true. + // The default warning indicates this version is deprecated and recommends use + // of the newest served version of equal or greater stability, if one exists. + // +optional + optional string deprecationWarning = 8; + + // schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource. + // +optional + optional CustomResourceValidation schema = 4; + + // subresources specify what subresources this version of the defined custom resource has. + // +optional + optional CustomResourceSubresources subresources = 5; + + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If no columns are specified, a single column displaying the age of the custom resource is used. 
+ // +optional + repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; +} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +message CustomResourceSubresourceScale { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + optional string specReplicasPath = 1; + + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + optional string statusReplicasPath = 2; + + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + optional string labelSelectorPath = 3; +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +message CustomResourceSubresourceStatus { +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +message CustomResourceSubresources { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + optional CustomResourceSubresourceStatus status = 1; + + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + // +optional + optional CustomResourceSubresourceScale scale = 2; +} + +// CustomResourceValidation is a list of validation methods for CustomResources. +message CustomResourceValidation { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + optional JSONSchemaProps openAPIV3Schema = 1; +} + +// ExternalDocumentation allows referencing an external resource for extended documentation. 
+message ExternalDocumentation { + optional string description = 1; + + optional string url = 2; +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +message JSON { + optional bytes raw = 1; +} + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +message JSONSchemaProps { + optional string id = 1; + + optional string schema = 2; + + optional string ref = 3; + + optional string description = 4; + + optional string type = 5; + + // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: + // + // - bsonobjectid: a bson object ID, i.e. a 24-character hex string + // - uri: a URI as parsed by Golang net/url.ParseRequestURI + // - email: an email address as parsed by Golang net/mail.ParseAddress + // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. + // - ipv4: an IPv4 IP as parsed by Golang net.ParseIP + // - ipv6: an IPv6 IP as parsed by Golang net.ParseIP + // - cidr: a CIDR as parsed by Golang net.ParseCIDR + // - mac: a MAC address as parsed by Golang net.ParseMAC + // - uuid: a UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ + // - uuid3: a UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ + // - uuid4: a UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ + // - uuid5: a UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ + // - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" + // - isbn10: an ISBN10 number string like "0321751043" + // - isbn13: an ISBN13 number string like "978-0321751041" + // - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non-digit characters mixed in + // - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ + // - hexcolor: a hexadecimal color code like "#FFFFFF" following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ + // - rgbcolor: an RGB color code like "rgb(255,255,255)" + // - byte: base64 encoded binary data + // - password: any kind of string + // - date: a date string like "2006-01-02" as defined by full-date in RFC3339 + // - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format + // - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. + optional string format = 6; + + optional string title = 7; + + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // Defaulting requires spec.preserveUnknownFields to be false. 
+ optional JSON default = 8; + + optional double maximum = 9; + + optional bool exclusiveMaximum = 10; + + optional double minimum = 11; + + optional bool exclusiveMinimum = 12; + + optional int64 maxLength = 13; + + optional int64 minLength = 14; + + optional string pattern = 15; + + optional int64 maxItems = 16; + + optional int64 minItems = 17; + + optional bool uniqueItems = 18; + + optional double multipleOf = 19; + + repeated JSON enum = 20; + + optional int64 maxProperties = 21; + + optional int64 minProperties = 22; + + repeated string required = 23; + + optional JSONSchemaPropsOrArray items = 24; + + repeated JSONSchemaProps allOf = 25; + + repeated JSONSchemaProps oneOf = 26; + + repeated JSONSchemaProps anyOf = 27; + + optional JSONSchemaProps not = 28; + + map<string, JSONSchemaProps> properties = 29; + + optional JSONSchemaPropsOrBool additionalProperties = 30; + + map<string, JSONSchemaProps> patternProperties = 31; + + map<string, JSONSchemaPropsOrStringArray> dependencies = 32; + + optional JSONSchemaPropsOrBool additionalItems = 33; + + map<string, JSONSchemaProps> definitions = 34; + + optional ExternalDocumentation externalDocs = 35; + + optional JSON example = 36; + + optional bool nullable = 37; + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + optional bool xKubernetesPreserveUnknownFields = 38; + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + optional bool xKubernetesEmbeddedResource = 39; + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if it follows one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + optional bool xKubernetesIntOrString = 40; + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // The properties specified must either be required or have a default value, + // to ensure those properties are present for all list items. + // + // +optional + repeated string xKubernetesListMapKeys = 41; + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). 
+ // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + // array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + optional string xKubernetesListType = 42; + + // x-kubernetes-map-type annotates an object to further describe its topology. + // This extension must only be used when type is object and may have 2 possible values: + // + // 1) `granular`: + // These maps are actual maps (key-value pairs) and each field is independent + // of the others (they can each be manipulated by separate actors). This is + // the default behaviour for all maps. + // 2) `atomic`: the map is treated as a single entity, like a scalar. + // Atomic maps will be entirely replaced when updated. + // +optional + optional string xKubernetesMapType = 43; + + // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. + // This field is alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. + // +patchMergeKey=rule + // +patchStrategy=merge + // +listType=map + // +listMapKey=rule + repeated ValidationRule xKubernetesValidations = 44; +} + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +message JSONSchemaPropsOrArray { + optional JSONSchemaProps schema = 1; + + repeated JSONSchemaProps jSONSchemas = 2; +} + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +message JSONSchemaPropsOrBool { + optional bool allows = 1; + + optional JSONSchemaProps schema = 2; +} + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +message JSONSchemaPropsOrStringArray { + optional JSONSchemaProps schema = 1; + + repeated string property = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // namespace is the namespace of the service. + // Required + optional string namespace = 1; + + // name is the name of the service. + // Required + optional string name = 2; + + // path is an optional URL path at which the webhook will be contacted. + // +optional + optional string path = 3; + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + optional int32 port = 4; +} + +// ValidationRule describes a validation rule written in the CEL expression language. +message ValidationRule { + // Rule represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. + // The `self` variable in the CEL expression is bound to the scoped value. 
+ // Example: + // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} + // + // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable + // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as + // absent fields in CEL expressions. + // If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map + // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map + // are accessible via CEL macros and functions such as `self.all(...)`. + // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and + // functions. + // If the Rule is scoped to a scalar, `self` is bound to the scalar value. + // Examples: + // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} + // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} + // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. + // + // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL + // expressions. This includes: + // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. + // - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as: + // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true + // - An array where the items schema is of an "unknown type" + // - An object where the additionalProperties schema is of an "unknown type" + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} + // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} + // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} + // + // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with
+  //     non-intersecting keys are appended, retaining their partial order.
+  //
+  // If `rule` makes use of the `oldSelf` variable it is implicitly a
+  // `transition rule`.
+  //
+  // By default, the `oldSelf` variable is the same type as `self`.
+  // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional
+  // variable whose value() is the same type as `self`.
+  // See the documentation for the `optionalOldSelf` field for details.
+  //
+  // Transition rules by default are applied only on UPDATE requests and are
+  // skipped if an old value could not be found. You can opt a transition
+  // rule into unconditional evaluation by setting `optionalOldSelf` to true.
+  optional string rule = 1;
+
+  // Message represents the message displayed when validation fails. The message is required if the Rule contains
+  // line breaks. The message must not contain line breaks.
+  // If unset, the message is "failed rule: {Rule}".
+  // e.g. "must be a URL with the host matching spec.host"
+  optional string message = 2;
+
+  // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
+  // Since messageExpression is used as a failure message, it must evaluate to a string.
+  // If both message and messageExpression are present on a rule, then messageExpression will be used if validation
+  // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
+  // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
+  // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
+  // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
+  // messageExpression has access to all the same variables as the rule; the only difference is the return type.
+  // Example:
+  // "x must be less than max ("+string(self.max)+")"
+  // +optional
+  optional string messageExpression = 3;
+
+  // reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule.
+  // The HTTP status code returned to the caller will match the reason of the first failed validation rule.
+  // The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate".
+  // If not set, defaults to "FieldValueInvalid".
+  // All future added reasons must be accepted by clients when reading this value, and unknown reasons should be treated as FieldValueInvalid.
+  // +optional
+  optional string reason = 4;
+
+  // fieldPath represents the field path returned when the validation fails.
+  // It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field.
+  // e.g. when the validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`
+  // If the validation checks that two lists have unique attributes, the fieldPath could be set to either of the lists: e.g. `.testList`
+  // Numeric indexes into arrays are not supported.
+  // Currently, only child operations that refer to an existing field are supported.
+  // Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info.
+  // For a field name that contains special characters, use `['specialName']` to refer to the field name,
+  // e.g. for an attribute `foo.34$` that appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
+  // +optional
+  optional string fieldPath = 5;
+
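As an aside for readers of this vendored API: the rule semantics described above can be exercised directly with github.com/google/cel-go, the CEL implementation the Kubernetes API server builds on. A minimal, illustrative sketch using a recent cel-go version (the sample rule, field names, and values are not part of this patch):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// `self` is declared as a dynamic value, mirroring how a rule is bound
	// to whatever schema node the x-kubernetes-validations extension sits on.
	env, err := cel.NewEnv(cel.Variable("self", cel.DynType))
	if err != nil {
		panic(err)
	}

	// A rule as it might appear under x-kubernetes-validations.
	ast, iss := env.Compile(`self.minReplicas <= self.replicas`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}

	// Bind `self` to a scoped value, here an object with properties.
	out, _, err := prg.Eval(map[string]interface{}{
		"self": map[string]interface{}{"minReplicas": 1, "replicas": 3},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // true
}
```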
+  // optionalOldSelf is used to opt a transition rule into evaluation
+  // even when the object is first created, or if the old object is
+  // missing the value.
+  //
+  // When enabled `oldSelf` will be a CEL optional whose value will be
+  // `None` if there is no old value, or when the object is initially created.
+  //
+  // You may check for presence of oldSelf using `oldSelf.hasValue()` and
+  // unwrap it after checking using `oldSelf.value()`. Check the CEL
+  // documentation for Optional types for more information:
+  // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes
+  //
+  // May not be set unless `oldSelf` is used in `rule`.
+  //
+  // +featureGate=CRDValidationRatcheting
+  // +optional
+  optional bool optionalOldSelf = 6;
+}
+
+// WebhookClientConfig contains the information to make a TLS connection with the webhook.
+message WebhookClientConfig {
+  // url gives the location of the webhook, in standard URL form
+  // (`scheme://host:port/path`). Exactly one of `url` or `service`
+  // must be specified.
+  //
+  // The `host` should not refer to a service running in the cluster; use
+  // the `service` field instead. The host might be resolved via external
+  // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+  // in-cluster DNS as that would be a layering violation). `host` may
+  // also be an IP address.
+  //
+  // Please note that using `localhost` or `127.0.0.1` as a `host` is
+  // risky unless you take great care to run this webhook on all hosts
+  // which run an apiserver which might need to make calls to this
+  // webhook. Such installs are likely to be non-portable, i.e., not easy
+  // to turn up in a new cluster.
+  //
+  // The scheme must be "https"; the URL must begin with "https://".
+  //
+  // A path is optional, and if present may be any string permissible in
+  // a URL. You may use the path to pass an arbitrary string to the
+  // webhook, for example, a cluster identifier.
+  //
+  // Attempting to use a user or basic auth e.g. "user:password@" is not
+  // allowed. Fragments ("#...") and query parameters ("?...") are not
+  // allowed, either.
+  //
+  // +optional
+  optional string url = 3;
+
+  // service is a reference to the service for this webhook. Either
+  // service or url must be specified.
+  //
+  // If the webhook is running within the cluster, then you should use `service`.
+  //
+  // +optional
+  optional ServiceReference service = 1;
+
+  // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+  // If unspecified, system trust roots on the apiserver are used.
+  // +optional
+  optional bytes caBundle = 2;
+}
+
+// WebhookConversion describes how to call a conversion webhook
+message WebhookConversion {
+  // clientConfig is the instructions for how to call the webhook if strategy is `Webhook`.
+  // +optional
+  optional WebhookClientConfig clientConfig = 2;
+
+  // conversionReviewVersions is an ordered list of preferred `ConversionReview`
+  // versions the Webhook expects. The API server will use the first version in
+  // the list which it supports.
If none of the versions specified in this list + // are supported by API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + repeated string conversionReviewVersions = 3; +} + diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go new file mode 100644 index 000000000..12cc2f6f2 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go @@ -0,0 +1,136 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "bytes" + "errors" + + "k8s.io/apimachinery/pkg/util/json" +) + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrBool + switch { + case len(data) == 0: + case data[0] == '{': + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Allows = true + nw.Schema = &sch + case len(data) == 4 && string(data) == "true": + nw.Allows = true + case len(data) == 5 && string(data) == "false": + nw.Allows = false + default: + return errors.New("boolean or JSON schema expected") + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw JSONSchemaPropsOrStringArray + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return json.Marshal(s.JSONSchemas) + } + return json.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.JSONSchemas); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSON) MarshalJSON() ([]byte, error) { + if len(s.Raw) > 0 { + return s.Raw, nil + } + return []byte("null"), nil + +} + 
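The asymmetry these marshallers encode is easiest to see in a round trip: a bare boolean stays a boolean, while a schema object wins over the Allows flag. A hedged sketch, assuming this vendored package is importable as apiextensionsv1:

```go
package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var v apiextensionsv1.JSONSchemaPropsOrBool

	// `false` decodes to Allows=false with a nil schema and encodes back to `false`.
	if err := json.Unmarshal([]byte(`false`), &v); err != nil {
		panic(err)
	}
	b, _ := json.Marshal(v)
	fmt.Println(string(b)) // false

	// An object decodes to Allows=true with the schema attached; on re-encoding
	// the schema takes precedence over the boolean.
	if err := json.Unmarshal([]byte(`{"type":"string"}`), &v); err != nil {
		panic(err)
	}
	b, _ = json.Marshal(v)
	fmt.Println(string(b)) // {"type":"string"}
}
```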
+func (s *JSON) UnmarshalJSON(data []byte) error { + if len(data) > 0 && !bytes.Equal(data, nullLiteral) { + s.Raw = data + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go new file mode 100644 index 000000000..bd6a6ed00 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiextensions.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CustomResourceDefinition{}, + &CustomResourceDefinitionList{}, + &ConversionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go new file mode 100644 index 000000000..59ec0e372 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go @@ -0,0 +1,485 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// ConversionStrategyType describes different conversion types.
+type ConversionStrategyType string
+
+const (
+	// KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces.
+	// The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL.
+	// If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged.
+	KubeAPIApprovedAnnotation = "api-approved.kubernetes.io"
+
+	// NoneConverter is a converter that only sets the apiVersion of the CR and leaves everything else unchanged.
+	NoneConverter ConversionStrategyType = "None"
+	// WebhookConverter is a converter that calls out to an external webhook to convert the CR.
+	WebhookConverter ConversionStrategyType = "Webhook"
+)
+
+// CustomResourceDefinitionSpec describes how a user wants their resource to appear
+type CustomResourceDefinitionSpec struct {
+	// group is the API group of the defined custom resource.
+	// The custom resources are served under `/apis/<group>/...`.
+	// Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`).
+	Group string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	// names specify the resource and kind names for the custom resource.
+	Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"`
+	// scope indicates whether the defined custom resource is cluster- or namespace-scoped.
+	// Allowed values are `Cluster` and `Namespaced`.
+	Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"`
+	// versions is the list of all API versions of the defined custom resource.
+	// Version names are used to compute the order in which served versions are listed in API discovery.
+	// If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+	// lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+	// then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+	// by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing
+	// major version, then minor version. An example sorted list of versions:
+	// v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+	Versions []CustomResourceDefinitionVersion `json:"versions" protobuf:"bytes,7,rep,name=versions"`
+
+	// conversion defines conversion settings for the CRD.
+	// +optional
+	Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"`
+
+	// preserveUnknownFields indicates that object fields which are not specified
+	// in the OpenAPI schema should be preserved when persisting to storage.
+	// apiVersion, kind, metadata and known fields inside metadata are always preserved.
+	// This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`.
+	// See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.
+	// +optional
+	PreserveUnknownFields bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"`
+}
+
+// CustomResourceConversion describes how to convert different versions of a CR.
+type CustomResourceConversion struct {
+	// strategy specifies how custom resources are converted between versions. Allowed values are:
+	// - `"None"`: The converter only changes the apiVersion and does not touch any other field in the custom resource.
+	// - `"Webhook"`: The API server will call out to an external webhook to do the conversion. Additional information
+	//   is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set.
+	Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"`
+
+	// webhook describes how to call the conversion webhook. Required when `strategy` is set to `"Webhook"`.
+	// +optional
+	Webhook *WebhookConversion `json:"webhook,omitempty" protobuf:"bytes,2,opt,name=webhook"`
+}
+
+// WebhookConversion describes how to call a conversion webhook
+type WebhookConversion struct {
+	// clientConfig is the instructions for how to call the webhook if strategy is `Webhook`.
+	// +optional
+	ClientConfig *WebhookClientConfig `json:"clientConfig,omitempty" protobuf:"bytes,2,name=clientConfig"`
+
+	// conversionReviewVersions is an ordered list of preferred `ConversionReview`
+	// versions the Webhook expects. The API server will use the first version in
+	// the list which it supports. If none of the versions specified in this list
+	// are supported by the API server, conversion will fail for the custom resource.
+	// If a persisted Webhook configuration specifies allowed versions and does not
+	// include any versions known to the API Server, calls to the webhook will fail.
+	ConversionReviewVersions []string `json:"conversionReviewVersions" protobuf:"bytes,3,rep,name=conversionReviewVersions"`
+}
+
+// WebhookClientConfig contains the information to make a TLS connection with the webhook.
+type WebhookClientConfig struct {
+	// url gives the location of the webhook, in standard URL form
+	// (`scheme://host:port/path`). Exactly one of `url` or `service`
+	// must be specified.
+	//
+	// The `host` should not refer to a service running in the cluster; use
+	// the `service` field instead. The host might be resolved via external
+	// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+	// in-cluster DNS as that would be a layering violation). `host` may
+	// also be an IP address.
+	//
+	// Please note that using `localhost` or `127.0.0.1` as a `host` is
+	// risky unless you take great care to run this webhook on all hosts
+	// which run an apiserver which might need to make calls to this
+	// webhook. Such installs are likely to be non-portable, i.e., not easy
+	// to turn up in a new cluster.
+	//
+	// The scheme must be "https"; the URL must begin with "https://".
+	//
+	// A path is optional, and if present may be any string permissible in
+	// a URL. You may use the path to pass an arbitrary string to the
+	// webhook, for example, a cluster identifier.
+	//
+	// Attempting to use a user or basic auth e.g. "user:password@" is not
+	// allowed. Fragments ("#...") and query parameters ("?...") are not
+	// allowed, either.
+	//
+	// +optional
+	URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"`
+
+	// service is a reference to the service for this webhook. Either
+	// service or url must be specified.
+	//
+	// If the webhook is running within the cluster, then you should use `service`.
+	//
+	// +optional
+	Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`
+
+	// caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+	// If unspecified, system trust roots on the apiserver are used.
+	// +optional
+	CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
+}
+
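Putting the exclusive url/service choice together with the surrounding types, a conversion stanza for an in-cluster webhook might be assembled as below, written as if it lived in this package. The service name, path, and pointer helpers are illustrative, not taken from this patch:

```go
func strPtr(s string) *string { return &s }
func int32Ptr(i int32) *int32 { return &i }

// caBundle is the PEM-encoded CA used to verify the webhook's serving cert.
func exampleConversion(caBundle []byte) *CustomResourceConversion {
	return &CustomResourceConversion{
		Strategy: WebhookConverter,
		Webhook: &WebhookConversion{
			ClientConfig: &WebhookClientConfig{
				// Exactly one of URL or Service may be set; Service is the
				// right choice for a webhook running inside the cluster.
				Service: &ServiceReference{
					Namespace: "default",
					Name:      "crd-conversion-webhook",
					Path:      strPtr("/convert"),
					Port:      int32Ptr(443),
				},
				CABundle: caBundle,
			},
			ConversionReviewVersions: []string{"v1"},
		},
	}
}
```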
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+	// namespace is the namespace of the service.
+	// Required
+	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+	// name is the name of the service.
+	// Required
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// path is an optional URL path at which the webhook will be contacted.
+	// +optional
+	Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"`
+
+	// port is an optional service port at which the webhook will be contacted.
+	// `port` should be a valid port number (1-65535, inclusive).
+	// Defaults to 443 for backward compatibility.
+	// +optional
+	Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"`
+}
+
+// CustomResourceDefinitionVersion describes a version for CRD.
+type CustomResourceDefinitionVersion struct {
+	// name is the version name, e.g. "v1", "v2beta1", etc.
+	// The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// served is a flag enabling/disabling this version from being served via REST APIs
+	Served bool `json:"served" protobuf:"varint,2,opt,name=served"`
+	// storage indicates this version should be used when persisting custom resources to storage.
+	// There must be exactly one version with storage=true.
+	Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"`
+	// deprecated indicates this version of the custom resource API is deprecated.
+	// When set to true, API requests to this version receive a warning header in the server response.
+	// Defaults to false.
+	// +optional
+	Deprecated bool `json:"deprecated,omitempty" protobuf:"varint,7,opt,name=deprecated"`
+	// deprecationWarning overrides the default warning returned to API clients.
+	// May only be set when `deprecated` is true.
+	// The default warning indicates this version is deprecated and recommends use
+	// of the newest served version of equal or greater stability, if one exists.
+	// +optional
+	DeprecationWarning *string `json:"deprecationWarning,omitempty" protobuf:"bytes,8,opt,name=deprecationWarning"`
+	// schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource.
+	// +optional
+	Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"`
+	// subresources specify what subresources this version of the defined custom resource has.
+	// +optional
+	Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"`
+	// additionalPrinterColumns specifies additional columns returned in Table output.
+	// See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details.
+	// If no columns are specified, a single column displaying the age of the custom resource is used.
+	// +optional
+	AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"`
+}
+
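The served/storage contract is clearest with two versions of the same resource: both may be served, but exactly one persists objects. An illustrative fragment (the version names and warning text are made up):

```go
warning := "example.com/v1beta1 Widget is deprecated; use example.com/v1"

versions := []CustomResourceDefinitionVersion{
	{
		Name:               "v1beta1",
		Served:             true,  // still reachable via REST
		Storage:            false, // no longer the persisted form
		Deprecated:         true,
		DeprecationWarning: &warning,
	},
	{
		Name:    "v1",
		Served:  true,
		Storage: true, // exactly one version may set storage=true
	},
}
```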
+// CustomResourceColumnDefinition specifies a column for server side printing.
+type CustomResourceColumnDefinition struct {
+	// name is a human readable name for the column.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// type is an OpenAPI type definition for this column.
+	// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.
+	Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
+	// format is an optional OpenAPI type definition for this column. The 'name' format is applied
+	// to the primary identifier column to assist clients in identifying the column that is the resource name.
+	// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.
+	// +optional
+	Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"`
+	// description is a human readable description of this column.
+	// +optional
+	Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"`
+	// priority is an integer defining the relative importance of this column compared to others. Lower
+	// numbers are considered higher priority. Columns that may be omitted in limited space scenarios
+	// should be given a priority greater than 0.
+	// +optional
+	Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"`
+	// jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against
+	// each custom resource to produce the value for this column.
+	JSONPath string `json:"jsonPath" protobuf:"bytes,6,opt,name=jsonPath"`
+}
+
+// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition
+type CustomResourceDefinitionNames struct {
+	// plural is the plural name of the resource to serve.
+	// The custom resources are served under `/apis/<group>/<version>/.../<plural>`.
+	// Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`).
+	// Must be all lowercase.
+	Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"`
+	// singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.
+	// +optional
+	Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"`
+	// shortNames are short names for the resource, exposed in API discovery documents,
+	// and used by clients to support invocations like `kubectl get <shortname>`.
+	// It must be all lowercase.
+	// +optional
+	ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"`
+	// kind is the serialized kind of the resource. It is normally CamelCase and singular.
+	// Custom resource instances will use this value as the `kind` attribute in API calls.
+	Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"`
+	// listKind is the serialized kind of the list for this resource. Defaults to "`kind`List".
+	// +optional
+	ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"`
+	// categories is a list of grouped resources this custom resource belongs to (e.g. 'all').
+	// This is published in API discovery documents, and used by clients to support invocations like
+	// `kubectl get all`.
+	// +optional
+	Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"`
+}
+
+// ResourceScope is an enum defining the different scopes available to a custom resource
+type ResourceScope string
+
+const (
+	ClusterScoped   ResourceScope = "Cluster"
+	NamespaceScoped ResourceScope = "Namespaced"
+)
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type
+type CustomResourceDefinitionConditionType string
+
+const (
+	// Established means that the resource has become active. A resource is established when all names are
+	// accepted without a conflict for the first time. A resource stays established until deleted, even during
+	// a later NamesAccepted due to changed names. Note that not all names can be changed.
+	Established CustomResourceDefinitionConditionType = "Established"
+	// NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in
+	// the group and are therefore accepted.
+	NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted"
+	// NonStructuralSchema means that one or more OpenAPI schema is not structural.
+	//
+	// A schema is structural if it specifies types for all values, with the only exceptions of those with
+	// - x-kubernetes-int-or-string: true — for fields which can be integer or string
+	// - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values
+	// and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extensions
+	// specified under allOf, anyOf, oneOf or not.
+	//
+	// Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be
+	// available for non-structural CRDs:
+	// - pruning
+	// - defaulting
+	// - read-only
+	// - OpenAPI publishing
+	// - webhook conversion
+	NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema"
+	// Terminating means that the CustomResourceDefinition has been deleted and is cleaning up.
+	Terminating CustomResourceDefinitionConditionType = "Terminating"
+	// KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs
+	// outside those groups, this condition will not be set. For CRDs inside those groups, the condition will
+	// be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false.
+	// See https://github.com/kubernetes/enhancements/pull/1111 for more details.
+	KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant"
+)
+
+// CustomResourceDefinitionCondition contains details for the current condition of this CustomResourceDefinition.
+type CustomResourceDefinitionCondition struct {
+	// type is the type of the condition. Types include Established, NamesAccepted and Terminating.
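Controllers that create CRDs typically wait for the Established condition before using the new endpoint. A small hedged helper over the types defined here (the helper itself is not part of this patch):

```go
// isEstablished reports whether the CRD's names were accepted and the
// resource is being actively served.
func isEstablished(crd *CustomResourceDefinition) bool {
	for _, cond := range crd.Status.Conditions {
		if cond.Type == Established {
			return cond.Status == ConditionTrue
		}
	}
	return false
}
```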
+ Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` + // status is the status of the condition. + // Can be True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // lastTransitionTime last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +type CustomResourceDefinitionStatus struct { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + // +listType=map + // +listMapKey=type + Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different than the names in spec. + // +optional + AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list. + // +optional + StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` +} + +// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of +// a CustomResourceDefinition +const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +type CustomResourceDefinition struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes how the user wants the resources to appear + Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status indicates the actual state of the CustomResourceDefinition + // +optional + Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. 
+type CustomResourceDefinitionList struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items list individual CustomResourceDefinition objects + Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CustomResourceValidation is a list of validation methods for CustomResources. +type CustomResourceValidation struct { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +type CustomResourceSubresources struct { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + // +optional + Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +type CustomResourceSubresourceStatus struct{} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +type CustomResourceSubresourceScale struct { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. 
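Wired together, the three JSON paths hang off a subresources block roughly as follows; the concrete paths are the conventional ones and are shown only for illustration:

```go
labelSelectorPath := ".status.labelSelector"

subresources := &CustomResourceSubresources{
	Status: &CustomResourceSubresourceStatus{},
	Scale: &CustomResourceSubresourceScale{
		SpecReplicasPath:   ".spec.replicas",   // feeds Scale spec.replicas
		StatusReplicasPath: ".status.replicas", // feeds Scale status.replicas
		// Required for HorizontalPodAutoscaler support; must point at a
		// string field holding a serialized label selector.
		LabelSelectorPath: &labelSelectorPath,
	},
}
```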
+ // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConversionReview describes a conversion request/response. +type ConversionReview struct { + metav1.TypeMeta `json:",inline"` + // request describes the attributes for the conversion request. + // +optional + Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // response describes the attributes for the conversion response. + // +optional + Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// ConversionRequest describes the conversion request parameters. +type ConversionRequest struct { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" + DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` + // objects is the list of custom resource objects to be converted. + Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` +} + +// ConversionResponse describes a conversion response. +type ConversionResponse struct { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` + // will be used to construct an error message for the end user. 
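A conforming conversion webhook fills in the response roughly as sketched below, written as if in this package; the actual object rewriting is elided, and the handler shape is illustrative rather than prescribed by this API:

```go
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func handleConversion(review *ConversionReview) {
	converted := make([]runtime.RawExtension, 0, len(review.Request.Objects))
	for _, obj := range review.Request.Objects {
		// ... rewrite obj.Raw to review.Request.DesiredAPIVersion, keeping
		// kind, metadata.uid, metadata.name and metadata.namespace stable ...
		converted = append(converted, obj)
	}
	review.Response = &ConversionResponse{
		UID:              review.Request.UID, // must echo the request UID
		ConvertedObjects: converted,
		// Failures are still returned with HTTP 200; the outcome lives here.
		Result: metav1.Status{Status: metav1.StatusSuccess},
	}
}
```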
+ Result metav1.Status `json:"result" protobuf:"bytes,3,name=result"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go new file mode 100644 index 000000000..a81451ad6 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -0,0 +1,412 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// FieldValueErrorReason is a machine-readable value providing more detail about why a field failed the validation. +// +enum +type FieldValueErrorReason string + +const ( + // FieldValueRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). + FieldValueRequired FieldValueErrorReason = "FieldValueRequired" + // FieldValueDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). + FieldValueDuplicate FieldValueErrorReason = "FieldValueDuplicate" + // FieldValueInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). + FieldValueInvalid FieldValueErrorReason = "FieldValueInvalid" + // FieldValueForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). + FieldValueForbidden FieldValueErrorReason = "FieldValueForbidden" +) + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +type JSONSchemaProps struct { + ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` + Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` + Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` + + // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: + // + // - bsonobjectid: a bson object ID, i.e. a 24 characters hex string + // - uri: an URI as parsed by Golang net/url.ParseRequestURI + // - email: an email address as parsed by Golang net/mail.ParseAddress + // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. 
+	//   - ipv4: an IPv4 IP as parsed by Golang net.ParseIP
+	//   - ipv6: an IPv6 IP as parsed by Golang net.ParseIP
+	//   - cidr: a CIDR as parsed by Golang net.ParseCIDR
+	//   - mac: a MAC address as parsed by Golang net.ParseMAC
+	//   - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$
+	//   - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$
+	//   - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$
+	//   - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$
+	//   - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041"
+	//   - isbn10: an ISBN10 number string like "0321751043"
+	//   - isbn13: an ISBN13 number string like "978-0321751041"
+	//   - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in
+	//   - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$
+	//   - hexcolor: a hexadecimal color code like "#FFFFFF", following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+	//   - rgbcolor: an RGB color code like "rgb(255,255,255)"
+	//   - byte: base64 encoded binary data
+	//   - password: any kind of string
+	//   - date: a date string like "2006-01-02" as defined by full-date in RFC3339
+	//   - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format
+	//   - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.
+	Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"`
+
+	Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"`
+	// default is a default value for undefined object fields.
+	// Defaulting is a beta feature under the CustomResourceDefaulting feature gate.
+	// Defaulting requires spec.preserveUnknownFields to be false.
+ Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` + Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` + Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` + MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` + MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` + Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` + MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` + MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` + UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` + MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` + Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` + MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` + MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` + Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` + Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` + AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` + OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` + AnyOf []JSONSchemaProps `json:"anyOf,omitempty" protobuf:"bytes,27,rep,name=anyOf"` + Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"` + Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"` + AdditionalProperties *JSONSchemaPropsOrBool `json:"additionalProperties,omitempty" protobuf:"bytes,30,opt,name=additionalProperties"` + PatternProperties map[string]JSONSchemaProps `json:"patternProperties,omitempty" protobuf:"bytes,31,rep,name=patternProperties"` + Dependencies JSONSchemaDependencies `json:"dependencies,omitempty" protobuf:"bytes,32,opt,name=dependencies"` + AdditionalItems *JSONSchemaPropsOrBool `json:"additionalItems,omitempty" protobuf:"bytes,33,opt,name=additionalItems"` + Definitions JSONSchemaDefinitions `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` + Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` + Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. 
It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"` + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // The properties specified must either be required or have a default value, + // to ensure those properties are present for all list items. + // + // +optional + XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + // array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"` + + // x-kubernetes-map-type annotates an object to further describe its topology. + // This extension must only be used when type is object and may have 2 possible values: + // + // 1) `granular`: + // These maps are actual maps (key-value pairs) and each fields are independent + // from each other (they can each be manipulated by separate actors). This is + // the default behaviour for all maps. + // 2) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic maps will be entirely replaced when updated. + // +optional + XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"` + + // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. + // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. 
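The extension fields compose with the ordinary schema fields; for instance, a list-map of named ports in a structural schema could be declared as below (an illustrative fragment, not taken from this patch):

```go
listType := "map"

ports := JSONSchemaProps{
	Type:         "array",
	XListType:    &listType,
	XListMapKeys: []string{"name"}, // each key must be required or defaulted
	Items: &JSONSchemaPropsOrArray{
		Schema: &JSONSchemaProps{
			Type:     "object",
			Required: []string{"name"},
			Properties: map[string]JSONSchemaProps{
				"name": {Type: "string"},
				"port": {Type: "integer"},
			},
		},
	},
}
```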
+ // +patchMergeKey=rule + // +patchStrategy=merge + // +listType=map + // +listMapKey=rule + XValidations ValidationRules `json:"x-kubernetes-validations,omitempty" patchStrategy:"merge" patchMergeKey:"rule" protobuf:"bytes,44,rep,name=xKubernetesValidations"` +} + +// ValidationRules describes a list of validation rules written in the CEL expression language. +type ValidationRules []ValidationRule + +// ValidationRule describes a validation rule written in the CEL expression language. +type ValidationRule struct { + // Rule represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. + // The `self` variable in the CEL expression is bound to the scoped value. + // Example: + // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} + // + // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable + // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as + // absent fields in CEL expressions. + // If the Rule is scoped to an object with additionalProperties (i.e. a map) the values of the map + // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map + // are accessible via CEL macros and functions such as `self.all(...)`. + // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and + // functions. + // If the Rule is scoped to a scalar, `self` is bound to the scalar value. + // Examples: + // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} + // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} + // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. + // + // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL + // expressions. This includes: + // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. + // - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as: + // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true + // - An array where the items schema is of an "unknown type" + // - An object where the additionalProperties schema is of an "unknown type" + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return".
+ // Examples: + // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} + // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} + // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} + // + // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. + // + Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"` + // Message represents the message displayed when validation fails. The message is required if the Rule contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,3,opt,name=messageExpression"` + // reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. + // The HTTP status code returned to the caller will match the reason of the first failed validation rule.
+ // The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate". + // If not set, defaults to "FieldValueInvalid". + // All future-added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid. + // +optional + Reason *FieldValueErrorReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // fieldPath represents the field path returned when the validation fails. + // It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. + // e.g. when a validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` + // If the validation checks that two lists must have unique attributes, the fieldPath could be set to either of the lists, e.g. `.testList` + // It does not support numeric list indexes. + // It currently supports only child operations that refer to an existing field. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. + // For field names that contain special characters, use `['specialName']` to refer to the field name. + // e.g. for attribute `foo.34$` appearing in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` + // +optional + FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,5,opt,name=fieldPath"` + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled, `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty" protobuf:"bytes,6,opt,name=optionalOldSelf"` +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +type JSON struct { + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSON) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSON) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaURL represents a schema URL. +type JSONSchemaURL string + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes.
+type JSONSchemaPropsOrArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrArray) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrArray) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +type JSONSchemaPropsOrBool struct { + Allows bool `protobuf:"varint,1,opt,name=allows"` + Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrBool) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrBool) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaDependencies represent a dependencies property. +type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +type JSONSchemaPropsOrStringArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + Property []string `protobuf:"bytes,2,rep,name=property"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaDefinitions contains the models explicitly defined in this spec. +type JSONSchemaDefinitions map[string]JSONSchemaProps + +// ExternalDocumentation allows referencing an external resource for extended documentation. +type ExternalDocumentation struct { + Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"` + URL string `json:"url,omitempty" protobuf:"bytes,2,opt,name=url"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go new file mode 100644 index 000000000..405021bf3 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go @@ -0,0 +1,1326 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*CustomResourceColumnDefinition)(nil), (*apiextensions.CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(a.(*CustomResourceColumnDefinition), b.(*apiextensions.CustomResourceColumnDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceColumnDefinition)(nil), (*CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(a.(*apiextensions.CustomResourceColumnDefinition), b.(*CustomResourceColumnDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinition)(nil), (*apiextensions.CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(a.(*CustomResourceDefinition), b.(*apiextensions.CustomResourceDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinition)(nil), (*CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(a.(*apiextensions.CustomResourceDefinition), b.(*CustomResourceDefinition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionCondition)(nil), (*apiextensions.CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(a.(*CustomResourceDefinitionCondition), b.(*apiextensions.CustomResourceDefinitionCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionCondition)(nil), (*CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(a.(*apiextensions.CustomResourceDefinitionCondition), b.(*CustomResourceDefinitionCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionList)(nil), 
(*apiextensions.CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(a.(*CustomResourceDefinitionList), b.(*apiextensions.CustomResourceDefinitionList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionList)(nil), (*CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(a.(*apiextensions.CustomResourceDefinitionList), b.(*CustomResourceDefinitionList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionNames)(nil), (*apiextensions.CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(a.(*CustomResourceDefinitionNames), b.(*apiextensions.CustomResourceDefinitionNames), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionNames)(nil), (*CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(a.(*apiextensions.CustomResourceDefinitionNames), b.(*CustomResourceDefinitionNames), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionStatus)(nil), (*apiextensions.CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(a.(*CustomResourceDefinitionStatus), b.(*apiextensions.CustomResourceDefinitionStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionStatus)(nil), (*CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(a.(*apiextensions.CustomResourceDefinitionStatus), b.(*CustomResourceDefinitionStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionVersion)(nil), (*apiextensions.CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(a.(*CustomResourceDefinitionVersion), b.(*apiextensions.CustomResourceDefinitionVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionVersion)(nil), (*CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(a.(*apiextensions.CustomResourceDefinitionVersion), b.(*CustomResourceDefinitionVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceScale)(nil), (*apiextensions.CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(a.(*CustomResourceSubresourceScale), b.(*apiextensions.CustomResourceSubresourceScale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceScale)(nil), (*CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(a.(*apiextensions.CustomResourceSubresourceScale), b.(*CustomResourceSubresourceScale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceStatus)(nil), (*apiextensions.CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(a.(*CustomResourceSubresourceStatus), b.(*apiextensions.CustomResourceSubresourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceStatus)(nil), (*CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(a.(*apiextensions.CustomResourceSubresourceStatus), b.(*CustomResourceSubresourceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceSubresources)(nil), (*apiextensions.CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(a.(*CustomResourceSubresources), b.(*apiextensions.CustomResourceSubresources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresources)(nil), (*CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(a.(*apiextensions.CustomResourceSubresources), b.(*CustomResourceSubresources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CustomResourceValidation)(nil), (*apiextensions.CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(a.(*CustomResourceValidation), b.(*apiextensions.CustomResourceValidation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceValidation)(nil), (*CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(a.(*apiextensions.CustomResourceValidation), b.(*CustomResourceValidation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExternalDocumentation)(nil), (*apiextensions.ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(a.(*ExternalDocumentation), b.(*apiextensions.ExternalDocumentation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ExternalDocumentation)(nil), (*ExternalDocumentation)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(a.(*apiextensions.ExternalDocumentation), b.(*ExternalDocumentation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaProps)(nil), (*apiextensions.JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(a.(*JSONSchemaProps), b.(*apiextensions.JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrArray)(nil), (*apiextensions.JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(a.(*JSONSchemaPropsOrArray), b.(*apiextensions.JSONSchemaPropsOrArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrArray)(nil), (*JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(a.(*apiextensions.JSONSchemaPropsOrArray), b.(*JSONSchemaPropsOrArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrBool)(nil), (*apiextensions.JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(a.(*JSONSchemaPropsOrBool), b.(*apiextensions.JSONSchemaPropsOrBool), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrBool)(nil), (*JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(a.(*apiextensions.JSONSchemaPropsOrBool), b.(*JSONSchemaPropsOrBool), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrStringArray)(nil), (*apiextensions.JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(a.(*JSONSchemaPropsOrStringArray), b.(*apiextensions.JSONSchemaPropsOrStringArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrStringArray)(nil), (*JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(a.(*apiextensions.JSONSchemaPropsOrStringArray), b.(*JSONSchemaPropsOrStringArray), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ServiceReference_To_v1_ServiceReference(a.(*apiextensions.ServiceReference), b.(*ServiceReference), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*ValidationRule)(nil), (*apiextensions.ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ValidationRule_To_apiextensions_ValidationRule(a.(*ValidationRule), b.(*apiextensions.ValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ValidationRule)(nil), (*ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ValidationRule_To_v1_ValidationRule(a.(*apiextensions.ValidationRule), b.(*ValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookClientConfig)(nil), (*apiextensions.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(a.(*WebhookClientConfig), b.(*apiextensions.WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.WebhookClientConfig)(nil), (*WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(a.(*apiextensions.WebhookClientConfig), b.(*WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_JSON_To_v1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*apiextensions.ValidationRules)(nil), (*ValidationRules)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ValidationRules_To_v1_ValidationRules(a.(*apiextensions.ValidationRules), b.(*ValidationRules), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition is an autogenerated conversion function. +func Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) + // WARNING: in.Webhook requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + out.Strategy = ConversionStrategyType(in.Strategy) + // WARNING: in.WebhookClientConfig requires manual conversion: does not exist in peer-type + // WARNING: in.ConversionReviewVersions requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { + out.Type = apiextensions.CustomResourceDefinitionConditionType(in.Type) + out.Status = apiextensions.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { + out.Type = CustomResourceDefinitionConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apiextensions.CustomResourceDefinition, len(*in)) + for i := range *in { + if err := Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList is an autogenerated conversion function. 
+func Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { + out.Plural = in.Plural + out.Singular = in.Singular + out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) + out.Kind = in.Kind + out.ListKind = in.ListKind + out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) + return nil +} + +// Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { + out.Plural = in.Plural + out.Singular = in.Singular + out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) + out.Kind = in.Kind + out.ListKind = in.ListKind + out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { + out.Group = in.Group + if err := Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { + return err + } + out.Scope = apiextensions.ResourceScope(in.Scope) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]apiextensions.CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(apiextensions.CustomResourceConversion) + if err := Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + if err := metav1.Convert_bool_To_Pointer_bool(&in.PreserveUnknownFields, &out.PreserveUnknownFields, s); err != nil { + return err + } + return nil +} + +func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { + out.Group = in.Group + // WARNING: in.Version requires manual conversion: does not exist in peer-type + if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { + return err + } + out.Scope = ResourceScope(in.Scope) + // WARNING: in.Validation requires manual conversion: does not exist in peer-type + // WARNING: in.Subresources requires manual conversion: does not exist in peer-type + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } + // WARNING: in.AdditionalPrinterColumns requires manual conversion: does not exist in peer-type + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + if err := Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(*in, *out, s); err != nil { + return err + } + } else { + out.Conversion = nil + } + if err := metav1.Convert_Pointer_bool_To_bool(&in.PreserveUnknownFields, &out.PreserveUnknownFields, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { + out.Conditions = *(*[]apiextensions.CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) + if err := 
Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { + return err + } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) + return nil +} + +// Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus is an autogenerated conversion function. +func Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { + out.Conditions = *(*[]CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) + if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { + return err + } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in, out, s) +} + +func autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + out.Deprecated = in.Deprecated + out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning)) + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.CustomResourceValidation) + if err := Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + return nil +} + +// Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion is an autogenerated conversion function. 
+func Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + out.Deprecated = in.Deprecated + out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning)) + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + if err := Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { + out.SpecReplicasPath = in.SpecReplicasPath + out.StatusReplicasPath = in.StatusReplicasPath + out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) + return nil +} + +// Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale is an autogenerated conversion function. +func Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { + out.SpecReplicasPath = in.SpecReplicasPath + out.StatusReplicasPath = in.StatusReplicasPath + out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) + return nil +} + +// Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { + return nil +} + +// Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus is an autogenerated conversion function. +func Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { + return nil +} + +// Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in, out, s) +} + +func autoConvert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { + out.Status = (*apiextensions.CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) + out.Scale = (*apiextensions.CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) + return nil +} + +// Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources is an autogenerated conversion function. +func Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { + return autoConvert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { + out.Status = (*CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) + out.Scale = (*CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) + return nil +} + +// Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in, out, s) +} + +func autoConvert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.OpenAPIV3Schema = nil + } + return nil +} + +// Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation is an autogenerated conversion function. +func Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { + return autoConvert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.OpenAPIV3Schema = nil + } + return nil +} + +// Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in, out, s) +} + +func autoConvert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { + out.Description = in.Description + out.URL = in.URL + return nil +} + +// Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation is an autogenerated conversion function. +func Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { + return autoConvert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in, out, s) +} + +func autoConvert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { + out.Description = in.Description + out.URL = in.URL + return nil +} + +// Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation is an autogenerated conversion function. 
+func Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { + return autoConvert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in, out, s) +} + +func autoConvert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + // WARNING: in.Raw requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + // FIXME: Type apiextensions.JSON is unsupported. + return nil +} + +func autoConvert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { + out.ID = in.ID + out.Schema = apiextensions.JSONSchemaURL(in.Schema) + out.Ref = (*string)(unsafe.Pointer(in.Ref)) + out.Description = in.Description + out.Type = in.Type + out.Format = in.Format + out.Title = in.Title + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(apiextensions.JSON) + if err := Convert_v1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Default = nil + } + out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) + out.ExclusiveMaximum = in.ExclusiveMaximum + out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) + out.ExclusiveMinimum = in.ExclusiveMinimum + out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) + out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) + out.Pattern = in.Pattern + out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) + out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) + out.UniqueItems = in.UniqueItems + out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]apiextensions.JSON, len(*in)) + for i := range *in { + if err := Convert_v1_JSON_To_apiextensions_JSON(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enum = nil + } + out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) + out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) + out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(apiextensions.JSONSchemaPropsOrArray) + if err := Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(*in, *out, s); err != nil { + return err + } + } else { + out.Items = nil + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AllOf = nil + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.OneOf = nil + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AnyOf = nil + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(apiextensions.JSONSchemaProps) + 
if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Not = nil + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Properties = nil + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(apiextensions.JSONSchemaPropsOrBool) + if err := Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalProperties = nil + } + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.PatternProperties = nil + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(apiextensions.JSONSchemaDependencies, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaPropsOrStringArray) + if err := Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Dependencies = nil + } + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + *out = new(apiextensions.JSONSchemaPropsOrBool) + if err := Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalItems = nil + } + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(apiextensions.JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + newVal := new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Definitions = nil + } + out.ExternalDocs = (*apiextensions.ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = new(apiextensions.JSON) + if err := Convert_v1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Example = nil + } + out.Nullable = in.Nullable + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) + out.XMapType = (*string)(unsafe.Pointer(in.XMapType)) + out.XValidations = *(*apiextensions.ValidationRules)(unsafe.Pointer(&in.XValidations)) + return nil +} + +// Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps is an autogenerated conversion function. 
+func Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + out.ID = in.ID + out.Schema = JSONSchemaURL(in.Schema) + out.Ref = (*string)(unsafe.Pointer(in.Ref)) + out.Description = in.Description + out.Type = in.Type + out.Nullable = in.Nullable + out.Format = in.Format + out.Title = in.Title + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(JSON) + if err := Convert_apiextensions_JSON_To_v1_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Default = nil + } + out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) + out.ExclusiveMaximum = in.ExclusiveMaximum + out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) + out.ExclusiveMinimum = in.ExclusiveMinimum + out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) + out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) + out.Pattern = in.Pattern + out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) + out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) + out.UniqueItems = in.UniqueItems + out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) + if in.Enum != nil { + in, out := &in.Enum, &out.Enum + *out = make([]JSON, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSON_To_v1_JSON(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Enum = nil + } + out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) + out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) + out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = new(JSONSchemaPropsOrArray) + if err := Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(*in, *out, s); err != nil { + return err + } + } else { + out.Items = nil + } + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AllOf = nil + } + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.OneOf = nil + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.AnyOf = nil + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Not = nil + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + 
out.Properties = nil + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(JSONSchemaPropsOrBool) + if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalProperties = nil + } + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.PatternProperties = nil + } + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaPropsOrStringArray) + if err := Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Dependencies = nil + } + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + *out = new(JSONSchemaPropsOrBool) + if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { + return err + } + } else { + out.AdditionalItems = nil + } + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + newVal := new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Definitions = nil + } + out.ExternalDocs = (*ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = new(JSON) + if err := Convert_apiextensions_JSON_To_v1_JSON(*in, *out, s); err != nil { + return err + } + } else { + out.Example = nil + } + out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) + out.XEmbeddedResource = in.XEmbeddedResource + out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) + out.XMapType = (*string)(unsafe.Pointer(in.XMapType)) + out.XValidations = *(*ValidationRules)(unsafe.Pointer(&in.XValidations)) + return nil +} + +func autoConvert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]apiextensions.JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONSchemas = nil + } + return nil +} + +// Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray is an autogenerated conversion function. 
+func Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONSchemas = nil + } + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray is an autogenerated conversion function. +func Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in, out, s) +} + +func autoConvert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { + out.Allows = in.Allows + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + return nil +} + +// Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool is an autogenerated conversion function. +func Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { + out.Allows = in.Allows + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool is an autogenerated conversion function. 
+func Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in, out, s) +} + +func autoConvert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.JSONSchemaProps) + if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) + return nil +} + +// Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray is an autogenerated conversion function. +func Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { + return autoConvert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in, out, s) +} + +func autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(JSONSchemaProps) + if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) + return nil +} + +// Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray is an autogenerated conversion function. +func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { + return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in, out, s) +} + +func autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := metav1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceReference_To_apiextensions_ServiceReference is an autogenerated conversion function. +func Convert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + return autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in, out, s) +} + +func autoConvert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := metav1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_apiextensions_ServiceReference_To_v1_ServiceReference is an autogenerated conversion function. 
+func Convert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + return autoConvert_apiextensions_ServiceReference_To_v1_ServiceReference(in, out, s) +} + +func autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { + out.Rule = in.Rule + out.Message = in.Message + out.MessageExpression = in.MessageExpression + out.Reason = (*apiextensions.FieldValueErrorReason)(unsafe.Pointer(in.Reason)) + out.FieldPath = in.FieldPath + out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf)) + return nil +} + +// Convert_v1_ValidationRule_To_apiextensions_ValidationRule is an autogenerated conversion function. +func Convert_v1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { + return autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in, out, s) +} + +func autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { + out.Rule = in.Rule + out.Message = in.Message + out.MessageExpression = in.MessageExpression + out.Reason = (*FieldValueErrorReason)(unsafe.Pointer(in.Reason)) + out.FieldPath = in.FieldPath + out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf)) + return nil +} + +// Convert_apiextensions_ValidationRule_To_v1_ValidationRule is an autogenerated conversion function. +func Convert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { + return autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in, out, s) +} + +func autoConvert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(apiextensions.ServiceReference) + if err := Convert_v1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig is an autogenerated conversion function. +func Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in, out, s) +} + +func autoConvert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_apiextensions_ServiceReference_To_v1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig is an autogenerated conversion function. 
+func Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + return autoConvert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in, out, s) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..bc23fcd86 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -0,0 +1,717 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest. +func (in *ConversionRequest) DeepCopy() *ConversionRequest { + if in == nil { + return nil + } + out := new(ConversionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) { + *out = *in + if in.ConvertedObjects != nil { + in, out := &in.ConvertedObjects, &out.ConvertedObjects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Result.DeepCopyInto(&out.Result) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse. +func (in *ConversionResponse) DeepCopy() *ConversionResponse { + if in == nil { + return nil + } + out := new(ConversionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionReview) DeepCopyInto(out *ConversionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(ConversionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(ConversionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview. 
+func (in *ConversionReview) DeepCopy() *ConversionReview { + if in == nil { + return nil + } + out := new(ConversionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConversionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. +func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { + if in == nil { + return nil + } + out := new(CustomResourceColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConversion) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. +func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { + if in == nil { + return nil + } + out := new(CustomResourceConversion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition. +func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition { + if in == nil { + return nil + } + out := new(CustomResourceDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition. +func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomResourceDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList. +func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) { + *out = *in + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames. +func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionNames) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) { + *out = *in + in.Names.DeepCopyInto(&out.Names) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec. +func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CustomResourceDefinitionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) + if in.StoredVersions != nil { + in, out := &in.StoredVersions, &out.StoredVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus. 
+func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { + *out = *in + if in.DeprecationWarning != nil { + in, out := &in.DeprecationWarning, &out.DeprecationWarning + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + (*in).DeepCopyInto(*out) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = new(CustomResourceSubresources) + (*in).DeepCopyInto(*out) + } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. +func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { + *out = *in + if in.LabelSelectorPath != nil { + in, out := &in.LabelSelectorPath, &out.LabelSelectorPath + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale. +func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale { + if in == nil { + return nil + } + out := new(CustomResourceSubresourceScale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus. +func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus { + if in == nil { + return nil + } + out := new(CustomResourceSubresourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CustomResourceSubresourceStatus) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(CustomResourceSubresourceScale) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources. 
+func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources { + if in == nil { + return nil + } + out := new(CustomResourceSubresources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) { + *out = *in + if in.OpenAPIV3Schema != nil { + in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation. +func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation { + if in == nil { + return nil + } + out := new(CustomResourceValidation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation. +func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation { + if in == nil { + return nil + } + out := new(ExternalDocumentation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSON) DeepCopyInto(out *JSON) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSON. +func (in *JSON) DeepCopy() *JSON { + if in == nil { + return nil + } + out := new(JSON) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) { + { + in := &in + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions. +func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions { + if in == nil { + return nil + } + out := new(JSONSchemaDefinitions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) { + { + in := &in + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies. +func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies { + if in == nil { + return nil + } + out := new(JSONSchemaDependencies) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + if in.JSONSchemas != nil { + in, out := &in.JSONSchemas, &out.JSONSchemas + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray. +func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool. +func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrBool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = (*in).DeepCopy() + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray. +func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray { + if in == nil { + return nil + } + out := new(JSONSchemaPropsOrStringArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { + *out = *in + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(FieldValueErrorReason) + **out = **in + } + if in.OptionalOldSelf != nil { + in, out := &in.OptionalOldSelf, &out.OptionalOldSelf + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRule. +func (in *ValidationRule) DeepCopy() *ValidationRule { + if in == nil { + return nil + } + out := new(ValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in ValidationRules) DeepCopyInto(out *ValidationRules) { + { + in := &in + *out = make(ValidationRules, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRules. +func (in ValidationRules) DeepCopy() ValidationRules { + if in == nil { + return nil + } + out := new(ValidationRules) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConversion) DeepCopyInto(out *WebhookConversion) { + *out = *in + if in.ClientConfig != nil { + in, out := &in.ClientConfig, &out.ClientConfig + *out = new(WebhookClientConfig) + (*in).DeepCopyInto(*out) + } + if in.ConversionReviewVersions != nil { + in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConversion. +func (in *WebhookConversion) DeepCopy() *WebhookConversion { + if in == nil { + return nil + } + out := new(WebhookConversion) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go new file mode 100644 index 000000000..2bc705780 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetObjectDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) + scheme.AddTypeDefaultingFunc(&CustomResourceDefinitionList{}, func(obj interface{}) { + SetObjectDefaults_CustomResourceDefinitionList(obj.(*CustomResourceDefinitionList)) + }) + return nil +} + +func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinition(in) + SetDefaults_CustomResourceDefinitionSpec(&in.Spec) + if in.Spec.Conversion != nil { + if in.Spec.Conversion.Webhook != nil { + if in.Spec.Conversion.Webhook.ClientConfig != nil { + if in.Spec.Conversion.Webhook.ClientConfig.Service != nil { + SetDefaults_ServiceReference(in.Spec.Conversion.Webhook.ClientConfig.Service) + } + } + } + } +} + +func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CustomResourceDefinition(a) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions new file mode 100644 index 000000000..7408dd121 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions @@ -0,0 +1,5 @@ +inverseRules: + # Allow use of this package in all k8s.io packages. + - selectorRegexp: k8s[.]io + allowedPrefixes: + - '' diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go new file mode 100644 index 000000000..eed3fde63 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "bytes" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/util/json" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" +) + +func Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { + if err := autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in, out, s); err != nil { + return err + } + if in.Default != nil && *(in.Default) == nil { + out.Default = nil + } + if in.Example != nil && *(in.Example) == nil { + out.Example = nil + } + return nil +} + +var nullLiteral = []byte(`null`) + +func Convert_apiextensions_JSON_To_v1beta1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { + raw, err := json.Marshal(*in) + if err != nil { + return err + } + if len(raw) == 0 || bytes.Equal(raw, nullLiteral) { + // match JSON#UnmarshalJSON treatment of literal nulls + out.Raw = nil + } else { + out.Raw = raw + } + return nil +} + +func Convert_v1beta1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { + if in != nil { + var i interface{} + if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) { + if err := json.Unmarshal(in.Raw, &i); err != nil { + return err + } + } + *out = i + } else { + out = nil + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go new file mode 100644 index 000000000..3a2ee6807 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go @@ -0,0 +1,278 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +// TODO: Update this after a tag is created for interface fields in DeepCopy +func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { + if in == nil { + return nil + } + out := new(JSONSchemaProps) + *out = *in + + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.MaxItems != nil { + in, out := &in.MaxItems, &out.MaxItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinItems != nil { + in, out := &in.MinItems, &out.MinItems + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MultipleOf != nil { + in, out := &in.MultipleOf, &out.MultipleOf + if *in == nil { + *out = nil + } else { + *out = new(float64) + **out = **in + } + } + + if in.MaxProperties != nil { + in, out := &in.MaxProperties, &out.MaxProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.MinProperties != nil { + in, out := &in.MinProperties, &out.MinProperties + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.Items != nil { + in, out := &in.Items, &out.Items + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrArray) + (*in).DeepCopyInto(*out) + } + } + + if in.AllOf != nil { + in, out := &in.AllOf, &out.AllOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.OneOf != nil { + in, out := &in.OneOf, &out.OneOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AnyOf != nil { + in, out := &in.AnyOf, &out.AnyOf + *out = make([]JSONSchemaProps, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + + if in.Not != nil { + in, out := &in.Not, &out.Not + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaProps) + (*in).DeepCopyInto(*out) + } + } + + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.PatternProperties != nil { + in, out := &in.PatternProperties, &out.PatternProperties + *out = make(map[string]JSONSchemaProps, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make(JSONSchemaDependencies, len(*in)) + for key, val := 
range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.AdditionalItems != nil { + in, out := &in.AdditionalItems, &out.AdditionalItems + if *in == nil { + *out = nil + } else { + *out = new(JSONSchemaPropsOrBool) + (*in).DeepCopyInto(*out) + } + } + + if in.Definitions != nil { + in, out := &in.Definitions, &out.Definitions + *out = make(JSONSchemaDefinitions, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + + if in.ExternalDocs != nil { + in, out := &in.ExternalDocs, &out.ExternalDocs + if *in == nil { + *out = nil + } else { + *out = new(ExternalDocumentation) + (*in).DeepCopyInto(*out) + } + } + + if in.XPreserveUnknownFields != nil { + in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + + if in.XMapType != nil { + in, out := &in.XMapType, &out.XMapType + *out = new(string) + **out = **in + } + + if in.XValidations != nil { + inValidations, outValidations := &in.XValidations, &out.XValidations + *outValidations = make([]ValidationRule, len(*inValidations)) + for i := range *inValidations { + in.XValidations[i].DeepCopyInto(&out.XValidations[i]) + } + } + + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go new file mode 100644 index 000000000..1a9c2238e --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime" + utilpointer "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { + SetDefaults_CustomResourceDefinitionSpec(&obj.Spec) + if len(obj.Status.StoredVersions) == 0 { + for _, v := range obj.Spec.Versions { + if v.Storage { + obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) + break + } + } + } +} + +func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) { + if len(obj.Scope) == 0 { + obj.Scope = NamespaceScoped + } + if len(obj.Names.Singular) == 0 { + obj.Names.Singular = strings.ToLower(obj.Names.Kind) + } + if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { + obj.Names.ListKind = obj.Names.Kind + "List" + } + // If there is no list of versions, create on using deprecated Version field. 
+ if len(obj.Versions) == 0 && len(obj.Version) != 0 { + obj.Versions = []CustomResourceDefinitionVersion{{ + Name: obj.Version, + Storage: true, + Served: true, + }} + } + // For backward compatibility set the version field to the first item in versions list. + if len(obj.Version) == 0 && len(obj.Versions) != 0 { + obj.Version = obj.Versions[0].Name + } + if obj.Conversion == nil { + obj.Conversion = &CustomResourceConversion{ + Strategy: NoneConverter, + } + } + if obj.Conversion.Strategy == WebhookConverter && len(obj.Conversion.ConversionReviewVersions) == 0 { + obj.Conversion.ConversionReviewVersions = []string{SchemeGroupVersion.Version} + } + if obj.PreserveUnknownFields == nil { + obj.PreserveUnknownFields = utilpointer.BoolPtr(true) + } +} + +// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference +func SetDefaults_ServiceReference(obj *ServiceReference) { + if obj.Port == nil { + obj.Port = utilpointer.Int32Ptr(443) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go new file mode 100644 index 000000000..7a92cb8b0 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=apiextensions.k8s.io + +// Package v1beta1 is the v1beta1 version of the API. +package v1beta1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go new file mode 100644 index 000000000..c81fa6bc3 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -0,0 +1,9463 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto + +package v1beta1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{0} +} +func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionRequest.Merge(m, src) +} +func (m *ConversionRequest) XXX_Size() int { + return m.Size() +} +func (m *ConversionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{1} +} +func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionResponse.Merge(m, src) +} +func (m *ConversionResponse) XXX_Size() int { + return m.Size() +} +func (m *ConversionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo + +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{2} +} +func (m *ConversionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionReview.Merge(m, src) +} +func (m *ConversionReview) XXX_Size() int { + return m.Size() +} +func (m *ConversionReview) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ConversionReview proto.InternalMessageInfo + +func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } +func (*CustomResourceColumnDefinition) ProtoMessage() {} +func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{3} +} +func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) +} +func (m *CustomResourceColumnDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo + +func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } +func (*CustomResourceConversion) ProtoMessage() {} +func (*CustomResourceConversion) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{4} +} +func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceConversion.Merge(m, src) +} +func (m *CustomResourceConversion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceConversion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo + +func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } +func (*CustomResourceDefinition) ProtoMessage() {} +func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{5} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } +func (*CustomResourceDefinitionCondition) ProtoMessage() {} +func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{6} +} +func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) +} +func (m *CustomResourceDefinitionCondition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo + +func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } +func (*CustomResourceDefinitionList) ProtoMessage() {} +func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{7} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo + +func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } +func (*CustomResourceDefinitionNames) ProtoMessage() {} +func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{8} +} +func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) +} +func (m *CustomResourceDefinitionNames) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo + +func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } +func (*CustomResourceDefinitionSpec) ProtoMessage() {} +func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{9} +} +func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) +} +func (m *CustomResourceDefinitionSpec) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionSpec 
proto.InternalMessageInfo + +func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } +func (*CustomResourceDefinitionStatus) ProtoMessage() {} +func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{10} +} +func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) +} +func (m *CustomResourceDefinitionStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo + +func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } +func (*CustomResourceDefinitionVersion) ProtoMessage() {} +func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{11} +} +func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) +} +func (m *CustomResourceDefinitionVersion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo + +func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } +func (*CustomResourceSubresourceScale) ProtoMessage() {} +func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{12} +} +func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) +} +func (m *CustomResourceSubresourceScale) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo + +func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } +func (*CustomResourceSubresourceStatus) ProtoMessage() {} +func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{13} +} +func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) +} +func (m *CustomResourceSubresourceStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo + +func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } +func (*CustomResourceSubresources) ProtoMessage() {} +func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{14} +} +func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresources.Merge(m, src) +} +func (m *CustomResourceSubresources) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresources) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo + +func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } +func (*CustomResourceValidation) ProtoMessage() {} +func (*CustomResourceValidation) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{15} +} +func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceValidation.Merge(m, src) +} +func (m *CustomResourceValidation) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceValidation) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{16} +} +func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocumentation.Merge(m, src) +} +func (m *ExternalDocumentation) XXX_Size() int { + return m.Size() +} +func (m *ExternalDocumentation) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo + +func (m *JSON) Reset() { *m 
= JSON{} } +func (*JSON) ProtoMessage() {} +func (*JSON) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{17} +} +func (m *JSON) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSON) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSON.Merge(m, src) +} +func (m *JSON) XXX_Size() int { + return m.Size() +} +func (m *JSON) XXX_DiscardUnknown() { + xxx_messageInfo_JSON.DiscardUnknown(m) +} + +var xxx_messageInfo_JSON proto.InternalMessageInfo + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{18} +} +func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaProps.Merge(m, src) +} +func (m *JSONSchemaProps) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaProps) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{19} +} +func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{20} +} +func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) +} +func (m *JSONSchemaPropsOrBool) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo + +func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } +func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} 
+func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{21} +} +func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{22} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidationRule) Reset() { *m = ValidationRule{} } +func (*ValidationRule) ProtoMessage() {} +func (*ValidationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{23} +} +func (m *ValidationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidationRule.Merge(m, src) +} +func (m *ValidationRule) XXX_Size() int { + return m.Size() +} +func (m *ValidationRule) XXX_DiscardUnknown() { + xxx_messageInfo_ValidationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidationRule proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{24} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ConversionRequest)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionRequest") + proto.RegisterType((*ConversionResponse)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionResponse") + proto.RegisterType((*ConversionReview)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionReview") + proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceColumnDefinition") + proto.RegisterType((*CustomResourceConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceConversion") + proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition") + proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition") + proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionList") + proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionNames") + proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionSpec") + proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionStatus") + proto.RegisterType((*CustomResourceDefinitionVersion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionVersion") + proto.RegisterType((*CustomResourceSubresourceScale)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceScale") + proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceStatus") + proto.RegisterType((*CustomResourceSubresources)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresources") + proto.RegisterType((*CustomResourceValidation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceValidation") + proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation") + proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSON") + proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps") + proto.RegisterMapType((JSONSchemaDefinitions)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DefinitionsEntry") + proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DependenciesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PatternPropertiesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PropertiesEntry") + proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") + proto.RegisterType((*JSONSchemaPropsOrBool)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") + proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ServiceReference") + proto.RegisterType((*ValidationRule)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ValidationRule") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptor_98a4cc6918394e53) +} + +var fileDescriptor_98a4cc6918394e53 = []byte{ + // 3170 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x1c, 0x47, + 0xd9, 0xf7, 0xec, 0x6a, 0xa5, 0x55, 0x4b, 0xb6, 0xa4, 0xb6, 0xa5, 0x8c, 0x15, 0x47, 0x2b, 0xaf, + 0xdf, 0xf8, 0x55, 0x12, 0x67, 0x95, 0xf8, 0x4d, 0xde, 0xe4, 0xcd, 0x4b, 0x8a, 0xd2, 0x4a, 0x72, + 0x50, 0x62, 0x59, 0xa2, 0xd7, 0x76, 0x04, 0xf9, 0x1c, 0xed, 0xf4, 0xae, 0x27, 0x9a, 0x9d, 0x19, + 0x77, 0xcf, 0xac, 0xa4, 0x0a, 0x50, 0x7c, 0x54, 0x0a, 0x8a, 0x02, 0x42, 0x91, 0x5c, 0x28, 0xe0, + 0x10, 0x28, 0x2e, 0x1c, 0xe0, 0x00, 0x37, 0xf8, 0x03, 0x72, 0x4c, 0x01, 0x87, 0x1c, 0xa8, 0x85, + 0x2c, 0x57, 0x8e, 0x54, 0x51, 0xa5, 0x13, 0xd5, 0x1f, 0xd3, 0xd3, 0x3b, 0xbb, 0x6b, 0xbb, 0xa2, + 0xdd, 0x98, 0xdb, 0xee, 0xf3, 0xf5, 0x7b, 0xe6, 0xe9, 0xa7, 0x9f, 0x7e, 0xfa, 0x99, 0x01, 0xb5, + 0xbd, 0x67, 0x69, 0xc9, 0xf1, 0x97, 0xf7, 0xa2, 0x5d, 0x4c, 0x3c, 0x1c, 0x62, 0xba, 0xdc, 0xc4, + 0x9e, 0xed, 0x93, 0x65, 0xc9, 0xb0, 0x02, 0x07, 0x1f, 0x84, 0xd8, 0xa3, 0x8e, 0xef, 0xd1, 0xc7, + 0xad, 0xc0, 0xa1, 0x98, 0x34, 0x31, 0x59, 0x0e, 0xf6, 0xea, 0x8c, 0x47, 0x3b, 0x05, 0x96, 0x9b, + 0x4f, 0xee, 0xe2, 0xd0, 0x7a, 0x72, 0xb9, 0x8e, 0x3d, 0x4c, 0xac, 0x10, 0xdb, 0xa5, 0x80, 0xf8, + 0xa1, 0x0f, 0x9f, 0x17, 0xe6, 0x4a, 0x1d, 0xd2, 0x6f, 0x28, 0x73, 0xa5, 0x60, 0xaf, 0xce, 0x78, + 0xb4, 0x53, 0xa0, 0x24, 0xcd, 0xcd, 0x3f, 0x5e, 0x77, 0xc2, 0x5b, 0xd1, 0x6e, 0xa9, 0xea, 0x37, + 0x96, 0xeb, 0x7e, 0xdd, 0x5f, 0xe6, 0x56, 0x77, 0xa3, 0x1a, 0xff, 0xc7, 0xff, 0xf0, 0x5f, 0x02, + 0x6d, 0xfe, 0xa9, 0xc4, 0xf9, 0x86, 0x55, 0xbd, 0xe5, 0x78, 0x98, 0x1c, 0x26, 0x1e, 0x37, 0x70, + 0x68, 0x2d, 0x37, 0xbb, 0x7c, 0x9c, 0x5f, 0xee, 0xa7, 0x45, 0x22, 0x2f, 0x74, 0x1a, 0xb8, 0x4b, + 0xe1, 0x7f, 0xef, 0xa6, 0x40, 0xab, 0xb7, 0x70, 0xc3, 0x4a, 0xeb, 0x15, 0x8f, 0x0c, 0x30, 0xb3, + 0xea, 0x7b, 0x4d, 0x4c, 0xd8, 0x53, 0x22, 0x7c, 0x3b, 0xc2, 0x34, 0x84, 0x65, 0x90, 0x8d, 0x1c, + 0xdb, 0x34, 0x16, 0x8d, 0xa5, 0xf1, 0xf2, 0x13, 0x1f, 0xb6, 0x0a, 0x27, 0xda, 0xad, 0x42, 0xf6, + 0xc6, 0xc6, 0xda, 0x51, 0xab, 0x70, 0xbe, 0x1f, 0x52, 0x78, 0x18, 0x60, 0x5a, 0xba, 0xb1, 0xb1, + 0x86, 0x98, 0x32, 0x7c, 0x01, 0xcc, 0xd8, 0x98, 0x3a, 0x04, 0xdb, 0x2b, 0xdb, 0x1b, 0x37, 0x85, + 0x7d, 0x33, 0xc3, 0x2d, 0x9e, 0x95, 0x16, 0x67, 0xd6, 0xd2, 0x02, 0xa8, 0x5b, 0x07, 0xee, 0x80, + 0x31, 0x7f, 0xf7, 0x2d, 0x5c, 0x0d, 0xa9, 0x99, 0x5d, 0xcc, 0x2e, 0x4d, 0x5c, 0x7e, 0xbc, 0x94, + 0xac, 0xa0, 0x72, 0x81, 0x2f, 0x9b, 0x7c, 0xd8, 0x12, 0xb2, 0xf6, 0xd7, 0xe3, 0x95, 0x2b, 0x4f, + 0x49, 0xb4, 0xb1, 0x2d, 0x61, 0x05, 0xc5, 0xe6, 0x8a, 0xbf, 0xc8, 0x00, 0xa8, 0x3f, 0x3c, 0x0d, + 0x7c, 0x8f, 0xe2, 0x81, 0x3c, 0x3d, 0x05, 0xd3, 0x55, 0x6e, 0x39, 0xc4, 0xb6, 0xc4, 0x35, 
0x33, + 0x9f, 0xc6, 0x7b, 0x53, 0xe2, 0x4f, 0xaf, 0xa6, 0xcc, 0xa1, 0x2e, 0x00, 0x78, 0x1d, 0x8c, 0x12, + 0x4c, 0x23, 0x37, 0x34, 0xb3, 0x8b, 0xc6, 0xd2, 0xc4, 0xe5, 0x4b, 0x7d, 0xa1, 0x78, 0x7e, 0xb3, + 0xe4, 0x2b, 0x35, 0x9f, 0x2c, 0x55, 0x42, 0x2b, 0x8c, 0x68, 0xf9, 0x94, 0x44, 0x1a, 0x45, 0xdc, + 0x06, 0x92, 0xb6, 0x8a, 0xdf, 0xc9, 0x80, 0x69, 0x3d, 0x4a, 0x4d, 0x07, 0xef, 0xc3, 0x7d, 0x30, + 0x46, 0x44, 0xb2, 0xf0, 0x38, 0x4d, 0x5c, 0xde, 0x2e, 0x1d, 0x6b, 0x5b, 0x95, 0xba, 0x92, 0xb0, + 0x3c, 0xc1, 0xd6, 0x4c, 0xfe, 0x41, 0x31, 0x1a, 0x7c, 0x1b, 0xe4, 0x89, 0x5c, 0x28, 0x9e, 0x4d, + 0x13, 0x97, 0xbf, 0x38, 0x40, 0x64, 0x61, 0xb8, 0x3c, 0xd9, 0x6e, 0x15, 0xf2, 0xf1, 0x3f, 0xa4, + 0x00, 0x8b, 0xef, 0x65, 0xc0, 0xc2, 0x6a, 0x44, 0x43, 0xbf, 0x81, 0x30, 0xf5, 0x23, 0x52, 0xc5, + 0xab, 0xbe, 0x1b, 0x35, 0xbc, 0x35, 0x5c, 0x73, 0x3c, 0x27, 0x64, 0xd9, 0xba, 0x08, 0x46, 0x3c, + 0xab, 0x81, 0x65, 0xf6, 0x4c, 0xca, 0x98, 0x8e, 0x5c, 0xb3, 0x1a, 0x18, 0x71, 0x0e, 0x93, 0x60, + 0xc9, 0x22, 0xf7, 0x82, 0x92, 0xb8, 0x7e, 0x18, 0x60, 0xc4, 0x39, 0xf0, 0x22, 0x18, 0xad, 0xf9, + 0xa4, 0x61, 0x89, 0x75, 0x1c, 0x4f, 0x56, 0xe6, 0x0a, 0xa7, 0x22, 0xc9, 0x85, 0x4f, 0x83, 0x09, + 0x1b, 0xd3, 0x2a, 0x71, 0x02, 0x06, 0x6d, 0x8e, 0x70, 0xe1, 0xd3, 0x52, 0x78, 0x62, 0x2d, 0x61, + 0x21, 0x5d, 0x0e, 0x5e, 0x02, 0xf9, 0x80, 0x38, 0x3e, 0x71, 0xc2, 0x43, 0x33, 0xb7, 0x68, 0x2c, + 0xe5, 0xca, 0xd3, 0x52, 0x27, 0xbf, 0x2d, 0xe9, 0x48, 0x49, 0xc0, 0x45, 0x90, 0x7f, 0xb1, 0xb2, + 0x75, 0x6d, 0xdb, 0x0a, 0x6f, 0x99, 0xa3, 0x1c, 0x61, 0x84, 0x49, 0x23, 0x45, 0x2d, 0xfe, 0x25, + 0x03, 0xcc, 0x74, 0x54, 0xe2, 0x90, 0xc2, 0x2b, 0x20, 0x4f, 0x43, 0x56, 0x71, 0xea, 0x87, 0x32, + 0x26, 0x8f, 0xc6, 0x60, 0x15, 0x49, 0x3f, 0x6a, 0x15, 0xe6, 0x12, 0x8d, 0x98, 0xca, 0xe3, 0xa1, + 0x74, 0xe1, 0xcf, 0x0c, 0x70, 0x7a, 0x1f, 0xef, 0xde, 0xf2, 0xfd, 0xbd, 0x55, 0xd7, 0xc1, 0x5e, + 0xb8, 0xea, 0x7b, 0x35, 0xa7, 0x2e, 0x73, 0x00, 0x1d, 0x33, 0x07, 0x5e, 0xee, 0xb6, 0x5c, 0x7e, + 0xa0, 0xdd, 0x2a, 0x9c, 0xee, 0xc1, 0x40, 0xbd, 0xfc, 0x80, 0x3b, 0xc0, 0xac, 0xa6, 0x36, 0x89, + 0x2c, 0x60, 0xa2, 0x6c, 0x8d, 0x97, 0xcf, 0xb5, 0x5b, 0x05, 0x73, 0xb5, 0x8f, 0x0c, 0xea, 0xab, + 0x5d, 0xfc, 0x56, 0x36, 0x1d, 0x5e, 0x2d, 0xdd, 0xde, 0x04, 0x79, 0xb6, 0x8d, 0x6d, 0x2b, 0xb4, + 0xe4, 0x46, 0x7c, 0xe2, 0xde, 0x36, 0xbd, 0xa8, 0x19, 0x9b, 0x38, 0xb4, 0xca, 0x50, 0x2e, 0x08, + 0x48, 0x68, 0x48, 0x59, 0x85, 0x5f, 0x05, 0x23, 0x34, 0xc0, 0x55, 0x19, 0xe8, 0x57, 0x8e, 0xbb, + 0xd9, 0xfa, 0x3c, 0x48, 0x25, 0xc0, 0xd5, 0x64, 0x2f, 0xb0, 0x7f, 0x88, 0xc3, 0xc2, 0x77, 0x0c, + 0x30, 0x4a, 0x79, 0x81, 0x92, 0x45, 0xed, 0xb5, 0x61, 0x79, 0x90, 0xaa, 0x82, 0xe2, 0x3f, 0x92, + 0xe0, 0xc5, 0x7f, 0x66, 0xc0, 0xf9, 0x7e, 0xaa, 0xab, 0xbe, 0x67, 0x8b, 0xe5, 0xd8, 0x90, 0x7b, + 0x5b, 0x64, 0xfa, 0xd3, 0xfa, 0xde, 0x3e, 0x6a, 0x15, 0x1e, 0xbe, 0xab, 0x01, 0xad, 0x08, 0xfc, + 0x9f, 0x7a, 0x6e, 0x51, 0x28, 0xce, 0x77, 0x3a, 0x76, 0xd4, 0x2a, 0x4c, 0x29, 0xb5, 0x4e, 0x5f, + 0x61, 0x13, 0x40, 0xd7, 0xa2, 0xe1, 0x75, 0x62, 0x79, 0x54, 0x98, 0x75, 0x1a, 0x58, 0x86, 0xef, + 0xd1, 0x7b, 0x4b, 0x0f, 0xa6, 0x51, 0x9e, 0x97, 0x90, 0xf0, 0x6a, 0x97, 0x35, 0xd4, 0x03, 0x81, + 0xd5, 0x2d, 0x82, 0x2d, 0xaa, 0x4a, 0x91, 0x76, 0xa2, 0x30, 0x2a, 0x92, 0x5c, 0xf8, 0x08, 0x18, + 0x6b, 0x60, 0x4a, 0xad, 0x3a, 0xe6, 0xf5, 0x67, 0x3c, 0x39, 0xa2, 0x37, 0x05, 0x19, 0xc5, 0x7c, + 0xd6, 0x9f, 0x9c, 0xeb, 0x17, 0xb5, 0xab, 0x0e, 0x0d, 0xe1, 0xab, 0x5d, 0x1b, 0xa0, 0x74, 0x6f, + 0x4f, 0xc8, 0xb4, 0x79, 0xfa, 0xab, 0xe2, 0x17, 0x53, 0xb4, 0xe4, 0xff, 0x0a, 0xc8, 0x39, 0x21, + 0x6e, 0xc4, 0x67, 
0xf7, 0xcb, 0x43, 0xca, 0xbd, 0xf2, 0x49, 0xe9, 0x43, 0x6e, 0x83, 0xa1, 0x21, + 0x01, 0x5a, 0xfc, 0x65, 0x06, 0x3c, 0xd4, 0x4f, 0x85, 0x1d, 0x28, 0x94, 0x45, 0x3c, 0x70, 0x23, + 0x62, 0xb9, 0x32, 0xe3, 0x54, 0xc4, 0xb7, 0x39, 0x15, 0x49, 0x2e, 0x2b, 0xf9, 0xd4, 0xf1, 0xea, + 0x91, 0x6b, 0x11, 0x99, 0x4e, 0xea, 0xa9, 0x2b, 0x92, 0x8e, 0x94, 0x04, 0x2c, 0x01, 0x40, 0x6f, + 0xf9, 0x24, 0xe4, 0x18, 0xb2, 0x7a, 0x9d, 0x62, 0x05, 0xa2, 0xa2, 0xa8, 0x48, 0x93, 0x60, 0x27, + 0xda, 0x9e, 0xe3, 0xd9, 0x72, 0xd5, 0xd5, 0x2e, 0x7e, 0xc9, 0xf1, 0x6c, 0xc4, 0x39, 0x0c, 0xdf, + 0x75, 0x68, 0xc8, 0x28, 0x72, 0xc9, 0x3b, 0xa2, 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf, 0xb2, 0xaa, + 0xef, 0x13, 0x07, 0x53, 0x73, 0x34, 0xc1, 0x5f, 0x55, 0x54, 0xa4, 0x49, 0x14, 0xff, 0x91, 0xef, + 0x9f, 0x24, 0xac, 0x94, 0xc0, 0x0b, 0x20, 0x57, 0x27, 0x7e, 0x14, 0xc8, 0x28, 0xa9, 0x68, 0xbf, + 0xc0, 0x88, 0x48, 0xf0, 0x58, 0x56, 0x36, 0x3b, 0xda, 0x54, 0x95, 0x95, 0x71, 0x73, 0x1a, 0xf3, + 0xe1, 0x37, 0x0c, 0x90, 0xf3, 0x64, 0x70, 0x58, 0xca, 0xbd, 0x3a, 0xa4, 0xbc, 0xe0, 0xe1, 0x4d, + 0xdc, 0x15, 0x91, 0x17, 0xc8, 0xf0, 0x29, 0x90, 0xa3, 0x55, 0x3f, 0xc0, 0x32, 0xea, 0x0b, 0xb1, + 0x50, 0x85, 0x11, 0x8f, 0x5a, 0x85, 0x93, 0xb1, 0x39, 0x4e, 0x40, 0x42, 0x18, 0x7e, 0xdb, 0x00, + 0xa0, 0x69, 0xb9, 0x8e, 0x6d, 0xf1, 0x96, 0x21, 0xc7, 0xdd, 0x1f, 0x6c, 0x5a, 0xdf, 0x54, 0xe6, + 0xc5, 0xa2, 0x25, 0xff, 0x91, 0x06, 0x0d, 0xdf, 0x35, 0xc0, 0x24, 0x8d, 0x76, 0x89, 0xd4, 0xa2, + 0xbc, 0xb9, 0x98, 0xb8, 0xfc, 0xa5, 0x81, 0xfa, 0x52, 0xd1, 0x00, 0xca, 0xd3, 0xed, 0x56, 0x61, + 0x52, 0xa7, 0xa0, 0x0e, 0x07, 0xe0, 0xf7, 0x0c, 0x90, 0x6f, 0xc6, 0x67, 0xf6, 0x18, 0xdf, 0xf0, + 0xaf, 0x0f, 0x69, 0x61, 0x65, 0x46, 0x25, 0xbb, 0x40, 0xf5, 0x01, 0xca, 0x03, 0xf8, 0x7b, 0x03, + 0x98, 0x96, 0x2d, 0x0a, 0xbc, 0xe5, 0x6e, 0x13, 0xc7, 0x0b, 0x31, 0x11, 0xfd, 0x26, 0x35, 0xf3, + 0xdc, 0xbd, 0xc1, 0x9e, 0x85, 0xe9, 0x5e, 0xb6, 0xbc, 0x28, 0xbd, 0x33, 0x57, 0xfa, 0xb8, 0x81, + 0xfa, 0x3a, 0xc8, 0x13, 0x2d, 0x69, 0x69, 0xcc, 0xf1, 0x21, 0x24, 0x5a, 0xd2, 0x4b, 0xc9, 0xea, + 0x90, 0x74, 0x50, 0x1a, 0x34, 0xdc, 0x02, 0xb3, 0x01, 0xc1, 0x1c, 0xe0, 0x86, 0xb7, 0xe7, 0xf9, + 0xfb, 0xde, 0x15, 0x07, 0xbb, 0x36, 0x35, 0xc1, 0xa2, 0xb1, 0x94, 0x2f, 0x9f, 0x6d, 0xb7, 0x0a, + 0xb3, 0xdb, 0xbd, 0x04, 0x50, 0x6f, 0xbd, 0xe2, 0xbb, 0xd9, 0xf4, 0x2d, 0x20, 0xdd, 0x45, 0xc0, + 0xf7, 0xc5, 0xd3, 0x8b, 0xd8, 0x50, 0xd3, 0xe0, 0xab, 0xf5, 0xe6, 0x90, 0x92, 0x49, 0xb5, 0x01, + 0x49, 0x27, 0xa7, 0x48, 0x14, 0x69, 0x7e, 0xc0, 0x1f, 0x1b, 0xe0, 0xa4, 0x55, 0xad, 0xe2, 0x20, + 0xc4, 0xb6, 0x28, 0xee, 0x99, 0xcf, 0xa0, 0x7e, 0xcd, 0x4a, 0xaf, 0x4e, 0xae, 0xe8, 0xd0, 0xa8, + 0xd3, 0x13, 0xf8, 0x1c, 0x38, 0x45, 0x43, 0x9f, 0x60, 0x3b, 0xd5, 0x36, 0xc3, 0x76, 0xab, 0x70, + 0xaa, 0xd2, 0xc1, 0x41, 0x29, 0xc9, 0xe2, 0x5f, 0x73, 0xa0, 0x70, 0x97, 0xad, 0x76, 0x0f, 0x17, + 0xb3, 0x8b, 0x60, 0x94, 0x3f, 0xae, 0xcd, 0xa3, 0x92, 0xd7, 0x5a, 0x41, 0x4e, 0x45, 0x92, 0xcb, + 0x0e, 0x0a, 0x86, 0xcf, 0xda, 0x97, 0x2c, 0x17, 0x54, 0x07, 0x45, 0x45, 0x90, 0x51, 0xcc, 0x87, + 0x97, 0x01, 0xb0, 0x71, 0x40, 0x30, 0x3b, 0xac, 0x6c, 0x73, 0x8c, 0x4b, 0xab, 0x45, 0x5a, 0x53, + 0x1c, 0xa4, 0x49, 0xc1, 0x2b, 0x00, 0xc6, 0xff, 0x1c, 0xdf, 0x7b, 0xd9, 0x22, 0x9e, 0xe3, 0xd5, + 0xcd, 0x3c, 0x77, 0x7b, 0x8e, 0x75, 0x63, 0x6b, 0x5d, 0x5c, 0xd4, 0x43, 0x03, 0xbe, 0x0d, 0x46, + 0xc5, 0xd0, 0x87, 0x9f, 0x10, 0x43, 0xac, 0xf2, 0x80, 0xc7, 0x88, 0x43, 0x21, 0x09, 0xd9, 0x5d, + 0xdd, 0x73, 0xf7, 0xbb, 0xba, 0xdf, 0xb1, 0x9c, 0x8e, 0xfe, 0x87, 0x97, 0xd3, 0xe2, 0xbf, 0x8c, + 0x74, 0xcd, 0xd1, 0x1e, 0xb5, 0x52, 0xb5, 
0x5c, 0x0c, 0xd7, 0xc0, 0x34, 0xbb, 0x31, 0x21, 0x1c, + 0xb8, 0x4e, 0xd5, 0xa2, 0xfc, 0xc2, 0x2e, 0x92, 0x5d, 0xcd, 0x90, 0x2a, 0x29, 0x3e, 0xea, 0xd2, + 0x80, 0x2f, 0x02, 0x28, 0x6e, 0x11, 0x1d, 0x76, 0x44, 0x43, 0xa4, 0xee, 0x03, 0x95, 0x2e, 0x09, + 0xd4, 0x43, 0x0b, 0xae, 0x82, 0x19, 0xd7, 0xda, 0xc5, 0x6e, 0x05, 0xbb, 0xb8, 0x1a, 0xfa, 0x84, + 0x9b, 0x12, 0x23, 0x8d, 0xd9, 0x76, 0xab, 0x30, 0x73, 0x35, 0xcd, 0x44, 0xdd, 0xf2, 0xc5, 0xf3, + 0xe9, 0xad, 0xad, 0x3f, 0xb8, 0xb8, 0x9b, 0x7d, 0x90, 0x01, 0xf3, 0xfd, 0x33, 0x03, 0x7e, 0x33, + 0xb9, 0x42, 0x8a, 0x1b, 0xc2, 0xeb, 0xc3, 0xca, 0x42, 0x79, 0x87, 0x04, 0xdd, 0xf7, 0x47, 0xf8, + 0x35, 0xd6, 0xae, 0x59, 0x6e, 0x3c, 0xb4, 0x7a, 0x6d, 0x68, 0x2e, 0x30, 0x90, 0xf2, 0xb8, 0xe8, + 0x04, 0x2d, 0x97, 0x37, 0x7e, 0x96, 0x8b, 0x8b, 0xbf, 0x32, 0xd2, 0x53, 0x84, 0x64, 0x07, 0xc3, + 0xef, 0x1b, 0x60, 0xca, 0x0f, 0xb0, 0xb7, 0xb2, 0xbd, 0x71, 0xf3, 0x7f, 0xc4, 0x4e, 0x96, 0xa1, + 0xba, 0x76, 0x4c, 0x3f, 0x5f, 0xac, 0x6c, 0x5d, 0x13, 0x06, 0xb7, 0x89, 0x1f, 0xd0, 0xf2, 0xe9, + 0x76, 0xab, 0x30, 0xb5, 0xd5, 0x09, 0x85, 0xd2, 0xd8, 0xc5, 0x06, 0x98, 0x5d, 0x3f, 0x08, 0x31, + 0xf1, 0x2c, 0x77, 0xcd, 0xaf, 0x46, 0x0d, 0xec, 0x85, 0xc2, 0xd1, 0xd4, 0xc4, 0xcb, 0xb8, 0xc7, + 0x89, 0xd7, 0x43, 0x20, 0x1b, 0x11, 0x57, 0x66, 0xf1, 0x84, 0x9a, 0xe8, 0xa2, 0xab, 0x88, 0xd1, + 0x8b, 0xe7, 0xc1, 0x08, 0xf3, 0x13, 0x9e, 0x05, 0x59, 0x62, 0xed, 0x73, 0xab, 0x93, 0xe5, 0x31, + 0x26, 0x82, 0xac, 0x7d, 0xc4, 0x68, 0xc5, 0x3f, 0x9f, 0x07, 0x53, 0xa9, 0x67, 0x81, 0xf3, 0x20, + 0xa3, 0xc6, 0xc4, 0x40, 0x1a, 0xcd, 0x6c, 0xac, 0xa1, 0x8c, 0x63, 0xc3, 0x67, 0x54, 0xf1, 0x15, + 0xa0, 0x05, 0x75, 0x96, 0x70, 0x2a, 0xeb, 0xcf, 0x13, 0x73, 0xcc, 0x91, 0xb8, 0x70, 0x32, 0x1f, + 0x70, 0x4d, 0xee, 0x12, 0xe1, 0x03, 0xae, 0x21, 0x46, 0xfb, 0xb4, 0xe3, 0xbe, 0x78, 0xde, 0x98, + 0xbb, 0x87, 0x79, 0xe3, 0xe8, 0x1d, 0xe7, 0x8d, 0x17, 0x40, 0x2e, 0x74, 0x42, 0x17, 0xf3, 0x83, + 0x4c, 0xbb, 0x46, 0x5d, 0x67, 0x44, 0x24, 0x78, 0xf0, 0x2d, 0x30, 0x66, 0xe3, 0x9a, 0x15, 0xb9, + 0x21, 0x3f, 0xb3, 0x26, 0x2e, 0xaf, 0x0e, 0x20, 0x85, 0xc4, 0x30, 0x78, 0x4d, 0xd8, 0x45, 0x31, + 0x00, 0x7c, 0x18, 0x8c, 0x35, 0xac, 0x03, 0xa7, 0x11, 0x35, 0x78, 0x83, 0x69, 0x08, 0xb1, 0x4d, + 0x41, 0x42, 0x31, 0x8f, 0x55, 0x46, 0x7c, 0x50, 0x75, 0x23, 0xea, 0x34, 0xb1, 0x64, 0xca, 0xe6, + 0x4f, 0x55, 0xc6, 0xf5, 0x14, 0x1f, 0x75, 0x69, 0x70, 0x30, 0xc7, 0xe3, 0xca, 0x13, 0x1a, 0x98, + 0x20, 0xa1, 0x98, 0xd7, 0x09, 0x26, 0xe5, 0x27, 0xfb, 0x81, 0x49, 0xe5, 0x2e, 0x0d, 0xf8, 0x18, + 0x18, 0x6f, 0x58, 0x07, 0x57, 0xb1, 0x57, 0x0f, 0x6f, 0x99, 0x27, 0x17, 0x8d, 0xa5, 0x6c, 0xf9, + 0x64, 0xbb, 0x55, 0x18, 0xdf, 0x8c, 0x89, 0x28, 0xe1, 0x73, 0x61, 0xc7, 0x93, 0xc2, 0xa7, 0x34, + 0xe1, 0x98, 0x88, 0x12, 0x3e, 0xeb, 0x5e, 0x02, 0x2b, 0x64, 0x9b, 0xcb, 0x9c, 0xea, 0xbc, 0xe6, + 0x6e, 0x0b, 0x32, 0x8a, 0xf9, 0x70, 0x09, 0xe4, 0x1b, 0xd6, 0x01, 0x1f, 0x49, 0x98, 0xd3, 0xdc, + 0x2c, 0x1f, 0x8c, 0x6f, 0x4a, 0x1a, 0x52, 0x5c, 0x2e, 0xe9, 0x78, 0x42, 0x72, 0x46, 0x93, 0x94, + 0x34, 0xa4, 0xb8, 0x2c, 0x89, 0x23, 0xcf, 0xb9, 0x1d, 0x61, 0x21, 0x0c, 0x79, 0x64, 0x54, 0x12, + 0xdf, 0x48, 0x58, 0x48, 0x97, 0x83, 0x25, 0x00, 0x1a, 0x91, 0x1b, 0x3a, 0x81, 0x8b, 0xb7, 0x6a, + 0xe6, 0x69, 0x1e, 0x7f, 0xde, 0xf4, 0x6f, 0x2a, 0x2a, 0xd2, 0x24, 0x20, 0x06, 0x23, 0xd8, 0x8b, + 0x1a, 0xe6, 0x19, 0x7e, 0xb0, 0x0f, 0x24, 0x05, 0xd5, 0xce, 0x59, 0xf7, 0xa2, 0x06, 0xe2, 0xe6, + 0xe1, 0x33, 0xe0, 0x64, 0xc3, 0x3a, 0x60, 0xe5, 0x00, 0x93, 0xd0, 0xc1, 0xd4, 0x9c, 0xe5, 0x0f, + 0x3f, 0xc3, 0xba, 0xdd, 0x4d, 0x9d, 0x81, 0x3a, 0xe5, 0xb8, 0xa2, 
0xe3, 0x69, 0x8a, 0x73, 0x9a, + 0xa2, 0xce, 0x40, 0x9d, 0x72, 0x2c, 0xd2, 0x04, 0xdf, 0x8e, 0x1c, 0x82, 0x6d, 0xf3, 0x01, 0xde, + 0x20, 0xcb, 0x97, 0x15, 0x82, 0x86, 0x14, 0x17, 0x36, 0xe3, 0xd9, 0x95, 0xc9, 0xb7, 0xe1, 0x8d, + 0xc1, 0x56, 0xf2, 0x2d, 0xb2, 0x42, 0x88, 0x75, 0x28, 0x4e, 0x1a, 0x7d, 0x6a, 0x05, 0x29, 0xc8, + 0x59, 0xae, 0xbb, 0x55, 0x33, 0xcf, 0xf2, 0xd8, 0x0f, 0xfa, 0x04, 0x51, 0x55, 0x67, 0x85, 0x81, + 0x20, 0x81, 0xc5, 0x40, 0x7d, 0x8f, 0xa5, 0xc6, 0xfc, 0x70, 0x41, 0xb7, 0x18, 0x08, 0x12, 0x58, + 0xfc, 0x49, 0xbd, 0xc3, 0xad, 0x9a, 0xf9, 0xe0, 0x90, 0x9f, 0x94, 0x81, 0x20, 0x81, 0x05, 0x1d, + 0x90, 0xf5, 0xfc, 0xd0, 0x3c, 0x37, 0x94, 0xe3, 0x99, 0x1f, 0x38, 0xd7, 0xfc, 0x10, 0x31, 0x0c, + 0xf8, 0x23, 0x03, 0x80, 0x20, 0x49, 0xd1, 0x87, 0x06, 0x32, 0x12, 0x49, 0x41, 0x96, 0x92, 0xdc, + 0x5e, 0xf7, 0x42, 0x72, 0x98, 0x5c, 0x8f, 0xb4, 0x3d, 0xa0, 0x79, 0x01, 0x7f, 0x6e, 0x80, 0x33, + 0x7a, 0x9b, 0xac, 0xdc, 0x5b, 0xe0, 0x11, 0xb9, 0x3e, 0xe8, 0x34, 0x2f, 0xfb, 0xbe, 0x5b, 0x36, + 0xdb, 0xad, 0xc2, 0x99, 0x95, 0x1e, 0xa8, 0xa8, 0xa7, 0x2f, 0xf0, 0xd7, 0x06, 0x98, 0x91, 0x55, + 0x54, 0xf3, 0xb0, 0xc0, 0x03, 0x88, 0x07, 0x1d, 0xc0, 0x34, 0x8e, 0x88, 0xa3, 0x7a, 0xc9, 0xde, + 0xc5, 0x47, 0xdd, 0xae, 0xc1, 0xdf, 0x19, 0x60, 0xd2, 0xc6, 0x01, 0xf6, 0x6c, 0xec, 0x55, 0x99, + 0xaf, 0x8b, 0x03, 0x19, 0x59, 0xa4, 0x7d, 0x5d, 0xd3, 0x20, 0x84, 0x9b, 0x25, 0xe9, 0xe6, 0xa4, + 0xce, 0x3a, 0x6a, 0x15, 0xe6, 0x12, 0x55, 0x9d, 0x83, 0x3a, 0xbc, 0x84, 0xef, 0x19, 0x60, 0x2a, + 0x59, 0x00, 0x71, 0xa4, 0x9c, 0x1f, 0x62, 0x1e, 0xf0, 0xf6, 0x75, 0xa5, 0x13, 0x10, 0xa5, 0x3d, + 0x80, 0xbf, 0x31, 0x58, 0xa7, 0x16, 0xdf, 0xfb, 0xa8, 0x59, 0xe4, 0xb1, 0x7c, 0x63, 0xe0, 0xb1, + 0x54, 0x08, 0x22, 0x94, 0x97, 0x92, 0x56, 0x50, 0x71, 0x8e, 0x5a, 0x85, 0x59, 0x3d, 0x92, 0x8a, + 0x81, 0x74, 0x0f, 0xe1, 0x77, 0x0d, 0x30, 0x89, 0x93, 0x8e, 0x9b, 0x9a, 0x17, 0x06, 0x12, 0xc4, + 0x9e, 0x4d, 0xbc, 0xb8, 0xa9, 0x6b, 0x2c, 0x8a, 0x3a, 0xb0, 0x59, 0x07, 0x89, 0x0f, 0xac, 0x46, + 0xe0, 0x62, 0xf3, 0xbf, 0x06, 0xdc, 0x41, 0xae, 0x0b, 0xbb, 0x28, 0x06, 0x80, 0x97, 0x40, 0xde, + 0x8b, 0x5c, 0xd7, 0xda, 0x75, 0xb1, 0xf9, 0x30, 0xef, 0x45, 0xd4, 0x48, 0xf6, 0x9a, 0xa4, 0x23, + 0x25, 0x01, 0x6b, 0x60, 0xf1, 0xe0, 0x25, 0xf5, 0x79, 0x52, 0xcf, 0xa1, 0xa1, 0x79, 0x91, 0x5b, + 0x99, 0x6f, 0xb7, 0x0a, 0x73, 0x3b, 0xbd, 0xc7, 0x8a, 0x77, 0xb5, 0x01, 0x5f, 0x01, 0x0f, 0x6a, + 0x32, 0xeb, 0x8d, 0x5d, 0x6c, 0xdb, 0xd8, 0x8e, 0x2f, 0x6e, 0xe6, 0x7f, 0x8b, 0xc1, 0x65, 0xbc, + 0xc1, 0x77, 0xd2, 0x02, 0xe8, 0x4e, 0xda, 0xf0, 0x2a, 0x98, 0xd3, 0xd8, 0x1b, 0x5e, 0xb8, 0x45, + 0x2a, 0x21, 0x71, 0xbc, 0xba, 0xb9, 0xc4, 0xed, 0x9e, 0x89, 0x77, 0xe4, 0x8e, 0xc6, 0x43, 0x7d, + 0x74, 0xe0, 0x17, 0x3a, 0xac, 0xf1, 0x57, 0x68, 0x56, 0xf0, 0x12, 0x3e, 0xa4, 0xe6, 0x23, 0xbc, + 0x3b, 0xe1, 0x8b, 0xbd, 0xa3, 0xd1, 0x51, 0x1f, 0x79, 0xf8, 0x79, 0x70, 0x3a, 0xc5, 0x61, 0x57, + 0x14, 0xf3, 0x51, 0x71, 0xd7, 0x60, 0xfd, 0xec, 0x4e, 0x4c, 0x44, 0xbd, 0x24, 0xe1, 0xe7, 0x00, + 0xd4, 0xc8, 0x9b, 0x56, 0xc0, 0xf5, 0x1f, 0x13, 0xd7, 0x1e, 0xb6, 0xa2, 0x3b, 0x92, 0x86, 0x7a, + 0xc8, 0xc1, 0x9f, 0x18, 0x1d, 0x4f, 0x92, 0xdc, 0x8e, 0xa9, 0x79, 0x89, 0xef, 0xdf, 0xcd, 0x63, + 0x66, 0xa1, 0xf6, 0x1e, 0x24, 0x72, 0xb1, 0x16, 0x66, 0x0d, 0x0a, 0xf5, 0x71, 0x61, 0x9e, 0xdd, + 0xd0, 0x53, 0x15, 0x1e, 0x4e, 0x83, 0xec, 0x1e, 0x96, 0x5f, 0x55, 0x20, 0xf6, 0x13, 0xda, 0x20, + 0xd7, 0xb4, 0xdc, 0x28, 0x1e, 0x32, 0x0c, 0xb8, 0x3b, 0x40, 0xc2, 0xf8, 0x73, 0x99, 0x67, 0x8d, + 0xf9, 0xf7, 0x0d, 0x30, 0xd7, 0xfb, 0xe0, 0xb9, 0xaf, 0x6e, 0xfd, 0xd4, 0x00, 0x33, 0x5d, 
0x67, + 0x4c, 0x0f, 0x8f, 0x6e, 0x77, 0x7a, 0xf4, 0xca, 0xa0, 0x0f, 0x0b, 0xb1, 0x39, 0x78, 0x87, 0xac, + 0xbb, 0xf7, 0x03, 0x03, 0x4c, 0xa7, 0xcb, 0xf6, 0xfd, 0x8c, 0x57, 0xf1, 0xfd, 0x0c, 0x98, 0xeb, + 0xdd, 0xd8, 0x43, 0xa2, 0x26, 0x18, 0xc3, 0x99, 0x04, 0xf5, 0x9a, 0x1a, 0xbf, 0x63, 0x80, 0x89, + 0xb7, 0x94, 0x5c, 0xfc, 0xd6, 0x7d, 0xe0, 0x33, 0xa8, 0xf8, 0x9c, 0x4c, 0x18, 0x14, 0xe9, 0xb8, + 0xc5, 0xdf, 0x1a, 0x60, 0xb6, 0x67, 0x03, 0x00, 0x2f, 0x82, 0x51, 0xcb, 0x75, 0xfd, 0x7d, 0x31, + 0x4a, 0xd4, 0xde, 0x11, 0xac, 0x70, 0x2a, 0x92, 0x5c, 0x2d, 0x7a, 0x99, 0xcf, 0x2a, 0x7a, 0xc5, + 0x3f, 0x18, 0xe0, 0xdc, 0x9d, 0x32, 0xf1, 0xbe, 0x2c, 0xe9, 0x12, 0xc8, 0xcb, 0xe6, 0xfd, 0x90, + 0x2f, 0xa7, 0x2c, 0xc5, 0xb2, 0x68, 0xf0, 0x0f, 0xcd, 0xc4, 0xaf, 0xe2, 0x07, 0x06, 0x98, 0xae, + 0x60, 0xd2, 0x74, 0xaa, 0x18, 0xe1, 0x1a, 0x26, 0xd8, 0xab, 0x62, 0xb8, 0x0c, 0xc6, 0xf9, 0xeb, + 0xee, 0xc0, 0xaa, 0xc6, 0xaf, 0x6e, 0x66, 0x64, 0xc8, 0xc7, 0xaf, 0xc5, 0x0c, 0x94, 0xc8, 0xa8, + 0xd7, 0x3c, 0x99, 0xbe, 0xaf, 0x79, 0xce, 0x81, 0x91, 0x20, 0x19, 0x44, 0xe7, 0x19, 0x97, 0xcf, + 0x9e, 0x39, 0x95, 0x73, 0x7d, 0x12, 0xf2, 0xe9, 0x5a, 0x4e, 0x72, 0x7d, 0x12, 0x22, 0x4e, 0x2d, + 0xfe, 0x29, 0x03, 0x4e, 0x75, 0xd6, 0x71, 0x06, 0x48, 0x22, 0xb7, 0xeb, 0xbd, 0x12, 0xe3, 0x21, + 0xce, 0xd1, 0x3f, 0x77, 0xc9, 0xdc, 0xf9, 0x73, 0x17, 0xf8, 0x02, 0x98, 0x91, 0x3f, 0xd7, 0x0f, + 0x02, 0x82, 0x29, 0x7f, 0x77, 0x9a, 0xed, 0xfc, 0x68, 0x76, 0x33, 0x2d, 0x80, 0xba, 0x75, 0xe0, + 0xff, 0xa7, 0x3e, 0xc5, 0xb9, 0x90, 0x7c, 0x86, 0xc3, 0x5a, 0x42, 0xde, 0x67, 0xdc, 0x64, 0x65, + 0x60, 0x9d, 0x10, 0x9f, 0xa4, 0xbe, 0xcf, 0x59, 0x06, 0xe3, 0x35, 0x26, 0xc0, 0xe7, 0xf5, 0xb9, + 0xce, 0xa0, 0x5f, 0x89, 0x19, 0x28, 0x91, 0x81, 0xcf, 0x83, 0x29, 0x3f, 0x10, 0x1d, 0xf0, 0x96, + 0x6b, 0x57, 0xb0, 0x5b, 0xe3, 0x93, 0xc4, 0x7c, 0x3c, 0xee, 0xed, 0x60, 0xa1, 0xb4, 0x6c, 0xf1, + 0x8f, 0x06, 0xe8, 0xf5, 0xa1, 0x1d, 0x3c, 0x2b, 0xc6, 0xb6, 0xda, 0x2c, 0x34, 0x1e, 0xd9, 0xc2, + 0x26, 0x18, 0xa3, 0x22, 0x57, 0x64, 0x2e, 0x6f, 0x1d, 0x33, 0x97, 0xd3, 0x99, 0x27, 0xfa, 0xc5, + 0x98, 0x1a, 0x83, 0xb1, 0x74, 0xae, 0x5a, 0xe5, 0xc8, 0xb3, 0xe5, 0x24, 0x7f, 0x52, 0xa4, 0xf3, + 0xea, 0x8a, 0xa0, 0x21, 0xc5, 0x2d, 0x57, 0x3f, 0xfc, 0x64, 0xe1, 0xc4, 0x47, 0x9f, 0x2c, 0x9c, + 0xf8, 0xf8, 0x93, 0x85, 0x13, 0x5f, 0x6f, 0x2f, 0x18, 0x1f, 0xb6, 0x17, 0x8c, 0x8f, 0xda, 0x0b, + 0xc6, 0xc7, 0xed, 0x05, 0xe3, 0x6f, 0xed, 0x05, 0xe3, 0x87, 0x7f, 0x5f, 0x38, 0xf1, 0xe5, 0xe7, + 0x8f, 0xf5, 0x6d, 0xfb, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x16, 0x56, 0x59, 0x35, 0x34, 0x2f, + 0x00, 0x00, +} + +func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DesiredAPIVersion) + copy(dAtA[i:], m.DesiredAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + 
i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ConvertedObjects) > 0 { + for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConversionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceConversion) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConversionReviewVersions) > 0 { + for iNdEx := len(m.ConversionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConversionReviewVersions[iNdEx]) + copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.WebhookClientConfig != nil { + { + size, err := m.WebhookClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.ListKind) + copy(dAtA[i:], m.ListKind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) + i-- + dAtA[i] = 0x2a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x22 + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Singular) + copy(dAtA[i:], m.Singular) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) + i-- + dAtA[i] = 0x12 + i -= len(m.Plural) + copy(dAtA[i:], m.Plural) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreserveUnknownFields != nil { + i-- + if *m.PreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.Conversion != nil { + { + size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] 
= 0x4a + } + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Validation != nil { + { + size, err := m.Validation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StoredVersions) > 0 { + for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StoredVersions[iNdEx]) + copy(dAtA[i:], m.StoredVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + 
if m.DeprecationWarning != nil { + i -= len(*m.DeprecationWarning) + copy(dAtA[i:], *m.DeprecationWarning) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeprecationWarning))) + i-- + dAtA[i] = 0x42 + } + i-- + if m.Deprecated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LabelSelectorPath != nil { + i -= len(*m.LabelSelectorPath) + copy(dAtA[i:], *m.LabelSelectorPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.StatusReplicasPath) + copy(dAtA[i:], m.StatusReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.SpecReplicasPath) + copy(dAtA[i:], m.SpecReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + 
_ = i + var l int + _ = l + if m.Scale != nil { + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OpenAPIV3Schema != nil { + { + size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSON) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSON) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.XValidations) > 0 { + for iNdEx := len(m.XValidations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.XValidations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + } + if m.XMapType != nil { + i -= len(*m.XMapType) + copy(dAtA[i:], 
*m.XMapType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XMapType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + if m.XListType != nil { + i -= len(*m.XListType) + copy(dAtA[i:], *m.XListType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.XListMapKeys) > 0 { + for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.XListMapKeys[iNdEx]) + copy(dAtA[i:], m.XListMapKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + } + i-- + if m.XIntOrString { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + i-- + if m.XEmbeddedResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + if m.XPreserveUnknownFields != nil { + i-- + if *m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + i-- + if m.Nullable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + if m.Example != nil { + { + size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.ExternalDocs != nil { + { + size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if len(m.Definitions) > 0 { + keysForDefinitions := make([]string, 0, len(m.Definitions)) + for k := range m.Definitions { + keysForDefinitions = append(keysForDefinitions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Definitions[string(keysForDefinitions[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefinitions[iNdEx]) + copy(dAtA[i:], keysForDefinitions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + } + if m.AdditionalItems != nil { + { + size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.Dependencies) > 0 { + keysForDependencies := make([]string, 0, len(m.Dependencies)) + for k := range m.Dependencies { + keysForDependencies = append(keysForDependencies, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { + v := m.Dependencies[string(keysForDependencies[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDependencies[iNdEx]) + copy(dAtA[i:], keysForDependencies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + 
i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if len(m.PatternProperties) > 0 { + keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) + for k := range m.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPatternProperties[iNdEx]) + copy(dAtA[i:], keysForPatternProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + } + if m.AdditionalProperties != nil { + { + size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if len(m.Properties) > 0 { + keysForProperties := make([]string, 0, len(m.Properties)) + for k := range m.Properties { + keysForProperties = append(keysForProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.Properties[string(keysForProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForProperties[iNdEx]) + copy(dAtA[i:], keysForProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + } + if m.Not != nil { + { + size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + } + if len(m.OneOf) > 0 { + for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.Items != nil { + { + size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.Required) > 0 { + for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { 
+ i -= len(m.Required[iNdEx]) + copy(dAtA[i:], m.Required[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if m.MinProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.MaxProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.Enum) > 0 { + for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if m.MultipleOf != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x99 + } + i-- + if m.UniqueItems { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + if m.MinItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.MaxItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0x7a + if m.MinLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) + i-- + dAtA[i] = 0x70 + } + if m.MaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) + i-- + dAtA[i] = 0x68 + } + i-- + if m.ExclusiveMinimum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + if m.Minimum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i-- + dAtA[i] = 0x59 + } + i-- + if m.ExclusiveMaximum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + if m.Maximum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i-- + dAtA[i] = 0x49 + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0x3a + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x32 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + if m.Ref != nil { + i -= len(*m.Ref) + copy(dAtA[i:], *m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.JSONSchemas) > 0 { + for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Allows { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Property) > 0 { + for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Property[iNdEx]) + copy(dAtA[i:], m.Property[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidationRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OptionalOldSelf != nil { + i-- + if *m.OptionalOldSelf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- + dAtA[i] = 0x2a + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x22 + } + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ConversionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DesiredAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConversionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ConvertedObjects) > 0 { + for _, e := range m.ConvertedObjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Result.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConversionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceColumnDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Priority)) + l = len(m.JSONPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceConversion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + if m.WebhookClientConfig != nil { + l = m.WebhookClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ConversionReviewVersions) > 0 { + for _, s := range m.ConversionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomResourceDefinitionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Plural) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Singular) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ListKind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Names.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + if m.Validation != nil { + l = m.Validation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Subresources != nil { + l = m.Subresources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + if len(m.AdditionalPrinterColumns) > 0 { + for _, e := range m.AdditionalPrinterColumns { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Conversion != nil { + l = m.Conversion.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreserveUnknownFields != nil { + n += 2 + } + return n +} + +func (m *CustomResourceDefinitionStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AcceptedNames.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StoredVersions) > 0 { + for _, s := range m.StoredVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Subresources != nil { + l = m.Subresources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AdditionalPrinterColumns) > 0 { + for _, e := range m.AdditionalPrinterColumns { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.DeprecationWarning != nil { + l = len(*m.DeprecationWarning) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceSubresourceScale) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SpecReplicasPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StatusReplicasPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelectorPath != nil { + l = len(*m.LabelSelectorPath) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceSubresourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *CustomResourceSubresources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scale != nil { + l = m.Scale.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceValidation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OpenAPIV3Schema != nil { + l = m.OpenAPIV3Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExternalDocumentation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JSON) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaProps) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Schema) + n += 1 + l + sovGenerated(uint64(l)) + if m.Ref != nil { + l = len(*m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Title) + n += 1 + l + sovGenerated(uint64(l)) + if m.Default != nil { + l = m.Default.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Maximum != nil { + n += 
9 + } + n += 2 + if m.Minimum != nil { + n += 9 + } + n += 2 + if m.MaxLength != nil { + n += 1 + sovGenerated(uint64(*m.MaxLength)) + } + if m.MinLength != nil { + n += 1 + sovGenerated(uint64(*m.MinLength)) + } + l = len(m.Pattern) + n += 1 + l + sovGenerated(uint64(l)) + if m.MaxItems != nil { + n += 2 + sovGenerated(uint64(*m.MaxItems)) + } + if m.MinItems != nil { + n += 2 + sovGenerated(uint64(*m.MinItems)) + } + n += 3 + if m.MultipleOf != nil { + n += 10 + } + if len(m.Enum) > 0 { + for _, e := range m.Enum { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.MaxProperties != nil { + n += 2 + sovGenerated(uint64(*m.MaxProperties)) + } + if m.MinProperties != nil { + n += 2 + sovGenerated(uint64(*m.MinProperties)) + } + if len(m.Required) > 0 { + for _, s := range m.Required { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Items != nil { + l = m.Items.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.AllOf) > 0 { + for _, e := range m.AllOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.OneOf) > 0 { + for _, e := range m.OneOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AnyOf) > 0 { + for _, e := range m.AnyOf { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.Not != nil { + l = m.Not.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Properties) > 0 { + for k, v := range m.Properties { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalProperties != nil { + l = m.AdditionalProperties.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.PatternProperties) > 0 { + for k, v := range m.PatternProperties { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Dependencies) > 0 { + for k, v := range m.Dependencies { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AdditionalItems != nil { + l = m.AdditionalItems.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Definitions) > 0 { + for k, v := range m.Definitions { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ExternalDocs != nil { + l = m.ExternalDocs.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Example != nil { + l = m.Example.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + n += 3 + if m.XPreserveUnknownFields != nil { + n += 3 + } + n += 3 + n += 3 + if len(m.XListMapKeys) > 0 { + for _, s := range m.XListMapKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.XListType != nil { + l = len(*m.XListType) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.XMapType != nil { + l = len(*m.XMapType) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.XValidations) > 0 { + for _, e := range m.XValidations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JSONSchemaPropsOrArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if len(m.JSONSchemas) > 0 { + for _, e := range m.JSONSchemas { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JSONSchemaPropsOrBool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JSONSchemaPropsOrStringArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Property) > 0 { + for _, s := range m.Property { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *ValidationRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.OptionalOldSelf != nil { + n += 2 + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ConversionRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" + s := strings.Join([]string{`&ConversionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, + `Objects:` + repeatedStringForObjects + `,`, + `}`, + }, "") + return s +} +func (this *ConversionResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForConvertedObjects := "[]RawExtension{" + for _, f := range this.ConvertedObjects { + repeatedStringForConvertedObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConvertedObjects += "}" + s := strings.Join([]string{`&ConversionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ConvertedObjects:` + repeatedStringForConvertedObjects + `,`, + `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConversionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConversionReview{`, + `Request:` + strings.Replace(this.Request.String(), "ConversionRequest", "ConversionRequest", 1) + `,`, + `Response:` + 
strings.Replace(this.Response.String(), "ConversionResponse", "ConversionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceColumnDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceColumnDefinition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Priority:` + fmt.Sprintf("%v", this.Priority) + `,`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceConversion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceConversion{`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `WebhookClientConfig:` + strings.Replace(this.WebhookClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinition{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CustomResourceDefinitionSpec", "CustomResourceDefinitionSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CustomResourceDefinitionStatus", "CustomResourceDefinitionStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CustomResourceDefinition{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CustomResourceDefinitionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionNames) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionNames{`, + `Plural:` + fmt.Sprintf("%v", this.Plural) + `,`, + `Singular:` + fmt.Sprintf("%v", this.Singular) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `ListKind:` + fmt.Sprintf("%v", this.ListKind) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionSpec) String() string { + if this == nil { + return "nil" 
+ } + repeatedStringForVersions := "[]CustomResourceDefinitionVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" + s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Names:` + strings.Replace(strings.Replace(this.Names.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `Validation:` + strings.Replace(this.Validation.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, + `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `PreserveUnknownFields:` + valueToStringGenerated(this.PreserveUnknownFields) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]CustomResourceDefinitionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + `StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionVersion) String() string { + if this == nil { + return "nil" + } + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" + s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Served:` + fmt.Sprintf("%v", this.Served) + `,`, + `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, + `Deprecated:` + 
fmt.Sprintf("%v", this.Deprecated) + `,`, + `DeprecationWarning:` + valueToStringGenerated(this.DeprecationWarning) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresourceScale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresourceScale{`, + `SpecReplicasPath:` + fmt.Sprintf("%v", this.SpecReplicasPath) + `,`, + `StatusReplicasPath:` + fmt.Sprintf("%v", this.StatusReplicasPath) + `,`, + `LabelSelectorPath:` + valueToStringGenerated(this.LabelSelectorPath) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresourceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresourceStatus{`, + `}`, + }, "") + return s +} +func (this *CustomResourceSubresources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceSubresources{`, + `Status:` + strings.Replace(this.Status.String(), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, + `Scale:` + strings.Replace(this.Scale.String(), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceValidation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceValidation{`, + `OpenAPIV3Schema:` + strings.Replace(this.OpenAPIV3Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalDocumentation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalDocumentation{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *JSON) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSON{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaProps) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnum := "[]JSON{" + for _, f := range this.Enum { + repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + "," + } + repeatedStringForEnum += "}" + repeatedStringForAllOf := "[]JSONSchemaProps{" + for _, f := range this.AllOf { + repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAllOf += "}" + repeatedStringForOneOf := "[]JSONSchemaProps{" + for _, f := range this.OneOf { + repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForOneOf += "}" + repeatedStringForAnyOf := "[]JSONSchemaProps{" + for _, f := range this.AnyOf { + repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAnyOf += "}" + repeatedStringForXValidations := "[]ValidationRule{" + for _, f := range this.XValidations { + repeatedStringForXValidations += strings.Replace(strings.Replace(f.String(), "ValidationRule", "ValidationRule", 1), `&`, ``, 1) + "," + } + repeatedStringForXValidations += "}" + keysForProperties := make([]string, 0, len(this.Properties)) + for k := range this.Properties { + keysForProperties = append(keysForProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) + 
mapStringForProperties := "map[string]JSONSchemaProps{" + for _, k := range keysForProperties { + mapStringForProperties += fmt.Sprintf("%v: %v,", k, this.Properties[k]) + } + mapStringForProperties += "}" + keysForPatternProperties := make([]string, 0, len(this.PatternProperties)) + for k := range this.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + mapStringForPatternProperties := "map[string]JSONSchemaProps{" + for _, k := range keysForPatternProperties { + mapStringForPatternProperties += fmt.Sprintf("%v: %v,", k, this.PatternProperties[k]) + } + mapStringForPatternProperties += "}" + keysForDependencies := make([]string, 0, len(this.Dependencies)) + for k := range this.Dependencies { + keysForDependencies = append(keysForDependencies, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + mapStringForDependencies := "JSONSchemaDependencies{" + for _, k := range keysForDependencies { + mapStringForDependencies += fmt.Sprintf("%v: %v,", k, this.Dependencies[k]) + } + mapStringForDependencies += "}" + keysForDefinitions := make([]string, 0, len(this.Definitions)) + for k := range this.Definitions { + keysForDefinitions = append(keysForDefinitions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + mapStringForDefinitions := "JSONSchemaDefinitions{" + for _, k := range keysForDefinitions { + mapStringForDefinitions += fmt.Sprintf("%v: %v,", k, this.Definitions[k]) + } + mapStringForDefinitions += "}" + s := strings.Join([]string{`&JSONSchemaProps{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Schema:` + fmt.Sprintf("%v", this.Schema) + `,`, + `Ref:` + valueToStringGenerated(this.Ref) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Title:` + fmt.Sprintf("%v", this.Title) + `,`, + `Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`, + `Maximum:` + valueToStringGenerated(this.Maximum) + `,`, + `ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`, + `Minimum:` + valueToStringGenerated(this.Minimum) + `,`, + `ExclusiveMinimum:` + fmt.Sprintf("%v", this.ExclusiveMinimum) + `,`, + `MaxLength:` + valueToStringGenerated(this.MaxLength) + `,`, + `MinLength:` + valueToStringGenerated(this.MinLength) + `,`, + `Pattern:` + fmt.Sprintf("%v", this.Pattern) + `,`, + `MaxItems:` + valueToStringGenerated(this.MaxItems) + `,`, + `MinItems:` + valueToStringGenerated(this.MinItems) + `,`, + `UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`, + `MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`, + `Enum:` + repeatedStringForEnum + `,`, + `MaxProperties:` + valueToStringGenerated(this.MaxProperties) + `,`, + `MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`, + `Required:` + fmt.Sprintf("%v", this.Required) + `,`, + `Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, + `AllOf:` + repeatedStringForAllOf + `,`, + `OneOf:` + repeatedStringForOneOf + `,`, + `AnyOf:` + repeatedStringForAnyOf + `,`, + `Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Properties:` + mapStringForProperties + `,`, + `AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `PatternProperties:` + 
mapStringForPatternProperties + `,`, + `Dependencies:` + mapStringForDependencies + `,`, + `AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `Definitions:` + mapStringForDefinitions + `,`, + `ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, + `Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`, + `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, + `XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`, + `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, + `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, + `XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`, + `XListType:` + valueToStringGenerated(this.XListType) + `,`, + `XMapType:` + valueToStringGenerated(this.XMapType) + `,`, + `XValidations:` + repeatedStringForXValidations + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrArray) String() string { + if this == nil { + return "nil" + } + repeatedStringForJSONSchemas := "[]JSONSchemaProps{" + for _, f := range this.JSONSchemas { + repeatedStringForJSONSchemas += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForJSONSchemas += "}" + s := strings.Join([]string{`&JSONSchemaPropsOrArray{`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `JSONSchemas:` + repeatedStringForJSONSchemas + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrBool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONSchemaPropsOrBool{`, + `Allows:` + fmt.Sprintf("%v", this.Allows) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JSONSchemaPropsOrStringArray) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONSchemaPropsOrStringArray{`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Property:` + fmt.Sprintf("%v", this.Property) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *ValidationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidationRule{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `OptionalOldSelf:` + valueToStringGenerated(this.OptionalOldSelf) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + 
valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ConversionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredAPIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DesiredAPIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, runtime.RawExtension{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConversionResponse) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConvertedObjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConvertedObjects = append(m.ConvertedObjects, runtime.RawExtension{}) + if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConversionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConversionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConversionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &ConversionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &ConversionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { + 
l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceConversion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceConversion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = ConversionStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WebhookClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WebhookClientConfig == nil { + m.WebhookClientConfig = &WebhookClientConfig{} + } + if err := m.WebhookClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConversionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConversionReviewVersions = append(m.ConversionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CustomResourceDefinitionConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomResourceDefinition{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plural", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Plural = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Singular", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Singular = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListKind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ListKind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex 
< 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Names.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validation == nil { + m.Validation = &CustomResourceValidation{} + } + if err := m.Validation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Subresources == nil { + m.Subresources = &CustomResourceSubresources{} + } + if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, CustomResourceDefinitionVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conversion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Conversion == nil { + m.Conversion = &CustomResourceConversion{} + } + if err := m.Conversion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PreserveUnknownFields = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CustomResourceDefinitionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Served", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Served = bool(v != 0) + case 
3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Storage = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &CustomResourceValidation{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Subresources == nil { + m.Subresources = &CustomResourceSubresources{} + } + if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Deprecated = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecationWarning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DeprecationWarning = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresourceScale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresourceScale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecReplicasPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpecReplicasPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusReplicasPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StatusReplicasPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelectorPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.LabelSelectorPath = &s + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceSubresources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceSubresources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &CustomResourceSubresourceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scale == nil { + m.Scale = &CustomResourceSubresourceScale{} + } + if err := 
m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceValidation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceValidation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenAPIV3Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OpenAPIV3Schema == nil { + m.OpenAPIV3Schema = &JSONSchemaProps{} + } + if err := m.OpenAPIV3Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalDocumentation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalDocumentation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSON) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSON: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSON: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaProps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaProps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = JSONSchemaURL(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Ref = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Default == nil { + m.Default = &JSON{} + } + if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Maximum = &v2 + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMaximum", wireType) + } + 
var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExclusiveMaximum = bool(v != 0) + case 11: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Minimum = &v2 + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMinimum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExclusiveMinimum = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxLength = &v + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinLength", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinLength = &v + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxItems = &v + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinItems", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinItems = &v + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UniqueItems", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UniqueItems = bool(v != 0) + case 19: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field MultipleOf", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.MultipleOf = &v2 + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enum = append(m.Enum, JSON{}) + if err := m.Enum[len(m.Enum)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxProperties = &v + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinProperties", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinProperties = &v + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Required = append(m.Required, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Items == nil { + m.Items = &JSONSchemaPropsOrArray{} + } + if err := m.Items.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllOf = append(m.AllOf, JSONSchemaProps{}) + if err := m.AllOf[len(m.AllOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OneOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OneOf = append(m.OneOf, JSONSchemaProps{}) + if err := m.OneOf[len(m.OneOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnyOf = append(m.AnyOf, JSONSchemaProps{}) + if err := m.AnyOf[len(m.AnyOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Not", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Not == nil { + m.Not = &JSONSchemaProps{} + } + if err := m.Not.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Properties == nil { + m.Properties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Properties[mapkey] = *mapvalue + iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalProperties == nil { + m.AdditionalProperties = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatternProperties", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PatternProperties == nil { + m.PatternProperties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PatternProperties[mapkey] = *mapvalue + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Dependencies == nil { + m.Dependencies = make(JSONSchemaDependencies) + } + var mapkey string + mapvalue := &JSONSchemaPropsOrStringArray{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaPropsOrStringArray{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Dependencies[mapkey] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AdditionalItems == nil { + m.AdditionalItems = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definitions == nil { + m.Definitions = make(JSONSchemaDefinitions) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated 
+ } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Definitions[mapkey] = *mapvalue + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExternalDocs == nil { + m.ExternalDocs = &ExternalDocumentation{} + } + if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Example == nil { + m.Example = &JSON{} + } + if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 37: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Nullable = bool(v != 0) + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.XPreserveUnknownFields = &b + case 39: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
XEmbeddedResource", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XEmbeddedResource = bool(v != 0) + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.XIntOrString = bool(v != 0) + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 42: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XListType = &s + iNdEx = postIndex + case 43: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XMapType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XMapType = &s + iNdEx = postIndex + case 44: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XValidations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XValidations = append(m.XValidations, ValidationRule{}) + if err := 
m.XValidations[len(m.XValidations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) + if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allows = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FieldValueErrorReason(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OptionalOldSelf", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalOldSelf = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto new file mode 100644 index 000000000..b8477322b --- /dev/null +++ 
b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -0,0 +1,829 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"; + +// ConversionRequest describes the conversion request parameters. +message ConversionRequest { + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" + optional string desiredAPIVersion = 2; + + // objects is the list of custom resource objects to be converted. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; +} + +// ConversionResponse describes a conversion response. +message ConversionResponse { + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. + optional string uid = 1; + + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). + // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. + repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` + // will be used to construct an error message for the end user. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; +} + +// ConversionReview describes a conversion request/response. 
+message ConversionReview { + // request describes the attributes for the conversion request. + // +optional + optional ConversionRequest request = 1; + + // response describes the attributes for the conversion response. + // +optional + optional ConversionResponse response = 2; +} + +// CustomResourceColumnDefinition specifies a column for server side printing. +message CustomResourceColumnDefinition { + // name is a human readable name for the column. + optional string name = 1; + + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + optional string type = 2; + + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. + // +optional + optional string format = 3; + + // description is a human readable description of this column. + // +optional + optional string description = 4; + + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a priority greater than 0. + // +optional + optional int32 priority = 5; + + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. + optional string JSONPath = 6; +} + +// CustomResourceConversion describes how to convert different versions of a CR. +message CustomResourceConversion { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. + optional string strategy = 1; + + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. + // +optional + optional WebhookClientConfig webhookClientConfig = 2; + + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in + // the list which it supports. If none of the versions specified in this list + // are supported by API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + // Defaults to `["v1beta1"]`. + // +optional + repeated string conversionReviewVersions = 3; +} + +// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format +// <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.22. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. 
+message CustomResourceDefinition { + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes how the user wants the resources to appear + optional CustomResourceDefinitionSpec spec = 2; + + // status indicates the actual state of the CustomResourceDefinition + // +optional + optional CustomResourceDefinitionStatus status = 3; +} + +// CustomResourceDefinitionCondition contains details for the current condition of this pod. +message CustomResourceDefinitionCondition { + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. + optional string type = 1; + + // status is the status of the condition. + // Can be True, False, Unknown. + optional string status = 2; + + // lastTransitionTime last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable message indicating details about last transition. + // +optional + optional string message = 5; +} + +// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. +message CustomResourceDefinitionList { + // Standard object's metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items list individual CustomResourceDefinition objects + repeated CustomResourceDefinition items = 2; +} + +// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition +message CustomResourceDefinitionNames { + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis/<group>/<version>/.../<plural>`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). + // Must be all lowercase. + optional string plural = 1; + + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. + // +optional + optional string singular = 2; + + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get <shortname>`. + // It must be all lowercase. + // +optional + repeated string shortNames = 3; + + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. + optional string kind = 4; + + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". + // +optional + optional string listKind = 5; + + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. + // +optional + repeated string categories = 6; +} + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +message CustomResourceDefinitionSpec { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis/<group>/...`. + // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`).
+ optional string group = 1; + + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis/<group>/<version>/...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. + // +optional + optional string version = 2; + + // names specify the resource and kind names for the custom resource. + optional CustomResourceDefinitionNames names = 3; + + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. + optional string scope = 4; + + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions. + // Top-level and per-version schemas are mutually exclusive. + // +optional + optional CustomResourceValidation validation = 5; + + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. + // Top-level and per-version subresources are mutually exclusive. + // +optional + optional CustomResourceSubresources subresources = 6; + + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // +optional + repeated CustomResourceDefinitionVersion versions = 7; + + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. + // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. + // +optional + repeated CustomResourceColumnDefinition additionalPrinterColumns = 8; + + // conversion defines conversion settings for the CRD. + // +optional + optional CustomResourceConversion conversion = 9; + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1.
Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details. + // +optional + optional bool preserveUnknownFields = 10; +} + +// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition +message CustomResourceDefinitionStatus { + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional + // +listType=map + // +listMapKey=type + repeated CustomResourceDefinitionCondition conditions = 1; + + // acceptedNames are the names that are actually being used to serve discovery. + // They may be different than the names in spec. + // +optional + optional CustomResourceDefinitionNames acceptedNames = 2; + + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the + // versions from this list. + // Versions may not be removed from `spec.versions` while they exist in this list. + // +optional + repeated string storedVersions = 3; +} + +// CustomResourceDefinitionVersion describes a version for CRD. +message CustomResourceDefinitionVersion { + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. + optional string name = 1; + + // served is a flag enabling/disabling this version from being served via REST APIs + optional bool served = 2; + + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. + optional bool storage = 3; + + // deprecated indicates this version of the custom resource API is deprecated. + // When set to true, API requests to this version receive a warning header in the server response. + // Defaults to false. + // +optional + optional bool deprecated = 7; + + // deprecationWarning overrides the default warning returned to API clients. + // May only be set when `deprecated` is true. + // The default warning indicates this version is deprecated and recommends use + // of the newest served version of equal or greater stability, if one exists. + // +optional + optional string deprecationWarning = 8; + + // schema describes the schema used for validation and pruning of this version of the custom resource. + // Top-level and per-version schemas are mutually exclusive. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). + // +optional + optional CustomResourceValidation schema = 4; + + // subresources specify what subresources this version of the defined custom resource have. + // Top-level and per-version subresources are mutually exclusive. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). + // +optional + optional CustomResourceSubresources subresources = 5; + + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details.
+ // Top-level and per-version columns are mutually exclusive. + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. + // +optional + repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; +} + +// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. +message CustomResourceSubresourceScale { + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. + optional string specReplicasPath = 1; + + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource + // will default to 0. + optional string statusReplicasPath = 2; + + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. + // Only JSON paths without the array notation are allowed. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. + // The field pointed by this JSON path must be a string field (not a complex selector struct) + // which contains a serialized label selector in string form. + // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` + // subresource will default to the empty string. + // +optional + optional string labelSelectorPath = 3; +} + +// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. +// Status is represented by the `.status` JSON path inside of a CustomResource. When set, +// * exposes a /status subresource for the custom resource +// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza +// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza +message CustomResourceSubresourceStatus { +} + +// CustomResourceSubresources defines the status and scale subresources for CustomResources. +message CustomResourceSubresources { + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. + // +optional + optional CustomResourceSubresourceStatus status = 1; + + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. + // +optional + optional CustomResourceSubresourceScale scale = 2; +} + +// CustomResourceValidation is a list of validation methods for CustomResources. 
+message CustomResourceValidation { + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. + // +optional + optional JSONSchemaProps openAPIV3Schema = 1; +} + +// ExternalDocumentation allows referencing an external resource for extended documentation. +message ExternalDocumentation { + optional string description = 1; + + optional string url = 2; +} + +// JSON represents any valid JSON value. +// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. +message JSON { + optional bytes raw = 1; +} + +// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). +message JSONSchemaProps { + optional string id = 1; + + optional string schema = 2; + + optional string ref = 3; + + optional string description = 4; + + optional string type = 5; + + // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: + // + // - bsonobjectid: a bson object ID, i.e. a 24 characters hex string + // - uri: an URI as parsed by Golang net/url.ParseRequestURI + // - email: an email address as parsed by Golang net/mail.ParseAddress + // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. + // - ipv4: an IPv4 IP as parsed by Golang net.ParseIP + // - ipv6: an IPv6 IP as parsed by Golang net.ParseIP + // - cidr: a CIDR as parsed by Golang net.ParseCIDR + // - mac: a MAC address as parsed by Golang net.ParseMAC + // - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ + // - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ + // - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ + // - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ + // - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" + // - isbn10: an ISBN10 number string like "0321751043" + // - isbn13: an ISBN13 number string like "978-0321751041" + // - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in + // - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ + // - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ + // - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" + // - byte: base64 encoded binary data + // - password: any kind of string + // - date: a date string like "2006-01-02" as defined by full-date in RFC3339 + // - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format + // - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. + optional string format = 6; + + optional string title = 7; + + // default is a default value for undefined object fields. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. 
+ optional JSON default = 8; + + optional double maximum = 9; + + optional bool exclusiveMaximum = 10; + + optional double minimum = 11; + + optional bool exclusiveMinimum = 12; + + optional int64 maxLength = 13; + + optional int64 minLength = 14; + + optional string pattern = 15; + + optional int64 maxItems = 16; + + optional int64 minItems = 17; + + optional bool uniqueItems = 18; + + optional double multipleOf = 19; + + repeated JSON enum = 20; + + optional int64 maxProperties = 21; + + optional int64 minProperties = 22; + + repeated string required = 23; + + optional JSONSchemaPropsOrArray items = 24; + + repeated JSONSchemaProps allOf = 25; + + repeated JSONSchemaProps oneOf = 26; + + repeated JSONSchemaProps anyOf = 27; + + optional JSONSchemaProps not = 28; + + map<string, JSONSchemaProps> properties = 29; + + optional JSONSchemaPropsOrBool additionalProperties = 30; + + map<string, JSONSchemaProps> patternProperties = 31; + + map<string, JSONSchemaPropsOrStringArray> dependencies = 32; + + optional JSONSchemaPropsOrBool additionalItems = 33; + + map<string, JSONSchemaProps> definitions = 34; + + optional ExternalDocumentation externalDocs = 35; + + optional JSON example = 36; + + optional bool nullable = 37; + + // x-kubernetes-preserve-unknown-fields stops the API server + // decoding step from pruning fields which are not specified + // in the validation schema. This affects fields recursively, + // but switches back to normal pruning behaviour if nested + // properties or additionalProperties are specified in the schema. + // This can either be true or undefined. False is forbidden. + optional bool xKubernetesPreserveUnknownFields = 38; + + // x-kubernetes-embedded-resource defines that the value is an + // embedded Kubernetes runtime.Object, with TypeMeta and + // ObjectMeta. The type must be object. It is allowed to further + // restrict the embedded object. kind, apiVersion and metadata + // are validated automatically. x-kubernetes-preserve-unknown-fields + // is allowed to be true, but does not have to be if the object + // is fully specified (up to kind, apiVersion, metadata). + optional bool xKubernetesEmbeddedResource = 39; + + // x-kubernetes-int-or-string specifies that this value is + // either an integer or a string. If this is true, an empty + // type is allowed and type as child of anyOf is permitted + // if following one of the following patterns: + // + // 1) anyOf: + // - type: integer + // - type: string + // 2) allOf: + // - anyOf: + // - type: integer + // - type: string + // - ... zero or more + optional bool xKubernetesIntOrString = 40; + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // The properties specified must either be required or have a default value, + // to ensure those properties are present for all list items. + // + // +optional + repeated string xKubernetesListMapKeys = 41; + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...).
+ // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + // array with x-kubernetes-list-type `atomic`. + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + optional string xKubernetesListType = 42; + + // x-kubernetes-map-type annotates an object to further describe its topology. + // This extension must only be used when type is object and may have 2 possible values: + // + // 1) `granular`: + // These maps are actual maps (key-value pairs) and each fields are independent + // from each other (they can each be manipulated by separate actors). This is + // the default behaviour for all maps. + // 2) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic maps will be entirely replaced when updated. + // +optional + optional string xKubernetesMapType = 43; + + // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. + // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. + // +patchMergeKey=rule + // +patchStrategy=merge + // +listType=map + // +listMapKey=rule + repeated ValidationRule xKubernetesValidations = 44; +} + +// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps +// or an array of JSONSchemaProps. Mainly here for serialization purposes. +message JSONSchemaPropsOrArray { + optional JSONSchemaProps schema = 1; + + repeated JSONSchemaProps jSONSchemas = 2; +} + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +message JSONSchemaPropsOrBool { + optional bool allows = 1; + + optional JSONSchemaProps schema = 2; +} + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +message JSONSchemaPropsOrStringArray { + optional JSONSchemaProps schema = 1; + + repeated string property = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // namespace is the namespace of the service. + // Required + optional string namespace = 1; + + // name is the name of the service. + // Required + optional string name = 2; + + // path is an optional URL path at which the webhook will be contacted. + // +optional + optional string path = 3; + + // port is an optional service port at which the webhook will be contacted. + // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. + // +optional + optional int32 port = 4; +} + +// ValidationRule describes a validation rule written in the CEL expression language. +message ValidationRule { + // Rule represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. + // The `self` variable in the CEL expression is bound to the scoped value. 
+ // Example: + // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} + // + // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable + // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as + // absent fields in CEL expressions. + // If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map + // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map + // are accessible via CEL macros and functions such as `self.all(...)`. + // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and + // functions. + // If the Rule is scoped to a scalar, `self` is bound to the scalar value. + // Examples: + // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} + // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} + // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. + // + // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL + // expressions. This includes: + // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. + // - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as: + // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true + // - An array where the items schema is of an "unknown type" + // - An object where the additionalProperties schema is of an "unknown type" + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} + // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} + // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} + // + // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. + optional string rule = 1; + + // Message represents the message displayed when validation fails. The message is required if the Rule contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + optional string message = 2; + + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + optional string messageExpression = 3; + + // reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. + // The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. + // The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate". + // If not set, default to use "FieldValueInvalid". + // All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid. + // +optional + optional string reason = 4; + + // fieldPath represents the field path returned when the validation fails. + // It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. + // e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` + // If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` + // It does not support list numeric index. + // It supports child operation to refer to an existing field currently. 
Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. + // Numeric index of array is not supported. + // For field name which contains special characters, use `['specialName']` to refer the field name. + // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` + // +optional + optional string fieldPath = 5; + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + optional bool optionalOldSelf = 6; +} + +// WebhookClientConfig contains the information to make a TLS connection with the webhook. +message WebhookClientConfig { + // url gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // service is a reference to the service for this webhook. Either + // service or url must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go new file mode 100644 index 000000000..44941d82e --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go @@ -0,0 +1,136 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "bytes" + "errors" + + "k8s.io/apimachinery/pkg/util/json" +) + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrBool + switch { + case len(data) == 0: + case data[0] == '{': + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Allows = true + nw.Schema = &sch + case len(data) == 4 && string(data) == "true": + nw.Allows = true + case len(data) == 5 && string(data) == "false": + nw.Allows = false + default: + return errors.New("boolean or JSON schema expected") + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw JSONSchemaPropsOrStringArray + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return json.Marshal(s.JSONSchemas) + } + return json.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { + var nw JSONSchemaPropsOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch JSONSchemaProps + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.JSONSchemas); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s JSON) MarshalJSON() ([]byte, error) { + if len(s.Raw) > 0 { + return s.Raw, nil + } + return []byte("null"), nil + +} + +func (s *JSON) UnmarshalJSON(data []byte) error { + if len(data) > 0 && !bytes.Equal(data, nullLiteral) { + s.Raw = data + } + return nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go new file mode 100644 index 000000000..ac807211b --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiextensions.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CustomResourceDefinition{}, + &CustomResourceDefinitionList{}, + &ConversionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go new file mode 100644 index 000000000..db445b10d --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -0,0 +1,531 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// ConversionStrategyType describes different conversion types. +type ConversionStrategyType string + +const ( + // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. + // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. + // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. 
For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. + KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" + + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. + NoneConverter ConversionStrategyType = "None" + // WebhookConverter is a converter that calls to an external webhook to convert the CR. + WebhookConverter ConversionStrategyType = "Webhook" +) + +// CustomResourceDefinitionSpec describes how a user wants their resource to appear +type CustomResourceDefinitionSpec struct { + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis///...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` + // names specify the resource and kind names for the custom resource. + Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. + Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions. + // Top-level and per-version schemas are mutually exclusive. + // +optional + Validation *CustomResourceValidation `json:"validation,omitempty" protobuf:"bytes,5,opt,name=validation"` + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. + // Top-level and per-version subresources are mutually exclusive. + // +optional + Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,6,opt,name=subresources"` + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. 
+ // +optional + Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. + // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. + // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` + + // conversion defines conversion settings for the CRD. + // +optional + Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` + + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details. + // +optional + PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` +} + +// CustomResourceConversion describes how to convert different versions of a CR. +type CustomResourceConversion struct { + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. + Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` + + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. + // +optional + WebhookClientConfig *WebhookClientConfig `json:"webhookClientConfig,omitempty" protobuf:"bytes,2,name=webhookClientConfig"` + + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in + // the list which it supports. If none of the versions specified in this list + // are supported by API server, conversion will fail for the custom resource. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + // Defaults to `["v1beta1"]`. + // +optional + ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty" protobuf:"bytes,3,rep,name=conversionReviewVersions"` +} + +// WebhookClientConfig contains the information to make a TLS connection with the webhook. 
+type WebhookClientConfig struct {
+	// url gives the location of the webhook, in standard URL form
+	// (`scheme://host:port/path`). Exactly one of `url` or `service`
+	// must be specified.
+	//
+	// The `host` should not refer to a service running in the cluster; use
+	// the `service` field instead. The host might be resolved via external
+	// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+	// in-cluster DNS as that would be a layering violation). `host` may
+	// also be an IP address.
+	//
+	// Please note that using `localhost` or `127.0.0.1` as a `host` is
+	// risky unless you take great care to run this webhook on all hosts
+	// which run an apiserver which might need to make calls to this
+	// webhook. Such installs are likely to be non-portable, i.e., not easy
+	// to turn up in a new cluster.
+	//
+	// The scheme must be "https"; the URL must begin with "https://".
+	//
+	// A path is optional, and if present may be any string permissible in
+	// a URL. You may use the path to pass an arbitrary string to the
+	// webhook, for example, a cluster identifier.
+	//
+	// Attempting to use a user or basic auth, e.g. "user:password@", is not
+	// allowed. Fragments ("#...") and query parameters ("?...") are not
+	// allowed, either.
+	//
+	// +optional
+	URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"`
+
+	// service is a reference to the service for this webhook. Either
+	// service or url must be specified.
+	//
+	// If the webhook is running within the cluster, then you should use `service`.
+	//
+	// +optional
+	Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`
+
+	// caBundle is a PEM-encoded CA bundle which will be used to validate the webhook's server certificate.
+	// If unspecified, system trust roots on the apiserver are used.
+	// +optional
+	CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+	// namespace is the namespace of the service.
+	// Required
+	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+	// name is the name of the service.
+	// Required
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// path is an optional URL path at which the webhook will be contacted.
+	// +optional
+	Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"`
+
+	// port is an optional service port at which the webhook will be contacted.
+	// `port` should be a valid port number (1-65535, inclusive).
+	// Defaults to 443 for backward compatibility.
+	// +optional
+	Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"`
+}
+
+// CustomResourceDefinitionVersion describes a version for CRD.
+type CustomResourceDefinitionVersion struct {
+	// name is the version name, e.g. "v1", "v2beta1", etc.
+	// The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// served is a flag enabling/disabling this version from being served via REST APIs.
+	Served bool `json:"served" protobuf:"varint,2,opt,name=served"`
+	// storage indicates this version should be used when persisting custom resources to storage.
+	// There must be exactly one version with storage=true.
+	Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"`
+	// deprecated indicates this version of the custom resource API is deprecated.
+	// When set to true, API requests to this version receive a warning header in the server response.
+	// Defaults to false.
+	// +optional
+	Deprecated bool `json:"deprecated,omitempty" protobuf:"varint,7,opt,name=deprecated"`
+	// deprecationWarning overrides the default warning returned to API clients.
+	// May only be set when `deprecated` is true.
+	// The default warning indicates this version is deprecated and recommends use
+	// of the newest served version of equal or greater stability, if one exists.
+	// +optional
+	DeprecationWarning *string `json:"deprecationWarning,omitempty" protobuf:"bytes,8,opt,name=deprecationWarning"`
+	// schema describes the schema used for validation and pruning of this version of the custom resource.
+	// Top-level and per-version schemas are mutually exclusive.
+	// Per-version schemas must not all be set to identical values (top-level validation schema should be used instead).
+	// +optional
+	Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"`
+	// subresources specify what subresources this version of the defined custom resource has.
+	// Top-level and per-version subresources are mutually exclusive.
+	// Per-version subresources must not all be set to identical values (top-level subresources should be used instead).
+	// +optional
+	Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"`
+	// additionalPrinterColumns specifies additional columns returned in Table output.
+	// See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details.
+	// Top-level and per-version columns are mutually exclusive.
+	// Per-version columns must not all be set to identical values (top-level columns should be used instead).
+	// If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used.
+	// +optional
+	AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"`
+}
+
+// CustomResourceColumnDefinition specifies a column for server-side printing.
+type CustomResourceColumnDefinition struct {
+	// name is a human-readable name for the column.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// type is an OpenAPI type definition for this column.
+	// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.
+	Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
+	// format is an optional OpenAPI type definition for this column. The 'name' format is applied
+	// to the primary identifier column to assist clients in identifying the column that is the resource name.
+	// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.
+	// +optional
+	Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"`
+	// description is a human-readable description of this column.
+	// +optional
+	Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"`
+	// priority is an integer defining the relative importance of this column compared to others. Lower
+	// numbers are considered higher priority. Columns that may be omitted in limited-space scenarios
+	// should be given a priority greater than 0.
+	// +optional
+	Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"`
+	// JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against
+	// each custom resource to produce the value for this column.
+	JSONPath string `json:"JSONPath" protobuf:"bytes,6,opt,name=JSONPath"`
+}
+
+// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition
+type CustomResourceDefinitionNames struct {
+	// plural is the plural name of the resource to serve.
+	// The custom resources are served under `/apis/<group>/<version>/.../<plural>`.
+	// Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`).
+	// Must be all lowercase.
+	Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"`
+	// singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.
+	// +optional
+	Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"`
+	// shortNames are short names for the resource, exposed in API discovery documents,
+	// and used by clients to support invocations like `kubectl get <shortname>`.
+	// It must be all lowercase.
+	// +optional
+	ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"`
+	// kind is the serialized kind of the resource. It is normally CamelCase and singular.
+	// Custom resource instances will use this value as the `kind` attribute in API calls.
+	Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"`
+	// listKind is the serialized kind of the list for this resource. Defaults to "`kind`List".
+	// +optional
+	ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"`
+	// categories is a list of grouped resources this custom resource belongs to (e.g. 'all').
+	// This is published in API discovery documents, and used by clients to support invocations like
+	// `kubectl get all`.
+	// +optional
+	Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"`
+}
+
+// ResourceScope is an enum defining the different scopes available to a custom resource.
+type ResourceScope string
+
+const (
+	ClusterScoped   ResourceScope = "Cluster"
+	NamespaceScoped ResourceScope = "Namespaced"
+)
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type
+type CustomResourceDefinitionConditionType string
+
+const (
+	// Established means that the resource has become active. A resource is established when all names are
+	// accepted without a conflict for the first time. A resource stays established until deleted, even during
+	// a later NamesAccepted due to changed names. Note that not all names can be changed.
+	Established CustomResourceDefinitionConditionType = "Established"
+	// NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in
+	// the group and are therefore accepted.
+	NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted"
+	// NonStructuralSchema means that one or more OpenAPI schema is not structural.
+	//
+	// A schema is structural if it specifies types for all values, with the only exceptions of those with
+	// - x-kubernetes-int-or-string: true — for fields which can be integer or string
+	// - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values
+	// and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extensions
+	// specified under allOf, anyOf, oneOf or not.
+	//
+	// Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be
+	// available for non-structural CRDs:
+	// - pruning
+	// - defaulting
+	// - read-only
+	// - OpenAPI publishing
+	// - webhook conversion
+	NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema"
+	// Terminating means that the CustomResourceDefinition has been deleted and is cleaning up.
+	Terminating CustomResourceDefinitionConditionType = "Terminating"
+	// KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs
+	// outside those groups, this condition will not be set. For CRDs inside those groups, the condition will
+	// be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false.
+	// See https://github.com/kubernetes/enhancements/pull/1111 for more details.
+	KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant"
+)
+
+// CustomResourceDefinitionCondition contains details for the current condition of this CustomResourceDefinition.
+type CustomResourceDefinitionCondition struct {
+	// type is the type of the condition. Types include Established, NamesAccepted and Terminating.
+	Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"`
+	// status is the status of the condition.
+	// Can be True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// lastTransitionTime is the last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// reason is a unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// message is a human-readable message indicating details about the last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition.
+type CustomResourceDefinitionStatus struct {
+	// conditions indicate state for particular aspects of a CustomResourceDefinition.
+	// +optional
+	// +listType=map
+	// +listMapKey=type
+	Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"`
+
+	// acceptedNames are the names that are actually being used to serve discovery.
+	// They may be different than the names in spec.
+	// +optional
+	AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"`
+
+	// storedVersions lists all versions of CustomResources that were ever persisted. Tracking these
+	// versions allows a migration path for stored versions in etcd. The field is mutable
+	// so a migration controller can finish a migration to another version (ensuring
+	// no old objects are left in storage), and then remove the rest of the
+	// versions from this list.
+	// Versions may not be removed from `spec.versions` while they exist in this list.
+	// +optional
+	StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"`
+}
+
+// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of
+// a CustomResourceDefinition
+const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.7
+// +k8s:prerelease-lifecycle-gen:deprecated=1.16
+// +k8s:prerelease-lifecycle-gen:removed=1.22
+// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,CustomResourceDefinition
+
+// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format
+// <.spec.name>.<.spec.group>.
+// Deprecated in v1.16, planned for removal in v1.22. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead.
+type CustomResourceDefinition struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec describes how the user wants the resources to appear
+	Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// status indicates the actual state of the CustomResourceDefinition
+	// +optional
+	Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.7
+// +k8s:prerelease-lifecycle-gen:deprecated=1.16
+// +k8s:prerelease-lifecycle-gen:removed=1.22
+// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,CustomResourceDefinitionList
+
+// CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
+type CustomResourceDefinitionList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Standard object's metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items list individual CustomResourceDefinition objects
+	Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// CustomResourceValidation is a list of validation methods for CustomResources.
+type CustomResourceValidation struct {
+	// openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning.
+	// +optional
+	OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"`
+}
+
+// CustomResourceSubresources defines the status and scale subresources for CustomResources.
+type CustomResourceSubresources struct {
+	// status indicates the custom resource should serve a `/status` subresource.
+	// When enabled:
+	// 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object.
+	// 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object.
+	// +optional
+	Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"`
+	// scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object.
+	// +optional
+	Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"`
+}
+
+// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources.
+// Status is represented by the `.status` JSON path inside of a CustomResource. When set,
+// * exposes a /status subresource for the custom resource
+// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza
+// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza
+type CustomResourceSubresourceStatus struct{}
+
+// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.
+type CustomResourceSubresourceScale struct {
+	// specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`.
+	// Only JSON paths without the array notation are allowed.
+	// Must be a JSON Path under `.spec`.
+	// If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET.
+	SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"`
+	// statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`.
+	// Only JSON paths without the array notation are allowed.
+	// Must be a JSON Path under `.status`.
+	// If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource
+	// will default to 0.
+	StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"`
+	// labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`.
+	// Only JSON paths without the array notation are allowed.
+	// Must be a JSON Path under `.status` or `.spec`.
+	// Must be set to work with HorizontalPodAutoscaler.
+	// The field pointed to by this JSON path must be a string field (not a complex selector struct)
+	// which contains a serialized label selector in string form.
+	// More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource
+	// If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale`
+	// subresource will default to the empty string.
+	// +optional
+	LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.13
+// +k8s:prerelease-lifecycle-gen:deprecated=1.19
+// This API is never served. It is used for outbound requests from apiservers. This will ensure it never gets served accidentally
+// and having the generator against this group will protect future APIs which may be served.
+// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,ConversionReview
+
+// ConversionReview describes a conversion request/response.
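+//
+// Editor's note, illustrative only (not upstream documentation): a conversion
+// webhook handler is expected to echo the request UID and return the converted
+// objects, roughly as below; convertAll is a hypothetical helper that rewrites
+// each object to the desired apiVersion:
+//
+//	func convert(review *ConversionReview) {
+//		review.Response = &ConversionResponse{
+//			UID:              review.Request.UID, // must be copied from the request
+//			ConvertedObjects: convertAll(review.Request.Objects, review.Request.DesiredAPIVersion),
+//			Result:           metav1.Status{Status: metav1.StatusSuccess},
+//		}
+//	}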
+type ConversionReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// request describes the attributes for the conversion request.
+	// +optional
+	Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"`
+	// response describes the attributes for the conversion response.
+	// +optional
+	Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"`
+}
+
+// ConversionRequest describes the conversion request parameters.
+type ConversionRequest struct {
+	// uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are
+	// otherwise identical (parallel requests, etc).
+	// The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request.
+	// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+	UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"`
+	// desiredAPIVersion is the version to convert given objects to, e.g. "myapi.example.com/v1".
+	DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"`
+	// objects is the list of custom resource objects to be converted.
+	Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"`
+}
+
+// ConversionResponse describes a conversion response.
+type ConversionResponse struct {
+	// uid is an identifier for the individual request/response.
+	// This should be copied over from the corresponding `request.uid`.
+	UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"`
+	// convertedObjects is the list of converted versions of `request.objects` if the `result` is successful, otherwise empty.
+	// The webhook is expected to set the `apiVersion` of these objects to the `request.desiredAPIVersion`. The list
+	// must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace).
+	// The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored.
+	ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"`
+	// result contains the result of conversion with extra details if the conversion failed. `result.status` determines if
+	// the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the
+	// conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set
+	// `result.status` to `Failure`, provide more details in `result.message`, and return HTTP status 200. The `result.message`
+	// will be used to construct an error message for the end user.
+	Result metav1.Status `json:"result" protobuf:"bytes,3,name=result"`
+}
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go
new file mode 100644
index 000000000..24c45bb04
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go
@@ -0,0 +1,412 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// FieldValueErrorReason is a machine-readable value providing more detail about why a field failed the validation.
+// +enum
+type FieldValueErrorReason string
+
+const (
+	// FieldValueRequired is used to report required values that are not
+	// provided (e.g. empty strings, null values, or empty arrays).
+	FieldValueRequired FieldValueErrorReason = "FieldValueRequired"
+	// FieldValueDuplicate is used to report collisions of values that must be
+	// unique (e.g. unique IDs).
+	FieldValueDuplicate FieldValueErrorReason = "FieldValueDuplicate"
+	// FieldValueInvalid is used to report malformed values (e.g. failed regex
+	// match, too long, out of bounds).
+	FieldValueInvalid FieldValueErrorReason = "FieldValueInvalid"
+	// FieldValueForbidden is used to report valid (as per formatting rules)
+	// values which would be accepted under some conditions, but which are not
+	// permitted by the current conditions (such as security policy).
+	FieldValueForbidden FieldValueErrorReason = "FieldValueForbidden"
+)
+
+// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).
+type JSONSchemaProps struct {
+	ID          string        `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"`
+	Schema      JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"`
+	Ref         *string       `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"`
+	Description string        `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"`
+	Type        string        `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"`
+
+	// format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:
+	//
+	// - bsonobjectid: a bson object ID, i.e. a 24-character hex string
+	// - uri: a URI as parsed by Golang net/url.ParseRequestURI
+	// - email: an email address as parsed by Golang net/mail.ParseAddress
+	// - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	// - ipv4: an IPv4 IP as parsed by Golang net.ParseIP
+	// - ipv6: an IPv6 IP as parsed by Golang net.ParseIP
+	// - cidr: a CIDR as parsed by Golang net.ParseCIDR
+	// - mac: a MAC address as parsed by Golang net.ParseMAC
+	// - uuid: a UUID that allows uppercase, defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$
+	// - uuid3: a UUID3 that allows uppercase, defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$
+	// - uuid4: a UUID4 that allows uppercase, defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$
+	// - uuid5: a UUID5 that allows uppercase, defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$
+	// - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041"
+	// - isbn10: an ISBN10 number string like "0321751043"
+	// - isbn13: an ISBN13 number string like "978-0321751041"
+	// - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non-digit characters mixed in
+	// - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$
+	// - hexcolor: a hexadecimal color code like "#FFFFFF" following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+	// - rgbcolor: an RGB color code like "rgb(255,255,255)"
+	// - byte: base64 encoded binary data
+	// - password: any kind of string
+	// - date: a date string like "2006-01-02" as defined by full-date in RFC3339
+	// - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format
+	// - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.
+	Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"`
+
+	Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"`
+	// default is a default value for undefined object fields.
+	// Defaulting is a beta feature under the CustomResourceDefaulting feature gate.
+	// CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API.
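+	//
+	// Editor's note (illustrative, not upstream text): a string property with a
+	// default could be declared roughly as
+	//
+	//	props := JSONSchemaProps{
+	//		Type:    "string",
+	//		Default: &JSON{Raw: []byte(`"medium"`)},
+	//	}
+	//
+	// where the raw bytes must themselves be valid JSON (here, a quoted string).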
+	Default              *JSON                      `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"`
+	Maximum              *float64                   `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"`
+	ExclusiveMaximum     bool                       `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"`
+	Minimum              *float64                   `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"`
+	ExclusiveMinimum     bool                       `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"`
+	MaxLength            *int64                     `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"`
+	MinLength            *int64                     `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"`
+	Pattern              string                     `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"`
+	MaxItems             *int64                     `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"`
+	MinItems             *int64                     `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"`
+	UniqueItems          bool                       `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"`
+	MultipleOf           *float64                   `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"`
+	Enum                 []JSON                     `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"`
+	MaxProperties        *int64                     `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"`
+	MinProperties        *int64                     `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"`
+	Required             []string                   `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"`
+	Items                *JSONSchemaPropsOrArray    `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"`
+	AllOf                []JSONSchemaProps          `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"`
+	OneOf                []JSONSchemaProps          `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"`
+	AnyOf                []JSONSchemaProps          `json:"anyOf,omitempty" protobuf:"bytes,27,rep,name=anyOf"`
+	Not                  *JSONSchemaProps           `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"`
+	Properties           map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"`
+	AdditionalProperties *JSONSchemaPropsOrBool     `json:"additionalProperties,omitempty" protobuf:"bytes,30,opt,name=additionalProperties"`
+	PatternProperties    map[string]JSONSchemaProps `json:"patternProperties,omitempty" protobuf:"bytes,31,rep,name=patternProperties"`
+	Dependencies         JSONSchemaDependencies     `json:"dependencies,omitempty" protobuf:"bytes,32,opt,name=dependencies"`
+	AdditionalItems      *JSONSchemaPropsOrBool     `json:"additionalItems,omitempty" protobuf:"bytes,33,opt,name=additionalItems"`
+	Definitions          JSONSchemaDefinitions      `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"`
+	ExternalDocs         *ExternalDocumentation     `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"`
+	Example              *JSON                      `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"`
+	Nullable             bool                       `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"`
+
+	// x-kubernetes-preserve-unknown-fields stops the API server
+	// decoding step from pruning fields which are not specified
+	// in the validation schema. This affects fields recursively,
+	// but switches back to normal pruning behaviour if nested
+	// properties or additionalProperties are specified in the schema.
+	// This can either be true or undefined. False is forbidden.
+	XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"`
+
+	// x-kubernetes-embedded-resource defines that the value is an
+	// embedded Kubernetes runtime.Object, with TypeMeta and
+	// ObjectMeta. The type must be object. It is allowed to further
+	// restrict the embedded object. kind, apiVersion and metadata
+	// are validated automatically. x-kubernetes-preserve-unknown-fields
+	// is allowed to be true, but does not have to be if the object
+	// is fully specified (up to kind, apiVersion, metadata).
+	XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"`
+
+	// x-kubernetes-int-or-string specifies that this value is
+	// either an integer or a string. If this is true, an empty
+	// type is allowed and type as a child of anyOf is permitted
+	// when it follows one of the following patterns:
+	//
+	// 1) anyOf:
+	//    - type: integer
+	//    - type: string
+	// 2) allOf:
+	//    - anyOf:
+	//      - type: integer
+	//      - type: string
+	//    - ... zero or more
+	XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"`
+
+	// x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used
+	// as the index of the map.
+	//
+	// This tag MUST only be used on lists that have the "x-kubernetes-list-type"
+	// extension set to "map". Also, the values specified for this attribute must
+	// be a scalar typed field of the child structure (no nesting is supported).
+	//
+	// The properties specified must either be required or have a default value,
+	// to ensure those properties are present for all list items.
+	//
+	// +optional
+	XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"`
+
+	// x-kubernetes-list-type annotates an array to further describe its topology.
+	// This extension must only be used on lists and may have 3 possible values:
+	//
+	// 1) `atomic`: the list is treated as a single entity, like a scalar.
+	//    Atomic lists will be entirely replaced when updated. This extension
+	//    may be used on any type of list (struct, scalar, ...).
+	// 2) `set`:
+	//    Sets are lists that must not have multiple items with the same value. Each
+	//    value must be a scalar, an object with x-kubernetes-map-type `atomic` or an
+	//    array with x-kubernetes-list-type `atomic`.
+	// 3) `map`:
+	//    These lists are like maps in that their elements have a non-index key
+	//    used to identify them. Order is preserved upon merge. The map tag
+	//    must only be used on a list with elements of type object.
+	// Defaults to atomic for arrays.
+	// +optional
+	XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"`
+
+	// x-kubernetes-map-type annotates an object to further describe its topology.
+	// This extension must only be used when type is object and may have 2 possible values:
+	//
+	// 1) `granular`:
+	//    These maps are actual maps (key-value pairs) and each field is independent
+	//    of the others (they can each be manipulated by separate actors). This is
+	//    the default behaviour for all maps.
+	// 2) `atomic`: the map is treated as a single entity, like a scalar.
+	//    Atomic maps will be entirely replaced when updated.
+	// +optional
+	XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"`
+
+	// x-kubernetes-validations describes a list of validation rules written in the CEL expression language.
+	// This field is alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.
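+	//
+	// Editor's note: as a rough, non-authoritative illustration, a CEL rule with
+	// a custom failure message could be attached to a schema as below; the
+	// replica field names are invented:
+	//
+	//	props.XValidations = ValidationRules{{
+	//		Rule:    "self.minReplicas <= self.maxReplicas",
+	//		Message: "minReplicas must not be greater than maxReplicas",
+	//	}}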
+	// +patchMergeKey=rule
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=rule
+	XValidations ValidationRules `json:"x-kubernetes-validations,omitempty" patchStrategy:"merge" patchMergeKey:"rule" protobuf:"bytes,44,rep,name=xKubernetesValidations"`
+}
+
+// ValidationRules describes a list of validation rules written in the CEL expression language.
+type ValidationRules []ValidationRule
+
+// ValidationRule describes a validation rule written in the CEL expression language.
+type ValidationRule struct {
+	// Rule represents the expression which will be evaluated by CEL.
+	// ref: https://github.com/google/cel-spec
+	// The Rule is scoped to the location of the x-kubernetes-validations extension in the schema.
+	// The `self` variable in the CEL expression is bound to the scoped value.
+	// Example:
+	// - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"}
+	//
+	// If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable
+	// via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as
+	// absent fields in CEL expressions.
+	// If the Rule is scoped to an object with additionalProperties (i.e. a map), the values of the map
+	// are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map
+	// are accessible via CEL macros and functions such as `self.all(...)`.
+	// If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and
+	// functions.
+	// If the Rule is scoped to a scalar, `self` is bound to the scalar value.
+	// Examples:
+	// - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"}
+	// - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"}
+	// - Rule scoped to a string value: {"rule": "self.startsWith('kube')"}
+	//
+	// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+	// object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.
+	//
+	// Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL
+	// expressions. This includes:
+	// - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields.
+	// - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as:
+	//   - A schema with no type and x-kubernetes-preserve-unknown-fields set to true
+	//   - An array where the items schema is of an "unknown type"
+	//   - An object where the additionalProperties schema is of an "unknown type"
+	//
+	// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+	// Accessible property names are escaped according to the following rules when accessed in the expression:
+	// - '__' escapes to '__underscores__'
+	// - '.' escapes to '__dot__'
+	// - '-' escapes to '__dash__'
+	// - '/' escapes to '__slash__'
+	// - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
+	//   "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
+	//   "import", "let", "loop", "package", "namespace", "return".
+	// Examples:
+	// - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"}
+	// - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"}
+	// - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"}
+	//
+	// Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
+	// Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type:
+	// - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+	//   non-intersecting elements in `Y` are appended, retaining their partial order.
+	// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+	//   are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+	//   non-intersecting keys are appended, retaining their partial order.
+	//
+	// If `rule` makes use of the `oldSelf` variable it is implicitly a
+	// `transition rule`.
+	//
+	// By default, the `oldSelf` variable is the same type as `self`.
+	// When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional
+	// variable whose value() is the same type as `self`.
+	// See the documentation for the `optionalOldSelf` field for details.
+	//
+	// Transition rules by default are applied only on UPDATE requests and are
+	// skipped if an old value could not be found. You can opt a transition
+	// rule into unconditional evaluation by setting `optionalOldSelf` to true.
+	//
+	Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"`
+	// Message represents the message displayed when validation fails. The message is required if the Rule contains
+	// line breaks. The message must not contain line breaks.
+	// If unset, the message is "failed rule: {Rule}".
+	// e.g. "must be a URL with the host matching spec.host"
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+	// MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
+	// Since messageExpression is used as a failure message, it must evaluate to a string.
+	// If both message and messageExpression are present on a rule, then messageExpression will be used if validation
+	// fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
+	// as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
+	// that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
+	// the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
+	// messageExpression has access to all the same variables as the rule; the only difference is the return type.
+	// Example:
+	// "x must be less than max ("+string(self.max)+")"
+	// +optional
+	MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,3,opt,name=messageExpression"`
+	// reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule.
+	// The HTTP status code returned to the caller will match the reason of the first failed validation rule.
+	// The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate".
+	// If not set, it defaults to "FieldValueInvalid".
+	// All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.
+	// +optional
+	Reason *FieldValueErrorReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// fieldPath represents the field path returned when the validation fails.
+	// It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field.
+	// e.g. when a validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`.
+	// If the validation checks that two lists must have unique attributes, the fieldPath could be set to either of the lists: e.g. `.testList`.
+	// Numeric indexes into lists are not supported.
+	// It currently supports child operations that refer to an existing field. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info.
+	// For a field name which contains special characters, use `['specialName']` to refer to the field name.
+	// e.g. for an attribute `foo.34$` appearing in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`.
+	// +optional
+	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,5,opt,name=fieldPath"`
+
+	// optionalOldSelf is used to opt a transition rule into evaluation
+	// even when the object is first created, or if the old object is
+	// missing the value.
+	//
+	// When enabled `oldSelf` will be a CEL optional whose value will be
+	// `None` if there is no old value, or when the object is initially created.
+	//
+	// You may check for presence of oldSelf using `oldSelf.hasValue()` and
+	// unwrap it after checking using `oldSelf.value()`. Check the CEL
+	// documentation for Optional types for more information:
+	// https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes
+	//
+	// May not be set unless `oldSelf` is used in `rule`.
+	//
+	// +featureGate=CRDValidationRatcheting
+	// +optional
+	OptionalOldSelf *bool `json:"optionalOldSelf,omitempty" protobuf:"bytes,6,opt,name=optionalOldSelf"`
+}
+
+// JSON represents any valid JSON value.
+// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.
+type JSON struct {
+	Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"`
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ JSON) OpenAPISchemaType() []string {
+	// TODO: return actual types when anyOf is supported
+	return nil
+}
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ JSON) OpenAPISchemaFormat() string { return "" }
+
+// JSONSchemaURL represents a schema url.
+type JSONSchemaURL string
+
+// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps
+// or an array of JSONSchemaProps. Mainly here for serialization purposes.
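+//
+// Editor's note, illustrative only: in JSON this corresponds to the two
+// accepted shapes of `items`, a single schema or a list of schemas:
+//
+//	"items": {"type": "string"}
+//	"items": [{"type": "string"}, {"type": "integer"}]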
+type JSONSchemaPropsOrArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrArray) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrArray) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. +// Defaults to true for the boolean property. +type JSONSchemaPropsOrBool struct { + Allows bool `protobuf:"varint,1,opt,name=allows"` + Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrBool) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrBool) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaDependencies represent a dependencies property. +type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray + +// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. +type JSONSchemaPropsOrStringArray struct { + Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` + Property []string `protobuf:"bytes,2,rep,name=property"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string { + // TODO: return actual types when anyOf is supported + return nil +} + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaFormat() string { return "" } + +// JSONSchemaDefinitions contains the models explicitly defined in this spec. +type JSONSchemaDefinitions map[string]JSONSchemaProps + +// ExternalDocumentation allows referencing an external resource for extended documentation. +type ExternalDocumentation struct { + Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"` + URL string `json:"url,omitempty" protobuf:"bytes,2,opt,name=url"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..fa6e0ef24 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -0,0 +1,1374 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	unsafe "unsafe"
+
+	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*CustomResourceColumnDefinition)(nil), (*apiextensions.CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(a.(*CustomResourceColumnDefinition), b.(*apiextensions.CustomResourceColumnDefinition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceColumnDefinition)(nil), (*CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(a.(*apiextensions.CustomResourceColumnDefinition), b.(*CustomResourceColumnDefinition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceDefinition)(nil), (*apiextensions.CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(a.(*CustomResourceDefinition), b.(*apiextensions.CustomResourceDefinition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinition)(nil), (*CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(a.(*apiextensions.CustomResourceDefinition), b.(*CustomResourceDefinition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionCondition)(nil), (*apiextensions.CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(a.(*CustomResourceDefinitionCondition), b.(*apiextensions.CustomResourceDefinitionCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionCondition)(nil), (*CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(a.(*apiextensions.CustomResourceDefinitionCondition), b.(*CustomResourceDefinitionCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionList)(nil), (*apiextensions.CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(a.(*CustomResourceDefinitionList), b.(*apiextensions.CustomResourceDefinitionList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionList)(nil), (*CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(a.(*apiextensions.CustomResourceDefinitionList), b.(*CustomResourceDefinitionList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionNames)(nil), (*apiextensions.CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(a.(*CustomResourceDefinitionNames), b.(*apiextensions.CustomResourceDefinitionNames), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionNames)(nil), (*CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(a.(*apiextensions.CustomResourceDefinitionNames), b.(*CustomResourceDefinitionNames), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionStatus)(nil), (*apiextensions.CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(a.(*CustomResourceDefinitionStatus), b.(*apiextensions.CustomResourceDefinitionStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionStatus)(nil), (*CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(a.(*apiextensions.CustomResourceDefinitionStatus), b.(*CustomResourceDefinitionStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionVersion)(nil), (*apiextensions.CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(a.(*CustomResourceDefinitionVersion), b.(*apiextensions.CustomResourceDefinitionVersion), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionVersion)(nil), (*CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(a.(*apiextensions.CustomResourceDefinitionVersion), b.(*CustomResourceDefinitionVersion), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceScale)(nil), (*apiextensions.CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(a.(*CustomResourceSubresourceScale), b.(*apiextensions.CustomResourceSubresourceScale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceScale)(nil), (*CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(a.(*apiextensions.CustomResourceSubresourceScale), b.(*CustomResourceSubresourceScale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceStatus)(nil), (*apiextensions.CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(a.(*CustomResourceSubresourceStatus), b.(*apiextensions.CustomResourceSubresourceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceStatus)(nil), (*CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(a.(*apiextensions.CustomResourceSubresourceStatus), b.(*CustomResourceSubresourceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceSubresources)(nil), (*apiextensions.CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(a.(*CustomResourceSubresources), b.(*apiextensions.CustomResourceSubresources), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresources)(nil), (*CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(a.(*apiextensions.CustomResourceSubresources), b.(*CustomResourceSubresources), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CustomResourceValidation)(nil), (*apiextensions.CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(a.(*CustomResourceValidation), b.(*apiextensions.CustomResourceValidation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceValidation)(nil), (*CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(a.(*apiextensions.CustomResourceValidation), b.(*CustomResourceValidation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExternalDocumentation)(nil), (*apiextensions.ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(a.(*ExternalDocumentation), b.(*apiextensions.ExternalDocumentation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.ExternalDocumentation)(nil), (*ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(a.(*apiextensions.ExternalDocumentation), b.(*ExternalDocumentation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*JSONSchemaProps)(nil), (*apiextensions.JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(a.(*JSONSchemaProps), b.(*apiextensions.JSONSchemaProps), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrArray)(nil), (*apiextensions.JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(a.(*JSONSchemaPropsOrArray), b.(*apiextensions.JSONSchemaPropsOrArray), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrArray)(nil), (*JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(a.(*apiextensions.JSONSchemaPropsOrArray), b.(*JSONSchemaPropsOrArray), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrBool)(nil), (*apiextensions.JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(a.(*JSONSchemaPropsOrBool), b.(*apiextensions.JSONSchemaPropsOrBool), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrBool)(nil), (*JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(a.(*apiextensions.JSONSchemaPropsOrBool), b.(*JSONSchemaPropsOrBool), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrStringArray)(nil), (*apiextensions.JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(a.(*JSONSchemaPropsOrStringArray), b.(*apiextensions.JSONSchemaPropsOrStringArray), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrStringArray)(nil), (*JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(a.(*apiextensions.JSONSchemaPropsOrStringArray), b.(*JSONSchemaPropsOrStringArray), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(a.(*apiextensions.ServiceReference), b.(*ServiceReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ValidationRule)(nil), (*apiextensions.ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(a.(*ValidationRule), b.(*apiextensions.ValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.ValidationRule)(nil), (*ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(a.(*apiextensions.ValidationRule), b.(*ValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookClientConfig)(nil), (*apiextensions.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(a.(*WebhookClientConfig), b.(*apiextensions.WebhookClientConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiextensions.WebhookClientConfig)(nil), (*WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(a.(*apiextensions.WebhookClientConfig), b.(*WebhookClientConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiextensions_JSON_To_v1beta1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Type = in.Type
+	out.Format = in.Format
+	out.Description = in.Description
+	out.Priority = in.Priority
+	out.JSONPath = in.JSONPath
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Type = in.Type
+	out.Format = in.Format
+	out.Description = in.Description
+	out.Priority = in.Priority
+	out.JSONPath = in.JSONPath
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error {
+	out.Strategy = apiextensions.ConversionStrategyType(in.Strategy)
+	if in.WebhookClientConfig != nil {
+		in, out := &in.WebhookClientConfig, &out.WebhookClientConfig
+		*out = new(apiextensions.WebhookClientConfig)
+		if err := Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.WebhookClientConfig = nil
+	}
+	out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions))
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error {
+	out.Strategy = ConversionStrategyType(in.Strategy)
+	if in.WebhookClientConfig != nil {
+		in, out := &in.WebhookClientConfig, &out.WebhookClientConfig
+		*out = new(WebhookClientConfig)
+		if err := Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.WebhookClientConfig = nil
+	}
+	out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions))
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error {
+	out.Type = apiextensions.CustomResourceDefinitionConditionType(in.Type)
+	out.Status = apiextensions.ConditionStatus(in.Status)
+	out.LastTransitionTime = in.LastTransitionTime
+	out.Reason = in.Reason
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error {
+	out.Type = CustomResourceDefinitionConditionType(in.Type)
+	out.Status = ConditionStatus(in.Status)
+	out.LastTransitionTime = in.LastTransitionTime
+	out.Reason = in.Reason
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]apiextensions.CustomResourceDefinition, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CustomResourceDefinition, len(*in))
+		for i := range *in {
+			if err := Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error {
+	out.Plural = in.Plural
+	out.Singular = in.Singular
+	out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames))
+	out.Kind = in.Kind
+	out.ListKind = in.ListKind
+	out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories))
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error {
+	out.Plural = in.Plural
+	out.Singular = in.Singular
+	out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames))
+	out.Kind = in.Kind
+	out.ListKind = in.ListKind
+	out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories))
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error {
+	out.Group = in.Group
+	out.Version = in.Version
+	if err := Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil {
+		return err
+	}
+	out.Scope = apiextensions.ResourceScope(in.Scope)
+	if in.Validation != nil {
+		in, out := &in.Validation, &out.Validation
+		*out = new(apiextensions.CustomResourceValidation)
+		if err := Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Validation = nil
+	}
+	out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources))
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]apiextensions.CustomResourceDefinitionVersion, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Versions = nil
+	}
+	out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns))
+	if in.Conversion != nil {
+		in, out := &in.Conversion, &out.Conversion
+		*out = new(apiextensions.CustomResourceConversion)
+		if err := Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Conversion = nil
+	}
+	out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields))
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error {
+	out.Group = in.Group
+	out.Version = in.Version
+	if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil {
+		return err
+	}
+	out.Scope = ResourceScope(in.Scope)
+	if in.Validation != nil {
+		in, out := &in.Validation, &out.Validation
+		*out = new(CustomResourceValidation)
+		if err := Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Validation = nil
+	}
+	out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources))
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]CustomResourceDefinitionVersion, len(*in))
+		for i := range *in {
+			if err := Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Versions = nil
+	}
+	out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns))
+	if in.Conversion != nil {
+		in, out := &in.Conversion, &out.Conversion
+		*out = new(CustomResourceConversion)
+		if err := Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Conversion = nil
+	}
+	out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields))
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error {
+	out.Conditions = *(*[]apiextensions.CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions))
+	if err := Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil {
+		return err
+	}
+	out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions))
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error {
+	out.Conditions = *(*[]CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions))
+	if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil {
+		return err
+	}
+	out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions))
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Served = in.Served
+	out.Storage = in.Storage
+	out.Deprecated = in.Deprecated
+	out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning))
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(apiextensions.CustomResourceValidation)
+		if err := Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Schema = nil
+	}
+	out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources))
+	out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns))
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Served = in.Served
+	out.Storage = in.Storage
+	out.Deprecated = in.Deprecated
+	out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning))
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(CustomResourceValidation)
+		if err := Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Schema = nil
+	}
+	out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources))
+	out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns))
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error {
+	out.SpecReplicasPath = in.SpecReplicasPath
+	out.StatusReplicasPath = in.StatusReplicasPath
+	out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath))
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error {
+	out.SpecReplicasPath = in.SpecReplicasPath
+	out.StatusReplicasPath = in.StatusReplicasPath
+	out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath))
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error {
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error {
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error {
+	out.Status = (*apiextensions.CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status))
+	out.Scale = (*apiextensions.CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale))
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error {
+	out.Status = (*CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status))
+	out.Scale = (*CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale))
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in, out, s)
+}
+
+func autoConvert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error {
+	if in.OpenAPIV3Schema != nil {
+		in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema
+		*out = new(apiextensions.JSONSchemaProps)
+		if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.OpenAPIV3Schema = nil
+	}
+	return nil
+}
+
+// Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation is an autogenerated conversion function.
+func Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error {
+	return autoConvert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in, out, s)
+}
+
+func autoConvert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error {
+	if in.OpenAPIV3Schema != nil {
+		in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema
+		*out = new(JSONSchemaProps)
+		if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.OpenAPIV3Schema = nil
+	}
+	return nil
+}
+
+// Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation is an autogenerated conversion function.
+func Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error {
+	return autoConvert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in, out, s)
+}
+
+func autoConvert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error {
+	out.Description = in.Description
+	out.URL = in.URL
+	return nil
+}
+
+// Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation is an autogenerated conversion function.
+func Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in, out, s)
+}
+
+func autoConvert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error {
+	out.Description = in.Description
+	out.URL = in.URL
+	return nil
+}
+
+// Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation is an autogenerated conversion function.
+func Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error {
+	return autoConvert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in, out, s)
+}
+
+func autoConvert_v1beta1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error {
+	// WARNING: in.Raw requires manual conversion: does not exist in peer-type
+	return nil
+}
+
+func autoConvert_apiextensions_JSON_To_v1beta1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error {
+	// FIXME: Type apiextensions.JSON is unsupported.
+	return nil
+}
+
+func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error {
+	out.ID = in.ID
+	out.Schema = apiextensions.JSONSchemaURL(in.Schema)
+	out.Ref = (*string)(unsafe.Pointer(in.Ref))
+	out.Description = in.Description
+	out.Type = in.Type
+	out.Format = in.Format
+	out.Title = in.Title
+	if in.Default != nil {
+		in, out := &in.Default, &out.Default
+		*out = new(apiextensions.JSON)
+		if err := Convert_v1beta1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Default = nil
+	}
+	out.Maximum = (*float64)(unsafe.Pointer(in.Maximum))
+	out.ExclusiveMaximum = in.ExclusiveMaximum
+	out.Minimum = (*float64)(unsafe.Pointer(in.Minimum))
+	out.ExclusiveMinimum = in.ExclusiveMinimum
+	out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength))
+	out.MinLength = (*int64)(unsafe.Pointer(in.MinLength))
+	out.Pattern = in.Pattern
+	out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems))
+	out.MinItems = (*int64)(unsafe.Pointer(in.MinItems))
+	out.UniqueItems = in.UniqueItems
+	out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf))
+	if in.Enum != nil {
+		in, out := &in.Enum, &out.Enum
+		*out = make([]apiextensions.JSON, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_JSON_To_apiextensions_JSON(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Enum = nil
+	}
+	out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties))
+	out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties))
+	out.Required = *(*[]string)(unsafe.Pointer(&in.Required))
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = new(apiextensions.JSONSchemaPropsOrArray)
+		if err := Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Items = nil
+	}
+	if in.AllOf != nil {
+		in, out := &in.AllOf, &out.AllOf
+		*out = make([]apiextensions.JSONSchemaProps, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.AllOf = nil
+	}
+	if in.OneOf != nil {
+		in, out := &in.OneOf, &out.OneOf
+		*out = make([]apiextensions.JSONSchemaProps, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.OneOf = nil
+	}
+	if in.AnyOf != nil {
+		in, out := &in.AnyOf, &out.AnyOf
+		*out = make([]apiextensions.JSONSchemaProps, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.AnyOf = nil
+	}
+	if in.Not != nil {
+		in, out := &in.Not, &out.Not
+		*out = new(apiextensions.JSONSchemaProps)
+		if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Not = nil
+	}
+	if in.Properties != nil {
+		in, out := &in.Properties, &out.Properties
+		*out = make(map[string]apiextensions.JSONSchemaProps, len(*in))
+		for key, val := range *in {
+			newVal := new(apiextensions.JSONSchemaProps)
+			if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.Properties = nil
+	}
+	if in.AdditionalProperties != nil {
+		in, out := &in.AdditionalProperties, &out.AdditionalProperties
+		*out = new(apiextensions.JSONSchemaPropsOrBool)
+		if err := Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.AdditionalProperties = nil
+	}
+	if in.PatternProperties != nil {
+		in, out := &in.PatternProperties, &out.PatternProperties
+		*out = make(map[string]apiextensions.JSONSchemaProps, len(*in))
+		for key, val := range *in {
+			newVal := new(apiextensions.JSONSchemaProps)
+			if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.PatternProperties = nil
+	}
+	if in.Dependencies != nil {
+		in, out := &in.Dependencies, &out.Dependencies
+		*out = make(apiextensions.JSONSchemaDependencies, len(*in))
+		for key, val := range *in {
+			newVal := new(apiextensions.JSONSchemaPropsOrStringArray)
+			if err := Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.Dependencies = nil
+	}
+	if in.AdditionalItems != nil {
+		in, out := &in.AdditionalItems, &out.AdditionalItems
+		*out = new(apiextensions.JSONSchemaPropsOrBool)
+		if err := Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.AdditionalItems = nil
+	}
+	if in.Definitions != nil {
+		in, out := &in.Definitions, &out.Definitions
+		*out = make(apiextensions.JSONSchemaDefinitions, len(*in))
+		for key, val := range *in {
+			newVal := new(apiextensions.JSONSchemaProps)
+			if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.Definitions = nil
+	}
+	out.ExternalDocs = (*apiextensions.ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs))
+	if in.Example != nil {
+		in, out := &in.Example, &out.Example
+		*out = new(apiextensions.JSON)
+		if err := Convert_v1beta1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Example = nil
+	}
+	out.Nullable = in.Nullable
+	out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields))
+	out.XEmbeddedResource = in.XEmbeddedResource
+	out.XIntOrString = in.XIntOrString
+	out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys))
+	out.XListType = (*string)(unsafe.Pointer(in.XListType))
+	out.XMapType = (*string)(unsafe.Pointer(in.XMapType))
+	out.XValidations = *(*apiextensions.ValidationRules)(unsafe.Pointer(&in.XValidations))
+	return nil
+}
+
+// Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps is an autogenerated conversion function.
+func Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error {
+	return autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in, out, s)
+}
+
+func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error {
+	out.ID = in.ID
+	out.Schema = JSONSchemaURL(in.Schema)
+	out.Ref = (*string)(unsafe.Pointer(in.Ref))
+	out.Description = in.Description
+	out.Type = in.Type
+	out.Nullable = in.Nullable
+	out.Format = in.Format
+	out.Title = in.Title
+	if in.Default != nil {
+		in, out := &in.Default, &out.Default
+		*out = new(JSON)
+		if err := Convert_apiextensions_JSON_To_v1beta1_JSON(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Default = nil
+	}
+	out.Maximum = (*float64)(unsafe.Pointer(in.Maximum))
+	out.ExclusiveMaximum = in.ExclusiveMaximum
+	out.Minimum = (*float64)(unsafe.Pointer(in.Minimum))
+	out.ExclusiveMinimum = in.ExclusiveMinimum
+	out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength))
+	out.MinLength = (*int64)(unsafe.Pointer(in.MinLength))
+	out.Pattern = in.Pattern
+	out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems))
+	out.MinItems = (*int64)(unsafe.Pointer(in.MinItems))
+	out.UniqueItems = in.UniqueItems
+	out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf))
+	if in.Enum != nil {
+		in, out := &in.Enum, &out.Enum
+		*out = make([]JSON, len(*in))
+		for i := range *in {
+			if err := Convert_apiextensions_JSON_To_v1beta1_JSON(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Enum = nil
+	}
+	out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties))
+	out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties))
+	out.Required = *(*[]string)(unsafe.Pointer(&in.Required))
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = new(JSONSchemaPropsOrArray)
+		if err := Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Items = nil
+	}
+	if in.AllOf != nil {
+		in, out := &in.AllOf, &out.AllOf
+		*out = make([]JSONSchemaProps, len(*in))
+		for i := range *in {
+			if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.AllOf = nil
+	}
+	if in.OneOf != nil {
+		in, out := &in.OneOf, &out.OneOf
+		*out = make([]JSONSchemaProps, len(*in))
+		for i := range *in {
+			if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.OneOf = nil
+	}
+	if in.AnyOf != nil {
+		in, out := &in.AnyOf, &out.AnyOf
+		*out = make([]JSONSchemaProps, len(*in))
+		for i := range *in {
+			if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.AnyOf = nil
+	}
+	if in.Not != nil {
+		in, out := &in.Not, &out.Not
+		*out = new(JSONSchemaProps)
+		if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Not = nil
+	}
+	if in.Properties != nil {
+		in, out := &in.Properties, &out.Properties
+		*out = make(map[string]JSONSchemaProps, len(*in))
+		for key, val := range *in {
+			newVal := new(JSONSchemaProps)
+			if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.Properties = nil
+	}
+	if in.AdditionalProperties != nil {
+		in, out := &in.AdditionalProperties, &out.AdditionalProperties
+		*out = new(JSONSchemaPropsOrBool)
+		if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.AdditionalProperties = nil
+	}
+	if in.PatternProperties != nil {
+		in, out := &in.PatternProperties, &out.PatternProperties
+		*out = make(map[string]JSONSchemaProps, len(*in))
+		for key, val := range *in {
+			newVal := new(JSONSchemaProps)
+			if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.PatternProperties = nil
+	}
+	if in.Dependencies != nil {
+		in, out := &in.Dependencies, &out.Dependencies
+		*out = make(JSONSchemaDependencies, len(*in))
+		for key, val := range *in {
+			newVal := new(JSONSchemaPropsOrStringArray)
+			if err := Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.Dependencies = nil
+	}
+	if in.AdditionalItems != nil {
+		in, out := &in.AdditionalItems, &out.AdditionalItems
+		*out = new(JSONSchemaPropsOrBool)
+		if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.AdditionalItems = nil
+	}
+	if in.Definitions != nil {
+		in, out := &in.Definitions, &out.Definitions
+		*out = make(JSONSchemaDefinitions, len(*in))
+		for key, val := range *in {
+			newVal := new(JSONSchemaProps)
+			if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.Definitions = nil
+	}
+	out.ExternalDocs = (*ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs))
+	if in.Example != nil {
+		in, out := &in.Example, &out.Example
+		*out = new(JSON)
+		if err := Convert_apiextensions_JSON_To_v1beta1_JSON(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Example = nil
+	}
+	out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields))
+	out.XEmbeddedResource = in.XEmbeddedResource
+	out.XIntOrString = in.XIntOrString
+	out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys))
+	out.XListType = (*string)(unsafe.Pointer(in.XListType))
+	out.XMapType = (*string)(unsafe.Pointer(in.XMapType))
+	out.XValidations = *(*ValidationRules)(unsafe.Pointer(&in.XValidations))
+	return nil
+}
+
+func autoConvert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error {
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(apiextensions.JSONSchemaProps)
+		if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Schema = nil
+	}
+	if in.JSONSchemas != nil {
+		in, out := &in.JSONSchemas, &out.JSONSchemas
+		*out = make([]apiextensions.JSONSchemaProps, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JSONSchemas = nil
+	}
+	return nil
+}
+
+// Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray is an autogenerated conversion function.
+func Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error {
+	return autoConvert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in, out, s)
+}
+
+func autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error {
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(JSONSchemaProps)
+		if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Schema = nil
+	}
+	if in.JSONSchemas != nil {
+		in, out := &in.JSONSchemas, &out.JSONSchemas
+		*out = make([]JSONSchemaProps, len(*in))
+		for i := range *in {
+			if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JSONSchemas = nil
+	}
+	return nil
+}
+
+// Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray is an autogenerated conversion function.
+func Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error {
+	return autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in, out, s)
+}
+
+func autoConvert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error {
+	out.Allows = in.Allows
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(apiextensions.JSONSchemaProps)
+		if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Schema = nil
+	}
+	return nil
+}
+
+// Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool is an autogenerated conversion function.
+func Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error {
+	return autoConvert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in, out, s)
+}
+
+func autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error {
+	out.Allows = in.Allows
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(JSONSchemaProps)
+		if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Schema = nil
+	}
+	return nil
+}
+
+// Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool is an autogenerated conversion function.
+func Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error {
+	return autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in, out, s)
+}
+
+func autoConvert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error {
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(apiextensions.JSONSchemaProps)
+		if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Schema = nil
+	}
+	out.Property = *(*[]string)(unsafe.Pointer(&in.Property))
+	return nil
+}
+
+// Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray is an autogenerated conversion function.
+func Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error {
+	return autoConvert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in, out, s)
+}
+
+func autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error {
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(JSONSchemaProps)
+		if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Schema = nil
+	}
+	out.Property = *(*[]string)(unsafe.Pointer(&in.Property))
+	return nil
+}
+
+// Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray is an autogenerated conversion function.
+func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error {
+	return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in, out, s)
+}
+
+func autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error {
+	out.Namespace = in.Namespace
+	out.Name = in.Name
+	out.Path = (*string)(unsafe.Pointer(in.Path))
+	if err := v1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference is an autogenerated conversion function.
+func Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error {
+	return autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in, out, s)
+}
+
+func autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error {
+	out.Namespace = in.Namespace
+	out.Name = in.Name
+	out.Path = (*string)(unsafe.Pointer(in.Path))
+	if err := v1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function.
+func Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error {
+	return autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in, out, s)
+}
+
+func autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error {
+	out.Rule = in.Rule
+	out.Message = in.Message
+	out.MessageExpression = in.MessageExpression
+	out.Reason = (*apiextensions.FieldValueErrorReason)(unsafe.Pointer(in.Reason))
+	out.FieldPath = in.FieldPath
+	out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf))
+	return nil
+}
+
+// Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule is an autogenerated conversion function.
+func Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error {
+	return autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in, out, s)
+}
+
+func autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error {
+	out.Rule = in.Rule
+	out.Message = in.Message
+	out.MessageExpression = in.MessageExpression
+	out.Reason = (*FieldValueErrorReason)(unsafe.Pointer(in.Reason))
+	out.FieldPath = in.FieldPath
+	out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf))
+	return nil
+}
+
+// Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule is an autogenerated conversion function.
+func Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error {
+	return autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in, out, s)
+}
+
+func autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error {
+	out.URL = (*string)(unsafe.Pointer(in.URL))
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(apiextensions.ServiceReference)
+		if err := Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Service = nil
+	}
+	out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
+	return nil
+}
+
+// Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig is an autogenerated conversion function.
+func Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error {
+	return autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in, out, s)
+}
+
+func autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error {
+	out.URL = (*string)(unsafe.Pointer(in.URL))
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(ServiceReference)
+		if err := Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Service = nil
+	}
+	out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
+	return nil
+}
+
+// Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig is an autogenerated conversion function.
+func Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + return autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in, out, s) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..bb8ab06cb --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,716 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest. +func (in *ConversionRequest) DeepCopy() *ConversionRequest { + if in == nil { + return nil + } + out := new(ConversionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) { + *out = *in + if in.ConvertedObjects != nil { + in, out := &in.ConvertedObjects, &out.ConvertedObjects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Result.DeepCopyInto(&out.Result) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse. +func (in *ConversionResponse) DeepCopy() *ConversionResponse { + if in == nil { + return nil + } + out := new(ConversionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionReview) DeepCopyInto(out *ConversionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(ConversionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(ConversionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview. 
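The generated converters above are plain exported functions, so a caller can round-trip a single value without going through a scheme; the exported wrappers simply delegate to their autoConvert twins. A minimal sketch of a direct call (the input value and the nil conversion.Scope are illustrative assumptions; real callers normally let a runtime.Scheme drive these conversions):

    package main

    import (
    	"fmt"

    	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
    	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    )

    func main() {
    	in := &v1beta1.JSONSchemaPropsOrBool{Allows: true}
    	out := &apiextensions.JSONSchemaPropsOrBool{}
    	// With a nil nested Schema nothing dereferences the scope, so a nil
    	// scope is acceptable for this sketch.
    	if err := v1beta1.Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in, out, nil); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Allows) // true
    }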
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..bb8ab06cb
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,716 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) {
+	*out = *in
+	if in.Objects != nil {
+		in, out := &in.Objects, &out.Objects
+		*out = make([]runtime.RawExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest.
+func (in *ConversionRequest) DeepCopy() *ConversionRequest {
+	if in == nil {
+		return nil
+	}
+	out := new(ConversionRequest)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) {
+	*out = *in
+	if in.ConvertedObjects != nil {
+		in, out := &in.ConvertedObjects, &out.ConvertedObjects
+		*out = make([]runtime.RawExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Result.DeepCopyInto(&out.Result)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse.
+func (in *ConversionResponse) DeepCopy() *ConversionResponse {
+	if in == nil {
+		return nil
+	}
+	out := new(ConversionResponse)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConversionReview) DeepCopyInto(out *ConversionReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Request != nil {
+		in, out := &in.Request, &out.Request
+		*out = new(ConversionRequest)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Response != nil {
+		in, out := &in.Response, &out.Response
+		*out = new(ConversionResponse)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview.
+func (in *ConversionReview) DeepCopy() *ConversionReview {
+	if in == nil {
+		return nil
+	}
+	out := new(ConversionReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConversionReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition.
+func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceColumnDefinition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) {
+	*out = *in
+	if in.WebhookClientConfig != nil {
+		in, out := &in.WebhookClientConfig, &out.WebhookClientConfig
+		*out = new(WebhookClientConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConversionReviewVersions != nil {
+		in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion.
+func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceConversion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition.
+func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition.
+func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CustomResourceDefinition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList.
+func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) {
+	*out = *in
+	if in.ShortNames != nil {
+		in, out := &in.ShortNames, &out.ShortNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Categories != nil {
+		in, out := &in.Categories, &out.Categories
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames.
+func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionNames)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) {
+	*out = *in
+	in.Names.DeepCopyInto(&out.Names)
+	if in.Validation != nil {
+		in, out := &in.Validation, &out.Validation
+		*out = new(CustomResourceValidation)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Subresources != nil {
+		in, out := &in.Subresources, &out.Subresources
+		*out = new(CustomResourceSubresources)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]CustomResourceDefinitionVersion, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AdditionalPrinterColumns != nil {
+		in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns
+		*out = make([]CustomResourceColumnDefinition, len(*in))
+		copy(*out, *in)
+	}
+	if in.Conversion != nil {
+		in, out := &in.Conversion, &out.Conversion
+		*out = new(CustomResourceConversion)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreserveUnknownFields != nil {
+		in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec.
+func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]CustomResourceDefinitionCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.AcceptedNames.DeepCopyInto(&out.AcceptedNames)
+	if in.StoredVersions != nil {
+		in, out := &in.StoredVersions, &out.StoredVersions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus.
+func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) {
+	*out = *in
+	if in.DeprecationWarning != nil {
+		in, out := &in.DeprecationWarning, &out.DeprecationWarning
+		*out = new(string)
+		**out = **in
+	}
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(CustomResourceValidation)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Subresources != nil {
+		in, out := &in.Subresources, &out.Subresources
+		*out = new(CustomResourceSubresources)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AdditionalPrinterColumns != nil {
+		in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns
+		*out = make([]CustomResourceColumnDefinition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion.
+func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionVersion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) {
+	*out = *in
+	if in.LabelSelectorPath != nil {
+		in, out := &in.LabelSelectorPath, &out.LabelSelectorPath
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale.
+func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceSubresourceScale)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus.
+func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceSubresourceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) {
+	*out = *in
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(CustomResourceSubresourceStatus)
+		**out = **in
+	}
+	if in.Scale != nil {
+		in, out := &in.Scale, &out.Scale
+		*out = new(CustomResourceSubresourceScale)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources.
+func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceSubresources)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) {
+	*out = *in
+	if in.OpenAPIV3Schema != nil {
+		in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation.
+func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceValidation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation.
+func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalDocumentation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSON) DeepCopyInto(out *JSON) {
+	*out = *in
+	if in.Raw != nil {
+		in, out := &in.Raw, &out.Raw
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSON.
+func (in *JSON) DeepCopy() *JSON {
+	if in == nil {
+		return nil
+	}
+	out := new(JSON)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) {
+	{
+		in := &in
+		*out = make(JSONSchemaDefinitions, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions.
+func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaDefinitions)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) {
+	{
+		in := &in
+		*out = make(JSONSchemaDependencies, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies.
+func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaDependencies)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) {
+	clone := in.DeepCopy()
+	*out = *clone
+	return
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) {
+	*out = *in
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = (*in).DeepCopy()
+	}
+	if in.JSONSchemas != nil {
+		in, out := &in.JSONSchemas, &out.JSONSchemas
+		*out = make([]JSONSchemaProps, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray.
+func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaPropsOrArray)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) {
+	*out = *in
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool.
+func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaPropsOrBool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) {
+	*out = *in
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = (*in).DeepCopy()
+	}
+	if in.Property != nil {
+		in, out := &in.Property, &out.Property
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray.
+func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaPropsOrStringArray)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	if in.Port != nil {
+		in, out := &in.Port, &out.Port
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
+func (in *ServiceReference) DeepCopy() *ServiceReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidationRule) DeepCopyInto(out *ValidationRule) {
+	*out = *in
+	if in.Reason != nil {
+		in, out := &in.Reason, &out.Reason
+		*out = new(FieldValueErrorReason)
+		**out = **in
+	}
+	if in.OptionalOldSelf != nil {
+		in, out := &in.OptionalOldSelf, &out.OptionalOldSelf
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRule.
+func (in *ValidationRule) DeepCopy() *ValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ValidationRules) DeepCopyInto(out *ValidationRules) {
+	{
+		in := &in
+		*out = make(ValidationRules, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRules.
+func (in ValidationRules) DeepCopy() ValidationRules {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidationRules)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
+	*out = *in
+	if in.URL != nil {
+		in, out := &in.URL, &out.URL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(ServiceReference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CABundle != nil {
+		in, out := &in.CABundle, &out.CABundle
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig.
+func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookClientConfig)
+	in.DeepCopyInto(out)
+	return out
+}
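Every DeepCopy above clones nested pointers, slices, and maps, which is what makes it safe to mutate an object that came out of a shared informer cache. A small usage sketch (the group values are arbitrary placeholders):

    package main

    import (
    	"fmt"

    	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    )

    func main() {
    	orig := &v1beta1.CustomResourceDefinition{}
    	orig.Spec.Group = "crds.example.com"

    	// The clone is fully independent; edits to it never leak back.
    	clone := orig.DeepCopy()
    	clone.Spec.Group = "other.example.com"

    	fmt.Println(orig.Spec.Group)  // crds.example.com
    	fmt.Println(clone.Spec.Group) // other.example.com
    }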
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go
new file mode 100644
index 000000000..225c6ff51
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,56 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetObjectDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) })
+	scheme.AddTypeDefaultingFunc(&CustomResourceDefinitionList{}, func(obj interface{}) {
+		SetObjectDefaults_CustomResourceDefinitionList(obj.(*CustomResourceDefinitionList))
+	})
+	return nil
+}
+
+func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) {
+	SetDefaults_CustomResourceDefinition(in)
+	SetDefaults_CustomResourceDefinitionSpec(&in.Spec)
+	if in.Spec.Conversion != nil {
+		if in.Spec.Conversion.WebhookClientConfig != nil {
+			if in.Spec.Conversion.WebhookClientConfig.Service != nil {
+				SetDefaults_ServiceReference(in.Spec.Conversion.WebhookClientConfig.Service)
+			}
+		}
+	}
+}
+
+func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) {
+	for i := range in.Items {
+		a := &in.Items[i]
+		SetObjectDefaults_CustomResourceDefinition(a)
+	}
+}
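RegisterDefaults hooks the SetObjectDefaults_* functions into a scheme so that decoded objects are defaulted automatically; the same functions can also be invoked by hand. A sketch under those assumptions:

    package main

    import (
    	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
    	scheme := runtime.NewScheme()
    	if err := v1beta1.RegisterDefaults(scheme); err != nil {
    		panic(err)
    	}

    	crd := &v1beta1.CustomResourceDefinition{}
    	// Either route applies the same defaulters: via the scheme...
    	scheme.Default(crd)
    	// ...or directly, bypassing the scheme.
    	v1beta1.SetObjectDefaults_CustomResourceDefinition(crd)
    }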
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 000000000..9c22ae5c1
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,98 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ConversionReview) APILifecycleIntroduced() (major, minor int) {
+	return 1, 13
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ConversionReview) APILifecycleDeprecated() (major, minor int) {
+	return 1, 19
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *ConversionReview) APILifecycleReplacement() schema.GroupVersionKind {
+	return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "ConversionReview"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ConversionReview) APILifecycleRemoved() (major, minor int) {
+	return 1, 22
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *CustomResourceDefinition) APILifecycleIntroduced() (major, minor int) {
+	return 1, 7
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *CustomResourceDefinition) APILifecycleDeprecated() (major, minor int) {
+	return 1, 16
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *CustomResourceDefinition) APILifecycleReplacement() schema.GroupVersionKind {
+	return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *CustomResourceDefinition) APILifecycleRemoved() (major, minor int) {
+	return 1, 22
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *CustomResourceDefinitionList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 7
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *CustomResourceDefinitionList) APILifecycleDeprecated() (major, minor int) {
+	return 1, 16
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *CustomResourceDefinitionList) APILifecycleReplacement() schema.GroupVersionKind {
+	return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinitionList"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *CustomResourceDefinitionList) APILifecycleRemoved() (major, minor int) {
+	return 1, 22
+}
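Tooling can interrogate these lifecycle markers at runtime instead of hard-coding version tables. A brief sketch:

    package main

    import (
    	"fmt"

    	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    )

    func main() {
    	var crd v1beta1.CustomResourceDefinition
    	major, minor := crd.APILifecycleRemoved()
    	// e.g. "not served since 1.22; use apiextensions.k8s.io/v1, Kind=CustomResourceDefinition"
    	fmt.Printf("not served since %d.%d; use %s\n", major, minor, crd.APILifecycleReplacement())
    }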
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go
new file mode 100644
index 000000000..b5e5c35c5
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go
@@ -0,0 +1,608 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package apiextensions
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition.
+func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceColumnDefinition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) {
+	*out = *in
+	if in.WebhookClientConfig != nil {
+		in, out := &in.WebhookClientConfig, &out.WebhookClientConfig
+		*out = new(WebhookClientConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConversionReviewVersions != nil {
+		in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion.
+func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceConversion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition.
+func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition.
+func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CustomResourceDefinition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList.
+func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) {
+	*out = *in
+	if in.ShortNames != nil {
+		in, out := &in.ShortNames, &out.ShortNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Categories != nil {
+		in, out := &in.Categories, &out.Categories
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames.
+func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionNames)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) {
+	*out = *in
+	in.Names.DeepCopyInto(&out.Names)
+	if in.Validation != nil {
+		in, out := &in.Validation, &out.Validation
+		*out = new(CustomResourceValidation)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Subresources != nil {
+		in, out := &in.Subresources, &out.Subresources
+		*out = new(CustomResourceSubresources)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]CustomResourceDefinitionVersion, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AdditionalPrinterColumns != nil {
+		in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns
+		*out = make([]CustomResourceColumnDefinition, len(*in))
+		copy(*out, *in)
+	}
+	if in.Conversion != nil {
+		in, out := &in.Conversion, &out.Conversion
+		*out = new(CustomResourceConversion)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreserveUnknownFields != nil {
+		in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec.
+func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]CustomResourceDefinitionCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.AcceptedNames.DeepCopyInto(&out.AcceptedNames)
+	if in.StoredVersions != nil {
+		in, out := &in.StoredVersions, &out.StoredVersions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus.
+func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) {
+	*out = *in
+	if in.DeprecationWarning != nil {
+		in, out := &in.DeprecationWarning, &out.DeprecationWarning
+		*out = new(string)
+		**out = **in
+	}
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(CustomResourceValidation)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Subresources != nil {
+		in, out := &in.Subresources, &out.Subresources
+		*out = new(CustomResourceSubresources)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AdditionalPrinterColumns != nil {
+		in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns
+		*out = make([]CustomResourceColumnDefinition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion.
+func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceDefinitionVersion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) {
+	*out = *in
+	if in.LabelSelectorPath != nil {
+		in, out := &in.LabelSelectorPath, &out.LabelSelectorPath
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale.
+func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceSubresourceScale)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus.
+func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceSubresourceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) {
+	*out = *in
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(CustomResourceSubresourceStatus)
+		**out = **in
+	}
+	if in.Scale != nil {
+		in, out := &in.Scale, &out.Scale
+		*out = new(CustomResourceSubresourceScale)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources.
+func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceSubresources)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) {
+	*out = *in
+	if in.OpenAPIV3Schema != nil {
+		in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation.
+func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomResourceValidation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation.
+func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalDocumentation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) {
+	{
+		in := &in
+		*out = make(JSONSchemaDefinitions, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions.
+func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaDefinitions)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) {
+	{
+		in := &in
+		*out = make(JSONSchemaDependencies, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies.
+func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaDependencies)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) {
+	clone := in.DeepCopy()
+	*out = *clone
+	return
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) {
+	*out = *in
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = (*in).DeepCopy()
+	}
+	if in.JSONSchemas != nil {
+		in, out := &in.JSONSchemas, &out.JSONSchemas
+		*out = make([]JSONSchemaProps, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray.
+func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaPropsOrArray)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) {
+	*out = *in
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool.
+func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaPropsOrBool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) {
+	*out = *in
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = (*in).DeepCopy()
+	}
+	if in.Property != nil {
+		in, out := &in.Property, &out.Property
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray.
+func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONSchemaPropsOrStringArray)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
+	*out = *in
+	if in.Path != nil {
+		in, out := &in.Path, &out.Path
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
+func (in *ServiceReference) DeepCopy() *ServiceReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidationRule) DeepCopyInto(out *ValidationRule) {
+	*out = *in
+	if in.Reason != nil {
+		in, out := &in.Reason, &out.Reason
+		*out = new(FieldValueErrorReason)
+		**out = **in
+	}
+	if in.OptionalOldSelf != nil {
+		in, out := &in.OptionalOldSelf, &out.OptionalOldSelf
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRule.
+func (in *ValidationRule) DeepCopy() *ValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ValidationRules) DeepCopyInto(out *ValidationRules) {
+	{
+		in := &in
+		*out = make(ValidationRules, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRules.
+func (in ValidationRules) DeepCopy() ValidationRules {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidationRules)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
+	*out = *in
+	if in.URL != nil {
+		in, out := &in.URL, &out.URL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(ServiceReference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CABundle != nil {
+		in, out := &in.CABundle, &out.CABundle
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig.
+func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookClientConfig)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go b/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go
new file mode 100644
index 000000000..ffb9f56d8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package path
+
+import (
+	"fmt"
+	"strings"
+)
+
+// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store)
+var NameMayNotBe = []string{".", ".."}
+
+// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store)
+var NameMayNotContain = []string{"/", "%"}
+
+// IsValidPathSegmentName validates the name can be safely encoded as a path segment
+func IsValidPathSegmentName(name string) []string {
+	for _, illegalName := range NameMayNotBe {
+		if name == illegalName {
+			return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
+		}
+	}
+
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
+// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
+func IsValidPathSegmentPrefix(name string) []string {
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// ValidatePathSegmentName validates the name can be safely encoded as a path segment
+func ValidatePathSegmentName(name string, prefix bool) []string {
+	if prefix {
+		return IsValidPathSegmentPrefix(name)
+	}
+
+	return IsValidPathSegmentName(name)
+}
+*/ + +package unstructuredscheme + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" +) + +var scheme = runtime.NewScheme() + +// NewUnstructuredNegotiatedSerializer returns a simple, negotiated serializer +func NewUnstructuredNegotiatedSerializer() runtime.NegotiatedSerializer { + return unstructuredNegotiatedSerializer{ + scheme: scheme, + typer: NewUnstructuredObjectTyper(), + creator: NewUnstructuredCreator(), + } +} + +type unstructuredNegotiatedSerializer struct { + scheme *runtime.Scheme + typer runtime.ObjectTyper + creator runtime.ObjectCreater +} + +func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { + return []runtime.SerializerInfo{ + { + MediaType: "application/json", + MediaTypeType: "application", + MediaTypeSubType: "json", + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false), + PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, true), + StreamSerializer: &runtime.StreamSerializerInfo{ + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false), + Framer: json.Framer, + }, + }, + { + MediaType: "application/yaml", + MediaTypeType: "application", + MediaTypeSubType: "yaml", + EncodesAsText: true, + Serializer: json.NewYAMLSerializer(json.DefaultMetaFactory, s.creator, s.typer), + }, + } +} + +func (s unstructuredNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return versioning.NewDefaultingCodecForScheme(s.scheme, encoder, nil, gv, nil) +} + +func (s unstructuredNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return versioning.NewDefaultingCodecForScheme(s.scheme, nil, decoder, nil, gv) +} + +type unstructuredObjectTyper struct { +} + +// NewUnstructuredObjectTyper returns an object typer that can deal with unstructured things +func NewUnstructuredObjectTyper() runtime.ObjectTyper { + return unstructuredObjectTyper{} +} + +func (t unstructuredObjectTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + // Delegate for things other than Unstructured. 
+ if _, ok := obj.(runtime.Unstructured); !ok { + return nil, false, fmt.Errorf("cannot type %T", obj) + } + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, runtime.NewMissingKindErr("object has no kind field ") + } + if len(gvk.Version) == 0 { + return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field") + } + + return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil +} + +func (t unstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + return true +} + +type unstructuredCreator struct{} + +// NewUnstructuredCreator returns a simple object creator that always returns an unstructured +func NewUnstructuredCreator() runtime.ObjectCreater { + return unstructuredCreator{} +} + +func (c unstructuredCreator) New(kind schema.GroupVersionKind) (runtime.Object, error) { + ret := &unstructured.Unstructured{} + ret.SetGroupVersionKind(kind) + return ret, nil +} + +type unstructuredDefaulter struct { +} + +// NewUnstructuredDefaulter returns defaulter suitable for unstructured types that doesn't default anything +func NewUnstructuredDefaulter() runtime.ObjectDefaulter { + return unstructuredDefaulter{} +} + +func (d unstructuredDefaulter) Default(in runtime.Object) { +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go new file mode 100644 index 000000000..1396274c7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -0,0 +1,202 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "container/heap" + "sync" + "time" + + "k8s.io/utils/clock" +) + +// NewExpiring returns an initialized expiring cache. +func NewExpiring() *Expiring { + return NewExpiringWithClock(clock.RealClock{}) +} + +// NewExpiringWithClock is like NewExpiring but allows passing in a custom +// clock for testing. +func NewExpiringWithClock(clock clock.Clock) *Expiring { + return &Expiring{ + clock: clock, + cache: make(map[interface{}]entry), + } +} + +// Expiring is a map whose entries expire after a per-entry timeout. +type Expiring struct { + // AllowExpiredGet causes the expiration check to be skipped on Get. + // It should only be used when a key always corresponds to the exact same value. + // Thus when this field is true, expired keys are considered valid + // until the next call to Set (which causes the GC to run). + // It may not be changed concurrently with calls to Get. + AllowExpiredGet bool + + clock clock.Clock + + // mu protects the below fields + mu sync.RWMutex + // cache is the internal map that backs the cache. + cache map[interface{}]entry + // generation is used as a cheap resource version for cache entries. Cleanups + // are scheduled with a key and generation. When the cleanup runs, it first + // compares its generation with the current generation of the entry. It + // deletes the entry iff the generation matches. 
This prevents cleanups + // scheduled for earlier versions of an entry from deleting later versions of + // an entry when Set() is called multiple times with the same key. + // + // The integer value of the generation of an entry is meaningless. + generation uint64 + + heap expiringHeap +} + +type entry struct { + val interface{} + expiry time.Time + generation uint64 +} + +// Get looks up an entry in the cache. +func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { + c.mu.RLock() + defer c.mu.RUnlock() + e, ok := c.cache[key] + if !ok { + return nil, false + } + if !c.AllowExpiredGet && !c.clock.Now().Before(e.expiry) { + return nil, false + } + return e.val, true +} + +// Set sets a key/value/expiry entry in the map, overwriting any previous entry +// with the same key. The entry expires at the given expiry time, but its TTL +// may be lengthened or shortened by additional calls to Set(). Garbage +// collection of expired entries occurs during calls to Set(), however calls to +// Get() will not return expired entries that have not yet been garbage +// collected. +func (c *Expiring) Set(key interface{}, val interface{}, ttl time.Duration) { + now := c.clock.Now() + expiry := now.Add(ttl) + + c.mu.Lock() + defer c.mu.Unlock() + + c.generation++ + + c.cache[key] = entry{ + val: val, + expiry: expiry, + generation: c.generation, + } + + // Run GC inline before pushing the new entry. + c.gc(now) + + heap.Push(&c.heap, &expiringHeapEntry{ + key: key, + expiry: expiry, + generation: c.generation, + }) +} + +// Delete deletes an entry in the map. +func (c *Expiring) Delete(key interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.del(key, 0) +} + +// del deletes the entry for the given key. The generation argument is the +// generation of the entry that should be deleted. If the generation has been +// changed (e.g. if a set has occurred on an existing element but the old +// cleanup still runs), this is a noop. If the generation argument is 0, the +// entry's generation is ignored and the entry is deleted. +// +// del must be called under the write lock. +func (c *Expiring) del(key interface{}, generation uint64) { + e, ok := c.cache[key] + if !ok { + return + } + if generation != 0 && generation != e.generation { + return + } + delete(c.cache, key) +} + +// Len returns the number of items in the cache. +func (c *Expiring) Len() int { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.cache) +} + +func (c *Expiring) gc(now time.Time) { + for { + // Return from gc if the heap is empty or the next element is not yet + // expired. + // + // heap[0] is a peek at the next element in the heap, which is not obvious + // from looking at the (*expiringHeap).Pop() implementation below. + // heap.Pop() swaps the first entry with the last entry of the heap, then + // calls (*expiringHeap).Pop() which returns the last element. + if len(c.heap) == 0 || now.Before(c.heap[0].expiry) { + return + } + cleanup := heap.Pop(&c.heap).(*expiringHeapEntry) + c.del(cleanup.key, cleanup.generation) + } +} + +type expiringHeapEntry struct { + key interface{} + expiry time.Time + generation uint64 +} + +// expiringHeap is a min-heap ordered by expiration time of its entries. The +// expiring cache uses this as a priority queue to efficiently organize entries +// which will be garbage collected once they expire. 
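+//
+// Illustrative pop order (hypothetical expiries): entries expiring at t+1s,
+// t+5s, and t+10s are popped in exactly that order, regardless of the order
+// in which they were pushed.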
+type expiringHeap []*expiringHeapEntry
+
+var _ heap.Interface = &expiringHeap{}
+
+func (cq expiringHeap) Len() int {
+	return len(cq)
+}
+
+func (cq expiringHeap) Less(i, j int) bool {
+	return cq[i].expiry.Before(cq[j].expiry)
+}
+
+func (cq expiringHeap) Swap(i, j int) {
+	cq[i], cq[j] = cq[j], cq[i]
+}
+
+func (cq *expiringHeap) Push(c interface{}) {
+	*cq = append(*cq, c.(*expiringHeapEntry))
+}
+
+func (cq *expiringHeap) Pop() interface{} {
+	c := (*cq)[cq.Len()-1]
+	*cq = (*cq)[:cq.Len()-1]
+	return c
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
new file mode 100644
index 000000000..ad486d580
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"container/list"
+	"sync"
+	"time"
+)
+
+// Clock defines an interface for obtaining the current time
+type Clock interface {
+	Now() time.Time
+}
+
+// realClock implements the Clock interface by calling time.Now()
+type realClock struct{}
+
+func (realClock) Now() time.Time { return time.Now() }
+
+// LRUExpireCache is a cache that ensures the most recently accessed keys are returned with
+// a ttl beyond which keys are forcibly expired.
+type LRUExpireCache struct {
+	// clock is used to obtain the current time
+	clock Clock
+
+	lock sync.Mutex
+
+	maxSize      int
+	evictionList list.List
+	entries      map[interface{}]*list.Element
+}
+
+// NewLRUExpireCache creates an expiring cache with the given size
+func NewLRUExpireCache(maxSize int) *LRUExpireCache {
+	return NewLRUExpireCacheWithClock(maxSize, realClock{})
+}
+
+// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
+func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
+	if maxSize <= 0 {
+		panic("maxSize must be > 0")
+	}
+
+	return &LRUExpireCache{
+		clock:   clock,
+		maxSize: maxSize,
+		entries: map[interface{}]*list.Element{},
+	}
+}
+
+type cacheEntry struct {
+	key        interface{}
+	value      interface{}
+	expireTime time.Time
+}
+
+// Add adds the value to the cache at key with the specified maximum duration.
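+//
+// A minimal usage sketch (the key, value, and TTL here are hypothetical):
+//
+//	c := NewLRUExpireCache(128)
+//	c.Add("snapshot-lease", "holder-1", 5*time.Minute)
+//	if v, ok := c.Get("snapshot-lease"); ok {
+//		_ = v // present until the TTL elapses or the entry is evicted
+//	}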
+func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) { + c.lock.Lock() + defer c.lock.Unlock() + + // Key already exists + oldElement, ok := c.entries[key] + if ok { + c.evictionList.MoveToFront(oldElement) + oldElement.Value.(*cacheEntry).value = value + oldElement.Value.(*cacheEntry).expireTime = c.clock.Now().Add(ttl) + return + } + + // Make space if necessary + if c.evictionList.Len() >= c.maxSize { + toEvict := c.evictionList.Back() + c.evictionList.Remove(toEvict) + delete(c.entries, toEvict.Value.(*cacheEntry).key) + } + + // Add new entry + entry := &cacheEntry{ + key: key, + value: value, + expireTime: c.clock.Now().Add(ttl), + } + element := c.evictionList.PushFront(entry) + c.entries[key] = element +} + +// Get returns the value at the specified key from the cache if it exists and is not +// expired, or returns false. +func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + element, ok := c.entries[key] + if !ok { + return nil, false + } + + if c.clock.Now().After(element.Value.(*cacheEntry).expireTime) { + c.evictionList.Remove(element) + delete(c.entries, key) + return nil, false + } + + c.evictionList.MoveToFront(element) + + return element.Value.(*cacheEntry).value, true +} + +// Remove removes the specified key from the cache if it exists +func (c *LRUExpireCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + element, ok := c.entries[key] + if !ok { + return + } + + c.evictionList.Remove(element) + delete(c.entries, key) +} + +// RemoveAll removes all keys that match predicate. +func (c *LRUExpireCache) RemoveAll(predicate func(key any) bool) { + c.lock.Lock() + defer c.lock.Unlock() + + for key, element := range c.entries { + if predicate(key) { + c.evictionList.Remove(element) + delete(c.entries, key) + } + } +} + +// Keys returns all unexpired keys in the cache. +// +// Keep in mind that subsequent calls to Get() for any of the returned keys +// might return "not found". +// +// Keys are returned ordered from least recently used to most recently used. +func (c *LRUExpireCache) Keys() []interface{} { + c.lock.Lock() + defer c.lock.Unlock() + + now := c.clock.Now() + + val := make([]interface{}, 0, c.evictionList.Len()) + for element := c.evictionList.Back(); element != nil; element = element.Prev() { + // Only return unexpired keys + if !now.After(element.Value.(*cacheEntry).expireTime) { + val = append(val, element.Value.(*cacheEntry).key) + } + } + + return val +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go new file mode 100644 index 000000000..fc0301844 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -0,0 +1,138 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package diff
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/google/go-cmp/cmp"
+	"k8s.io/apimachinery/pkg/util/dump"
+)
+
+func legacyDiff(a, b interface{}) string {
+	return cmp.Diff(a, b)
+}
+
+// StringDiff diffs a and b and returns a human-readable diff.
+// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
+func StringDiff(a, b string) string {
+	return legacyDiff(a, b)
+}
+
+// ObjectDiff prints the diff of two go objects and fails if the objects
+// contain unhandled unexported fields.
+// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
+func ObjectDiff(a, b interface{}) string {
+	return legacyDiff(a, b)
+}
+
+// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects
+// contain unhandled unexported fields.
+// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
+func ObjectGoPrintDiff(a, b interface{}) string {
+	return legacyDiff(a, b)
+}
+
+// ObjectReflectDiff prints the diff of two go objects and fails if the objects
+// contain unhandled unexported fields.
+// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
+func ObjectReflectDiff(a, b interface{}) string {
+	return legacyDiff(a, b)
+}
+
+// ObjectGoPrintSideBySide prints a and b as textual dumps side by side,
+// enabling easy visual scanning for mismatches.
+func ObjectGoPrintSideBySide(a, b interface{}) string {
+	sA := dump.Pretty(a)
+	sB := dump.Pretty(b)
+
+	linesA := strings.Split(sA, "\n")
+	linesB := strings.Split(sB, "\n")
+	width := 0
+	for _, s := range linesA {
+		l := len(s)
+		if l > width {
+			width = l
+		}
+	}
+	for _, s := range linesB {
+		l := len(s)
+		if l > width {
+			width = l
+		}
+	}
+	buf := &bytes.Buffer{}
+	w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0)
+	max := len(linesA)
+	if len(linesB) > max {
+		max = len(linesB)
+	}
+	for i := 0; i < max; i++ {
+		var a, b string
+		if i < len(linesA) {
+			a = linesA[i]
+		}
+		if i < len(linesB) {
+			b = linesB[i]
+		}
+		fmt.Fprintf(w, "%s\t%s\n", a, b)
+	}
+	w.Flush()
+	return buf.String()
+}
+
+// IgnoreUnset is an option that ignores fields that are unset on the right
+// hand side of a comparison. This is useful in testing to assert that an
+// object is a derivative.
+func IgnoreUnset() cmp.Option {
+	return cmp.Options{
+		// ignore unset fields in v2
+		cmp.FilterPath(func(path cmp.Path) bool {
+			_, v2 := path.Last().Values()
+			switch v2.Kind() {
+			case reflect.Slice, reflect.Map:
+				if v2.IsNil() || v2.Len() == 0 {
+					return true
+				}
+			case reflect.String:
+				if v2.Len() == 0 {
+					return true
+				}
+			case reflect.Interface, reflect.Pointer:
+				if v2.IsNil() {
+					return true
+				}
+			}
+			return false
+		}, cmp.Ignore()),
+		// ignore map entries that aren't set in v2
+		cmp.FilterPath(func(path cmp.Path) bool {
+			switch i := path.Last().(type) {
+			case cmp.MapIndex:
+				if _, v2 := i.Values(); !v2.IsValid() {
+					return true
+				}
+			}
+			return false
+		}, cmp.Ignore()),
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go b/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go
new file mode 100644
index 000000000..a20136a4e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duration
+
+import (
+	"fmt"
+	"time"
+)
+
+// ShortHumanDuration returns a succinct representation of the provided duration
+// with limited precision for consumption by humans.
+func ShortHumanDuration(d time.Duration) string {
+	// Allow a deviation of up to (but excluding) 2 seconds to tolerate machine
+	// clock inconsistency; such a duration can be considered "almost now".
+	if seconds := int(d.Seconds()); seconds < -1 {
+		return "<invalid>"
+	} else if seconds < 0 {
+		return "0s"
+	} else if seconds < 60 {
+		return fmt.Sprintf("%ds", seconds)
+	} else if minutes := int(d.Minutes()); minutes < 60 {
+		return fmt.Sprintf("%dm", minutes)
+	} else if hours := int(d.Hours()); hours < 24 {
+		return fmt.Sprintf("%dh", hours)
+	} else if hours < 24*365 {
+		return fmt.Sprintf("%dd", hours/24)
+	}
+	return fmt.Sprintf("%dy", int(d.Hours()/24/365))
+}
+
+// HumanDuration returns a succinct representation of the provided duration
+// with limited precision for consumption by humans. It provides ~2-3 significant
+// figures of duration.
+func HumanDuration(d time.Duration) string {
+	// Allow a deviation of up to (but excluding) 2 seconds to tolerate machine
+	// clock inconsistency; such a duration can be considered "almost now".
+	if seconds := int(d.Seconds()); seconds < -1 {
+		return "<invalid>"
+	} else if seconds < 0 {
+		return "0s"
+	} else if seconds < 60*2 {
+		return fmt.Sprintf("%ds", seconds)
+	}
+	minutes := int(d / time.Minute)
+	if minutes < 10 {
+		s := int(d/time.Second) % 60
+		if s == 0 {
+			return fmt.Sprintf("%dm", minutes)
+		}
+		return fmt.Sprintf("%dm%ds", minutes, s)
+	} else if minutes < 60*3 {
+		return fmt.Sprintf("%dm", minutes)
+	}
+	hours := int(d / time.Hour)
+	if hours < 8 {
+		m := int(d/time.Minute) % 60
+		if m == 0 {
+			return fmt.Sprintf("%dh", hours)
+		}
+		return fmt.Sprintf("%dh%dm", hours, m)
+	} else if hours < 48 {
+		return fmt.Sprintf("%dh", hours)
+	} else if hours < 24*8 {
+		h := hours % 24
+		if h == 0 {
+			return fmt.Sprintf("%dd", hours/24)
+		}
+		return fmt.Sprintf("%dd%dh", hours/24, h)
+	} else if hours < 24*365*2 {
+		return fmt.Sprintf("%dd", hours/24)
+	} else if hours < 24*365*8 {
+		dy := int(hours/24) % 365
+		if dy == 0 {
+			return fmt.Sprintf("%dy", hours/24/365)
+		}
+		return fmt.Sprintf("%dy%dd", hours/24/365, dy)
+	}
+	return fmt.Sprintf("%dy", int(hours/24/365))
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
new file mode 100644
index 000000000..5b2b22b6d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +// Package version provides utilities for version number comparisons +package version // import "k8s.io/apimachinery/pkg/util/version" diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go new file mode 100644 index 000000000..2292ba137 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -0,0 +1,372 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Version is an opaque representation of a version number +type Version struct { + components []uint + semver bool + preRelease string + buildMetadata string +} + +var ( + // versionMatchRE splits a version string into numeric and "extra" parts + versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`) + // extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release + extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`) +) + +func parse(str string, semver bool) (*Version, error) { + parts := versionMatchRE.FindStringSubmatch(str) + if parts == nil { + return nil, fmt.Errorf("could not parse %q as version", str) + } + numbers, extra := parts[1], parts[2] + + components := strings.Split(numbers, ".") + if (semver && len(components) != 3) || (!semver && len(components) < 2) { + return nil, fmt.Errorf("illegal version string %q", str) + } + + v := &Version{ + components: make([]uint, len(components)), + semver: semver, + } + for i, comp := range components { + if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + num, err := strconv.ParseUint(comp, 10, 0) + if err != nil { + return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err) + } + v.components[i] = uint(num) + } + + if semver && extra != "" { + extraParts := extraMatchRE.FindStringSubmatch(extra) + if extraParts == nil { + return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str) + } + v.preRelease, v.buildMetadata = extraParts[1], extraParts[2] + + for _, comp := range strings.Split(v.preRelease, ".") { + if _, err := strconv.ParseUint(comp, 10, 0); err == nil { + if strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + } + } + } + + return v, nil +} + +// HighestSupportedVersion returns the highest supported version +// This function assumes that the highest supported version must be v1.x. 
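+//
+// Illustrative (hypothetical input):
+//
+//	HighestSupportedVersion([]string{"v0.3.0", "1.1.0", "v1.2.1"}) // -> 1.2.1, nil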
+func HighestSupportedVersion(versions []string) (*Version, error) { + if len(versions) == 0 { + return nil, errors.New("empty array for supported versions") + } + + var ( + highestSupportedVersion *Version + theErr error + ) + + for i := len(versions) - 1; i >= 0; i-- { + currentHighestVer, err := ParseGeneric(versions[i]) + if err != nil { + theErr = err + continue + } + + if currentHighestVer.Major() > 1 { + continue + } + + if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { + highestSupportedVersion = currentHighestVer + } + } + + if highestSupportedVersion == nil { + return nil, fmt.Errorf( + "could not find a highest supported version from versions (%v) reported: %+v", + versions, theErr) + } + + if highestSupportedVersion.Major() != 1 { + return nil, fmt.Errorf("highest supported version reported is %v, must be v1.x", highestSupportedVersion) + } + + return highestSupportedVersion, nil +} + +// ParseGeneric parses a "generic" version string. The version string must consist of two +// or more dot-separated numeric fields (the first of which can't have leading zeroes), +// followed by arbitrary uninterpreted data (which need not be separated from the final +// numeric field by punctuation). For convenience, leading and trailing whitespace is +// ignored, and the version can be preceded by the letter "v". See also ParseSemantic. +func ParseGeneric(str string) (*Version, error) { + return parse(str, false) +} + +// MustParseGeneric is like ParseGeneric except that it panics on error +func MustParseGeneric(str string) *Version { + v, err := ParseGeneric(str) + if err != nil { + panic(err) + } + return v +} + +// ParseSemantic parses a version string that exactly obeys the syntax and semantics of +// the "Semantic Versioning" specification (http://semver.org/) (although it ignores +// leading and trailing whitespace, and allows the version to be preceded by "v"). For +// version strings that are not guaranteed to obey the Semantic Versioning syntax, use +// ParseGeneric. +func ParseSemantic(str string) (*Version, error) { + return parse(str, true) +} + +// MustParseSemantic is like ParseSemantic except that it panics on error +func MustParseSemantic(str string) *Version { + v, err := ParseSemantic(str) + if err != nil { + panic(err) + } + return v +} + +// MajorMinor returns a version with the provided major and minor version. 
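+// e.g. MajorMinor(1, 21).String() == "1.21" (illustrative).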
+func MajorMinor(major, minor uint) *Version { + return &Version{components: []uint{major, minor}} +} + +// Major returns the major release number +func (v *Version) Major() uint { + return v.components[0] +} + +// Minor returns the minor release number +func (v *Version) Minor() uint { + return v.components[1] +} + +// Patch returns the patch release number if v is a Semantic Version, or 0 +func (v *Version) Patch() uint { + if len(v.components) < 3 { + return 0 + } + return v.components[2] +} + +// BuildMetadata returns the build metadata, if v is a Semantic Version, or "" +func (v *Version) BuildMetadata() string { + return v.buildMetadata +} + +// PreRelease returns the prerelease metadata, if v is a Semantic Version, or "" +func (v *Version) PreRelease() string { + return v.preRelease +} + +// Components returns the version number components +func (v *Version) Components() []uint { + return v.components +} + +// WithMajor returns copy of the version object with requested major number +func (v *Version) WithMajor(major uint) *Version { + result := *v + result.components = []uint{major, v.Minor(), v.Patch()} + return &result +} + +// WithMinor returns copy of the version object with requested minor number +func (v *Version) WithMinor(minor uint) *Version { + result := *v + result.components = []uint{v.Major(), minor, v.Patch()} + return &result +} + +// WithPatch returns copy of the version object with requested patch number +func (v *Version) WithPatch(patch uint) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), patch} + return &result +} + +// WithPreRelease returns copy of the version object with requested prerelease +func (v *Version) WithPreRelease(preRelease string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.preRelease = preRelease + return &result +} + +// WithBuildMetadata returns copy of the version object with requested buildMetadata +func (v *Version) WithBuildMetadata(buildMetadata string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.buildMetadata = buildMetadata + return &result +} + +// String converts a Version back to a string; note that for versions parsed with +// ParseGeneric, this will not include the trailing uninterpreted portion of the version +// number. 
+func (v *Version) String() string {
+	if v == nil {
+		return "<nil>"
+	}
+	var buffer bytes.Buffer
+
+	for i, comp := range v.components {
+		if i > 0 {
+			buffer.WriteString(".")
+		}
+		buffer.WriteString(fmt.Sprintf("%d", comp))
+	}
+	if v.preRelease != "" {
+		buffer.WriteString("-")
+		buffer.WriteString(v.preRelease)
+	}
+	if v.buildMetadata != "" {
+		buffer.WriteString("+")
+		buffer.WriteString(v.buildMetadata)
+	}
+
+	return buffer.String()
+}
+
+// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0
+// if they are equal
+func (v *Version) compareInternal(other *Version) int {
+	vLen := len(v.components)
+	oLen := len(other.components)
+	for i := 0; i < vLen && i < oLen; i++ {
+		switch {
+		case other.components[i] < v.components[i]:
+			return 1
+		case other.components[i] > v.components[i]:
+			return -1
+		}
+	}
+
+	// If components are common but one has more items and they are not zeros, it is bigger
+	switch {
+	case oLen < vLen && !onlyZeros(v.components[oLen:]):
+		return 1
+	case oLen > vLen && !onlyZeros(other.components[vLen:]):
+		return -1
+	}
+
+	if !v.semver || !other.semver {
+		return 0
+	}
+
+	switch {
+	case v.preRelease == "" && other.preRelease != "":
+		return 1
+	case v.preRelease != "" && other.preRelease == "":
+		return -1
+	case v.preRelease == other.preRelease: // includes case where both are ""
+		return 0
+	}
+
+	vPR := strings.Split(v.preRelease, ".")
+	oPR := strings.Split(other.preRelease, ".")
+	for i := 0; i < len(vPR) && i < len(oPR); i++ {
+		vNum, err := strconv.ParseUint(vPR[i], 10, 0)
+		if err == nil {
+			oNum, err := strconv.ParseUint(oPR[i], 10, 0)
+			if err == nil {
+				switch {
+				case oNum < vNum:
+					return 1
+				case oNum > vNum:
+					return -1
+				default:
+					continue
+				}
+			}
+		}
+		if oPR[i] < vPR[i] {
+			return 1
+		} else if oPR[i] > vPR[i] {
+			return -1
+		}
+	}
+
+	switch {
+	case len(oPR) < len(vPR):
+		return 1
+	case len(oPR) > len(vPR):
+		return -1
+	}
+
+	return 0
+}
+
+// onlyZeros returns true only if every element of the array is zero
+func onlyZeros(array []uint) bool {
+	for _, num := range array {
+		if num != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// AtLeast tests if a version is at least equal to a given minimum version. If both
+// Versions are Semantic Versions, this will use the Semantic Version comparison
+// algorithm. Otherwise, it will compare only the numeric components, with non-present
+// components being considered "0" (i.e., "1.4" is equal to "1.4.0").
+func (v *Version) AtLeast(min *Version) bool {
+	return v.compareInternal(min) != -1
+}
+
+// LessThan tests if a version is less than a given version. (It is exactly the opposite
+// of AtLeast, for situations where asking "is v too old?" makes more sense than asking
+// "is v new enough?".)
+func (v *Version) LessThan(other *Version) bool {
+	return v.compareInternal(other) == -1
+}
+
+// Compare compares v against a version string (which will be parsed as either Semantic
+// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if
+// it is greater than other, or 0 if they are equal.
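+//
+// Illustrative: MustParseSemantic("1.4.0").Compare("1.10.2") yields -1, nil.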
+func (v *Version) Compare(other string) (int, error) { + ov, err := parse(other, v.semver) + if err != nil { + return 0, err + } + return v.compareInternal(ov), nil +} diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go b/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go new file mode 100644 index 000000000..7f8986fae --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go @@ -0,0 +1,134 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deprecation + +import ( + "fmt" + "regexp" + "strconv" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" +) + +type apiLifecycleDeprecated interface { + APILifecycleDeprecated() (major, minor int) +} + +type apiLifecycleRemoved interface { + APILifecycleRemoved() (major, minor int) +} + +type apiLifecycleReplacement interface { + APILifecycleReplacement() schema.GroupVersionKind +} + +// extract all digits at the beginning of the string +var leadingDigits = regexp.MustCompile(`^(\d+)`) + +// MajorMinor parses a numeric major/minor version from the provided version info. +// The minor version drops all characters after the first non-digit character: +// +// version.Info{Major:"1", Minor:"2+"} -> 1,2 +// version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2 +func MajorMinor(v version.Info) (int, int, error) { + major, err := strconv.Atoi(v.Major) + if err != nil { + return 0, 0, err + } + minor, err := strconv.Atoi(leadingDigits.FindString(v.Minor)) + if err != nil { + return 0, 0, err + } + return major, minor, nil +} + +// IsDeprecated returns true if obj implements APILifecycleDeprecated() and returns +// a major/minor version that is non-zero and is <= the specified current major/minor version. 
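+//
+// Illustrative: for an object whose APILifecycleDeprecated() returns (1, 16),
+// IsDeprecated(obj, 1, 22) reports true, while IsDeprecated(obj, 1, 15)
+// reports false.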
+func IsDeprecated(obj runtime.Object, currentMajor, currentMinor int) bool {
+	deprecated, isDeprecated := obj.(apiLifecycleDeprecated)
+	if !isDeprecated {
+		return false
+	}
+
+	deprecatedMajor, deprecatedMinor := deprecated.APILifecycleDeprecated()
+	// no deprecation version expressed
+	if deprecatedMajor == 0 && deprecatedMinor == 0 {
+		return false
+	}
+	// no current version info available
+	if currentMajor == 0 && currentMinor == 0 {
+		return true
+	}
+	// compare deprecation version to current version
+	if deprecatedMajor > currentMajor {
+		return false
+	}
+	if deprecatedMajor == currentMajor && deprecatedMinor > currentMinor {
+		return false
+	}
+	return true
+}
+
+// RemovedRelease returns the major/minor version in which the given object is unavailable (in the form "<major>.<minor>")
+// if the object implements APILifecycleRemoved() to indicate a non-zero removal version, and returns an empty string otherwise.
+func RemovedRelease(obj runtime.Object) string {
+	if removed, hasRemovalInfo := obj.(apiLifecycleRemoved); hasRemovalInfo {
+		removedMajor, removedMinor := removed.APILifecycleRemoved()
+		if removedMajor != 0 || removedMinor != 0 {
+			return fmt.Sprintf("%d.%d", removedMajor, removedMinor)
+		}
+	}
+	return ""
+}
+
+// WarningMessage returns a human-readable deprecation warning if the object implements APILifecycleDeprecated()
+// to indicate a non-zero deprecated major/minor version and has a populated GetObjectKind().GroupVersionKind().
+func WarningMessage(obj runtime.Object) string {
+	deprecated, isDeprecated := obj.(apiLifecycleDeprecated)
+	if !isDeprecated {
+		return ""
+	}
+
+	deprecatedMajor, deprecatedMinor := deprecated.APILifecycleDeprecated()
+	if deprecatedMajor == 0 && deprecatedMinor == 0 {
+		return ""
+	}
+
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	if gvk.Empty() {
+		return ""
+	}
+	deprecationWarning := fmt.Sprintf("%s %s is deprecated in v%d.%d+", gvk.GroupVersion().String(), gvk.Kind, deprecatedMajor, deprecatedMinor)
+
+	if removed, hasRemovalInfo := obj.(apiLifecycleRemoved); hasRemovalInfo {
+		removedMajor, removedMinor := removed.APILifecycleRemoved()
+		if removedMajor != 0 || removedMinor != 0 {
+			deprecationWarning = deprecationWarning + fmt.Sprintf(", unavailable in v%d.%d+", removedMajor, removedMinor)
+		}
+	}
+
+	if replaced, hasReplacement := obj.(apiLifecycleReplacement); hasReplacement {
+		replacement := replaced.APILifecycleReplacement()
+		if !replacement.Empty() {
+			deprecationWarning = deprecationWarning + fmt.Sprintf("; use %s %s", replacement.GroupVersion().String(), replacement.Kind)
+		}
+	}
+
+	return deprecationWarning
+}
diff --git a/vendor/k8s.io/cli-runtime/LICENSE b/vendor/k8s.io/cli-runtime/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
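In practice the boilerplate notice is wrapped in the comment syntax of the target language, as the appendix describes. A minimal sketch of the applied header for a Go source file follows; the year, owner, and package name are placeholder values only, not taken from this repository. The vendored cli-runtime sources added below carry this same header with "The Kubernetes Authors" as the copyright owner.

/*
Copyright 2025 Jane Developer

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package example is a hypothetical file shown only to illustrate
// placing the notice on the same "printed page" as the code it covers.
package example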
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go new file mode 100644 index 000000000..acf576a36 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go @@ -0,0 +1,231 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/resource" +) + +// ResourceBuilderFlags are flags for finding resources +// TODO(juanvallejo): wire --local flag from commands through +type ResourceBuilderFlags struct { + FileNameFlags *FileNameFlags + + LabelSelector *string + FieldSelector *string + AllNamespaces *bool + All *bool + Local *bool + + Scheme *runtime.Scheme + Latest bool + StopOnFirstError bool +} + +// NewResourceBuilderFlags returns a default ResourceBuilderFlags +func NewResourceBuilderFlags() *ResourceBuilderFlags { + filenames := []string{} + + return &ResourceBuilderFlags{ + FileNameFlags: &FileNameFlags{ + Usage: "identifying the resource.", + Filenames: &filenames, + Recursive: boolPtr(true), + }, + } +} + +// WithFile sets the FileNameFlags. +// If recurse is set, it will process directory recursively. Useful when you want to manage related manifests +// organized within the same directory. 
+func (o *ResourceBuilderFlags) WithFile(recurse bool, files ...string) *ResourceBuilderFlags { + o.FileNameFlags = &FileNameFlags{ + Usage: "identifying the resource.", + Filenames: &files, + Recursive: boolPtr(recurse), + } + + return o +} + +// WithLabelSelector sets the LabelSelector flag +func (o *ResourceBuilderFlags) WithLabelSelector(selector string) *ResourceBuilderFlags { + o.LabelSelector = &selector + return o +} + +// WithFieldSelector sets the FieldSelector flag +func (o *ResourceBuilderFlags) WithFieldSelector(selector string) *ResourceBuilderFlags { + o.FieldSelector = &selector + return o +} + +// WithAllNamespaces sets the AllNamespaces flag +func (o *ResourceBuilderFlags) WithAllNamespaces(defaultVal bool) *ResourceBuilderFlags { + o.AllNamespaces = &defaultVal + return o +} + +// WithAll sets the All flag +func (o *ResourceBuilderFlags) WithAll(defaultVal bool) *ResourceBuilderFlags { + o.All = &defaultVal + return o +} + +// WithLocal sets the Local flag +func (o *ResourceBuilderFlags) WithLocal(defaultVal bool) *ResourceBuilderFlags { + o.Local = &defaultVal + return o +} + +// WithScheme sets the Scheme flag +func (o *ResourceBuilderFlags) WithScheme(scheme *runtime.Scheme) *ResourceBuilderFlags { + o.Scheme = scheme + return o +} + +// WithLatest sets the Latest flag +func (o *ResourceBuilderFlags) WithLatest() *ResourceBuilderFlags { + o.Latest = true + return o +} + +// StopOnError sets the StopOnFirstError flag +func (o *ResourceBuilderFlags) StopOnError() *ResourceBuilderFlags { + o.StopOnFirstError = true + return o +} + +// AddFlags registers flags for finding resources +func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { + o.FileNameFlags.AddFlags(flagset) + + if o.LabelSelector != nil { + flagset.StringVarP(o.LabelSelector, "selector", "l", *o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") + } + if o.FieldSelector != nil { + flagset.StringVar(o.FieldSelector, "field-selector", *o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") + } + if o.AllNamespaces != nil { + flagset.BoolVarP(o.AllNamespaces, "all-namespaces", "A", *o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.") + } + if o.All != nil { + flagset.BoolVar(o.All, "all", *o.All, "Select all resources in the namespace of the specified resource types") + } + if o.Local != nil { + flagset.BoolVar(o.Local, "local", *o.Local, "If true, annotation will NOT contact api-server but run locally.") + } +} + +// ToBuilder gives you back a resource finder to visit resources that are located +func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, resources []string) ResourceFinder { + namespace, enforceNamespace, namespaceErr := restClientGetter.ToRawKubeConfigLoader().Namespace() + + builder := resource.NewBuilder(restClientGetter). + NamespaceParam(namespace).DefaultNamespace() + + if o.AllNamespaces != nil { + builder.AllNamespaces(*o.AllNamespaces) + } + + if o.Scheme != nil { + builder.WithScheme(o.Scheme, o.Scheme.PrioritizedVersionsAllGroups()...) 
+ } else { + builder.Unstructured() + } + + if o.FileNameFlags != nil { + opts := o.FileNameFlags.ToOptions() + builder.FilenameParam(enforceNamespace, &opts) + } + + if o.Local == nil || !*o.Local { + // resource type/name tuples only work non-local + if o.All != nil { + builder.ResourceTypeOrNameArgs(*o.All, resources...) + } else { + builder.ResourceTypeOrNameArgs(false, resources...) + } + // label selectors only work non-local (for now) + if o.LabelSelector != nil { + builder.LabelSelectorParam(*o.LabelSelector) + } + // field selectors only work non-local (forever) + if o.FieldSelector != nil { + builder.FieldSelectorParam(*o.FieldSelector) + } + // latest only works non-local (forever) + if o.Latest { + builder.Latest() + } + + } else { + builder.Local() + + if len(resources) > 0 { + builder.AddError(resource.LocalResourceError) + } + } + + if !o.StopOnFirstError { + builder.ContinueOnError() + } + + return &ResourceFindBuilderWrapper{ + builder: builder. + Flatten(). // I think we're going to recommend this everywhere + AddError(namespaceErr), + } +} + +// ResourceFindBuilderWrapper wraps a builder in an interface +type ResourceFindBuilderWrapper struct { + builder *resource.Builder +} + +// Do finds you resources to check +func (b *ResourceFindBuilderWrapper) Do() resource.Visitor { + return b.builder.Do() +} + +// ResourceFinder allows mocking the resource builder +// TODO resource builders needs to become more interfacey +type ResourceFinder interface { + Do() resource.Visitor +} + +// ResourceFinderFunc is a handy way to make a ResourceFinder +type ResourceFinderFunc func() resource.Visitor + +// Do implements ResourceFinder +func (fn ResourceFinderFunc) Do() resource.Visitor { + return fn() +} + +// ResourceFinderForResult skins a visitor for re-use as a ResourceFinder +func ResourceFinderForResult(result resource.Visitor) ResourceFinder { + return ResourceFinderFunc(func() resource.Visitor { + return result + }) +} + +func boolPtr(val bool) *bool { + return &val +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go new file mode 100644 index 000000000..d43b0c25e --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genericclioptions + +import ( + "k8s.io/cli-runtime/pkg/resource" +) + +// NewSimpleFakeResourceFinder builds a super simple ResourceFinder that just iterates over the objects you provided +func NewSimpleFakeResourceFinder(infos ...*resource.Info) ResourceFinder { + return &fakeResourceFinder{ + Infos: infos, + } +} + +type fakeResourceFinder struct { + Infos []*resource.Info +} + +// Do implements the interface +func (f *fakeResourceFinder) Do() resource.Visitor { + return &fakeResourceResult{ + Infos: f.Infos, + } +} + +type fakeResourceResult struct { + Infos []*resource.Info +} + +// Visit just iterates over info +func (r *fakeResourceResult) Visit(fn resource.VisitorFunc) error { + for _, info := range r.Infos { + err := fn(info, nil) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/client_config.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/client_config.go new file mode 100644 index 000000000..0e22d7140 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/client_config.go @@ -0,0 +1,72 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var ( + // ErrEmptyConfig is the error message to be displayed if the configuration info is missing or incomplete + ErrEmptyConfig = clientcmd.NewEmptyConfigError(`Missing or incomplete configuration info. Please point to an existing, complete config file: + + + 1. Via the command-line flag --kubeconfig + 2. Via the KUBECONFIG environment variable + 3. 
In your home directory as ~/.kube/config + +To view or setup config directly use the 'config' command.`) +) + +var _ = clientcmd.ClientConfig(&clientConfig{}) + +type clientConfig struct { + defaultClientConfig clientcmd.ClientConfig +} + +func (c *clientConfig) RawConfig() (clientcmdapi.Config, error) { + config, err := c.defaultClientConfig.RawConfig() + // replace client-go's ErrEmptyConfig error with our custom, more verbose version + if clientcmd.IsEmptyConfig(err) { + return config, ErrEmptyConfig + } + return config, err +} + +func (c *clientConfig) ClientConfig() (*restclient.Config, error) { + config, err := c.defaultClientConfig.ClientConfig() + // replace client-go's ErrEmptyConfig error with our custom, more verbose version + if clientcmd.IsEmptyConfig(err) { + return config, ErrEmptyConfig + } + return config, err +} + +func (c *clientConfig) Namespace() (string, bool, error) { + namespace, ok, err := c.defaultClientConfig.Namespace() + // replace client-go's ErrEmptyConfig error with our custom, more verbose version + if clientcmd.IsEmptyConfig(err) { + return namespace, ok, ErrEmptyConfig + } + return namespace, ok, err +} + +func (c *clientConfig) ConfigAccess() clientcmd.ConfigAccess { + return c.defaultClientConfig.ConfigAccess() +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go new file mode 100644 index 000000000..ed47e9994 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go @@ -0,0 +1,94 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "net/http" + "strings" + + "github.com/google/uuid" + "github.com/spf13/cobra" +) + +const ( + kubectlCommandHeader = "Kubectl-Command" + kubectlSessionHeader = "Kubectl-Session" +) + +// CommandHeaderRoundTripper adds a layer around the standard +// round tripper to add Request headers before delegation. Implements +// the go standard library "http.RoundTripper" interface. +type CommandHeaderRoundTripper struct { + Delegate http.RoundTripper + Headers map[string]string +} + +// CommandHeaderRoundTripper adds Request headers before delegating to standard +// round tripper. These headers are kubectl command headers which +// detail the kubectl command. See SIG CLI KEP 859: +// +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers +func (c *CommandHeaderRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + for header, value := range c.Headers { + req.Header.Set(header, value) + } + return c.Delegate.RoundTrip(req) +} + +// ParseCommandHeaders fills in a map of custom headers into the CommandHeaderRoundTripper. These +// headers are then filled into each request. For details on the custom headers see: +// +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers +// +// Each call overwrites the previously parsed command headers (not additive). 
+// TODO(seans3): Parse/add flags removing PII from flag values. +func (c *CommandHeaderRoundTripper) ParseCommandHeaders(cmd *cobra.Command, args []string) { + if cmd == nil { + return + } + // Overwrites previously parsed command headers (headers not additive). + c.Headers = map[string]string{} + // Session identifier to aggregate multiple Requests from single kubectl command. + uid := uuid.New().String() + c.Headers[kubectlSessionHeader] = uid + // Iterate up the hierarchy of commands from the leaf command to create + // the full command string. Example: kubectl create secret generic + cmdStrs := []string{} + for cmd.HasParent() { + parent := cmd.Parent() + currName := strings.TrimSpace(cmd.Name()) + cmdStrs = append([]string{currName}, cmdStrs...) + cmd = parent + } + currName := strings.TrimSpace(cmd.Name()) + cmdStrs = append([]string{currName}, cmdStrs...) + if len(cmdStrs) > 0 { + c.Headers[kubectlCommandHeader] = strings.Join(cmdStrs, " ") + } +} + +// CancelRequest is propagated to the Delegate RoundTripper within +// if the wrapped RoundTripper implements this function. +func (c *CommandHeaderRoundTripper) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + // If possible, call "CancelRequest" on the wrapped Delegate RoundTripper. + if cr, ok := c.Delegate.(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go new file mode 100644 index 000000000..9c1683509 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go @@ -0,0 +1,491 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genericclioptions + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "sync" + "time" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/client-go/discovery" + diskcached "k8s.io/client-go/discovery/cached/disk" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" + utilpointer "k8s.io/utils/pointer" +) + +const ( + flagClusterName = "cluster" + flagAuthInfoName = "user" + flagContext = "context" + flagNamespace = "namespace" + flagAPIServer = "server" + flagTLSServerName = "tls-server-name" + flagInsecure = "insecure-skip-tls-verify" + flagCertFile = "client-certificate" + flagKeyFile = "client-key" + flagCAFile = "certificate-authority" + flagBearerToken = "token" + flagImpersonate = "as" + flagImpersonateUID = "as-uid" + flagImpersonateGroup = "as-group" + flagUsername = "username" + flagPassword = "password" + flagTimeout = "request-timeout" + flagCacheDir = "cache-dir" + flagDisableCompression = "disable-compression" +) + +// RESTClientGetter is an interface that the ConfigFlags describe to provide an easier way to mock for commands +// and eliminate the direct coupling to a struct type. Users may wish to duplicate this type in their own packages +// as per the golang type overlapping. +type RESTClientGetter interface { + // ToRESTConfig returns restconfig + ToRESTConfig() (*rest.Config, error) + // ToDiscoveryClient returns discovery client + ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) + // ToRESTMapper returns a restmapper + ToRESTMapper() (meta.RESTMapper, error) + // ToRawKubeConfigLoader return kubeconfig loader as-is + ToRawKubeConfigLoader() clientcmd.ClientConfig +} + +var _ RESTClientGetter = &ConfigFlags{} + +// ConfigFlags composes the set of values necessary +// for obtaining a REST client config +type ConfigFlags struct { + CacheDir *string + KubeConfig *string + + // config flags + ClusterName *string + AuthInfoName *string + Context *string + Namespace *string + APIServer *string + TLSServerName *string + Insecure *bool + CertFile *string + KeyFile *string + CAFile *string + BearerToken *string + Impersonate *string + ImpersonateUID *string + ImpersonateGroup *[]string + Username *string + Password *string + Timeout *string + DisableCompression *bool + // If non-nil, wrap config function can transform the Config + // before it is returned in ToRESTConfig function. + WrapConfigFn func(*rest.Config) *rest.Config + + clientConfig clientcmd.ClientConfig + clientConfigLock sync.Mutex + + restMapper meta.RESTMapper + restMapperLock sync.Mutex + + discoveryClient discovery.CachedDiscoveryInterface + discoveryClientLock sync.Mutex + + // If set to true, will use persistent client config, rest mapper, discovery client, and + // propagate them to the places that need them, rather than + // instantiating them multiple times. + usePersistentConfig bool + // Allows increasing burst used for discovery, this is useful + // in clusters with many registered resources + discoveryBurst int + // Allows increasing qps used for discovery, this is useful + // in clusters with many registered resources + discoveryQPS float32 + // Allows all possible warnings are printed in a standardized + // format. + warningPrinter *printers.WarningPrinter +} + +// ToRESTConfig implements RESTClientGetter. 
+// Returns a REST client configuration based on a provided path +// to a .kubeconfig file, loading rules, and config flag overrides. +// Expects the AddFlags method to have been called. If WrapConfigFn +// is non-nil this function can transform config before return. +func (f *ConfigFlags) ToRESTConfig() (*rest.Config, error) { + c, err := f.ToRawKubeConfigLoader().ClientConfig() + if err != nil { + return nil, err + } + if f.WrapConfigFn != nil { + return f.WrapConfigFn(c), nil + } + return c, nil +} + +// ToRawKubeConfigLoader binds config flag values to config overrides +// Returns an interactive clientConfig if the password flag is enabled, +// or a non-interactive clientConfig otherwise. +func (f *ConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig { + if f.usePersistentConfig { + return f.toRawKubePersistentConfigLoader() + } + return f.toRawKubeConfigLoader() +} + +func (f *ConfigFlags) toRawKubeConfigLoader() clientcmd.ClientConfig { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + // use the standard defaults for this client command + // DEPRECATED: remove and replace with something more accurate + loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig + + if f.KubeConfig != nil { + loadingRules.ExplicitPath = *f.KubeConfig + } + + overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults} + + // bind auth info flag values to overrides + if f.CertFile != nil { + overrides.AuthInfo.ClientCertificate = *f.CertFile + } + if f.KeyFile != nil { + overrides.AuthInfo.ClientKey = *f.KeyFile + } + if f.BearerToken != nil { + overrides.AuthInfo.Token = *f.BearerToken + } + if f.Impersonate != nil { + overrides.AuthInfo.Impersonate = *f.Impersonate + } + if f.ImpersonateUID != nil { + overrides.AuthInfo.ImpersonateUID = *f.ImpersonateUID + } + if f.ImpersonateGroup != nil { + overrides.AuthInfo.ImpersonateGroups = *f.ImpersonateGroup + } + if f.Username != nil { + overrides.AuthInfo.Username = *f.Username + } + if f.Password != nil { + overrides.AuthInfo.Password = *f.Password + } + + // bind cluster flags + if f.APIServer != nil { + overrides.ClusterInfo.Server = *f.APIServer + } + if f.TLSServerName != nil { + overrides.ClusterInfo.TLSServerName = *f.TLSServerName + } + if f.CAFile != nil { + overrides.ClusterInfo.CertificateAuthority = *f.CAFile + } + if f.Insecure != nil { + overrides.ClusterInfo.InsecureSkipTLSVerify = *f.Insecure + } + if f.DisableCompression != nil { + overrides.ClusterInfo.DisableCompression = *f.DisableCompression + } + + // bind context flags + if f.Context != nil { + overrides.CurrentContext = *f.Context + } + if f.ClusterName != nil { + overrides.Context.Cluster = *f.ClusterName + } + if f.AuthInfoName != nil { + overrides.Context.AuthInfo = *f.AuthInfoName + } + if f.Namespace != nil { + overrides.Context.Namespace = *f.Namespace + } + + if f.Timeout != nil { + overrides.Timeout = *f.Timeout + } + + // we only have an interactive prompt when a password is allowed + if f.Password == nil { + return &clientConfig{clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)} + } + return &clientConfig{clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin)} +} + +// toRawKubePersistentConfigLoader binds config flag values to config overrides +// Returns a persistent clientConfig for propagation. 
+func (f *ConfigFlags) toRawKubePersistentConfigLoader() clientcmd.ClientConfig { + f.clientConfigLock.Lock() + defer f.clientConfigLock.Unlock() + + if f.clientConfig == nil { + f.clientConfig = f.toRawKubeConfigLoader() + } + + return f.clientConfig +} + +// ToDiscoveryClient implements RESTClientGetter. +// Expects the AddFlags method to have been called. +// Returns a CachedDiscoveryInterface using a computed RESTConfig. +func (f *ConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + if f.usePersistentConfig { + return f.toPersistentDiscoveryClient() + } + return f.toDiscoveryClient() +} + +func (f *ConfigFlags) toPersistentDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + f.discoveryClientLock.Lock() + defer f.discoveryClientLock.Unlock() + + if f.discoveryClient == nil { + discoveryClient, err := f.toDiscoveryClient() + if err != nil { + return nil, err + } + f.discoveryClient = discoveryClient + } + return f.discoveryClient, nil +} + +func (f *ConfigFlags) toDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + config, err := f.ToRESTConfig() + if err != nil { + return nil, err + } + + config.Burst = f.discoveryBurst + config.QPS = f.discoveryQPS + + cacheDir := getDefaultCacheDir() + + // retrieve a user-provided value for the "cache-dir" + // override httpCacheDir and discoveryCacheDir if user-value is given. + // user-provided value has higher precedence than default + // and KUBECACHEDIR environment variable. + if f.CacheDir != nil && *f.CacheDir != "" && *f.CacheDir != getDefaultCacheDir() { + cacheDir = *f.CacheDir + } + + httpCacheDir := filepath.Join(cacheDir, "http") + discoveryCacheDir := computeDiscoverCacheDir(filepath.Join(cacheDir, "discovery"), config.Host) + + return diskcached.NewCachedDiscoveryClientForConfig(config, discoveryCacheDir, httpCacheDir, time.Duration(6*time.Hour)) +} + +// getDefaultCacheDir returns default caching directory path. +// it first looks at KUBECACHEDIR env var if it is set, otherwise +// it returns standard kube cache dir. +func getDefaultCacheDir() string { + if kcd := os.Getenv("KUBECACHEDIR"); kcd != "" { + return kcd + } + + return filepath.Join(homedir.HomeDir(), ".kube", "cache") +} + +// ToRESTMapper returns a mapper. 
+func (f *ConfigFlags) ToRESTMapper() (meta.RESTMapper, error) { + if f.usePersistentConfig { + return f.toPersistentRESTMapper() + } + return f.toRESTMapper() +} + +func (f *ConfigFlags) toPersistentRESTMapper() (meta.RESTMapper, error) { + f.restMapperLock.Lock() + defer f.restMapperLock.Unlock() + + if f.restMapper == nil { + restMapper, err := f.toRESTMapper() + if err != nil { + return nil, err + } + f.restMapper = restMapper + } + return f.restMapper, nil +} + +func (f *ConfigFlags) toRESTMapper() (meta.RESTMapper, error) { + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return nil, err + } + + mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, discoveryClient, func(a string) { + if f.warningPrinter != nil { + f.warningPrinter.Print(a) + } + }) + return expander, nil +} + +// AddFlags binds client configuration flags to a given flagset +func (f *ConfigFlags) AddFlags(flags *pflag.FlagSet) { + if f.KubeConfig != nil { + flags.StringVar(f.KubeConfig, "kubeconfig", *f.KubeConfig, "Path to the kubeconfig file to use for CLI requests.") + } + if f.CacheDir != nil { + flags.StringVar(f.CacheDir, flagCacheDir, *f.CacheDir, "Default cache directory") + } + + // add config options + if f.CertFile != nil { + flags.StringVar(f.CertFile, flagCertFile, *f.CertFile, "Path to a client certificate file for TLS") + } + if f.KeyFile != nil { + flags.StringVar(f.KeyFile, flagKeyFile, *f.KeyFile, "Path to a client key file for TLS") + } + if f.BearerToken != nil { + flags.StringVar(f.BearerToken, flagBearerToken, *f.BearerToken, "Bearer token for authentication to the API server") + } + if f.Impersonate != nil { + flags.StringVar(f.Impersonate, flagImpersonate, *f.Impersonate, "Username to impersonate for the operation. User could be a regular user or a service account in a namespace.") + } + if f.ImpersonateUID != nil { + flags.StringVar(f.ImpersonateUID, flagImpersonateUID, *f.ImpersonateUID, "UID to impersonate for the operation.") + } + if f.ImpersonateGroup != nil { + flags.StringArrayVar(f.ImpersonateGroup, flagImpersonateGroup, *f.ImpersonateGroup, "Group to impersonate for the operation, this flag can be repeated to specify multiple groups.") + } + if f.Username != nil { + flags.StringVar(f.Username, flagUsername, *f.Username, "Username for basic authentication to the API server") + } + if f.Password != nil { + flags.StringVar(f.Password, flagPassword, *f.Password, "Password for basic authentication to the API server") + } + if f.ClusterName != nil { + flags.StringVar(f.ClusterName, flagClusterName, *f.ClusterName, "The name of the kubeconfig cluster to use") + } + if f.AuthInfoName != nil { + flags.StringVar(f.AuthInfoName, flagAuthInfoName, *f.AuthInfoName, "The name of the kubeconfig user to use") + } + if f.Namespace != nil { + flags.StringVarP(f.Namespace, flagNamespace, "n", *f.Namespace, "If present, the namespace scope for this CLI request") + } + if f.Context != nil { + flags.StringVar(f.Context, flagContext, *f.Context, "The name of the kubeconfig context to use") + } + + if f.APIServer != nil { + flags.StringVarP(f.APIServer, flagAPIServer, "s", *f.APIServer, "The address and port of the Kubernetes API server") + } + if f.TLSServerName != nil { + flags.StringVar(f.TLSServerName, flagTLSServerName, *f.TLSServerName, "Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used") + } + if f.Insecure != nil { + flags.BoolVar(f.Insecure, flagInsecure, *f.Insecure, "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure") + } + if f.CAFile != nil { + flags.StringVar(f.CAFile, flagCAFile, *f.CAFile, "Path to a cert file for the certificate authority") + } + if f.Timeout != nil { + flags.StringVar(f.Timeout, flagTimeout, *f.Timeout, "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests.") + } + if f.DisableCompression != nil { + flags.BoolVar(f.DisableCompression, flagDisableCompression, *f.DisableCompression, "If true, opt-out of response compression for all requests to the server") + } +} + +// WithDeprecatedPasswordFlag enables the username and password config flags +func (f *ConfigFlags) WithDeprecatedPasswordFlag() *ConfigFlags { + f.Username = utilpointer.String("") + f.Password = utilpointer.String("") + return f +} + +// WithDiscoveryBurst sets the RESTClient burst for discovery. +func (f *ConfigFlags) WithDiscoveryBurst(discoveryBurst int) *ConfigFlags { + f.discoveryBurst = discoveryBurst + return f +} + +// WithDiscoveryQPS sets the RESTClient QPS for discovery. +func (f *ConfigFlags) WithDiscoveryQPS(discoveryQPS float32) *ConfigFlags { + f.discoveryQPS = discoveryQPS + return f +} + +// WithWrapConfigFn allows providing a wrapper function for the client Config. +func (f *ConfigFlags) WithWrapConfigFn(wrapConfigFn func(*rest.Config) *rest.Config) *ConfigFlags { + f.WrapConfigFn = wrapConfigFn + return f +} + +// WithWarningPrinter initializes WarningPrinter with the given IOStreams +func (f *ConfigFlags) WithWarningPrinter(ioStreams genericiooptions.IOStreams) *ConfigFlags { + f.warningPrinter = printers.NewWarningPrinter(ioStreams.ErrOut, printers.WarningPrinterOptions{Color: printers.AllowsColorOutput(ioStreams.ErrOut)}) + return f +} + +// NewConfigFlags returns ConfigFlags with default values set +func NewConfigFlags(usePersistentConfig bool) *ConfigFlags { + impersonateGroup := []string{} + insecure := false + disableCompression := false + + return &ConfigFlags{ + Insecure: &insecure, + Timeout: utilpointer.String("0"), + KubeConfig: utilpointer.String(""), + + CacheDir: utilpointer.String(getDefaultCacheDir()), + ClusterName: utilpointer.String(""), + AuthInfoName: utilpointer.String(""), + Context: utilpointer.String(""), + Namespace: utilpointer.String(""), + APIServer: utilpointer.String(""), + TLSServerName: utilpointer.String(""), + CertFile: utilpointer.String(""), + KeyFile: utilpointer.String(""), + CAFile: utilpointer.String(""), + BearerToken: utilpointer.String(""), + Impersonate: utilpointer.String(""), + ImpersonateUID: utilpointer.String(""), + ImpersonateGroup: &impersonateGroup, + DisableCompression: &disableCompression, + + usePersistentConfig: usePersistentConfig, + // The more groups you have, the more discovery requests you need to make. + // with a burst of 300, we will not be rate-limiting for most clusters but + // the safeguard will still be here. This config is only used for discovery. + discoveryBurst: 300, + } +} + +// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. 
Windows is really restrictive, so this is really restrictive +var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/.)]`) + +// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name. +func computeDiscoverCacheDir(parentDir, host string) string { + // strip the optional scheme from host if its there: + schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1) + // now do a simple collapse of non-AZ09 characters. Collisions are possible but unlikely. Even if we do collide the problem is short lived + safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_") + return filepath.Join(parentDir, safeHost) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go new file mode 100644 index 000000000..9bb5e28c9 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// TestConfigFlags contains clientConfig struct +// and interfaces that implements RESTClientGetter +type TestConfigFlags struct { + clientConfig clientcmd.ClientConfig + discoveryClient discovery.CachedDiscoveryInterface + restMapper meta.RESTMapper +} + +// ToRawKubeConfigLoader implements RESTClientGetter +// Returns a clientconfig if it's set +func (f *TestConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig { + if f.clientConfig == nil { + panic("attempt to obtain a test RawKubeConfigLoader with no clientConfig specified") + } + return f.clientConfig +} + +// ToRESTConfig implements RESTClientGetter. +// Returns a REST client configuration based on a provided path +// to a .kubeconfig file, loading rules, and config flag overrides. +// Expects the AddFlags method to have been called. +func (f *TestConfigFlags) ToRESTConfig() (*rest.Config, error) { + return f.ToRawKubeConfigLoader().ClientConfig() +} + +// ToDiscoveryClient implements RESTClientGetter. +// Returns a CachedDiscoveryInterface +func (f *TestConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + return f.discoveryClient, nil +} + +// ToRESTMapper implements RESTClientGetter. +// Returns a mapper. 
+func (f *TestConfigFlags) ToRESTMapper() (meta.RESTMapper, error) { + if f.restMapper != nil { + return f.restMapper, nil + } + if f.discoveryClient != nil { + mapper := restmapper.NewDeferredDiscoveryRESTMapper(f.discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, f.discoveryClient, nil) + return expander, nil + } + return nil, fmt.Errorf("no restmapper") +} + +// WithClientConfig sets the clientConfig flag +func (f *TestConfigFlags) WithClientConfig(clientConfig clientcmd.ClientConfig) *TestConfigFlags { + f.clientConfig = clientConfig + return f +} + +// WithRESTMapper sets the restMapper flag +func (f *TestConfigFlags) WithRESTMapper(mapper meta.RESTMapper) *TestConfigFlags { + f.restMapper = mapper + return f +} + +// WithDiscoveryClient sets the discoveryClient flag +func (f *TestConfigFlags) WithDiscoveryClient(c discovery.CachedDiscoveryInterface) *TestConfigFlags { + f.discoveryClient = c + return f +} + +// WithNamespace sets the clientConfig flag by modifying delagate and namespace +func (f *TestConfigFlags) WithNamespace(ns string) *TestConfigFlags { + if f.clientConfig == nil { + panic("attempt to obtain a test RawKubeConfigLoader with no clientConfig specified") + } + f.clientConfig = &namespacedClientConfig{ + delegate: f.clientConfig, + namespace: ns, + } + return f +} + +// NewTestConfigFlags builds a TestConfigFlags struct to test ConfigFlags +func NewTestConfigFlags() *TestConfigFlags { + return &TestConfigFlags{} +} + +type namespacedClientConfig struct { + delegate clientcmd.ClientConfig + namespace string +} + +func (c *namespacedClientConfig) Namespace() (string, bool, error) { + return c.namespace, len(c.namespace) > 0, nil +} + +func (c *namespacedClientConfig) RawConfig() (clientcmdapi.Config, error) { + return c.delegate.RawConfig() +} +func (c *namespacedClientConfig) ClientConfig() (*rest.Config, error) { + return c.delegate.ClientConfig() +} +func (c *namespacedClientConfig) ConfigAccess() clientcmd.ConfigAccess { + return c.delegate.ConfigAccess() +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go new file mode 100644 index 000000000..303da330b --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package genericclioptions contains flags which can be added to your command, bound, completed, and produce +// useful helper functions. Nothing in this package can depend on kube/kube +package genericclioptions // import "k8s.io/cli-runtime/pkg/genericclioptions" diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go new file mode 100644 index 000000000..74259e417 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "k8s.io/cli-runtime/pkg/resource" +) + +// FileNameFlags are flags for processing files. +// Usage of this struct by itself is discouraged. +// These flags are composed by ResourceBuilderFlags +// which should be used instead. +type FileNameFlags struct { + Usage string + + Filenames *[]string + Kustomize *string + Recursive *bool +} + +// ToOptions creates a new FileNameOptions struct and sets FilenameOptions based on FileNameflags +func (o *FileNameFlags) ToOptions() resource.FilenameOptions { + options := resource.FilenameOptions{} + + if o == nil { + return options + } + + if o.Recursive != nil { + options.Recursive = *o.Recursive + } + if o.Filenames != nil { + options.Filenames = *o.Filenames + } + if o.Kustomize != nil { + options.Kustomize = *o.Kustomize + } + + return options +} + +// AddFlags binds file name flags to a given flagset +func (o *FileNameFlags) AddFlags(flags *pflag.FlagSet) { + if o == nil { + return + } + + if o.Recursive != nil { + flags.BoolVarP(o.Recursive, "recursive", "R", *o.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") + } + if o.Filenames != nil { + flags.StringSliceVarP(o.Filenames, "filename", "f", *o.Filenames, o.Usage) + annotations := make([]string, 0, len(resource.FileExtensions)) + for _, ext := range resource.FileExtensions { + annotations = append(annotations, strings.TrimLeft(ext, ".")) + } + flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) + } + if o.Kustomize != nil { + flags.StringVarP(o.Kustomize, "kustomize", "k", *o.Kustomize, + "Process a kustomization directory. This flag can't be used together with -f or -R.") + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go new file mode 100644 index 000000000..b83135139 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "bytes" + "io" + + "k8s.io/cli-runtime/pkg/genericiooptions" +) + +// IOStreams provides the standard names for iostreams. This is useful for embedding and for unit testing. 
+// Inconsistent and different names make it hard to read and review code +// DEPRECATED: use genericiooptions.IOStreams +type IOStreams = genericiooptions.IOStreams + +// NewTestIOStreams returns a valid IOStreams and in, out, errout buffers for unit tests +// DEPRECATED: use genericiooptions.NewTestIOStreams +func NewTestIOStreams() (genericiooptions.IOStreams, *bytes.Buffer, *bytes.Buffer, *bytes.Buffer) { + in := &bytes.Buffer{} + out := &bytes.Buffer{} + errOut := &bytes.Buffer{} + + return IOStreams{ + In: in, + Out: out, + ErrOut: errOut, + }, in, out, errOut +} + +// NewTestIOStreamsDiscard returns a valid IOStreams that just discards +// DEPRECATED: use genericiooptions.NewTestIOStreamsDiscard +func NewTestIOStreamsDiscard() genericiooptions.IOStreams { + in := &bytes.Buffer{} + return IOStreams{ + In: in, + Out: io.Discard, + ErrOut: io.Discard, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go new file mode 100644 index 000000000..ea8789614 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "strings" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/printers" +) + +// AllowedFormats returns slice of string of allowed JSONYaml printing format +func (f *JSONYamlPrintFlags) AllowedFormats() []string { + if f == nil { + return []string{} + } + return []string{"json", "yaml"} +} + +// JSONYamlPrintFlags provides default flags necessary for json/yaml printing. +// Given the following flag values, a printer can be requested that knows +// how to handle printing based on these values. +type JSONYamlPrintFlags struct { + ShowManagedFields bool +} + +// ToPrinter receives an outputFormat and returns a printer capable of +// handling --output=(yaml|json) printing. +// Returns false if the specified outputFormat does not match a supported format. 
+// Supported Format types can be found in pkg/printers/printers.go +func (f *JSONYamlPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { + var printer printers.ResourcePrinter + + outputFormat = strings.ToLower(outputFormat) + switch outputFormat { + case "json": + printer = &printers.JSONPrinter{} + case "yaml": + printer = &printers.YAMLPrinter{} + default: + return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()} + } + + if !f.ShowManagedFields { + printer = &printers.OmitManagedFieldsPrinter{Delegate: printer} + } + return printer, nil +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to JSON or Yaml printing to it +func (f *JSONYamlPrintFlags) AddFlags(c *cobra.Command) { + if f == nil { + return + } + + c.Flags().BoolVar(&f.ShowManagedFields, "show-managed-fields", f.ShowManagedFields, "If true, keep the managedFields when printing objects in JSON or YAML format.") +} + +// NewJSONYamlPrintFlags returns flags associated with +// yaml or json printing, with default values set. +func NewJSONYamlPrintFlags() *JSONYamlPrintFlags { + return &JSONYamlPrintFlags{} +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go new file mode 100644 index 000000000..a3d86d761 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go @@ -0,0 +1,137 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "fmt" + "os" + "sort" + "strings" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/printers" +) + +// templates are logically optional for specifying a format. +// this allows a user to specify a template format value +// as --output=jsonpath= +var jsonFormats = map[string]bool{ + "jsonpath": true, + "jsonpath-file": true, + "jsonpath-as-json": true, +} + +// JSONPathPrintFlags provides default flags necessary for template printing. +// Given the following flag values, a printer can be requested that knows +// how to handle printing based on these values. +type JSONPathPrintFlags struct { + // indicates if it is OK to ignore missing keys for rendering + // an output template. + AllowMissingKeys *bool + TemplateArgument *string +} + +// AllowedFormats returns slice of string of allowed JSONPath printing format +func (f *JSONPathPrintFlags) AllowedFormats() []string { + formats := make([]string, 0, len(jsonFormats)) + for format := range jsonFormats { + formats = append(formats, format) + } + sort.Strings(formats) + return formats +} + +// ToPrinter receives an templateFormat and returns a printer capable of +// handling --template format printing. +// Returns false if the specified templateFormat does not match a template format. 
+func (f *JSONPathPrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) { + if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 { + return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} + } + + templateValue := "" + + if f.TemplateArgument == nil || len(*f.TemplateArgument) == 0 { + for format := range jsonFormats { + format = format + "=" + if strings.HasPrefix(templateFormat, format) { + templateValue = templateFormat[len(format):] + templateFormat = format[:len(format)-1] + break + } + } + } else { + templateValue = *f.TemplateArgument + } + + if _, supportedFormat := jsonFormats[templateFormat]; !supportedFormat { + return nil, NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()} + } + + if len(templateValue) == 0 { + return nil, fmt.Errorf("template format specified but no template given") + } + + if templateFormat == "jsonpath-file" { + data, err := os.ReadFile(templateValue) + if err != nil { + return nil, fmt.Errorf("error reading --template %s, %v", templateValue, err) + } + + templateValue = string(data) + } + + p, err := printers.NewJSONPathPrinter(templateValue) + if err != nil { + return nil, fmt.Errorf("error parsing jsonpath %s, %v", templateValue, err) + } + + allowMissingKeys := true + if f.AllowMissingKeys != nil { + allowMissingKeys = *f.AllowMissingKeys + } + + p.AllowMissingKeys(allowMissingKeys) + + if templateFormat == "jsonpath-as-json" { + p.EnableJSONOutput(true) + } + + return p, nil +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to template printing to it +func (f *JSONPathPrintFlags) AddFlags(c *cobra.Command) { + if f.TemplateArgument != nil { + c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when --output=jsonpath, --output=jsonpath-file.") + c.MarkFlagFilename("template") + } + if f.AllowMissingKeys != nil { + c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.") + } +} + +// NewJSONPathPrintFlags returns flags associated with +// --template printing, with default values set. +func NewJSONPathPrintFlags(templateValue string, allowMissingKeys bool) *JSONPathPrintFlags { + return &JSONPathPrintFlags{ + TemplateArgument: &templateValue, + AllowMissingKeys: &allowMissingKeys, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go new file mode 100644 index 000000000..518a20ac6 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genericclioptions + +import ( + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/printers" +) + +// KubeTemplatePrintFlags composes print flags that provide both a JSONPath and a go-template printer. +// This is necessary if dealing with cases that require support both both printers, since both sets of flags +// require overlapping flags. +type KubeTemplatePrintFlags struct { + GoTemplatePrintFlags *GoTemplatePrintFlags + JSONPathPrintFlags *JSONPathPrintFlags + + AllowMissingKeys *bool + TemplateArgument *string +} + +// AllowedFormats returns slice of string of allowed GoTemplete and JSONPathPrint printing formats +func (f *KubeTemplatePrintFlags) AllowedFormats() []string { + if f == nil { + return []string{} + } + return append(f.GoTemplatePrintFlags.AllowedFormats(), f.JSONPathPrintFlags.AllowedFormats()...) +} + +// ToPrinter receives an outputFormat and returns a printer capable of +// handling --template printing. +// Returns false if the specified outputFormat does not match a supported format. +// Supported Format types can be found in pkg/printers/printers.go +func (f *KubeTemplatePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { + if f == nil { + return nil, NoCompatiblePrinterError{} + } + + if p, err := f.JSONPathPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { + return p, err + } + return f.GoTemplatePrintFlags.ToPrinter(outputFormat) +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to template printing to it +func (f *KubeTemplatePrintFlags) AddFlags(c *cobra.Command) { + if f == nil { + return + } + + if f.TemplateArgument != nil { + c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") + c.MarkFlagFilename("template") + } + if f.AllowMissingKeys != nil { + c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.") + } +} + +// NewKubeTemplatePrintFlags returns flags associated with +// --template printing, with default values set. +func NewKubeTemplatePrintFlags() *KubeTemplatePrintFlags { + allowMissingKeysPtr := true + templateArgPtr := "" + + return &KubeTemplatePrintFlags{ + GoTemplatePrintFlags: &GoTemplatePrintFlags{ + TemplateArgument: &templateArgPtr, + AllowMissingKeys: &allowMissingKeysPtr, + }, + JSONPathPrintFlags: &JSONPathPrintFlags{ + TemplateArgument: &templateArgPtr, + AllowMissingKeys: &allowMissingKeysPtr, + }, + + TemplateArgument: &templateArgPtr, + AllowMissingKeys: &allowMissingKeysPtr, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go new file mode 100644 index 000000000..16bf42265 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "k8s.io/cli-runtime/pkg/printers"
+)
+
+// NamePrintFlags provides default flags necessary for printing
+// a resource's fully-qualified Kind.group/name, or a successful
+// message about that resource if an Operation is provided.
+type NamePrintFlags struct {
+ // Operation describes the name of the action that
+ // took place on an object, to be included in the
+ // finalized "successful" message.
+ Operation string
+}
+
+// Complete sets NamePrintFlags operation flag from successTemplate
+func (f *NamePrintFlags) Complete(successTemplate string) error {
+ f.Operation = fmt.Sprintf(successTemplate, f.Operation)
+ return nil
+}
+
+// AllowedFormats returns a slice of the allowed name printing formats
+func (f *NamePrintFlags) AllowedFormats() []string {
+ if f == nil {
+ return []string{}
+ }
+ return []string{"name"}
+}
+
+// ToPrinter receives an outputFormat and returns a printer capable of
+// handling --output=name printing.
+// Returns a NoCompatiblePrinterError if the specified outputFormat does not match a supported format.
+// Supported format types can be found in pkg/printers/printers.go
+func (f *NamePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) {
+ namePrinter := &printers.NamePrinter{
+ Operation: f.Operation,
+ }
+
+ outputFormat = strings.ToLower(outputFormat)
+ switch outputFormat {
+ case "name":
+ namePrinter.ShortOutput = true
+ fallthrough
+ case "":
+ return namePrinter, nil
+ default:
+ return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()}
+ }
+}
+
+// AddFlags receives a *cobra.Command reference and binds
+// flags related to name printing to it
+func (f *NamePrintFlags) AddFlags(c *cobra.Command) {}
+
+// NewNamePrintFlags returns flags associated with
+// --output=name printing, with default values set.
+func NewNamePrintFlags(operation string) *NamePrintFlags {
+ return &NamePrintFlags{
+ Operation: operation,
+ }
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go
new file mode 100644
index 000000000..815f19bbd
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/cli-runtime/pkg/printers"
+)
+
+// NoCompatiblePrinterError is a struct that contains error information.
+// It will be constructed when an invalid printing format is provided
+type NoCompatiblePrinterError struct {
+ OutputFormat *string
+ AllowedFormats []string
+ Options interface{}
+}
+
+func (e NoCompatiblePrinterError) Error() string {
+ output := ""
+ if e.OutputFormat != nil {
+ output = *e.OutputFormat
+ }
+
+ sort.Strings(e.AllowedFormats)
+ return fmt.Sprintf("unable to match a printer suitable for the output format %q, allowed formats are: %s", output, strings.Join(e.AllowedFormats, ","))
+}
+
+// IsNoCompatiblePrinterError returns true if the given error is a
+// NoCompatiblePrinterError; otherwise it returns false
+func IsNoCompatiblePrinterError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ _, ok := err.(NoCompatiblePrinterError)
+ return ok
+}
+
+// PrintFlags composes common printer flag structs
+// used across all commands, and provides a method
+// of retrieving a known printer based on flag values provided.
+type PrintFlags struct {
+ JSONYamlPrintFlags *JSONYamlPrintFlags
+ NamePrintFlags *NamePrintFlags
+ TemplatePrinterFlags *KubeTemplatePrintFlags
+
+ TypeSetterPrinter *printers.TypeSetterPrinter
+
+ OutputFormat *string
+
+ // OutputFlagSpecified indicates whether the user specifically requested a certain kind of output.
+ // Using this function allows a sophisticated caller to change the flag binding logic if they so desire.
+ OutputFlagSpecified func() bool
+}
+
+// Complete sets NamePrintFlags operation flag from successTemplate
+func (f *PrintFlags) Complete(successTemplate string) error {
+ return f.NamePrintFlags.Complete(successTemplate)
+}
+
+// AllowedFormats returns a slice of the allowed JSON/YAML, name, and template printing formats
+func (f *PrintFlags) AllowedFormats() []string {
+ ret := []string{}
+ ret = append(ret, f.JSONYamlPrintFlags.AllowedFormats()...)
+ ret = append(ret, f.NamePrintFlags.AllowedFormats()...)
+ ret = append(ret, f.TemplatePrinterFlags.AllowedFormats()...)
+ return ret
+}
+
+// ToPrinter returns a printer capable of
+// handling --output or --template printing.
+// Returns a NoCompatiblePrinterError if the specified outputFormat does not match a supported format.
+// Supported format types can be found in pkg/printers/printers.go
+func (f *PrintFlags) ToPrinter() (printers.ResourcePrinter, error) {
+ outputFormat := ""
+ if f.OutputFormat != nil {
+ outputFormat = *f.OutputFormat
+ }
+ // For backwards compatibility we want to support a --template argument given, even when no --output format is provided.
+ // If no explicit output format has been provided via the --output flag, fall back
+ // to honoring the --template argument.
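+ // For example (illustrative): with the hypothetical invocation
+ //   kubectl get pod -o name --template='{{.metadata.name}}'
+ // the --output flag was set explicitly, so "name" wins and the fallback
+ // below is skipped; when only --template is given, outputFormat falls
+ // back to "go-template".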
+ templateFlagSpecified := f.TemplatePrinterFlags != nil &&
+ f.TemplatePrinterFlags.TemplateArgument != nil &&
+ len(*f.TemplatePrinterFlags.TemplateArgument) > 0
+ outputFlagSpecified := f.OutputFlagSpecified != nil && f.OutputFlagSpecified()
+ if templateFlagSpecified && !outputFlagSpecified {
+ outputFormat = "go-template"
+ }
+
+ if f.JSONYamlPrintFlags != nil {
+ if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {
+ return f.TypeSetterPrinter.WrapToPrinter(p, err)
+ }
+ }
+
+ if f.NamePrintFlags != nil {
+ if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {
+ return f.TypeSetterPrinter.WrapToPrinter(p, err)
+ }
+ }
+
+ if f.TemplatePrinterFlags != nil {
+ if p, err := f.TemplatePrinterFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {
+ return f.TypeSetterPrinter.WrapToPrinter(p, err)
+ }
+ }
+
+ return nil, NoCompatiblePrinterError{OutputFormat: f.OutputFormat, AllowedFormats: f.AllowedFormats()}
+}
+
+// AddFlags receives a *cobra.Command reference and binds
+// flags related to JSON/Yaml/Name/Template printing to it
+func (f *PrintFlags) AddFlags(cmd *cobra.Command) {
+ f.JSONYamlPrintFlags.AddFlags(cmd)
+ f.NamePrintFlags.AddFlags(cmd)
+ f.TemplatePrinterFlags.AddFlags(cmd)
+
+ if f.OutputFormat != nil {
+ cmd.Flags().StringVarP(f.OutputFormat, "output", "o", *f.OutputFormat, fmt.Sprintf(`Output format. One of: (%s).`, strings.Join(f.AllowedFormats(), ", ")))
+ if f.OutputFlagSpecified == nil {
+ f.OutputFlagSpecified = func() bool {
+ return cmd.Flag("output").Changed
+ }
+ }
+ }
+}
+
+// WithDefaultOutput sets a default output format if one is not provided through a flag value
+func (f *PrintFlags) WithDefaultOutput(output string) *PrintFlags {
+ f.OutputFormat = &output
+ return f
+}
+
+// WithTypeSetter sets a wrapper that will surround the returned printer with one that sets resource type information
+func (f *PrintFlags) WithTypeSetter(scheme *runtime.Scheme) *PrintFlags {
+ f.TypeSetterPrinter = printers.NewTypeSetter(scheme)
+ return f
+}
+
+// NewPrintFlags returns a default *PrintFlags
+func NewPrintFlags(operation string) *PrintFlags {
+ outputFormat := ""
+
+ return &PrintFlags{
+ OutputFormat: &outputFormat,
+
+ JSONYamlPrintFlags: NewJSONYamlPrintFlags(),
+ NamePrintFlags: NewNamePrintFlags(operation),
+ TemplatePrinterFlags: NewKubeTemplatePrintFlags(),
+ }
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go
new file mode 100644
index 000000000..a5f307de5
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ jsonpatch "github.com/evanphx/json-patch"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/json"
+)
+
+// ChangeCauseAnnotation is the annotation indicating a guess at "why" something was changed
+const ChangeCauseAnnotation = "kubernetes.io/change-cause"
+
+// RecordFlags contains all flags associated with the "--record" operation
+type RecordFlags struct {
+ // Record indicates the state of the recording flag. It is a pointer so a caller can opt out or rebind
+ Record *bool
+
+ changeCause string
+}
+
+// ToRecorder returns a ChangeCause recorder if --record=false was not
+// explicitly given by the user
+func (f *RecordFlags) ToRecorder() (Recorder, error) {
+ if f == nil {
+ return NoopRecorder{}, nil
+ }
+
+ shouldRecord := false
+ if f.Record != nil {
+ shouldRecord = *f.Record
+ }
+
+ // if flag was explicitly set to false by the user,
+ // do not record
+ if !shouldRecord {
+ return NoopRecorder{}, nil
+ }
+
+ return &ChangeCauseRecorder{
+ changeCause: f.changeCause,
+ }, nil
+}
+
+// Complete is called before the command is run, but after it is invoked to finish the state of the struct before use.
+func (f *RecordFlags) Complete(cmd *cobra.Command) error {
+ if f == nil {
+ return nil
+ }
+
+ f.changeCause = parseCommandArguments(cmd)
+ return nil
+}
+
+// CompleteWithChangeCause alters changeCause value with a new cause
+func (f *RecordFlags) CompleteWithChangeCause(cause string) error {
+ if f == nil {
+ return nil
+ }
+
+ f.changeCause = cause
+ return nil
+}
+
+// AddFlags binds the requested flags to the provided flagset
+// TODO have this only take a flagset
+func (f *RecordFlags) AddFlags(cmd *cobra.Command) {
+ if f == nil {
+ return
+ }
+
+ if f.Record != nil {
+ cmd.Flags().BoolVar(f.Record, "record", *f.Record, "Record current kubectl command in the resource annotation. If set to false, do not record the command. If set to true, record the command. If not set, default to updating the existing annotation value only if one already exists.")
+ cmd.Flags().MarkDeprecated("record", "--record will be removed in the future")
+ }
+}
+
+// NewRecordFlags provides a RecordFlags with reasonable default values set for use
+func NewRecordFlags() *RecordFlags {
+ record := false
+
+ return &RecordFlags{
+ Record: &record,
+ }
+}
+
+// Recorder is used to record why a runtime.Object was changed in an annotation.
+type Recorder interface {
+ // Record records why a runtime.Object was changed in an annotation.
+ Record(runtime.Object) error
+ MakeRecordMergePatch(runtime.Object) ([]byte, error)
+}
+
+// NoopRecorder does nothing. It is a "do nothing" that can be returned so code doesn't switch on it.
+type NoopRecorder struct{}
+
+// Record implements Recorder
+func (r NoopRecorder) Record(obj runtime.Object) error {
+ return nil
+}
+
+// MakeRecordMergePatch implements Recorder
+func (r NoopRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) {
+ return nil, nil
+}
+
+// ChangeCauseRecorder annotates a "change-cause" to an input runtime object
+type ChangeCauseRecorder struct {
+ changeCause string
+}
+
+// Record unconditionally annotates the given object with this recorder's
+// "change-cause"; the decision whether to record at all is made in ToRecorder.
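+//
+// Example wiring (illustrative sketch; assumes a *cobra.Command named cmd and
+// a runtime.Object named obj are in scope):
+//
+//	recordFlags := NewRecordFlags()
+//	recordFlags.AddFlags(cmd)     // binds --record
+//	_ = recordFlags.Complete(cmd) // captures the invocation as the change cause
+//	recorder, _ := recordFlags.ToRecorder()
+//	_ = recorder.Record(obj) // annotates kubernetes.io/change-cause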
+func (r *ChangeCauseRecorder) Record(obj runtime.Object) error {
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ annotations := accessor.GetAnnotations()
+ if annotations == nil {
+ annotations = make(map[string]string)
+ }
+ annotations[ChangeCauseAnnotation] = r.changeCause
+ accessor.SetAnnotations(annotations)
+ return nil
+}
+
+// MakeRecordMergePatch produces a merge patch for updating the recording annotation.
+func (r *ChangeCauseRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) {
+ // copy so we don't mess with the original
+ objCopy := obj.DeepCopyObject()
+ if err := r.Record(objCopy); err != nil {
+ return nil, err
+ }
+
+ oldData, err := json.Marshal(obj)
+ if err != nil {
+ return nil, err
+ }
+ newData, err := json.Marshal(objCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ return jsonpatch.CreateMergePatch(oldData, newData)
+}
+
+// parseCommandArguments stringifies and returns the command line that invoked
+// the given command (binary name, arguments, and flags). Values of flags
+// annotated as "classified" are redacted.
+func parseCommandArguments(cmd *cobra.Command) string {
+ if len(os.Args) == 0 {
+ return ""
+ }
+
+ flags := ""
+ parseFunc := func(flag *pflag.Flag, value string) error {
+ flags = flags + " --" + flag.Name
+ if set, ok := flag.Annotations["classified"]; !ok || len(set) == 0 {
+ flags = flags + "=" + value
+ } else {
+ flags = flags + "=CLASSIFIED"
+ }
+ return nil
+ }
+ var err error
+ err = cmd.Flags().ParseAll(os.Args[1:], parseFunc)
+ if err != nil || !cmd.Flags().Parsed() {
+ return ""
+ }
+
+ args := ""
+ if arguments := cmd.Flags().Args(); len(arguments) > 0 {
+ args = " " + strings.Join(arguments, " ")
+ }
+
+ base := filepath.Base(os.Args[0])
+ return base + args + flags
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go
new file mode 100644
index 000000000..9d670daa4
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "k8s.io/cli-runtime/pkg/printers"
+)
+
+// templates are logically optional for specifying a format.
+// this allows a user to specify a template format value
+// as --output=go-template=
+var templateFormats = map[string]bool{
+ "template": true,
+ "go-template": true,
+ "go-template-file": true,
+ "templatefile": true,
+}
+
+// GoTemplatePrintFlags provides default flags necessary for template printing.
+// Given the following flag values, a printer can be requested that knows
+// how to handle printing based on these values.
+type GoTemplatePrintFlags struct {
+ // indicates if it is OK to ignore missing keys for rendering
+ // an output template.
+ AllowMissingKeys *bool
+ TemplateArgument *string
+}
+
+// AllowedFormats returns a slice of the allowed go-template printing formats
+func (f *GoTemplatePrintFlags) AllowedFormats() []string {
+ formats := make([]string, 0, len(templateFormats))
+ for format := range templateFormats {
+ formats = append(formats, format)
+ }
+ sort.Strings(formats)
+ return formats
+}
+
+// ToPrinter receives a templateFormat and returns a printer capable of
+// handling --template format printing.
+// Returns a NoCompatiblePrinterError if the specified templateFormat is not a supported template format.
+func (f *GoTemplatePrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) {
+ if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 {
+ return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat}
+ }
+
+ templateValue := ""
+
+ if f.TemplateArgument == nil || len(*f.TemplateArgument) == 0 {
+ for format := range templateFormats {
+ format = format + "="
+ if strings.HasPrefix(templateFormat, format) {
+ templateValue = templateFormat[len(format):]
+ templateFormat = format[:len(format)-1]
+ break
+ }
+ }
+ } else {
+ templateValue = *f.TemplateArgument
+ }
+
+ if _, supportedFormat := templateFormats[templateFormat]; !supportedFormat {
+ return nil, NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()}
+ }
+
+ if len(templateValue) == 0 {
+ return nil, fmt.Errorf("template format specified but no template given")
+ }
+
+ if templateFormat == "templatefile" || templateFormat == "go-template-file" {
+ data, err := os.ReadFile(templateValue)
+ if err != nil {
+ return nil, fmt.Errorf("error reading --template %s, %v", templateValue, err)
+ }
+
+ templateValue = string(data)
+ }
+
+ p, err := printers.NewGoTemplatePrinter([]byte(templateValue))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing template %s, %v", templateValue, err)
+ }
+
+ allowMissingKeys := true
+ if f.AllowMissingKeys != nil {
+ allowMissingKeys = *f.AllowMissingKeys
+ }
+
+ p.AllowMissingKeys(allowMissingKeys)
+ return p, nil
+}
+
+// AddFlags receives a *cobra.Command reference and binds
+// flags related to template printing to it
+func (f *GoTemplatePrintFlags) AddFlags(c *cobra.Command) {
+ if f.TemplateArgument != nil {
+ c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].")
+ c.MarkFlagFilename("template")
+ }
+ if f.AllowMissingKeys != nil {
+ c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.")
+ }
+}
+
+// NewGoTemplatePrintFlags returns flags associated with
+// --template printing, with default values set.
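+//
+// Example (illustrative sketch; assumes a runtime.Object named obj and
+// os.Stdout are in scope):
+//
+//	flags := NewGoTemplatePrintFlags()
+//	p, err := flags.ToPrinter("go-template={{.metadata.name}}")
+//	if err == nil {
+//		_ = p.PrintObj(obj, os.Stdout)
+//	}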
+func NewGoTemplatePrintFlags() *GoTemplatePrintFlags { + allowMissingKeysPtr := true + templateValuePtr := "" + + return &GoTemplatePrintFlags{ + TemplateArgument: &templateValuePtr, + AllowMissingKeys: &allowMissingKeysPtr, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericiooptions/io_options.go b/vendor/k8s.io/cli-runtime/pkg/genericiooptions/io_options.go new file mode 100644 index 000000000..247b1c2ee --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericiooptions/io_options.go @@ -0,0 +1,56 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericiooptions + +import ( + "bytes" + "io" +) + +// IOStreams provides the standard names for iostreams. This is useful for embedding and for unit testing. +// Inconsistent and different names make it hard to read and review code +type IOStreams struct { + // In think, os.Stdin + In io.Reader + // Out think, os.Stdout + Out io.Writer + // ErrOut think, os.Stderr + ErrOut io.Writer +} + +// NewTestIOStreams returns a valid IOStreams and in, out, errout buffers for unit tests +func NewTestIOStreams() (IOStreams, *bytes.Buffer, *bytes.Buffer, *bytes.Buffer) { + in := &bytes.Buffer{} + out := &bytes.Buffer{} + errOut := &bytes.Buffer{} + + return IOStreams{ + In: in, + Out: out, + ErrOut: errOut, + }, in, out, errOut +} + +// NewTestIOStreamsDiscard returns a valid IOStreams that just discards +func NewTestIOStreamsDiscard() IOStreams { + in := &bytes.Buffer{} + return IOStreams{ + In: in, + Out: io.Discard, + ErrOut: io.Discard, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/discard.go b/vendor/k8s.io/cli-runtime/pkg/printers/discard.go new file mode 100644 index 000000000..cd934976d --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/discard.go @@ -0,0 +1,30 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDiscardingPrinter is a printer that discards all objects +func NewDiscardingPrinter() ResourcePrinterFunc { + return ResourcePrinterFunc(func(runtime.Object, io.Writer) error { + return nil + }) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/doc.go b/vendor/k8s.io/cli-runtime/pkg/printers/doc.go new file mode 100644 index 000000000..ee205371d --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package printers provides helpers for formatting and printing runtime
+// objects to an io.Writer.
package printers // import "k8s.io/cli-runtime/pkg/printers"
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/interface.go b/vendor/k8s.io/cli-runtime/pkg/printers/interface.go
new file mode 100644
index 000000000..e88ff63ae
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/interface.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+ "io"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// ResourcePrinterFunc is a function that can print objects
+type ResourcePrinterFunc func(runtime.Object, io.Writer) error
+
+// PrintObj implements ResourcePrinter
+func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error {
+ return fn(obj, w)
+}
+
+// ResourcePrinter is an interface that knows how to print runtime objects.
+type ResourcePrinter interface {
+ // PrintObj receives a runtime object, formats it and prints it to a writer.
+ PrintObj(runtime.Object, io.Writer) error
+}
+
+// PrintOptions holds the options shared by the various printers
+type PrintOptions struct {
+ NoHeaders bool
+ WithNamespace bool
+ WithKind bool
+ Wide bool
+ ShowLabels bool
+ Kind schema.GroupKind
+ ColumnLabels []string
+
+ SortBy string
+
+ // indicates if it is OK to ignore missing keys for rendering an output template.
+ AllowMissingKeys bool
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/json.go b/vendor/k8s.io/cli-runtime/pkg/printers/json.go
new file mode 100644
index 000000000..8ab2235f8
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/json.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package printers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// JSONPrinter is an implementation of ResourcePrinter which outputs an object as JSON. +type JSONPrinter struct{} + +// PrintObj is an implementation of ResourcePrinter.PrintObj which simply writes the object to the Writer. +func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + // we use reflect.Indirect here in order to obtain the actual value from a pointer. + // we need an actual value in order to retrieve the package path for an object. + // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + switch obj := obj.(type) { + case *metav1.WatchEvent: + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + data, err := json.Marshal(obj) + if err != nil { + return err + } + _, err = w.Write(data) + if err != nil { + return err + } + _, err = w.Write([]byte{'\n'}) + return err + case *runtime.Unknown: + var buf bytes.Buffer + err := json.Indent(&buf, obj.Raw, "", " ") + if err != nil { + return err + } + buf.WriteRune('\n') + _, err = buf.WriteTo(w) + return err + } + + if obj.GetObjectKind().GroupVersionKind().Empty() { + return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type") + } + + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + data = append(data, '\n') + _, err = w.Write(data) + return err +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go b/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go new file mode 100644 index 000000000..769960d66 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go @@ -0,0 +1,147 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/jsonpath" +) + +// exists returns true if it would be possible to call the index function +// with these arguments. +// +// TODO: how to document this for users? +// +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. 
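+//
+// For example (illustrative), in a template such as
+//
+//	{{if exists . "status" "podIP"}}{{.status.podIP}}{{end}}
+//
+// exists reports whether .status.podIP can be indexed without an error.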
+func exists(item interface{}, indices ...interface{}) bool { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return false + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return false + } + if x < 0 || x >= int64(v.Len()) { + return false + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return false + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return false + } + } + if _, isNil := indirect(v); isNil { + return false + } + return true +} + +// stolen from text/template +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Pointer || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// JSONPathPrinter is an implementation of ResourcePrinter which formats data with jsonpath expression. +type JSONPathPrinter struct { + rawTemplate string + *jsonpath.JSONPath +} + +func NewJSONPathPrinter(tmpl string) (*JSONPathPrinter, error) { + j := jsonpath.New("out") + if err := j.Parse(tmpl); err != nil { + return nil, err + } + return &JSONPathPrinter{ + rawTemplate: tmpl, + JSONPath: j, + }, nil +} + +// PrintObj formats the obj with the JSONPath Template. +func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + // we use reflect.Indirect here in order to obtain the actual value from a pointer. + // we need an actual value in order to retrieve the package path for an object. + // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + var queryObj interface{} = obj + if unstructured, ok := obj.(runtime.Unstructured); ok { + queryObj = unstructured.UnstructuredContent() + } else { + data, err := json.Marshal(obj) + if err != nil { + return err + } + queryObj = map[string]interface{}{} + if err := json.Unmarshal(data, &queryObj); err != nil { + return err + } + } + + if err := j.JSONPath.Execute(w, queryObj); err != nil { + buf := bytes.NewBuffer(nil) + fmt.Fprintf(buf, "Error executing template: %v. 
Printing more information for debugging the template:\n", err) + fmt.Fprintf(buf, "\ttemplate was:\n\t\t%v\n", j.rawTemplate) + fmt.Fprintf(buf, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj) + return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, buf.String()) + } + return nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/managedfields.go b/vendor/k8s.io/cli-runtime/pkg/printers/managedfields.go new file mode 100644 index 000000000..cab54d058 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/managedfields.go @@ -0,0 +1,59 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "io" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +// OmitManagedFieldsPrinter wraps an existing printer and omits the managed fields from the object +// before printing it. +type OmitManagedFieldsPrinter struct { + Delegate ResourcePrinter +} + +var _ ResourcePrinter = (*OmitManagedFieldsPrinter)(nil) + +func omitManagedFields(o runtime.Object) runtime.Object { + a, err := meta.Accessor(o) + if err != nil { + // The object is not a `metav1.Object`, ignore it. + return o + } + a.SetManagedFields(nil) + return o +} + +// PrintObj copies the object and omits the managed fields from the copied object before printing it. +func (p *OmitManagedFieldsPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + if obj == nil { + return p.Delegate.PrintObj(obj, w) + } + if meta.IsListType(obj) { + obj = obj.DeepCopyObject() + _ = meta.EachListItem(obj, func(item runtime.Object) error { + omitManagedFields(item) + return nil + }) + } else if _, err := meta.Accessor(obj); err == nil { + obj = omitManagedFields(obj.DeepCopyObject()) + } + return p.Delegate.PrintObj(obj, w) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/name.go b/vendor/k8s.io/cli-runtime/pkg/printers/name.go new file mode 100644 index 000000000..086166af2 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/name.go @@ -0,0 +1,130 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "fmt" + "io" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NamePrinter is an implementation of ResourcePrinter which outputs "resource/name" pair of an object. 
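+//
+// For example (illustrative): a Deployment named "web" prints as
+// "deployment.apps/web", with the Operation (e.g. "created") appended
+// unless ShortOutput is set.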
+type NamePrinter struct {
+ // ShortOutput indicates that only the "resource/name" pair should be
+ // printed, without the operation suffix.
+ ShortOutput bool
+ // Operation describes the name of the action that
+ // took place on an object, to be included in the
+ // finalized "successful" message.
+ Operation string
+}
+
+// PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object
+// and prints the "resource/name" pair. If the object is a List, all items in it are printed.
+func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error {
+ switch castObj := obj.(type) {
+ case *metav1.WatchEvent:
+ obj = castObj.Object.Object
+ }
+
+ // we use reflect.Indirect here in order to obtain the actual value from a pointer.
+ // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
+ // we need an actual value in order to retrieve the package path for an object.
+ if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
+ return fmt.Errorf(InternalObjectPrinterErr)
+ }
+
+ if meta.IsListType(obj) {
+ // we allow unstructured lists for now because they always contain the GVK information. We should chase down
+ // callers and stop them from passing unflattened lists
+ // TODO chase the caller that is setting this and remove it.
+ if _, ok := obj.(*unstructured.UnstructuredList); !ok {
+ return fmt.Errorf("list types are not supported by name printing: %T", obj)
+ }
+
+ items, err := meta.ExtractList(obj)
+ if err != nil {
+ return err
+ }
+ for _, obj := range items {
+ if err := p.PrintObj(obj, w); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ if obj.GetObjectKind().GroupVersionKind().Empty() {
+ return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type")
+ }
+
+ name := ""
+ if acc, err := meta.Accessor(obj); err == nil {
+ if n := acc.GetName(); len(n) > 0 {
+ name = n
+ }
+ }
+
+ return printObj(w, name, p.Operation, p.ShortOutput, GetObjectGroupKind(obj))
+}
+
+func GetObjectGroupKind(obj runtime.Object) schema.GroupKind {
+ if obj == nil {
+ return schema.GroupKind{Kind: ""}
+ }
+ groupVersionKind := obj.GetObjectKind().GroupVersionKind()
+ if len(groupVersionKind.Kind) > 0 {
+ return groupVersionKind.GroupKind()
+ }
+
+ if uns, ok := obj.(*unstructured.Unstructured); ok {
+ if len(uns.GroupVersionKind().Kind) > 0 {
+ return uns.GroupVersionKind().GroupKind()
+ }
+ }
+
+ return schema.GroupKind{Kind: ""}
+}
+
+func printObj(w io.Writer, name string, operation string, shortOutput bool, groupKind schema.GroupKind) error {
+ if len(groupKind.Kind) == 0 {
+ return fmt.Errorf("missing kind for resource with name %v", name)
+ }
+
+ if len(operation) > 0 {
+ operation = " " + operation
+ }
+
+ if shortOutput {
+ operation = ""
+ }
+
+ if len(groupKind.Group) == 0 {
+ fmt.Fprintf(w, "%s/%s%s\n", strings.ToLower(groupKind.Kind), name, operation)
+ return nil
+ }
+
+ fmt.Fprintf(w, "%s.%s/%s%s\n", strings.ToLower(groupKind.Kind), groupKind.Group, name, operation)
+ return nil
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go b/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go
new file mode 100644
index 000000000..e360c8fe0
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "strings" +) + +var ( + InternalObjectPrinterErr = "a versioned object must be passed to a printer" + + // disallowedPackagePrefixes contains regular expression templates + // for object package paths that are not allowed by printers. + disallowedPackagePrefixes = []string{ + "k8s.io/kubernetes/pkg/apis/", + } +) + +var InternalObjectPreventer = &illegalPackageSourceChecker{disallowedPackagePrefixes} + +func IsInternalObjectError(err error) bool { + if err == nil { + return false + } + + return err.Error() == InternalObjectPrinterErr +} + +// illegalPackageSourceChecker compares a given +// object's package path, and determines if the +// object originates from a disallowed source. +type illegalPackageSourceChecker struct { + // disallowedPrefixes is a slice of disallowed package path + // prefixes for a given runtime.Object that we are printing. + disallowedPrefixes []string +} + +func (c *illegalPackageSourceChecker) IsForbidden(pkgPath string) bool { + for _, forbiddenPrefix := range c.disallowedPrefixes { + if strings.HasPrefix(pkgPath, forbiddenPrefix) || strings.Contains(pkgPath, "/vendor/"+forbiddenPrefix) { + return true + } + } + + return false +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go b/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go new file mode 100644 index 000000000..548596659 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go @@ -0,0 +1,589 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package printers + +import ( + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/liggitt/tabwriter" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/duration" + "k8s.io/apimachinery/pkg/watch" +) + +var _ ResourcePrinter = &HumanReadablePrinter{} + +type printHandler struct { + columnDefinitions []metav1.TableColumnDefinition + printFunc reflect.Value +} + +var ( + statusHandlerEntry = &printHandler{ + columnDefinitions: statusColumnDefinitions, + printFunc: reflect.ValueOf(printStatus), + } + + statusColumnDefinitions = []metav1.TableColumnDefinition{ + {Name: "Status", Type: "string"}, + {Name: "Reason", Type: "string"}, + {Name: "Message", Type: "string"}, + } + + defaultHandlerEntry = &printHandler{ + columnDefinitions: objectMetaColumnDefinitions, + printFunc: reflect.ValueOf(printObjectMeta), + } + + objectMetaColumnDefinitions = []metav1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, + {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, + } + + withEventTypePrefixColumns = []string{"EVENT"} + withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. +) + +// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide +// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers +// will only be printed if the object type changes. This makes it useful for printing items +// received from watches. +type HumanReadablePrinter struct { + options PrintOptions + lastType interface{} + lastColumns []metav1.TableColumnDefinition + printedHeaders bool +} + +// NewTablePrinter creates a printer suitable for calling PrintObj(). +func NewTablePrinter(options PrintOptions) ResourcePrinter { + printer := &HumanReadablePrinter{ + options: options, + } + return printer +} + +func printHeader(columnNames []string, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { + return err + } + return nil +} + +// PrintObj prints the obj in a human-friendly format according to the type of the obj. +func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error { + + if _, found := output.(*tabwriter.Writer); !found { + w := GetNewTabWriter(output) + output = w + defer w.Flush() + } + + var eventType string + if event, isEvent := obj.(*metav1.WatchEvent); isEvent { + eventType = event.Type + obj = event.Object.Object + } + + // Parameter "obj" is a table from server; print it. + // display tables following the rules of options + if table, ok := obj.(*metav1.Table); ok { + // Do not print headers if this table has no column definitions, or they are the same as the last ones we printed + localOptions := h.options + if h.printedHeaders && (len(table.ColumnDefinitions) == 0 || reflect.DeepEqual(table.ColumnDefinitions, h.lastColumns)) { + localOptions.NoHeaders = true + } + + if len(table.ColumnDefinitions) == 0 { + // If this table has no column definitions, use the columns from the last table we printed for decoration and layout. + // This is done when receiving tables in watch events to save bandwidth. 
+ table.ColumnDefinitions = h.lastColumns
+ } else if !reflect.DeepEqual(table.ColumnDefinitions, h.lastColumns) {
+ // If this table has column definitions, remember them for future use.
+ h.lastColumns = table.ColumnDefinitions
+ h.printedHeaders = false
+ }
+
+ if len(table.Rows) > 0 {
+ h.printedHeaders = true
+ }
+
+ if err := decorateTable(table, localOptions); err != nil {
+ return err
+ }
+ if len(eventType) > 0 {
+ if err := addColumns(beginning, table,
+ []metav1.TableColumnDefinition{{Name: "Event", Type: "string"}},
+ []cellValueFunc{func(metav1.TableRow) (interface{}, error) { return formatEventType(eventType), nil }},
+ ); err != nil {
+ return err
+ }
+ }
+ return printTable(table, output, localOptions)
+ }
+
+ // "obj" is not a server-side table; print it with the default or status
+ // handler, reusing the header state from the last object printed.
+ var handler *printHandler
+ if _, isStatus := obj.(*metav1.Status); isStatus {
+ handler = statusHandlerEntry
+ } else {
+ handler = defaultHandlerEntry
+ }
+
+ includeHeaders := h.lastType != handler && !h.options.NoHeaders
+
+ if h.lastType != nil && h.lastType != handler && !h.options.NoHeaders {
+ fmt.Fprintln(output)
+ }
+
+ if err := printRowsForHandlerEntry(output, handler, eventType, obj, h.options, includeHeaders); err != nil {
+ return err
+ }
+ h.lastType = handler
+
+ return nil
+}
+
+// printTable prints a table to the provided output, respecting the options
+// for wide columns and headers. You should call decorateTable if you receive
+// a table from a remote server before calling printTable.
+func printTable(table *metav1.Table, output io.Writer, options PrintOptions) error {
+ if !options.NoHeaders {
+ // avoid printing headers if we have no rows to display
+ if len(table.Rows) == 0 {
+ return nil
+ }
+
+ first := true
+ for _, column := range table.ColumnDefinitions {
+ if !options.Wide && column.Priority != 0 {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ fmt.Fprint(output, "\t")
+ }
+ fmt.Fprint(output, strings.ToUpper(column.Name))
+ }
+ fmt.Fprintln(output)
+ }
+ for _, row := range table.Rows {
+ first := true
+ for i, cell := range row.Cells {
+ if i >= len(table.ColumnDefinitions) {
+ // https://issue.k8s.io/66379
+ // don't panic in case of bad output from the server, with more cells than column definitions
+ break
+ }
+ column := table.ColumnDefinitions[i]
+ if !options.Wide && column.Priority != 0 {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ fmt.Fprint(output, "\t")
+ }
+ if cell != nil {
+ switch val := cell.(type) {
+ case string:
+ print := val
+ truncated := false
+ // Truncate at the first newline, carriage return or formfeed
+ // (treated as a newline by tabwriter).
+ breakchar := strings.IndexAny(print, "\f\n\r") + if breakchar >= 0 { + truncated = true + print = print[:breakchar] + } + WriteEscaped(output, print) + if truncated { + fmt.Fprint(output, "...") + } + default: + WriteEscaped(output, fmt.Sprint(val)) + } + } + } + fmt.Fprintln(output) + } + return nil +} + +type cellValueFunc func(metav1.TableRow) (interface{}, error) + +type columnAddPosition int + +const ( + beginning columnAddPosition = 1 + end columnAddPosition = 2 +) + +func addColumns(pos columnAddPosition, table *metav1.Table, columns []metav1.TableColumnDefinition, valueFuncs []cellValueFunc) error { + if len(columns) != len(valueFuncs) { + return fmt.Errorf("cannot prepend columns, unmatched value functions") + } + if len(columns) == 0 { + return nil + } + + // Compute the new rows + newRows := make([][]interface{}, len(table.Rows)) + for i := range table.Rows { + newCells := make([]interface{}, 0, len(columns)+len(table.Rows[i].Cells)) + + if pos == end { + // If we're appending, start with the existing cells, + // then add nil cells to match the number of columns + newCells = append(newCells, table.Rows[i].Cells...) + for len(newCells) < len(table.ColumnDefinitions) { + newCells = append(newCells, nil) + } + } + + // Compute cells for new columns + for _, f := range valueFuncs { + newCell, err := f(table.Rows[i]) + if err != nil { + return err + } + newCells = append(newCells, newCell) + } + + if pos == beginning { + // If we're prepending, add existing cells + newCells = append(newCells, table.Rows[i].Cells...) + } + + // Remember the new cells for this row + newRows[i] = newCells + } + + // All cells successfully computed, now replace columns and rows + newColumns := make([]metav1.TableColumnDefinition, 0, len(columns)+len(table.ColumnDefinitions)) + switch pos { + case beginning: + newColumns = append(newColumns, columns...) + newColumns = append(newColumns, table.ColumnDefinitions...) + case end: + newColumns = append(newColumns, table.ColumnDefinitions...) + newColumns = append(newColumns, columns...) + default: + return fmt.Errorf("invalid column add position: %v", pos) + } + table.ColumnDefinitions = newColumns + for i := range table.Rows { + table.Rows[i].Cells = newRows[i] + } + + return nil +} + +// decorateTable takes a table and attempts to add label columns and the +// namespace column. It will fill empty columns with nil (if the object +// does not expose metadata). It returns an error if the table cannot +// be decorated. +func decorateTable(table *metav1.Table, options PrintOptions) error { + width := len(table.ColumnDefinitions) + len(options.ColumnLabels) + if options.WithNamespace { + width++ + } + if options.ShowLabels { + width++ + } + + columns := table.ColumnDefinitions + + nameColumn := -1 + if options.WithKind && !options.Kind.Empty() { + for i := range columns { + if columns[i].Format == "name" && columns[i].Type == "string" { + nameColumn = i + break + } + } + } + + if width != len(table.ColumnDefinitions) { + columns = make([]metav1.TableColumnDefinition, 0, width) + if options.WithNamespace { + columns = append(columns, metav1.TableColumnDefinition{ + Name: "Namespace", + Type: "string", + }) + } + columns = append(columns, table.ColumnDefinitions...) 
+ for _, label := range formatLabelHeaders(options.ColumnLabels) {
+ columns = append(columns, metav1.TableColumnDefinition{
+ Name: label,
+ Type: "string",
+ })
+ }
+ if options.ShowLabels {
+ columns = append(columns, metav1.TableColumnDefinition{
+ Name: "Labels",
+ Type: "string",
+ })
+ }
+ }
+
+ rows := table.Rows
+
+ includeLabels := len(options.ColumnLabels) > 0 || options.ShowLabels
+ if includeLabels || options.WithNamespace || nameColumn != -1 {
+ for i := range rows {
+ row := rows[i]
+
+ if nameColumn != -1 {
+ row.Cells[nameColumn] = fmt.Sprintf("%s/%s", strings.ToLower(options.Kind.String()), row.Cells[nameColumn])
+ }
+
+ var m metav1.Object
+ if obj := row.Object.Object; obj != nil {
+ if acc, err := meta.Accessor(obj); err == nil {
+ m = acc
+ }
+ }
+ // if we can't get an accessor, fill out the appropriate columns with empty spaces
+ if m == nil {
+ if options.WithNamespace {
+ r := make([]interface{}, 1, width)
+ row.Cells = append(r, row.Cells...)
+ }
+ for j := 0; j < width-len(row.Cells); j++ {
+ row.Cells = append(row.Cells, nil)
+ }
+ rows[i] = row
+ continue
+ }
+
+ if options.WithNamespace {
+ r := make([]interface{}, 1, width)
+ r[0] = m.GetNamespace()
+ row.Cells = append(r, row.Cells...)
+ }
+ if includeLabels {
+ row.Cells = appendLabelCells(row.Cells, m.GetLabels(), options)
+ }
+ rows[i] = row
+ }
+ }
+
+ table.ColumnDefinitions = columns
+ table.Rows = rows
+ return nil
+}
+
+// printRowsForHandlerEntry prints the incremental table output (headers if the current type is
+// different from lastType) including all the rows in the object. It returns an error, if any.
+func printRowsForHandlerEntry(output io.Writer, handler *printHandler, eventType string, obj runtime.Object, options PrintOptions, includeHeaders bool) error {
+ var results []reflect.Value
+
+ args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)}
+ results = handler.printFunc.Call(args)
+ if !results[1].IsNil() {
+ return results[1].Interface().(error)
+ }
+
+ if includeHeaders {
+ var headers []string
+ for _, column := range handler.columnDefinitions {
+ if column.Priority != 0 && !options.Wide {
+ continue
+ }
+ headers = append(headers, strings.ToUpper(column.Name))
+ }
+ headers = append(headers, formatLabelHeaders(options.ColumnLabels)...)
+ // LABELS is always the last column.
+ headers = append(headers, formatShowLabelsHeader(options.ShowLabels)...)
+ // prepend namespace header
+ if options.WithNamespace {
+ headers = append(withNamespacePrefixColumns, headers...)
+ }
+ // prepend event type header
+ if len(eventType) > 0 {
+ headers = append(withEventTypePrefixColumns, headers...)
+ }
+ printHeader(headers, output)
+ }
+
+ if results[1].IsNil() {
+ rows := results[0].Interface().([]metav1.TableRow)
+ printRows(output, eventType, rows, options)
+ return nil
+ }
+ return results[1].Interface().(error)
+}
+
+var formattedEventType = map[string]string{
+ string(watch.Added): "ADDED ",
+ string(watch.Modified): "MODIFIED",
+ string(watch.Deleted): "DELETED ",
+ string(watch.Error): "ERROR ",
+}
+
+func formatEventType(eventType string) string {
+ if formatted, ok := formattedEventType[eventType]; ok {
+ return formatted
+ }
+ return eventType
+}
+
+// printRows writes the provided rows to output.
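+//
+// Rows arrive here via printRowsForHandlerEntry when the object is not a
+// server-side table. Example (illustrative sketch; assumes a runtime.Object
+// named obj and os.Stdout are in scope):
+//
+//	printer := NewTablePrinter(PrintOptions{WithNamespace: true})
+//	_ = printer.PrintObj(obj, os.Stdout) // NAMESPACE/NAME/AGE via the default handler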
+func printRows(output io.Writer, eventType string, rows []metav1.TableRow, options PrintOptions) { + for _, row := range rows { + if len(eventType) > 0 { + fmt.Fprint(output, formatEventType(eventType)) + fmt.Fprint(output, "\t") + } + if options.WithNamespace { + if obj := row.Object.Object; obj != nil { + if m, err := meta.Accessor(obj); err == nil { + fmt.Fprint(output, m.GetNamespace()) + } + } + fmt.Fprint(output, "\t") + } + + for i, cell := range row.Cells { + if i != 0 { + fmt.Fprint(output, "\t") + } else { + // TODO: remove this once we drop the legacy printers + if options.WithKind && !options.Kind.Empty() { + fmt.Fprintf(output, "%s/%s", strings.ToLower(options.Kind.String()), cell) + continue + } + } + fmt.Fprint(output, cell) + } + + hasLabels := len(options.ColumnLabels) > 0 + if obj := row.Object.Object; obj != nil && (hasLabels || options.ShowLabels) { + if m, err := meta.Accessor(obj); err == nil { + for _, value := range labelValues(m.GetLabels(), options) { + output.Write([]byte("\t")) + output.Write([]byte(value)) + } + } + } + + output.Write([]byte("\n")) + } +} + +func formatLabelHeaders(columnLabels []string) []string { + formHead := make([]string, len(columnLabels)) + for i, l := range columnLabels { + p := strings.Split(l, "/") + formHead[i] = strings.ToUpper(p[len(p)-1]) + } + return formHead +} + +// headers for --show-labels=true +func formatShowLabelsHeader(showLabels bool) []string { + if showLabels { + return []string{"LABELS"} + } + return nil +} + +// labelValues returns a slice of value columns matching the requested print options. +func labelValues(itemLabels map[string]string, opts PrintOptions) []string { + var values []string + for _, key := range opts.ColumnLabels { + values = append(values, itemLabels[key]) + } + if opts.ShowLabels { + values = append(values, labels.FormatLabels(itemLabels)) + } + return values +} + +// appendLabelCells returns a slice of value columns matching the requested print options. +// Intended for use with tables. +func appendLabelCells(values []interface{}, itemLabels map[string]string, opts PrintOptions) []interface{} { + for _, key := range opts.ColumnLabels { + values = append(values, itemLabels[key]) + } + if opts.ShowLabels { + values = append(values, labels.FormatLabels(itemLabels)) + } + return values +} + +func printStatus(obj runtime.Object, options PrintOptions) ([]metav1.TableRow, error) { + status, ok := obj.(*metav1.Status) + if !ok { + return nil, fmt.Errorf("expected *v1.Status, got %T", obj) + } + return []metav1.TableRow{{ + Object: runtime.RawExtension{Object: obj}, + Cells: []interface{}{status.Status, status.Reason, status.Message}, + }}, nil +} + +func printObjectMeta(obj runtime.Object, options PrintOptions) ([]metav1.TableRow, error) { + if meta.IsListType(obj) { + rows := make([]metav1.TableRow, 0, 16) + err := meta.EachListItem(obj, func(obj runtime.Object) error { + nestedRows, err := printObjectMeta(obj, options) + if err != nil { + return err + } + rows = append(rows, nestedRows...) + return nil + }) + if err != nil { + return nil, err + } + return rows, nil + } + + rows := make([]metav1.TableRow, 0, 1) + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + row := metav1.TableRow{ + Object: runtime.RawExtension{Object: obj}, + } + row.Cells = append(row.Cells, m.GetName(), translateTimestampSince(m.GetCreationTimestamp())) + rows = append(rows, row) + return rows, nil +} + +// translateTimestampSince returns the elapsed time since timestamp in +// human-readable approximation. 
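+//
+// For example (illustrative): a creationTimestamp from 90 seconds ago renders
+// as "90s", and one from 26 hours ago as "26h" (see duration.HumanDuration).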
+func translateTimestampSince(timestamp metav1.Time) string { + if timestamp.IsZero() { + return "" + } + + return duration.HumanDuration(time.Since(timestamp.Time)) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go b/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go new file mode 100644 index 000000000..21d60e1c4 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "io" + + "github.com/liggitt/tabwriter" +) + +const ( + tabwriterMinWidth = 6 + tabwriterWidth = 4 + tabwriterPadding = 3 + tabwriterPadChar = ' ' + tabwriterFlags = tabwriter.RememberWidths +) + +// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text. +func GetNewTabWriter(output io.Writer) *tabwriter.Writer { + return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/template.go b/vendor/k8s.io/cli-runtime/pkg/printers/template.go new file mode 100644 index 000000000..ccff54226 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/template.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "encoding/base64" + "fmt" + "io" + "reflect" + "text/template" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" +) + +// GoTemplatePrinter is an implementation of ResourcePrinter which formats data with a Go Template. +type GoTemplatePrinter struct { + rawTemplate string + template *template.Template +} + +func NewGoTemplatePrinter(tmpl []byte) (*GoTemplatePrinter, error) { + t, err := template.New("output"). + Funcs(template.FuncMap{ + "exists": exists, + "base64decode": base64decode, + }). + Parse(string(tmpl)) + if err != nil { + return nil, err + } + return &GoTemplatePrinter{ + rawTemplate: string(tmpl), + template: t, + }, nil +} + +// AllowMissingKeys tells the template engine if missing keys are allowed. +func (p *GoTemplatePrinter) AllowMissingKeys(allow bool) { + if allow { + p.template.Option("missingkey=default") + } else { + p.template.Option("missingkey=error") + } +} + +// PrintObj formats the obj with the Go Template. 
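+// A minimal caller sketch (the object "pod" and the template below are +// assumptions for illustration, not part of this file): +// +// p, err := NewGoTemplatePrinter([]byte("{{.metadata.name}}")) +// if err != nil { +// return err +// } +// p.AllowMissingKeys(true) +// err = p.PrintObj(pod, os.Stdout) // writes the pod's name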
+func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error { + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + var data []byte + var err error + data, err = json.Marshal(obj) + if err != nil { + return err + } + + out := map[string]interface{}{} + if err := json.Unmarshal(data, &out); err != nil { + return err + } + if err = p.safeExecute(w, out); err != nil { + // Template failures are much easier to debug when the details show up in + // the output stream instead of only in the returned error. So in addition + // to returning a nice error, also print useful stuff with the writer. + fmt.Fprintf(w, "Error executing template: %v. Printing more information for debugging the template:\n", err) + fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", p.rawTemplate) + fmt.Fprintf(w, "\traw data was:\n\t\t%v\n", string(data)) + fmt.Fprintf(w, "\tobject given to template engine was:\n\t\t%+v\n\n", out) + return fmt.Errorf("error executing template %q: %v", p.rawTemplate, err) + } + return nil +} + +// safeExecute tries to execute the template, but catches panics and returns an error +// should the template engine panic. +func (p *GoTemplatePrinter) safeExecute(w io.Writer, obj interface{}) error { + var panicErr error + // Sorry for the double anonymous function. There's probably a clever way + // to do this that has the defer'd func setting the value to be returned, but + // that would be even less obvious. + retErr := func() error { + defer func() { + if x := recover(); x != nil { + panicErr = fmt.Errorf("caught panic: %+v", x) + } + }() + return p.template.Execute(w, obj) + }() + if panicErr != nil { + return panicErr + } + return retErr +} + +func base64decode(v string) (string, error) { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return "", fmt.Errorf("base64 decode failed: %v", err) + } + return string(data), nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/terminal.go b/vendor/k8s.io/cli-runtime/pkg/printers/terminal.go new file mode 100644 index 000000000..9dc904e59 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/terminal.go @@ -0,0 +1,75 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "io" + "os" + "runtime" + "strings" + + "github.com/moby/term" +) + +// terminalEscaper replaces ANSI escape sequences and other terminal special +// characters to avoid terminal escape character attacks (issue #101695). +var terminalEscaper = strings.NewReplacer("\x1b", "^[", "\r", "\\r") + +// WriteEscaped replaces unsafe terminal characters with replacement strings +// and writes them to the given writer. +func WriteEscaped(writer io.Writer, output string) error { + _, err := terminalEscaper.WriteString(writer, output) + return err +} + +// EscapeTerminal escapes terminal special characters in a human readable (but +// non-reversible) format.
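+// For example, EscapeTerminal("\x1b[31mred\x1b[0m") returns "^[[31mred^[[0m", +// so the escape sequence is shown instead of being interpreted by the terminal.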
+func EscapeTerminal(in string) string { + return terminalEscaper.Replace(in) +} + +// IsTerminal returns whether the passed object is a terminal or not +func IsTerminal(i interface{}) bool { + _, terminal := term.GetFdInfo(i) + return terminal +} + +// AllowsColorOutput returns true if the specified writer is a terminal and +// the process environment indicates color output is supported and desired. +func AllowsColorOutput(w io.Writer) bool { + if !IsTerminal(w) { + return false + } + + // https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals + if os.Getenv("TERM") == "dumb" { + return false + } + + // https://no-color.org/ + if _, nocolor := os.LookupEnv("NO_COLOR"); nocolor { + return false + } + + // On Windows WT_SESSION is set by the modern terminal component. + // Older terminals have poor support for UTF-8, VT escape codes, etc. + if runtime.GOOS == "windows" && os.Getenv("WT_SESSION") == "" { + return false + } + + return true +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go b/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go new file mode 100644 index 000000000..8d2d9b56e --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// TypeSetterPrinter is an implementation of ResourcePrinter that wraps another printer with types set on the objects +type TypeSetterPrinter struct { + Delegate ResourcePrinter + + Typer runtime.ObjectTyper +} + +// NewTypeSetter constructs a wrapping printer with required params +func NewTypeSetter(typer runtime.ObjectTyper) *TypeSetterPrinter { + return &TypeSetterPrinter{Typer: typer} +} + +// PrintObj is an implementation of ResourcePrinter.PrintObj which sets type information on the obj for the duration +// of printing. It is NOT threadsafe. +func (p *TypeSetterPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + if obj == nil { + return p.Delegate.PrintObj(obj, w) + } + if !obj.GetObjectKind().GroupVersionKind().Empty() { + return p.Delegate.PrintObj(obj, w) + } + + // we were empty coming in, make sure we're empty going out. This makes the call thread-unsafe + defer func() { + obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + }() + + gvks, _, err := p.Typer.ObjectKinds(obj) + if err != nil { + // printers wrapped by us expect to find the type information present + return fmt.Errorf("missing apiVersion or kind and cannot assign it; %v", err) + } + + for _, gvk := range gvks { + if len(gvk.Kind) == 0 { + continue + } + if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal { + continue + } + obj.GetObjectKind().SetGroupVersionKind(gvk) + break + } + + return p.Delegate.PrintObj(obj, w) +} + +// ToPrinter returns a printer (not threadsafe!)
that has been wrapped +func (p *TypeSetterPrinter) ToPrinter(delegate ResourcePrinter) ResourcePrinter { + if p == nil { + return delegate + } + + p.Delegate = delegate + return p +} + +// WrapToPrinter wraps the common ToPrinter method +func (p *TypeSetterPrinter) WrapToPrinter(delegate ResourcePrinter, err error) (ResourcePrinter, error) { + if err != nil { + return delegate, err + } + if p == nil { + return delegate, nil + } + + p.Delegate = delegate + return p, nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/warningprinter.go b/vendor/k8s.io/cli-runtime/pkg/printers/warningprinter.go new file mode 100644 index 000000000..b3a8264f7 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/warningprinter.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "fmt" + "io" +) + +const ( + yellowColor = "\u001b[33;1m" + resetColor = "\u001b[0m" +) + +type WarningPrinter struct { + // out is the writer to output warnings to + out io.Writer + // opts contains options controlling warning output + opts WarningPrinterOptions +} + +// WarningPrinterOptions controls the behavior of a WarningPrinter constructed using NewWarningPrinter() +type WarningPrinterOptions struct { + // Color indicates that warning output can include ANSI color codes + Color bool +} + +// NewWarningPrinter returns an implementation of warningPrinter that outputs warnings to the specified writer. +func NewWarningPrinter(out io.Writer, opts WarningPrinterOptions) *WarningPrinter { + h := &WarningPrinter{out: out, opts: opts} + return h +} + +// Print prints warnings to the configured writer. +func (w *WarningPrinter) Print(message string) { + if w.opts.Color { + fmt.Fprintf(w.out, "%sWarning:%s %s\n", yellowColor, resetColor, message) + } else { + fmt.Fprintf(w.out, "Warning: %s\n", message) + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go b/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go new file mode 100644 index 000000000..9c444bdc2 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go @@ -0,0 +1,85 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "fmt" + "io" + "reflect" + "sync/atomic" + + "sigs.k8s.io/yaml" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. 
+// The input object is assumed to be in the internal version of an API and is converted +// to the given version first. +// If PrintObj() is called multiple times, objects are separated with a '---' separator. +type YAMLPrinter struct { + printCount int64 +} + +// PrintObj prints the data as YAML. +func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + // we use reflect.Indirect here in order to obtain the actual value from a pointer. + // we need an actual value in order to retrieve the package path for an object. + // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + count := atomic.AddInt64(&p.printCount, 1) + if count > 1 { + if _, err := w.Write([]byte("---\n")); err != nil { + return err + } + } + + switch obj := obj.(type) { + case *metav1.WatchEvent: + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + data, err := yaml.Marshal(obj) + if err != nil { + return err + } + _, err = w.Write(data) + return err + case *runtime.Unknown: + data, err := yaml.JSONToYAML(obj.Raw) + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + + if obj.GetObjectKind().GroupVersionKind().Empty() { + return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type") + } + + output, err := yaml.Marshal(obj) + if err != nil { + return err + } + _, err = fmt.Fprint(w, string(output)) + return err +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/builder.go b/vendor/k8s.io/cli-runtime/pkg/resource/builder.go new file mode 100644 index 000000000..47ec83bbb --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/builder.go @@ -0,0 +1,1259 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +var FileExtensions = []string{".json", ".yaml", ".yml"} +var InputExtensions = append(FileExtensions, "stdin") + +const defaultHttpGetAttempts = 3 +const pathNotExistError = "the path %q does not exist" + +// Builder provides convenience functions for taking arguments and parameters +// from the command line and converting them to a list of resources to iterate +// over using the Visitor interface. +type Builder struct { + categoryExpanderFn CategoryExpanderFunc + + // mapper is set explicitly by resource builders + mapper *mapper + + // clientConfigFn is a function to produce a client, *if* you need one + clientConfigFn ClientConfigFunc + + restMapperFn RESTMapperFunc + + // objectTyper is statically determined per-command invocation based on your internal or unstructured choice + // it does not ever need to rely upon discovery. + objectTyper runtime.ObjectTyper + + // codecFactory describes which codecs you want to use + negotiatedSerializer runtime.NegotiatedSerializer + + // local indicates that we cannot make server calls + local bool + + errs []error + + paths []Visitor + stream bool + stdinInUse bool + dir bool + + visitorConcurrency int + + labelSelector *string + fieldSelector *string + selectAll bool + limitChunks int64 + requestTransforms []RequestTransform + + resources []string + subresource string + + namespace string + allNamespace bool + names []string + + resourceTuples []resourceTuple + + defaultNamespace bool + requireNamespace bool + + flatten bool + latest bool + + requireObject bool + + singleResourceType bool + continueOnError bool + + singleItemImplied bool + + schema ContentValidator + + // fakeClientFn is used for testing + fakeClientFn FakeClientFunc +} + +var missingResourceError = fmt.Errorf(`You must provide one or more resources by argument or filename. +Example resource specifications include: + '-f rsrc.yaml' + '--filename=rsrc.json' + '<resource> <name>' + '<resource>'`) + +var LocalResourceError = errors.New(`error: you must specify resources by --filename when --local is set. +Example resource specifications include: + '-f rsrc.yaml' + '--filename=rsrc.json'`) + +var StdinMultiUseError = errors.New("standard input cannot be used for multiple arguments") + +// TODO: expand this to include other errors.
+func IsUsageError(err error) bool { + if err == nil { + return false + } + return err == missingResourceError +} + +type FilenameOptions struct { + Filenames []string + Kustomize string + Recursive bool +} + +func (o *FilenameOptions) validate() []error { + var errs []error + if len(o.Filenames) > 0 && len(o.Kustomize) > 0 { + errs = append(errs, fmt.Errorf("only one of -f or -k can be specified")) + } + if len(o.Kustomize) > 0 && o.Recursive { + errs = append(errs, fmt.Errorf("the -k flag can't be used with -f or -R")) + } + return errs +} + +func (o *FilenameOptions) RequireFilenameOrKustomize() error { + if len(o.Filenames) == 0 && len(o.Kustomize) == 0 { + return fmt.Errorf("must specify one of -f and -k") + } + return nil +} + +type resourceTuple struct { + Resource string + Name string +} + +type FakeClientFunc func(version schema.GroupVersion) (RESTClient, error) + +func NewFakeBuilder(fakeClientFn FakeClientFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder { + ret := newBuilder(nil, restMapper, categoryExpander) + ret.fakeClientFn = fakeClientFn + return ret +} + +// NewBuilder creates a builder that operates on generic objects. At least one of +// internal or unstructured must be specified. +// TODO: Add versioned client (although versioned is still lossy) +// TODO remove internal and unstructured mapper and instead have them set the negotiated serializer for use in the client +func newBuilder(clientConfigFn ClientConfigFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder { + return &Builder{ + clientConfigFn: clientConfigFn, + restMapperFn: restMapper, + categoryExpanderFn: categoryExpander, + requireObject: true, + } +} + +// noopClientGetter implements RESTClientGetter returning only errors. +// used as a dummy getter in a local-only builder. +type noopClientGetter struct{} + +func (noopClientGetter) ToRESTConfig() (*rest.Config, error) { + return nil, fmt.Errorf("local operation only") +} +func (noopClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + return nil, fmt.Errorf("local operation only") +} +func (noopClientGetter) ToRESTMapper() (meta.RESTMapper, error) { + return nil, fmt.Errorf("local operation only") +} + +// NewLocalBuilder returns a builder that is configured not to create REST clients and avoids asking the server for results. +func NewLocalBuilder() *Builder { + return NewBuilder(noopClientGetter{}).Local() +} + +func NewBuilder(restClientGetter RESTClientGetter) *Builder { + categoryExpanderFn := func() (restmapper.CategoryExpander, error) { + discoveryClient, err := restClientGetter.ToDiscoveryClient() + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryCategoryExpander(discoveryClient), err + } + + return newBuilder( + restClientGetter.ToRESTConfig, + restClientGetter.ToRESTMapper, + (&cachingCategoryExpanderFunc{delegate: categoryExpanderFn}).ToCategoryExpander, + ) +} + +func (b *Builder) Schema(schema ContentValidator) *Builder { + b.schema = schema + return b +} + +func (b *Builder) AddError(err error) *Builder { + if err == nil { + return b + } + b.errs = append(b.errs, err) + return b +} + +// VisitorConcurrency sets the number of concurrent visitors to use when +// visiting lists. 
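+// The value is consumed by visitByPaths below: when ContinueOnError is not +// set, the path visitors are wrapped in a ConcurrentVisitorList that visits +// up to this many items concurrently.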
+func (b *Builder) VisitorConcurrency(concurrency int) *Builder { + b.visitorConcurrency = concurrency + return b +} + +// FilenameParam groups input in two categories: URLs and files (files, directories, STDIN) +// If enforceNamespace is false, namespaces in the specs will be allowed to +// override the default namespace. If it is true, namespaces that don't match +// will cause an error. +// If ContinueOnError() is set prior to this method, objects on the path that are not +// recognized will be ignored (but logged at V(2)). +func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *FilenameOptions) *Builder { + if errs := filenameOptions.validate(); len(errs) > 0 { + b.errs = append(b.errs, errs...) + return b + } + recursive := filenameOptions.Recursive + paths := filenameOptions.Filenames + for _, s := range paths { + switch { + case s == "-": + b.Stdin() + case strings.Index(s, "http://") == 0 || strings.Index(s, "https://") == 0: + url, err := url.Parse(s) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)) + continue + } + b.URL(defaultHttpGetAttempts, url) + default: + matches, err := expandIfFilePattern(s) + if err != nil { + b.errs = append(b.errs, err) + continue + } + if !recursive && len(matches) == 1 { + b.singleItemImplied = true + } + b.Path(recursive, matches...) + } + } + if filenameOptions.Kustomize != "" { + b.paths = append( + b.paths, + &KustomizeVisitor{ + mapper: b.mapper, + dirPath: filenameOptions.Kustomize, + schema: b.schema, + fSys: filesys.MakeFsOnDisk(), + }) + } + + if enforceNamespace { + b.RequireNamespace() + } + + return b +} + +// Unstructured updates the builder so that it will request and send unstructured +// objects. Unstructured objects preserve all fields sent by the server in a map format +// based on the object's JSON structure which means no data is lost when the client +// reads and then writes an object. Use this mode in preference to Internal unless you +// are working with Go types directly. +func (b *Builder) Unstructured() *Builder { + if b.mapper != nil { + b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use unstructured types")) + return b + } + b.objectTyper = unstructuredscheme.NewUnstructuredObjectTyper() + b.mapper = &mapper{ + localFn: b.isLocal, + restMapperFn: b.restMapperFn, + clientFn: b.getClient, + decoder: &metadataValidatingDecoder{unstructured.UnstructuredJSONScheme}, + } + + return b +} + +// WithScheme uses the scheme to manage typing, conversion (optional), and decoding. If decodingVersions +// is empty, then you can end up with internal types. You have been warned. 
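+// A typical call site (sketch only; "scheme" is an assumed, fully populated +// *runtime.Scheme): +// +// b := NewBuilder(getter).WithScheme(scheme, scheme.PrioritizedVersionsAllGroups()...)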
+func (b *Builder) WithScheme(scheme *runtime.Scheme, decodingVersions ...schema.GroupVersion) *Builder { + if b.mapper != nil { + b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use internal types")) + return b + } + b.objectTyper = scheme + codecFactory := serializer.NewCodecFactory(scheme) + negotiatedSerializer := runtime.NegotiatedSerializer(codecFactory) + // if you specified versions, you're specifying a desire for external types, which you don't want to round-trip through + // internal types + if len(decodingVersions) > 0 { + negotiatedSerializer = codecFactory.WithoutConversion() + } + b.negotiatedSerializer = negotiatedSerializer + + b.mapper = &mapper{ + localFn: b.isLocal, + restMapperFn: b.restMapperFn, + clientFn: b.getClient, + decoder: codecFactory.UniversalDecoder(decodingVersions...), + } + + return b +} + +// LocalParam calls Local() if local is true. +func (b *Builder) LocalParam(local bool) *Builder { + if local { + b.Local() + } + return b +} + +// Local will avoid asking the server for results. +func (b *Builder) Local() *Builder { + b.local = true + return b +} + +func (b *Builder) isLocal() bool { + return b.local +} + +// Mapper returns a copy of the current mapper. +func (b *Builder) Mapper() *mapper { + mapper := *b.mapper + return &mapper +} + +// URL accepts a number of URLs directly. +func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { + for _, u := range urls { + b.paths = append(b.paths, &URLVisitor{ + URL: u, + StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), + HttpAttemptCount: httpAttemptCount, + }) + } + return b +} + +// Stdin will read objects from the standard input. If ContinueOnError() is set +// prior to this method being called, objects in the stream that are unrecognized +// will be ignored (but logged at V(2)). If StdinInUse() is set prior to this method +// being called, an error will be recorded as there are multiple entities trying to use +// the single standard input stream. +func (b *Builder) Stdin() *Builder { + b.stream = true + if b.stdinInUse { + b.errs = append(b.errs, StdinMultiUseError) + } + b.stdinInUse = true + b.paths = append(b.paths, FileVisitorForSTDIN(b.mapper, b.schema)) + return b +} + +// StdinInUse will mark standard input as in use by this Builder, and therefore standard +// input should not be used by another entity. If Stdin() is set prior to this method +// being called, an error will be recorded as there are multiple entities trying to use +// the single standard input stream. +func (b *Builder) StdinInUse() *Builder { + if b.stdinInUse { + b.errs = append(b.errs, StdinMultiUseError) + } + b.stdinInUse = true + return b +} + +// Stream will read objects from the provided reader, and if an error occurs will +// include the name string in the error message. If ContinueOnError() is set +// prior to this method being called, objects in the stream that are unrecognized +// will be ignored (but logged at V(2)). +func (b *Builder) Stream(r io.Reader, name string) *Builder { + b.stream = true + b.paths = append(b.paths, NewStreamVisitor(r, b.mapper, name, b.schema)) + return b +} + +// Path accepts a set of paths that may be files or directories (each of which +// can contain one or more resources). It creates a FileVisitor for each file, +// and each FileVisitor streams its content to a StreamVisitor. If ContinueOnError() is set +// prior to this method being called, objects on the path that are unrecognized will be +// ignored (but logged at V(2)).
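+// For example (hypothetical paths): +// +// b.Path(true, "./manifests") // walk a directory recursively +// b.Path(false, "svc.yaml", "deploy.yml") // visit individual files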
+func (b *Builder) Path(recursive bool, paths ...string) *Builder { + for _, p := range paths { + _, err := os.Stat(p) + if os.IsNotExist(err) { + b.errs = append(b.errs, fmt.Errorf(pathNotExistError, p)) + continue + } + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the path %q cannot be accessed: %v", p, err)) + continue + } + + visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) + } + if len(visitors) > 1 { + b.dir = true + } + + b.paths = append(b.paths, visitors...) + } + if len(b.paths) == 0 && len(b.errs) == 0 { + b.errs = append(b.errs, fmt.Errorf("error reading %v: recognized file extensions are %v", paths, FileExtensions)) + } + return b +} + +// ResourceTypes is a list of types of resources to operate on, when listing objects on +// the server or retrieving objects that match a selector. +func (b *Builder) ResourceTypes(types ...string) *Builder { + b.resources = append(b.resources, types...) + return b +} + +// ResourceNames accepts a default type and one or more names, and creates tuples of +// resources +func (b *Builder) ResourceNames(resource string, names ...string) *Builder { + for _, name := range names { + // See if this input string is of type/name format + tuple, ok, err := splitResourceTypeName(name) + if err != nil { + b.errs = append(b.errs, err) + return b + } + + if ok { + b.resourceTuples = append(b.resourceTuples, tuple) + continue + } + if len(resource) == 0 { + b.errs = append(b.errs, fmt.Errorf("the argument %q must be RESOURCE/NAME", name)) + continue + } + + // Use the given default type to create a resource tuple + b.resourceTuples = append(b.resourceTuples, resourceTuple{Resource: resource, Name: name}) + } + return b +} + +// LabelSelectorParam defines a selector that should be applied to the object types to load. +// This will not affect files loaded from disk or URL. If the parameter is empty it is +// a no-op; to select all resources invoke `b.LabelSelector(labels.Everything().String())`. +func (b *Builder) LabelSelectorParam(s string) *Builder { + selector := strings.TrimSpace(s) + if len(selector) == 0 { + return b + } + if b.selectAll { + b.errs = append(b.errs, fmt.Errorf("found non-empty label selector %q with previously set 'all' parameter", s)) + return b + } + return b.LabelSelector(selector) +} + +// LabelSelector accepts a selector directly and will filter the resulting list by that selector. +// Use LabelSelectorParam instead for user input. +func (b *Builder) LabelSelector(selector string) *Builder { + if len(selector) == 0 { + return b + } + + b.labelSelector = &selector + return b +} + +// FieldSelectorParam defines a selector that should be applied to the object types to load. +// This will not affect files loaded from disk or URL. If the parameter is empty it is +// a no-op. +func (b *Builder) FieldSelectorParam(s string) *Builder { + s = strings.TrimSpace(s) + if len(s) == 0 { + return b + } + if b.selectAll { + b.errs = append(b.errs, fmt.Errorf("found non-empty field selector %q with previously set 'all' parameter
", s)) + return b + } + b.fieldSelector = &s + return b +} + +// NamespaceParam accepts the namespace that these resources should be +// considered under from - used by DefaultNamespace() and RequireNamespace() +func (b *Builder) NamespaceParam(namespace string) *Builder { + b.namespace = namespace + return b +} + +// DefaultNamespace instructs the builder to set the namespace value for any object found +// to NamespaceParam() if empty. +func (b *Builder) DefaultNamespace() *Builder { + b.defaultNamespace = true + return b +} + +// AllNamespaces instructs the builder to metav1.NamespaceAll as a namespace to request resources +// across all of the namespace. This overrides the namespace set by NamespaceParam(). +func (b *Builder) AllNamespaces(allNamespace bool) *Builder { + if allNamespace { + b.namespace = metav1.NamespaceAll + } + b.allNamespace = allNamespace + return b +} + +// RequireNamespace instructs the builder to set the namespace value for any object found +// to NamespaceParam() if empty, and if the value on the resource does not match +// NamespaceParam() an error will be returned. +func (b *Builder) RequireNamespace() *Builder { + b.requireNamespace = true + return b +} + +// RequestChunksOf attempts to load responses from the server in batches of size limit +// to avoid long delays loading and transferring very large lists. If unset defaults to +// no chunking. +func (b *Builder) RequestChunksOf(chunkSize int64) *Builder { + b.limitChunks = chunkSize + return b +} + +// TransformRequests alters API calls made by clients requested from this builder. Pass +// an empty list to clear modifiers. +func (b *Builder) TransformRequests(opts ...RequestTransform) *Builder { + b.requestTransforms = opts + return b +} + +// Subresource instructs the builder to retrieve the object at the +// subresource path instead of the main resource path. +func (b *Builder) Subresource(subresource string) *Builder { + b.subresource = subresource + return b +} + +// SelectEverythingParam +func (b *Builder) SelectAllParam(selectAll bool) *Builder { + if selectAll && (b.labelSelector != nil || b.fieldSelector != nil) { + b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. ")) + return b + } + b.selectAll = selectAll + return b +} + +// ResourceTypeOrNameArgs indicates that the builder should accept arguments +// of the form `([,,...]| [,,...])`. When one argument is +// received, the types provided will be retrieved from the server (and be comma delimited). +// When two or more arguments are received, they must be a single type and resource name(s). +// The allowEmptySelector permits to select all the resources (via Everything func). +func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder { + args = normalizeMultipleResourcesArgs(args) + if ok, err := hasCombinedTypeArgs(args); ok { + if err != nil { + b.errs = append(b.errs, err) + return b + } + for _, s := range args { + tuple, ok, err := splitResourceTypeName(s) + if err != nil { + b.errs = append(b.errs, err) + return b + } + if ok { + b.resourceTuples = append(b.resourceTuples, tuple) + } + } + return b + } + if len(args) > 0 { + // Try replacing aliases only in types + args[0] = b.ReplaceAliases(args[0]) + } + switch { + case len(args) > 2: + b.names = append(b.names, args[1:]...) + b.ResourceTypes(SplitResourceArgument(args[0])...) + case len(args) == 2: + b.names = append(b.names, args[1]) + b.ResourceTypes(SplitResourceArgument(args[0])...) 
+ case len(args) == 1: + b.ResourceTypes(SplitResourceArgument(args[0])...) + if b.labelSelector == nil && allowEmptySelector { + selector := labels.Everything().String() + b.labelSelector = &selector + } + case len(args) == 0: + default: + b.errs = append(b.errs, fmt.Errorf("arguments must consist of a resource or a resource and name")) + } + return b +} + +// ReplaceAliases accepts an argument and tries to expand any existing +// aliases found in it +func (b *Builder) ReplaceAliases(input string) string { + replaced := []string{} + for _, arg := range strings.Split(input, ",") { + if b.categoryExpanderFn == nil { + continue + } + categoryExpander, err := b.categoryExpanderFn() + if err != nil { + b.AddError(err) + continue + } + + if resources, ok := categoryExpander.Expand(arg); ok { + asStrings := []string{} + for _, resource := range resources { + if len(resource.Group) == 0 { + asStrings = append(asStrings, resource.Resource) + continue + } + asStrings = append(asStrings, resource.Resource+"."+resource.Group) + } + arg = strings.Join(asStrings, ",") + } + replaced = append(replaced, arg) + } + return strings.Join(replaced, ",") +} + +func hasCombinedTypeArgs(args []string) (bool, error) { + hasSlash := 0 + for _, s := range args { + if strings.Contains(s, "/") { + hasSlash++ + } + } + switch { + case hasSlash > 0 && hasSlash == len(args): + return true, nil + case hasSlash > 0 && hasSlash != len(args): + baseCmd := "cmd" + if len(os.Args) > 0 { + baseCmdSlice := strings.Split(os.Args[0], "/") + baseCmd = baseCmdSlice[len(baseCmdSlice)-1] + } + return true, fmt.Errorf("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. '%s get resource/<resource_name>' instead of '%s get resource resource/<resource_name>')", baseCmd, baseCmd) + default: + return false, nil + } +} + +// normalizeMultipleResourcesArgs converts multiple resource arguments into +// resource/name tuples: "a,b,c d" becomes "a/d b/d c/d". +func normalizeMultipleResourcesArgs(args []string) []string { + if len(args) >= 2 { + resources := []string{} + resources = append(resources, SplitResourceArgument(args[0])...) + if len(resources) > 1 { + names := []string{} + names = append(names, args[1:]...) + newArgs := []string{} + for _, resource := range resources { + for _, name := range names { + newArgs = append(newArgs, strings.Join([]string{resource, name}, "/")) + } + } + return newArgs + } + } + return args +} + +// splitResourceTypeName handles type/name resource formats and returns a resource tuple +// (empty or not), whether it successfully found one, and an error +func splitResourceTypeName(s string) (resourceTuple, bool, error) { + if !strings.Contains(s, "/") { + return resourceTuple{}, false, nil + } + seg := strings.Split(s, "/") + if len(seg) != 2 { + return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form may not have more than one slash") + } + resource, name := seg[0], seg[1] + if len(resource) == 0 || len(name) == 0 || len(SplitResourceArgument(resource)) != 1 { + return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form must have a single resource and name") + } + return resourceTuple{Resource: resource, Name: name}, true, nil +} + +// Flatten will convert any objects with a field named "Items" that is an array of runtime.Object +// compatible types into individual entries and give them their own items. The original object +// is not passed to any visitors.
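+// For example, a v1.List whose Items field holds three Pods is visited as +// three individual Pod entries.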
+func (b *Builder) Flatten() *Builder { + b.flatten = true + return b +} + +// Latest will fetch the latest copy of any objects loaded from URLs or files from the server. +func (b *Builder) Latest() *Builder { + b.latest = true + return b +} + +// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set. +func (b *Builder) RequireObject(require bool) *Builder { + b.requireObject = require + return b +} + +// ContinueOnError will attempt to load and visit as many objects as possible, even if some visits +// return errors or some objects cannot be loaded. The default behavior is to terminate after +// the first error is returned from a VisitorFunc. +func (b *Builder) ContinueOnError() *Builder { + b.continueOnError = true + return b +} + +// SingleResourceType will cause the builder to error if the user specifies more than a single type +// of resource. +func (b *Builder) SingleResourceType() *Builder { + b.singleResourceType = true + return b +} + +// mappingFor returns the RESTMapping for the Kind given, or the Kind referenced by the resource. +// Prefers a fully specified GroupVersionResource match. If one is not found, we match on a fully +// specified GroupVersionKind, or fallback to a match on GroupKind. +func (b *Builder) mappingFor(resourceOrKindArg string) (*meta.RESTMapping, error) { + fullySpecifiedGVR, groupResource := schema.ParseResourceArg(resourceOrKindArg) + gvk := schema.GroupVersionKind{} + restMapper, err := b.restMapperFn() + if err != nil { + return nil, err + } + + if fullySpecifiedGVR != nil { + gvk, _ = restMapper.KindFor(*fullySpecifiedGVR) + } + if gvk.Empty() { + gvk, _ = restMapper.KindFor(groupResource.WithVersion("")) + } + if !gvk.Empty() { + return restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + } + + fullySpecifiedGVK, groupKind := schema.ParseKindArg(resourceOrKindArg) + if fullySpecifiedGVK == nil { + gvk := groupKind.WithVersion("") + fullySpecifiedGVK = &gvk + } + + if !fullySpecifiedGVK.Empty() { + if mapping, err := restMapper.RESTMapping(fullySpecifiedGVK.GroupKind(), fullySpecifiedGVK.Version); err == nil { + return mapping, nil + } + } + + mapping, err := restMapper.RESTMapping(groupKind, gvk.Version) + if err != nil { + // if we error out here, it is because we could not match a resource or a kind + // for the given argument. To maintain consistency with previous behavior, + // announce that a resource type could not be found. 
+ // if the error is _not_ a *meta.NoKindMatchError, then we had trouble doing discovery, + // so we should return the original error since it may help a user diagnose what is actually wrong + if meta.IsNoMatchError(err) { + return nil, fmt.Errorf("the server doesn't have a resource type %q", groupResource.Resource) + } + return nil, err + } + + return mapping, nil +} + +func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) { + if len(b.resources) > 1 && b.singleResourceType { + return nil, fmt.Errorf("you may only specify a single resource type") + } + mappings := []*meta.RESTMapping{} + seen := map[schema.GroupVersionKind]bool{} + for _, r := range b.resources { + mapping, err := b.mappingFor(r) + if err != nil { + return nil, err + } + // This ensures the mappings for resources (shortcuts, plurals) are unique + if seen[mapping.GroupVersionKind] { + continue + } + seen[mapping.GroupVersionKind] = true + + mappings = append(mappings, mapping) + } + return mappings, nil +} + +func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error) { + mappings := make(map[string]*meta.RESTMapping) + canonical := make(map[schema.GroupVersionResource]struct{}) + for _, r := range b.resourceTuples { + if _, ok := mappings[r.Resource]; ok { + continue + } + mapping, err := b.mappingFor(r.Resource) + if err != nil { + return nil, err + } + + mappings[r.Resource] = mapping + canonical[mapping.Resource] = struct{}{} + } + if len(canonical) > 1 && b.singleResourceType { + return nil, fmt.Errorf("you may only specify a single resource type") + } + return mappings, nil +} + +func (b *Builder) visitorResult() *Result { + if len(b.errs) > 0 { + return &Result{err: utilerrors.NewAggregate(b.errs)} + } + + if b.selectAll { + selector := labels.Everything().String() + b.labelSelector = &selector + } + + // visit items specified by paths + if len(b.paths) != 0 { + return b.visitByPaths() + } + + // visit selectors + if b.labelSelector != nil || b.fieldSelector != nil { + return b.visitBySelector() + } + + // visit items specified by resource and name + if len(b.resourceTuples) != 0 { + return b.visitByResource() + } + + // visit items specified by name + if len(b.names) != 0 { + return b.visitByName() + } + + if len(b.resources) != 0 { + for _, r := range b.resources { + _, err := b.mappingFor(r) + if err != nil { + return &Result{err: err} + } + } + return &Result{err: fmt.Errorf("resource(s) were provided, but no name was specified")} + } + return &Result{err: missingResourceError} +} + +func (b *Builder) visitBySelector() *Result { + result := &Result{ + targetsSingleItems: false, + } + + if len(b.names) != 0 { + return result.withError(fmt.Errorf("name cannot be provided when a selector is specified")) + } + if len(b.resourceTuples) != 0 { + return result.withError(fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments")) + } + if len(b.resources) == 0 { + return result.withError(fmt.Errorf("at least one resource must be specified to use a selector")) + } + if len(b.subresource) != 0 { + return result.withError(fmt.Errorf("subresource cannot be used when bulk resources are specified")) + } + + mappings, err := b.resourceMappings() + if err != nil { + result.err = err + return result + } + + var labelSelector, fieldSelector string + if b.labelSelector != nil { + labelSelector = *b.labelSelector + } + if b.fieldSelector != nil { + fieldSelector = *b.fieldSelector + } + + visitors := []Visitor{} + for _, mapping := range mappings { + client, err :=
b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } + visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, labelSelector, fieldSelector, b.limitChunks)) + } + if b.continueOnError { + result.visitor = EagerVisitorList(visitors) + } else { + result.visitor = VisitorList(visitors) + } + result.sources = visitors + return result +} + +func (b *Builder) getClient(gv schema.GroupVersion) (RESTClient, error) { + var ( + client RESTClient + err error + ) + + switch { + case b.fakeClientFn != nil: + client, err = b.fakeClientFn(gv) + case b.negotiatedSerializer != nil: + client, err = b.clientConfigFn.withStdinUnavailable(b.stdinInUse).clientForGroupVersion(gv, b.negotiatedSerializer) + default: + client, err = b.clientConfigFn.withStdinUnavailable(b.stdinInUse).unstructuredClientForGroupVersion(gv) + } + + if err != nil { + return nil, err + } + + return NewClientWithOptions(client, b.requestTransforms...), nil +} + +func (b *Builder) visitByResource() *Result { + // if b.singleItemImplied is false, this could be by default, so double-check length + // of resourceTuples to determine if in fact it is singleItemImplied or not + isSingleItemImplied := b.singleItemImplied + if !isSingleItemImplied { + isSingleItemImplied = len(b.resourceTuples) == 1 + } + + result := &Result{ + singleItemImplied: isSingleItemImplied, + targetsSingleItems: true, + } + + if len(b.resources) != 0 { + return result.withError(fmt.Errorf("you may not specify individual resources and bulk resources in the same call")) + } + + // retrieve one client for each resource + mappings, err := b.resourceTupleMappings() + if err != nil { + result.err = err + return result + } + clients := make(map[string]RESTClient) + for _, mapping := range mappings { + s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource) + if _, ok := clients[s]; ok { + continue + } + client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + clients[s] = client + } + + items := []Visitor{} + for _, tuple := range b.resourceTuples { + mapping, ok := mappings[tuple.Resource] + if !ok { + return result.withError(fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings)) + } + s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource) + client, ok := clients[s] + if !ok { + return result.withError(fmt.Errorf("could not find a client for resource %q", tuple.Resource)) + } + + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } else { + if len(b.namespace) == 0 { + errMsg := "namespace may not be empty when retrieving a resource by name" + if b.allNamespace { + errMsg = "a resource cannot be retrieved by name across all namespaces" + } + return result.withError(fmt.Errorf(errMsg)) + } + } + + info := &Info{ + Client: client, + Mapping: mapping, + Namespace: selectorNamespace, + Name: tuple.Name, + Subresource: b.subresource, + } + items = append(items, info) + } + + var visitors Visitor + if b.continueOnError { + visitors = EagerVisitorList(items) + } else { + visitors = VisitorList(items) + } + result.visitor = visitors + result.sources = items + return result +} + +func (b *Builder) visitByName() *Result { + result := 
&Result{ + singleItemImplied: len(b.names) == 1, + targetsSingleItems: true, + } + + if len(b.paths) != 0 { + return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")) + } + if len(b.resources) == 0 { + return result.withError(fmt.Errorf("you must provide a resource and a resource name together")) + } + if len(b.resources) > 1 { + return result.withError(fmt.Errorf("you must specify only one resource")) + } + + mappings, err := b.resourceMappings() + if err != nil { + result.err = err + return result + } + mapping := mappings[0] + + client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } else { + if len(b.namespace) == 0 { + errMsg := "namespace may not be empty when retrieving a resource by name" + if b.allNamespace { + errMsg = "a resource cannot be retrieved by name across all namespaces" + } + return result.withError(fmt.Errorf(errMsg)) + } + } + + visitors := []Visitor{} + for _, name := range b.names { + info := &Info{ + Client: client, + Mapping: mapping, + Namespace: selectorNamespace, + Name: name, + Subresource: b.subresource, + } + visitors = append(visitors, info) + } + result.visitor = VisitorList(visitors) + result.sources = visitors + return result +} + +func (b *Builder) visitByPaths() *Result { + result := &Result{ + singleItemImplied: !b.dir && !b.stream && len(b.paths) == 1, + targetsSingleItems: true, + } + + if len(b.resources) != 0 { + return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well")) + } + if len(b.names) != 0 { + return result.withError(fmt.Errorf("name cannot be provided when a path is specified")) + } + if len(b.resourceTuples) != 0 { + return result.withError(fmt.Errorf("resource/name arguments cannot be provided when a path is specified")) + } + + var visitors Visitor + if b.continueOnError { + visitors = EagerVisitorList(b.paths) + } else { + visitors = ConcurrentVisitorList{ + visitors: b.paths, + concurrency: b.visitorConcurrency, + } + } + + if b.flatten { + visitors = NewFlattenListVisitor(visitors, b.objectTyper, b.mapper) + } + + // only items from disk can be refetched + if b.latest { + // must set namespace prior to fetching + if b.defaultNamespace { + visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace)) + } + visitors = NewDecoratedVisitor(visitors, RetrieveLatest) + } + if b.labelSelector != nil { + selector, err := labels.Parse(*b.labelSelector) + if err != nil { + return result.withError(fmt.Errorf("the provided selector %q is not valid: %v", *b.labelSelector, err)) + } + visitors = NewFilteredVisitor(visitors, FilterByLabelSelector(selector)) + } + result.visitor = visitors + result.sources = b.paths + return result +} + +// Do returns a Result object with a Visitor for the resources identified by the Builder. +// The visitor will respect the error behavior specified by ContinueOnError. Note that stream +// inputs are consumed by the first execution - use Infos() or Object() on the Result to capture a list +// for further iteration. 
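+// A representative chain (sketch only; the getter, namespace, and filename +// are assumptions for illustration): +// +// infos, err := NewBuilder(getter). +// Unstructured(). +// NamespaceParam("default").DefaultNamespace(). +// FilenameParam(false, &FilenameOptions{Filenames: []string{"app.yaml"}}). +// Flatten(). +// Do(). +// Infos()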
+func (b *Builder) Do() *Result { + r := b.visitorResult() + r.mapper = b.Mapper() + if r.err != nil { + return r + } + if b.flatten { + r.visitor = NewFlattenListVisitor(r.visitor, b.objectTyper, b.mapper) + } + helpers := []VisitorFunc{} + if b.defaultNamespace { + helpers = append(helpers, SetNamespace(b.namespace)) + } + if b.requireNamespace { + helpers = append(helpers, RequireNamespace(b.namespace)) + } + helpers = append(helpers, FilterNamespace) + if b.requireObject { + helpers = append(helpers, RetrieveLazy) + } + if b.continueOnError { + r.visitor = ContinueOnErrorVisitor{Visitor: r.visitor} + } + r.visitor = NewDecoratedVisitor(r.visitor, helpers...) + return r +} + +// SplitResourceArgument splits the argument with commas and returns unique +// strings in the original order. +func SplitResourceArgument(arg string) []string { + out := []string{} + set := sets.NewString() + for _, s := range strings.Split(arg, ",") { + if set.Has(s) { + continue + } + set.Insert(s) + out = append(out, s) + } + return out +} + +// HasNames returns true if the provided args contain resource names +func HasNames(args []string) (bool, error) { + args = normalizeMultipleResourcesArgs(args) + hasCombinedTypes, err := hasCombinedTypeArgs(args) + if err != nil { + return false, err + } + return hasCombinedTypes || len(args) > 1, nil +} + +// expandIfFilePattern returns all the filenames that match the input pattern +// or the filename if it is a specific filename and not a pattern. +// If the input is a pattern and it yields no result it will result in an error. +func expandIfFilePattern(pattern string) ([]string, error) { + if _, err := os.Stat(pattern); os.IsNotExist(err) { + matches, err := filepath.Glob(pattern) + if err == nil && len(matches) == 0 { + return nil, fmt.Errorf(pathNotExistError, pattern) + } + if err == filepath.ErrBadPattern { + return nil, fmt.Errorf("pattern %q is not valid: %v", pattern, err) + } + return matches, err + } + return []string{pattern}, nil +} + +type cachingCategoryExpanderFunc struct { + delegate CategoryExpanderFunc + + lock sync.Mutex + cached restmapper.CategoryExpander +} + +func (c *cachingCategoryExpanderFunc) ToCategoryExpander() (restmapper.CategoryExpander, error) { + c.lock.Lock() + defer c.lock.Unlock() + if c.cached != nil { + return c.cached, nil + } + + ret, err := c.delegate() + if err != nil { + return nil, err + } + c.cached = ret + return c.cached, nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/client.go b/vendor/k8s.io/cli-runtime/pkg/resource/client.go new file mode 100644 index 000000000..cd52c3043 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/client.go @@ -0,0 +1,69 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" +) + +// TODO require negotiatedSerializer. 
leaving it optional lets us plumb current behavior and deal with the difference after major plumbing is complete +func (clientConfigFn ClientConfigFunc) clientForGroupVersion(gv schema.GroupVersion, negotiatedSerializer runtime.NegotiatedSerializer) (RESTClient, error) { + cfg, err := clientConfigFn() + if err != nil { + return nil, err + } + if negotiatedSerializer != nil { + cfg.ContentConfig.NegotiatedSerializer = negotiatedSerializer + } + cfg.GroupVersion = &gv + if len(gv.Group) == 0 { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + + return rest.RESTClientFor(cfg) +} + +func (clientConfigFn ClientConfigFunc) unstructuredClientForGroupVersion(gv schema.GroupVersion) (RESTClient, error) { + cfg, err := clientConfigFn() + if err != nil { + return nil, err + } + cfg.ContentConfig = UnstructuredPlusDefaultContentConfig() + cfg.GroupVersion = &gv + if len(gv.Group) == 0 { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + + return rest.RESTClientFor(cfg) +} + +func (clientConfigFn ClientConfigFunc) withStdinUnavailable(stdinUnavailable bool) ClientConfigFunc { + return func() (*rest.Config, error) { + cfg, err := clientConfigFn() + if stdinUnavailable && cfg != nil && cfg.ExecProvider != nil { + cfg.ExecProvider.StdinUnavailable = stdinUnavailable + cfg.ExecProvider.StdinUnavailableMessage = "used by stdin resource manifest reader" + } + return cfg, err + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go b/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go new file mode 100644 index 000000000..4694f7791 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go @@ -0,0 +1,110 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "context" + "fmt" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" +) + +// CRDGetter is a function that can download the list of GVK for all +// CRDs. +type CRDGetter func() ([]schema.GroupKind, error) + +func CRDFromDynamic(client dynamic.Interface) CRDGetter { + return func() ([]schema.GroupKind, error) { + list, err := client.Resource(schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + }).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list CRDs: %v", err) + } + if list == nil { + return nil, nil + } + + gks := []schema.GroupKind{} + + // We need to parse the list to get the gvk, I guess that's fine. 
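+ // Each CRD contributes one GroupKind; e.g. a CRD with spec.group + // "example.com" and spec.names.kind "Widget" (hypothetical values) + // yields schema.GroupKind{Group: "example.com", Kind: "Widget"}.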
+ for _, crd := range (*list).Items { + // Look for group, version, and kind + group, _, _ := unstructured.NestedString(crd.Object, "spec", "group") + kind, _, _ := unstructured.NestedString(crd.Object, "spec", "names", "kind") + + gks = append(gks, schema.GroupKind{ + Group: group, + Kind: kind, + }) + } + + return gks, nil + } +} + +// CRDFinder keeps a cache of known CRDs and finds a given GVK in the +// list. +type CRDFinder interface { + HasCRD(gvk schema.GroupKind) (bool, error) +} + +func NewCRDFinder(getter CRDGetter) CRDFinder { + return &crdFinder{ + getter: getter, + } +} + +type crdFinder struct { + getter CRDGetter + cache *[]schema.GroupKind +} + +func (f *crdFinder) cacheCRDs() error { + if f.cache != nil { + return nil + } + + list, err := f.getter() + if err != nil { + return err + } + f.cache = &list + return nil +} + +func (f *crdFinder) findCRD(gvk schema.GroupKind) bool { + for _, crd := range *f.cache { + if reflect.DeepEqual(gvk, crd) { + return true + } + } + return false +} + +func (f *crdFinder) HasCRD(gvk schema.GroupKind) (bool, error) { + if err := f.cacheCRDs(); err != nil { + return false, err + } + return f.findCRD(gvk), nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/doc.go b/vendor/k8s.io/cli-runtime/pkg/resource/doc.go new file mode 100644 index 000000000..f83fdcbf8 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resource assists clients in dealing with RESTful objects that match the +// Kubernetes API conventions. The Helper object provides simple CRUD operations +// on resources. The Visitor interface makes it easy to deal with multiple resources +// in bulk for retrieval and operation. The Builder object simplifies converting +// standard command line arguments and parameters into a Visitor that can iterate +// over all of the identified resources, whether on the server or on the local +// filesystem. +package resource // import "k8s.io/cli-runtime/pkg/resource" diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/fake.go b/vendor/k8s.io/cli-runtime/pkg/resource/fake.go new file mode 100644 index 000000000..276c343e2 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package resource
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/restmapper"
+)
+
+// FakeCategoryExpander is for testing only
+var FakeCategoryExpander restmapper.CategoryExpander = restmapper.SimpleCategoryExpander{
+	Expansions: map[string][]schema.GroupResource{
+		"all": {
+			{Group: "", Resource: "pods"},
+			{Group: "", Resource: "replicationcontrollers"},
+			{Group: "", Resource: "services"},
+			{Group: "apps", Resource: "statefulsets"},
+			{Group: "autoscaling", Resource: "horizontalpodautoscalers"},
+			{Group: "batch", Resource: "jobs"},
+			{Group: "batch", Resource: "cronjobs"},
+			{Group: "extensions", Resource: "daemonsets"},
+			{Group: "extensions", Resource: "deployments"},
+			{Group: "extensions", Resource: "replicasets"},
+		},
+	},
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/fallback_query_param_verifier.go b/vendor/k8s.io/cli-runtime/pkg/resource/fallback_query_param_verifier.go
new file mode 100644
index 000000000..05418801e
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/fallback_query_param_verifier.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
+)
+
+// fallbackQueryParamVerifier encapsulates the primary Verifier that
+// is invoked, and the secondary/fallback Verifier.
+type fallbackQueryParamVerifier struct {
+	primary   Verifier
+	secondary Verifier
+}
+
+var _ Verifier = &fallbackQueryParamVerifier{}
+
+// NewFallbackQueryParamVerifier returns a new Verifier which will invoke the
+// initial/primary Verifier. If the primary Verifier is "NotFound", then the
+// secondary Verifier is invoked as a fallback.
+func NewFallbackQueryParamVerifier(primary Verifier, secondary Verifier) Verifier {
+	return &fallbackQueryParamVerifier{
+		primary:   primary,
+		secondary: secondary,
+	}
+}
+
+// HasSupport returns an error if the passed GVK does not support the
+// query param (fieldValidation), as determined by the primary and
+// secondary OpenAPI endpoints. The primary endpoint is checked first,
+// but if there is an error retrieving the OpenAPI V3 document, the
+// secondary attempts to determine support. If the GVK supports the query param,
+// nil is returned.
+func (f *fallbackQueryParamVerifier) HasSupport(gvk schema.GroupVersionKind) error {
+	err := f.primary.HasSupport(gvk)
+	// If an error was returned from the primary OpenAPI endpoint,
+	// we fall back to check the secondary OpenAPI endpoint for
+	// any error *except* "paramUnsupportedError".
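+	// A paramUnsupportedError from the primary endpoint is authoritative,
+	// so no fallback lookup is attempted in that case.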
+ if err != nil && !IsParamUnsupportedError(err) { + klog.V(7).Infof("openapi v3 error...falling back to legacy: %s", err) + err = f.secondary.HasSupport(gvk) + } + return err +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/helper.go b/vendor/k8s.io/cli-runtime/pkg/resource/helper.go new file mode 100644 index 000000000..aa400ae0e --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/helper.go @@ -0,0 +1,321 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +var metadataAccessor = meta.NewAccessor() + +// Helper provides methods for retrieving or mutating a RESTful +// resource. +type Helper struct { + // The name of this resource as the server would recognize it + Resource string + // The name of the subresource as the server would recognize it + Subresource string + // A RESTClient capable of mutating this resource. + RESTClient RESTClient + // True if the resource type is scoped to namespaces + NamespaceScoped bool + // If true, then use server-side dry-run to not persist changes to storage + // for verbs and resources that support server-side dry-run. + // + // Note this should only be used against an apiserver with dry-run enabled, + // and on resources that support dry-run. If the apiserver or the resource + // does not support dry-run, then the change will be persisted to storage. + ServerDryRun bool + + // FieldManager is the name associated with the actor or entity that is making + // changes. + FieldManager string + + // FieldValidation is the directive used to indicate how the server should perform + // field validation (Ignore, Warn, or Strict) + FieldValidation string +} + +// NewHelper creates a Helper from a ResourceMapping +func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper { + return &Helper{ + Resource: mapping.Resource.Resource, + RESTClient: client, + NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace, + } +} + +// DryRun, if true, will use server-side dry-run to not persist changes to storage. +// Otherwise, changes will be persisted to storage. +func (m *Helper) DryRun(dryRun bool) *Helper { + m.ServerDryRun = dryRun + return m +} + +// WithFieldManager sets the field manager option to indicate the actor or entity +// that is making changes in a create or update operation. +func (m *Helper) WithFieldManager(fieldManager string) *Helper { + m.FieldManager = fieldManager + return m +} + +// WithFieldValidation sets the field validation option to indicate +// how the server should perform field validation (Ignore, Warn, or Strict). 
+func (m *Helper) WithFieldValidation(validationDirective string) *Helper {
+	m.FieldValidation = validationDirective
+	return m
+}
+
+// WithSubresource sets the helper to access (<resource>/[ns/<namespace>/]<name>/<subresource>)
+func (m *Helper) WithSubresource(subresource string) *Helper {
+	m.Subresource = subresource
+	return m
+}
+
+func (m *Helper) Get(namespace, name string) (runtime.Object, error) {
+	req := m.RESTClient.Get().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		Name(name).
+		SubResource(m.Subresource)
+	return req.Do(context.TODO()).Get()
+}
+
+func (m *Helper) List(namespace, apiVersion string, options *metav1.ListOptions) (runtime.Object, error) {
+	req := m.RESTClient.Get().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		VersionedParams(options, metav1.ParameterCodec)
+	return req.Do(context.TODO()).Get()
+}
+
+// FollowContinue handles the continue parameter returned by the API server when using list
+// chunking. To take advantage of this, the initial ListOptions provided by the consumer
+// should include a non-zero Limit parameter.
+func FollowContinue(initialOpts *metav1.ListOptions,
+	listFunc func(metav1.ListOptions) (runtime.Object, error)) error {
+	opts := initialOpts
+	for {
+		list, err := listFunc(*opts)
+		if err != nil {
+			return err
+		}
+		nextContinueToken, _ := metadataAccessor.Continue(list)
+		if len(nextContinueToken) == 0 {
+			return nil
+		}
+		opts.Continue = nextContinueToken
+	}
+}
+
+// EnhanceListError augments errors typically returned by List operations with additional context,
+// making sure to retain the StatusError type when applicable.
+func EnhanceListError(err error, opts metav1.ListOptions, subj string) error {
+	if apierrors.IsResourceExpired(err) {
+		return err
+	}
+	if apierrors.IsBadRequest(err) || apierrors.IsNotFound(err) {
+		if se, ok := err.(*apierrors.StatusError); ok {
+			// modify the message without hiding this is an API error
+			if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 {
+				se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", subj,
+					se.ErrStatus.Message)
+			} else {
+				se.ErrStatus.Message = fmt.Sprintf(
+					"Unable to find %q that match label selector %q, field selector %q: %v", subj,
+					opts.LabelSelector,
+					opts.FieldSelector, se.ErrStatus.Message)
+			}
+			return se
+		}
+		if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 {
+			return fmt.Errorf("Unable to list %q: %v", subj, err)
+		}
+		return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v",
+			subj, opts.LabelSelector, opts.FieldSelector, err)
+	}
+	return err
+}
+
+func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) {
+	options.Watch = true
+	return m.RESTClient.Get().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		VersionedParams(options, metav1.ParameterCodec).
+		Watch(context.TODO())
+}
+
+func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) {
+	return m.RESTClient.Get().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		VersionedParams(&metav1.ListOptions{
+			ResourceVersion: resourceVersion,
+			Watch:           true,
+			FieldSelector:   fields.OneTermEqualSelector("metadata.name", name).String(),
+		}, metav1.ParameterCodec).
+ Watch(context.TODO()) +} + +func (m *Helper) Delete(namespace, name string) (runtime.Object, error) { + return m.DeleteWithOptions(namespace, name, nil) +} + +func (m *Helper) DeleteWithOptions(namespace, name string, options *metav1.DeleteOptions) (runtime.Object, error) { + if options == nil { + options = &metav1.DeleteOptions{} + } + if m.ServerDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + + return m.RESTClient.Delete(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + Body(options). + Do(context.TODO()). + Get() +} + +func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) { + return m.CreateWithOptions(namespace, modify, obj, nil) +} + +func (m *Helper) CreateWithOptions(namespace string, modify bool, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) { + if options == nil { + options = &metav1.CreateOptions{} + } + if m.ServerDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } + if m.FieldValidation != "" { + options.FieldValidation = m.FieldValidation + } + if modify { + // Attempt to version the object based on client logic. + version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + // We don't know how to clear the version on this object, so send it to the server as is + return m.createResource(m.RESTClient, m.Resource, namespace, obj, options) + } + if version != "" { + if err := metadataAccessor.SetResourceVersion(obj, ""); err != nil { + return nil, err + } + } + } + + return m.createResource(m.RESTClient, m.Resource, namespace, obj, options) +} + +func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) { + return c.Post(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(resource). + VersionedParams(options, metav1.ParameterCodec). + Body(obj). + Do(context.TODO()). + Get() +} +func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte, options *metav1.PatchOptions) (runtime.Object, error) { + if options == nil { + options = &metav1.PatchOptions{} + } + if m.ServerDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } + if m.FieldValidation != "" { + options.FieldValidation = m.FieldValidation + } + return m.RESTClient.Patch(pt). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + SubResource(m.Subresource). + VersionedParams(options, metav1.ParameterCodec). + Body(data). + Do(context.TODO()). + Get() +} + +func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) { + c := m.RESTClient + var options = &metav1.UpdateOptions{} + if m.ServerDryRun { + options.DryRun = []string{metav1.DryRunAll} + } + if m.FieldManager != "" { + options.FieldManager = m.FieldManager + } + if m.FieldValidation != "" { + options.FieldValidation = m.FieldValidation + } + + // Attempt to version the object based on client logic. 
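+	// If the object carries no resourceVersion and overwrite was requested,
+	// the server's current resourceVersion is copied onto the object below.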
+ version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + // We don't know how to version this object, so send it to the server as is + return m.replaceResource(c, m.Resource, namespace, name, obj, options) + } + if version == "" && overwrite { + // Retrieve the current version of the object to overwrite the server object + serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).SubResource(m.Subresource).Do(context.TODO()).Get() + if err != nil { + // The object does not exist, but we want it to be created + return m.replaceResource(c, m.Resource, namespace, name, obj, options) + } + serverVersion, err := metadataAccessor.ResourceVersion(serverObj) + if err != nil { + return nil, err + } + if err := metadataAccessor.SetResourceVersion(obj, serverVersion); err != nil { + return nil, err + } + } + + return m.replaceResource(c, m.Resource, namespace, name, obj, options) +} + +func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object, options *metav1.UpdateOptions) (runtime.Object, error) { + return c.Put(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(resource). + Name(name). + SubResource(m.Subresource). + VersionedParams(options, metav1.ParameterCodec). + Body(obj). + Do(context.TODO()). + Get() +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go b/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go new file mode 100644 index 000000000..29d7b34ab --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go @@ -0,0 +1,103 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +type RESTClientGetter interface { + ToRESTConfig() (*rest.Config, error) + ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) + ToRESTMapper() (meta.RESTMapper, error) +} + +type ClientConfigFunc func() (*rest.Config, error) +type RESTMapperFunc func() (meta.RESTMapper, error) +type CategoryExpanderFunc func() (restmapper.CategoryExpander, error) + +// RESTClient is a client helper for dealing with RESTful resources +// in a generic way. +type RESTClient interface { + Get() *rest.Request + Post() *rest.Request + Patch(types.PatchType) *rest.Request + Delete() *rest.Request + Put() *rest.Request +} + +// RequestTransform is a function that is given a chance to modify the outgoing request. +type RequestTransform func(*rest.Request) + +// NewClientWithOptions wraps the provided RESTClient and invokes each transform on each +// newly created request. 
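+// Transforms are applied in the order given, on every request created through
+// the returned client; with no transforms the original client is returned as-is.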
+func NewClientWithOptions(c RESTClient, transforms ...RequestTransform) RESTClient {
+	if len(transforms) == 0 {
+		return c
+	}
+	return &clientOptions{c: c, transforms: transforms}
+}
+
+type clientOptions struct {
+	c          RESTClient
+	transforms []RequestTransform
+}
+
+func (c *clientOptions) modify(req *rest.Request) *rest.Request {
+	for _, transform := range c.transforms {
+		transform(req)
+	}
+	return req
+}
+
+func (c *clientOptions) Get() *rest.Request {
+	return c.modify(c.c.Get())
+}
+
+func (c *clientOptions) Post() *rest.Request {
+	return c.modify(c.c.Post())
+}
+
+func (c *clientOptions) Patch(t types.PatchType) *rest.Request {
+	return c.modify(c.c.Patch(t))
+}
+
+func (c *clientOptions) Delete() *rest.Request {
+	return c.modify(c.c.Delete())
+}
+
+func (c *clientOptions) Put() *rest.Request {
+	return c.modify(c.c.Put())
+}
+
+// ContentValidator is an interface that knows how to validate an API object serialized to a byte array.
+type ContentValidator interface {
+	ValidateBytes(data []byte) error
+}
+
+// Visitor lets clients walk a list of resources.
+type Visitor interface {
+	Visit(VisitorFunc) error
+}
+
+// VisitorFunc implements the Visitor interface for a matching function.
+// If there was a problem walking a list of resources, the incoming error
+// will describe the problem and the function can decide how to handle that error.
+// Returning nil accepts the error and lets iteration continue even when errors
+// happen, which is useful for ignoring certain kinds of errors or aggregating
+// errors in some way.
+type VisitorFunc func(*Info, error) error
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/kustomizevisitor.go b/vendor/k8s.io/cli-runtime/pkg/resource/kustomizevisitor.go
new file mode 100644
index 000000000..32895ea4f
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/kustomizevisitor.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"bytes"
+
+	"sigs.k8s.io/kustomize/api/krusty"
+	"sigs.k8s.io/kustomize/kyaml/filesys"
+)
+
+// KustomizeVisitor handles kustomization.yaml files.
+type KustomizeVisitor struct {
+	mapper *mapper
+	schema ContentValidator
+	// Directory expected to contain a kustomization file.
+	dirPath string
+	// File system containing dirPath.
+	fSys filesys.FileSystem
+	// Holds result of kustomize build, retained for tests.
+	yml []byte
+}
+
+// Visit passes the result of a kustomize build to a StreamVisitor.
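+// The build output is rendered to YAML and retained in v.yml so that tests
+// can inspect it.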
+func (v *KustomizeVisitor) Visit(fn VisitorFunc) error {
+	kOpts := krusty.MakeDefaultOptions()
+	kOpts.Reorder = krusty.ReorderOptionLegacy
+	k := krusty.MakeKustomizer(kOpts)
+	m, err := k.Run(v.fSys, v.dirPath)
+	if err != nil {
+		return err
+	}
+	v.yml, err = m.AsYaml()
+	if err != nil {
+		return err
+	}
+	sv := NewStreamVisitor(
+		bytes.NewReader(v.yml), v.mapper, v.dirPath, v.schema)
+	return sv.Visit(fn)
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go b/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go
new file mode 100644
index 000000000..5180610e2
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource

+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// mapper is a convenience struct for holding references to the interfaces
+// needed to create Info for arbitrary objects.
+type mapper struct {
+	// localFn indicates the call can't make server requests
+	localFn func() bool
+
+	restMapperFn RESTMapperFunc
+	clientFn     func(version schema.GroupVersion) (RESTClient, error)
+	decoder      runtime.Decoder
+}
+
+// infoForData creates an Info object for the given data. An error is returned
+// if any of the decoding or client lookup steps fail. Name and namespace will be
+// set into Info if the mapping's MetadataAccessor can retrieve them.
+func (m *mapper) infoForData(data []byte, source string) (*Info, error) {
+	obj, gvk, err := m.decoder.Decode(data, nil, nil)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode %q: %v", source, err)
+	}
+
+	name, _ := metadataAccessor.Name(obj)
+	namespace, _ := metadataAccessor.Namespace(obj)
+	resourceVersion, _ := metadataAccessor.ResourceVersion(obj)
+
+	ret := &Info{
+		Source:          source,
+		Namespace:       namespace,
+		Name:            name,
+		ResourceVersion: resourceVersion,
+
+		Object: obj,
+	}
+
+	if m.localFn == nil || !m.localFn() {
+		restMapper, err := m.restMapperFn()
+		if err != nil {
+			return nil, err
+		}
+		mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+		if err != nil {
+			if _, ok := err.(*meta.NoKindMatchError); ok {
+				return nil, fmt.Errorf("resource mapping not found for name: %q namespace: %q from %q: %v\nensure CRDs are installed first",
+					name, namespace, source, err)
+			}
+			return nil, fmt.Errorf("unable to recognize %q: %v", source, err)
+		}
+		ret.Mapping = mapping
+
+		client, err := m.clientFn(gvk.GroupVersion())
+		if err != nil {
+			return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err)
+		}
+		ret.Client = client
+	}
+
+	return ret, nil
+}
+
+// infoForObject creates an Info object for the given Object. An error is returned
+// if the object cannot be introspected. Name and namespace will be set into Info
+// if the mapping's MetadataAccessor can retrieve them.
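+// When the typer reports multiple possible kinds, preferredGVKs is consulted
+// via preferredObjectKind to pick the closest match.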
+func (m *mapper) infoForObject(obj runtime.Object, typer runtime.ObjectTyper, preferredGVKs []schema.GroupVersionKind) (*Info, error) { + groupVersionKinds, _, err := typer.ObjectKinds(obj) + if err != nil { + return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err) + } + + gvk := groupVersionKinds[0] + if len(groupVersionKinds) > 1 && len(preferredGVKs) > 0 { + gvk = preferredObjectKind(groupVersionKinds, preferredGVKs) + } + + name, _ := metadataAccessor.Name(obj) + namespace, _ := metadataAccessor.Namespace(obj) + resourceVersion, _ := metadataAccessor.ResourceVersion(obj) + ret := &Info{ + Namespace: namespace, + Name: name, + ResourceVersion: resourceVersion, + + Object: obj, + } + + if m.localFn == nil || !m.localFn() { + restMapper, err := m.restMapperFn() + if err != nil { + return nil, err + } + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, fmt.Errorf("unable to recognize %v", err) + } + ret.Mapping = mapping + + client, err := m.clientFn(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) + } + ret.Client = client + } + + return ret, nil +} + +// preferredObjectKind picks the possibility that most closely matches the priority list in this order: +// GroupVersionKind matches (exact match) +// GroupKind matches +// Group matches +func preferredObjectKind(possibilities []schema.GroupVersionKind, preferences []schema.GroupVersionKind) schema.GroupVersionKind { + // Exact match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility == priority { + return possibility + } + } + } + + // GroupKind match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility.GroupKind() == priority.GroupKind() { + return possibility + } + } + } + + // Group match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility.Group == priority.Group { + return possibility + } + } + } + + // Just pick the first + return possibilities[0] +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go b/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go new file mode 100644 index 000000000..d688c3a08 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package resource
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utiljson "k8s.io/apimachinery/pkg/util/json"
+)
+
+// metadataValidatingDecoder wraps a decoder and additionally ensures metadata schema fields decode before returning an unstructured object
+type metadataValidatingDecoder struct {
+	decoder runtime.Decoder
+}
+
+func (m *metadataValidatingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	obj, gvk, err := m.decoder.Decode(data, defaults, into)
+
+	// if we already errored, return
+	if err != nil {
+		return obj, gvk, err
+	}
+
+	// if we're not unstructured, return
+	if _, isUnstructured := obj.(runtime.Unstructured); !isUnstructured {
+		return obj, gvk, err
+	}
+
+	// make sure the data can decode into ObjectMeta before we return,
+	// so we don't silently truncate schema errors in metadata later with accessor get/set calls
+	v := &metadataOnlyObject{}
+	if typedErr := utiljson.Unmarshal(data, v); typedErr != nil {
+		return obj, gvk, typedErr
+	}
+	return obj, gvk, err
+}
+
+type metadataOnlyObject struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier.go b/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier.go
new file mode 100644
index 000000000..b9000a08a
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier.go
@@ -0,0 +1,176 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"errors"
+	"fmt"
+
+	openapi_v2 "github.com/google/gnostic-models/openapiv2"
+	yaml "gopkg.in/yaml.v2"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+)
+
+func NewQueryParamVerifier(dynamicClient dynamic.Interface, openAPIGetter discovery.OpenAPISchemaInterface, queryParam VerifiableQueryParam) *QueryParamVerifier {
+	return &QueryParamVerifier{
+		finder:        NewCRDFinder(CRDFromDynamic(dynamicClient)),
+		openAPIGetter: openAPIGetter,
+		queryParam:    queryParam,
+	}
+}
+
+// QueryParamVerifier verifies if a given group-version-kind supports a
+// given VerifiableQueryParam against the current server.
+//
+// Currently supported query params are: fieldValidation
+//
+// Support for each of these query params needs to be verified because
+// we determine whether or not to perform server-side or client-side
+// schema validation based on whether the fieldValidation query param is
+// supported or not.
+//
+// It reads the OpenAPI to see if the given GVK supports the given query param.
+// If the GVK cannot be found, we assume that CRDs will have the same level of
+// support as "namespaces", and non-CRDs will not be supported. We
+// delay the check for CRDs as much as possible though, since it
+// requires an extra round-trip to the server.
+type QueryParamVerifier struct {
+	finder        CRDFinder
+	openAPIGetter discovery.OpenAPISchemaInterface
+	queryParam    VerifiableQueryParam
+}
+
+// Verifier is the generic verifier interface used for testing QueryParamVerifier
+type Verifier interface {
+	HasSupport(gvk schema.GroupVersionKind) error
+}
+
+// VerifiableQueryParam is a query parameter whose enablement on the
+// apiserver can be determined by evaluating the OpenAPI for a specific
+// GVK.
+type VerifiableQueryParam string
+
+const (
+	QueryParamFieldValidation VerifiableQueryParam = "fieldValidation"
+)
+
+// HasSupport checks if the given gvk supports the query param configured on v
+func (v *QueryParamVerifier) HasSupport(gvk schema.GroupVersionKind) error {
+	if (gvk == schema.GroupVersionKind{Version: "v1", Kind: "List"}) {
+		return NewParamUnsupportedError(gvk, v.queryParam)
+	}
+
+	oapi, err := v.openAPIGetter.OpenAPISchema()
+	if err != nil {
+		return fmt.Errorf("failed to download openapi: %v", err)
+	}
+	supports, err := supportsQueryParam(oapi, gvk, v.queryParam)
+	if err != nil {
+		// We assume that we couldn't find the type, then check for namespace:
+		supports, _ = supportsQueryParam(oapi, schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}, v.queryParam)
+		// If namespace supports the query param, then we will support the query param for CRDs only.
+		if supports {
+			supports, err = v.finder.HasCRD(gvk.GroupKind())
+			if err != nil {
+				return fmt.Errorf("failed to check CRD: %v", err)
+			}
+		}
+	}
+	if !supports {
+		return NewParamUnsupportedError(gvk, v.queryParam)
+	}
+	return nil
+}
+
+type paramUnsupportedError struct {
+	gvk   schema.GroupVersionKind
+	param VerifiableQueryParam
+}
+
+func NewParamUnsupportedError(gvk schema.GroupVersionKind, param VerifiableQueryParam) error {
+	return &paramUnsupportedError{
+		gvk:   gvk,
+		param: param,
+	}
+}
+
+func (e *paramUnsupportedError) Error() string {
+	return fmt.Sprintf("%v doesn't support %s", e.gvk, e.param)
+}
+
+func IsParamUnsupportedError(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*paramUnsupportedError)
+	return ok
+}
+
+func hasGVKExtension(extensions []*openapi_v2.NamedAny, gvk schema.GroupVersionKind) bool {
+	for _, extension := range extensions {
+		if extension.GetValue().GetYaml() == "" ||
+			extension.GetName() != "x-kubernetes-group-version-kind" {
+			continue
+		}
+		var value map[string]string
+		err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &value)
+		if err != nil {
+			continue
+		}
+
+		if value["group"] == gvk.Group && value["kind"] == gvk.Kind && value["version"] == gvk.Version {
+			return true
+		}
+		return false
+	}
+	return false
+}
+
+// supportsQueryParam is a function that lets us look in the OpenAPI if the
+// specific group-version-kind supports the specific query parameter for
+// the PATCH endpoint.
+func supportsQueryParam(doc *openapi_v2.Document, gvk schema.GroupVersionKind, queryParam VerifiableQueryParam) (bool, error) {
+	globalParams := map[string]*openapi_v2.NamedParameter{}
+	for _, p := range doc.GetParameters().GetAdditionalProperties() {
+		globalParams["#/parameters/"+p.GetName()] = p
+	}
+
+	for _, path := range doc.GetPaths().GetPath() {
+		// Is this describing the gvk we're looking for?
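+		// The x-kubernetes-group-version-kind vendor extension on the PATCH
+		// operation identifies which GVK a path serves.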
+ if !hasGVKExtension(path.GetValue().GetPatch().GetVendorExtension(), gvk) { + continue + } + for _, param := range path.GetValue().GetPatch().GetParameters() { + if param.GetParameter().GetNonBodyParameter().GetQueryParameterSubSchema().GetName() == string(queryParam) { + return true, nil + } + + // lookup global parameters + if ref := param.GetJsonReference().GetXRef(); ref != "" { + if globalParam, ok := globalParams[ref]; ok && globalParam != nil && globalParam.GetValue().GetNonBodyParameter().GetQueryParameterSubSchema().GetName() == string(queryParam) { + return true, nil + } + } + } + return false, nil + } + + return false, errors.New("couldn't find GVK in openapi") +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier_v3.go b/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier_v3.go new file mode 100644 index 000000000..7183ea09a --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier_v3.go @@ -0,0 +1,145 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/openapi" + "k8s.io/client-go/openapi3" + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +var _ Verifier = &queryParamVerifierV3{} + +// NewQueryParamVerifierV3 returns a pointer to the created queryParamVerifier3 struct, +// which implements the Verifier interface. The caching characteristics of the +// OpenAPI V3 specs are determined by the passed oapiClient. For memory caching, the +// client should be wrapped beforehand as: cached.NewClient(oapiClient). The disk +// caching is determined by the discovery client the oapiClient is created from. +func NewQueryParamVerifierV3(dynamicClient dynamic.Interface, oapiClient openapi.Client, queryParam VerifiableQueryParam) Verifier { + return &queryParamVerifierV3{ + finder: NewCRDFinder(CRDFromDynamic(dynamicClient)), + root: openapi3.NewRoot(oapiClient), + queryParam: queryParam, + } +} + +// queryParamVerifierV3 encapsulates info necessary to determine if +// the queryParam is a parameter for the Patch endpoint for a +// passed GVK. +type queryParamVerifierV3 struct { + finder CRDFinder + root openapi3.Root + queryParam VerifiableQueryParam +} + +var namespaceGVK = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} + +// HasSupport returns nil error if the passed GVK supports the parameter +// (stored in struct; usually "fieldValidation") for Patch endpoint. +// Returns an error if the passed GVK does not support the query param, +// or if another error occurred. If the Open API V3 spec for a CRD is not +// found, then the spec for Namespace is checked for query param support instead. 
+func (v *queryParamVerifierV3) HasSupport(gvk schema.GroupVersionKind) error {
+	if (gvk == schema.GroupVersionKind{Version: "v1", Kind: "List"}) {
+		return NewParamUnsupportedError(gvk, v.queryParam)
+	}
+	gvSpec, err := v.root.GVSpec(gvk.GroupVersion())
+	if err == nil {
+		return supportsQueryParamV3(gvSpec, gvk, v.queryParam)
+	}
+	if _, isErr := err.(*openapi3.GroupVersionNotFoundError); !isErr {
+		return err
+	}
+	// If the spec for the passed GVK is not found, then check if it is a CRD.
+	// For CRDs, substitute the Namespace OpenAPI V3 spec to check if the query param is supported.
+	if found, _ := v.finder.HasCRD(gvk.GroupKind()); found {
+		namespaceSpec, err := v.root.GVSpec(namespaceGVK.GroupVersion())
+		if err != nil {
+			// If error retrieving Namespace spec, propagate error.
+			return err
+		}
+		return supportsQueryParamV3(namespaceSpec, namespaceGVK, v.queryParam)
+	}
+	return NewParamUnsupportedError(gvk, v.queryParam)
+}
+
+// hasGVKExtensionV3 returns true if the passed OpenAPI extensions map contains
+// the passed GVK; false otherwise.
+func hasGVKExtensionV3(extensions spec.Extensions, gvk schema.GroupVersionKind) bool {
+	var oapiGVK map[string]string
+	err := extensions.GetObject("x-kubernetes-group-version-kind", &oapiGVK)
+	if err != nil {
+		return false
+	}
+	if oapiGVK["group"] == gvk.Group &&
+		oapiGVK["version"] == gvk.Version &&
+		oapiGVK["kind"] == gvk.Kind {
+		return true
+	}
+	return false
+}
+
+// supportsQueryParamV3 is a function that lets us look in the OpenAPI if the
+// specific group-version-kind supports the specific query parameter for
+// the PATCH endpoint. Returns nil if the passed GVK supports the passed
+// query parameter; otherwise, a "paramUnsupportedError" is returned (except
+// that an invalid-document error is returned when a malformed OpenAPI V3
+// document is passed in).
+func supportsQueryParamV3(doc *spec3.OpenAPI, gvk schema.GroupVersionKind, queryParam VerifiableQueryParam) error {
+	if doc == nil || doc.Paths == nil {
+		return fmt.Errorf("Invalid OpenAPI V3 document")
+	}
+	for _, path := range doc.Paths.Paths {
+		// If operation is not PATCH, then continue.
+		if path == nil {
+			continue
+		}
+		op := path.PathProps.Patch
+		if op == nil {
+			continue
+		}
+		// Is this PATCH operation for the passed GVK?
+		if !hasGVKExtensionV3(op.VendorExtensible.Extensions, gvk) {
+			continue
+		}
+		// Now look for the query parameter among the parameters
+		// for the PATCH operation.
+		for _, param := range op.OperationProps.Parameters {
+			if param.ParameterProps.Name == string(queryParam) && param.In == "query" {
+				return nil
+			}
+
+			// lookup global parameters
+			if ref := param.Refable.Ref.Ref.String(); strings.HasPrefix(ref, "#/parameters/") && doc.Components != nil {
+				k := strings.TrimPrefix(ref, "#/parameters/")
+				if globalParam, ok := doc.Components.Parameters[k]; ok && globalParam != nil {
+					if globalParam.In == "query" && globalParam.Name == string(queryParam) {
+						return nil
+					}
+				}
+			}
+		}
+		return NewParamUnsupportedError(gvk, queryParam)
+	}
+	return fmt.Errorf("Path not found for GVK (%s) in OpenAPI V3 doc", gvk)
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/result.go b/vendor/k8s.io/cli-runtime/pkg/resource/result.go
new file mode 100644
index 000000000..b8722afe6
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/result.go
@@ -0,0 +1,242 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// ErrMatchFunc can be used to filter errors that may not be true failures.
+type ErrMatchFunc func(error) bool
+
+// Result contains helper methods for dealing with the outcome of a Builder.
+type Result struct {
+	err     error
+	visitor Visitor
+
+	sources            []Visitor
+	singleItemImplied  bool
+	targetsSingleItems bool
+
+	mapper       *mapper
+	ignoreErrors []utilerrors.Matcher
+
+	// populated by a call to Infos
+	info []*Info
+}
+
+// withError allows a fluent style for internal result code.
+func (r *Result) withError(err error) *Result {
+	r.err = err
+	return r
+}
+
+// TargetsSingleItems returns true if any of the builder arguments pointed
+// to non-list calls (if the user explicitly asked for any object by name).
+// This includes directories, streams, URLs, and resource name tuples.
+func (r *Result) TargetsSingleItems() bool {
+	return r.targetsSingleItems
+}
+
+// IgnoreErrors will filter errors that occur when visiting the result
+// (but not errors that occur by creating the result in the first place),
+// eliminating any that match fns. This is best used in combination with
+// Builder.ContinueOnError(), where the visitors accumulate errors and return
+// them after visiting as a slice of errors. If no errors remain after
+// filtering, the various visitor methods on Result will return nil for
+// err.
+func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result {
+	for _, fn := range fns {
+		r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn))
+	}
+	return r
+}
+
+// Mapper returns a copy of the builder's mapper.
+func (r *Result) Mapper() *mapper {
+	return r.mapper
+}
+
+// Err returns one or more errors (via a util.ErrorList) that occurred prior
+// to visiting the elements in the visitor. To see all errors including those
+// that occur during visitation, invoke Infos().
+func (r *Result) Err() error {
+	return r.err
+}
+
+// Visit implements the Visitor interface on the items described in the Builder.
+// Note that some visitor sources are not traversable more than once, or may
+// return different results. If you wish to operate on the same set of resources
+// multiple times, use the Infos() method.
+func (r *Result) Visit(fn VisitorFunc) error {
+	if r.err != nil {
+		return r.err
+	}
+	err := r.visitor.Visit(fn)
+	return utilerrors.FilterOut(err, r.ignoreErrors...)
+}
+
+// IntoSingleItemImplied sets the provided boolean pointer to true if the Builder
+// input implied a single item, and to false if it implied multiple.
+func (r *Result) IntoSingleItemImplied(b *bool) *Result {
+	*b = r.singleItemImplied
+	return r
+}
+
+// Infos returns an array of all of the resource infos retrieved via traversal.
+// Will attempt to traverse the entire set of visitors only once, and will return
+// a cached list on subsequent calls.
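+// Errors matched by IgnoreErrors are filtered out before the result is cached.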
+func (r *Result) Infos() ([]*Info, error) { + if r.err != nil { + return nil, r.err + } + if r.info != nil { + return r.info, nil + } + + infos := []*Info{} + err := r.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + infos = append(infos, info) + return nil + }) + err = utilerrors.FilterOut(err, r.ignoreErrors...) + + r.info, r.err = infos, err + return infos, err +} + +// Object returns a single object representing the output of a single visit to all +// found resources. If the Builder was a singular context (expected to return a +// single resource by user input) and only a single resource was found, the resource +// will be returned as is. Otherwise, the returned resources will be part of an +// v1.List. The ResourceVersion of the v1.List will be set only if it is identical +// across all infos returned. +func (r *Result) Object() (runtime.Object, error) { + infos, err := r.Infos() + if err != nil { + return nil, err + } + + versions := sets.String{} + objects := []runtime.Object{} + for _, info := range infos { + if info.Object != nil { + objects = append(objects, info.Object) + versions.Insert(info.ResourceVersion) + } + } + + if len(objects) == 1 { + if r.singleItemImplied { + return objects[0], nil + } + // if the item is a list already, don't create another list + if meta.IsListType(objects[0]) { + return objects[0], nil + } + } + + version := "" + if len(versions) == 1 { + version = versions.List()[0] + } + + return toV1List(objects, version), err +} + +// Compile time check to enforce that list implements the necessary interface +var _ metav1.ListInterface = &v1.List{} +var _ metav1.ListMetaAccessor = &v1.List{} + +// toV1List takes a slice of Objects + their version, and returns +// a v1.List Object containing the objects in the Items field +func toV1List(objects []runtime.Object, version string) runtime.Object { + raw := []runtime.RawExtension{} + for _, o := range objects { + raw = append(raw, runtime.RawExtension{Object: o}) + } + return &v1.List{ + ListMeta: metav1.ListMeta{ + ResourceVersion: version, + }, + Items: raw, + } +} + +// ResourceMapping returns a single meta.RESTMapping representing the +// resources located by the builder, or an error if more than one +// mapping was found. +func (r *Result) ResourceMapping() (*meta.RESTMapping, error) { + if r.err != nil { + return nil, r.err + } + mappings := map[schema.GroupVersionResource]*meta.RESTMapping{} + for i := range r.sources { + m, ok := r.sources[i].(ResourceMapping) + if !ok { + return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i])) + } + mapping := m.ResourceMapping() + mappings[mapping.Resource] = mapping + } + if len(mappings) != 1 { + return nil, fmt.Errorf("expected only a single resource type") + } + for _, mapping := range mappings { + return mapping, nil + } + return nil, nil +} + +// Watch retrieves changes that occur on the server to the specified resource. +// It currently supports watching a single source - if the resource source +// (selectors or pure types) can be watched, they will be, otherwise the list +// will be visited (equivalent to the Infos() call) and if there is a single +// resource present, it will be watched, otherwise an error will be returned. 
+func (r *Result) Watch(resourceVersion string) (watch.Interface, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if len(r.sources) != 1 {
+		return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time")
+	}
+	w, ok := r.sources[0].(Watchable)
+	if !ok {
+		info, err := r.Infos()
+		if err != nil {
+			return nil, err
+		}
+		if len(info) != 1 {
+			return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info))
+		}
+		return info[0].Watch(resourceVersion)
+	}
+	return w.Watch(resourceVersion)
}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go b/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go
new file mode 100644
index 000000000..858a46202
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+)
+
+// dynamicCodec is a codec that wraps the standard unstructured codec
+// with special handling for Status objects.
+// Deprecated: only used by test code, and it's wrong.
+type dynamicCodec struct{}
+
+func (dynamicCodec) Decode(data []byte, gvk *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(data, gvk, obj)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if strings.EqualFold(gvk.Kind, "status") && gvk.Version == "v1" && (gvk.Group == "" || gvk.Group == "meta.k8s.io") {
+		if _, ok := obj.(*metav1.Status); !ok {
+			obj = &metav1.Status{}
+			err := json.Unmarshal(data, obj)
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+	}
+
+	return obj, gvk, nil
+}
+
+func (dynamicCodec) Encode(obj runtime.Object, w io.Writer) error {
+	// There is no need to handle runtime.CacheableObject, as we only
+	// fallback to other encoders here.
+	return unstructured.UnstructuredJSONScheme.Encode(obj, w)
+}
+
+// Identifier implements runtime.Encoder interface.
+func (dynamicCodec) Identifier() runtime.Identifier {
+	return unstructured.UnstructuredJSONScheme.Identifier()
+}
+
+// UnstructuredPlusDefaultContentConfig returns a rest.ContentConfig for dynamic types. It includes enough codecs to act as a "normal"
+// serializer for the rest.client with options, status and the like.
+func UnstructuredPlusDefaultContentConfig() rest.ContentConfig { + // TODO: scheme.Codecs here should become "pkg/apis/server/scheme" which is the minimal core you need + // to talk to a kubernetes server + jsonInfo, _ := runtime.SerializerInfoForMediaType(scheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) + + jsonInfo.Serializer = dynamicCodec{} + jsonInfo.PrettySerializer = nil + return rest.ContentConfig{ + AcceptContentTypes: runtime.ContentTypeJSON, + ContentType: runtime.ContentTypeJSON, + NegotiatedSerializer: serializer.NegotiatedSerializerWrapper(jsonInfo), + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/selector.go b/vendor/k8s.io/cli-runtime/pkg/resource/selector.go new file mode 100644 index 000000000..2a283d4e0 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/selector.go @@ -0,0 +1,92 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" +) + +// Selector is a Visitor for resources that match a label selector. +type Selector struct { + Client RESTClient + Mapping *meta.RESTMapping + Namespace string + LabelSelector string + FieldSelector string + LimitChunks int64 +} + +// NewSelector creates a resource selector which hides details of getting items by their label selector. +func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace, labelSelector, fieldSelector string, limitChunks int64) *Selector { + return &Selector{ + Client: client, + Mapping: mapping, + Namespace: namespace, + LabelSelector: labelSelector, + FieldSelector: fieldSelector, + LimitChunks: limitChunks, + } +} + +// Visit implements Visitor and uses request chunking by default. 
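+// LimitChunks sets the page size, and FollowContinue pages through the results
+// until the server stops returning a continue token.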
+func (r *Selector) Visit(fn VisitorFunc) error { + helper := NewHelper(r.Client, r.Mapping) + initialOpts := metav1.ListOptions{ + LabelSelector: r.LabelSelector, + FieldSelector: r.FieldSelector, + Limit: r.LimitChunks, + } + return FollowContinue(&initialOpts, func(options metav1.ListOptions) (runtime.Object, error) { + list, err := helper.List( + r.Namespace, + r.ResourceMapping().GroupVersionKind.GroupVersion().String(), + &options, + ) + if err != nil { + return nil, EnhanceListError(err, options, r.Mapping.Resource.String()) + } + resourceVersion, _ := metadataAccessor.ResourceVersion(list) + + info := &Info{ + Client: r.Client, + Mapping: r.Mapping, + + Namespace: r.Namespace, + ResourceVersion: resourceVersion, + + Object: list, + } + + if err := fn(info, nil); err != nil { + return nil, err + } + return list, nil + }) +} + +func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { + return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), + &metav1.ListOptions{ResourceVersion: resourceVersion, LabelSelector: r.LabelSelector, FieldSelector: r.FieldSelector}) +} + +// ResourceMapping returns the mapping for this resource and implements ResourceMapping +func (r *Selector) ResourceMapping() *meta.RESTMapping { + return r.Mapping +} diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go b/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go new file mode 100644 index 000000000..76cfbbda8 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go @@ -0,0 +1,770 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/transform" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apimachinery/pkg/watch" +) + +const ( + constSTDINstr = "STDIN" + stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" +) + +// Watchable describes a resource that can be watched for changes that occur on the server, +// beginning after the provided resource version. +type Watchable interface { + Watch(resourceVersion string) (watch.Interface, error) +} + +// ResourceMapping allows an object to return the resource mapping associated with +// the resource or resources it represents. +type ResourceMapping interface { + ResourceMapping() *meta.RESTMapping +} + +// Info contains temporary info to execute a REST call, or show the results +// of an already completed REST call. 
+type Info struct { + // Client will only be present if this builder was not local + Client RESTClient + // Mapping will only be present if this builder was not local + Mapping *meta.RESTMapping + + // Namespace will be set if the object is namespaced and has a specified value. + Namespace string + Name string + + // Optional, Source is the filename or URL to template file (.json or .yaml), + // or stdin to use to handle the resource + Source string + // Optional, this is the most recent value returned by the server if available. It will + // typically be in unstructured or internal forms, depending on how the Builder was + // defined. If retrieved from the server, the Builder expects the mapping client to + // decide the final form. Use the AsVersioned, AsUnstructured, and AsInternal helpers + // to alter the object versions. + // If Subresource is specified, this will be the object for the subresource. + Object runtime.Object + // Optional, this is the most recent resource version the server knows about for + // this type of resource. It may not match the resource version of the object, + // but if set it should be equal to or newer than the resource version of the + // object (however the server defines resource version). + ResourceVersion string + // Optional, if specified, the object is the most recent value of the subresource + // returned by the server if available. + Subresource string +} + +// Visit implements Visitor +func (i *Info) Visit(fn VisitorFunc) error { + return fn(i, nil) +} + +// Get retrieves the object from the Namespace and Name fields +func (i *Info) Get() (err error) { + obj, err := NewHelper(i.Client, i.Mapping).WithSubresource(i.Subresource).Get(i.Namespace, i.Name) + if err != nil { + if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != metav1.NamespaceDefault && i.Namespace != metav1.NamespaceAll { + err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do(context.TODO()).Error() + if err2 != nil && errors.IsNotFound(err2) { + return err2 + } + } + return err + } + i.Object = obj + i.ResourceVersion, _ = metadataAccessor.ResourceVersion(obj) + return nil +} + +// Refresh updates the object with another object. If ignoreError is set +// the Object will be updated even if name, namespace, or resourceVersion +// attributes cannot be loaded from the object. +func (i *Info) Refresh(obj runtime.Object, ignoreError bool) error { + name, err := metadataAccessor.Name(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.Name = name + } + namespace, err := metadataAccessor.Namespace(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.Namespace = namespace + } + version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.ResourceVersion = version + } + i.Object = obj + return nil +} + +// ObjectName returns an approximate form of the resource's kind/name. 
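+// For example: "deployments/nginx" when a mapping is present, or
+// "deployment.apps/nginx" when derived from the object's GroupVersionKind.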
+func (i *Info) ObjectName() string { + if i.Mapping != nil { + return fmt.Sprintf("%s/%s", i.Mapping.Resource.Resource, i.Name) + } + gvk := i.Object.GetObjectKind().GroupVersionKind() + if len(gvk.Group) == 0 { + return fmt.Sprintf("%s/%s", strings.ToLower(gvk.Kind), i.Name) + } + return fmt.Sprintf("%s.%s/%s\n", strings.ToLower(gvk.Kind), gvk.Group, i.Name) +} + +// String returns the general purpose string representation +func (i *Info) String() string { + basicInfo := fmt.Sprintf("Name: %q, Namespace: %q", i.Name, i.Namespace) + if i.Mapping != nil { + mappingInfo := fmt.Sprintf("Resource: %q, GroupVersionKind: %q", i.Mapping.Resource.String(), + i.Mapping.GroupVersionKind.String()) + return fmt.Sprint(mappingInfo, "\n", basicInfo) + } + return basicInfo +} + +// Namespaced returns true if the object belongs to a namespace +func (i *Info) Namespaced() bool { + if i.Mapping != nil { + // if we have RESTMapper info, use it + return i.Mapping.Scope.Name() == meta.RESTScopeNameNamespace + } + // otherwise, use the presence of a namespace in the info as an indicator + return len(i.Namespace) > 0 +} + +// Watch returns server changes to this object after it was retrieved. +func (i *Info) Watch(resourceVersion string) (watch.Interface, error) { + return NewHelper(i.Client, i.Mapping).WatchSingle(i.Namespace, i.Name, resourceVersion) +} + +// ResourceMapping returns the mapping for this resource and implements ResourceMapping +func (i *Info) ResourceMapping() *meta.RESTMapping { + return i.Mapping +} + +// VisitorList implements Visit for the sub visitors it contains. The first error +// returned from a child Visitor will terminate iteration. +type VisitorList []Visitor + +// Visit implements Visitor +func (l VisitorList) Visit(fn VisitorFunc) error { + for i := range l { + if err := l[i].Visit(fn); err != nil { + return err + } + } + return nil +} + +type ConcurrentVisitorList struct { + visitors []Visitor + concurrency int +} + +func (l ConcurrentVisitorList) Visit(fn VisitorFunc) error { + g := errgroup.Group{} + + // Concurrency 1 just runs the visitors sequentially, this is the default + // as it preserves the previous behavior, but allows components to opt into + // concurrency. + concurrency := 1 + if l.concurrency > concurrency { + concurrency = l.concurrency + } + g.SetLimit(concurrency) + + for i := range l.visitors { + i := i + g.Go(func() error { + return l.visitors[i].Visit(fn) + }) + } + + return g.Wait() +} + +// EagerVisitorList implements Visit for the sub visitors it contains. All errors +// will be captured and returned at the end of iteration. +type EagerVisitorList []Visitor + +// Visit implements Visitor, and gathers errors that occur during processing until +// all sub visitors have been visited. 
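+// Unlike VisitorList above, which terminates on the first error, every sub
+// visitor is always visited.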
+func (l EagerVisitorList) Visit(fn VisitorFunc) error { + var errs []error + for i := range l { + err := l[i].Visit(func(info *Info, err error) error { + if err != nil { + errs = append(errs, err) + return nil + } + if err := fn(info, nil); err != nil { + errs = append(errs, err) + } + return nil + }) + if err != nil { + errs = append(errs, err) + } + } + return utilerrors.NewAggregate(errs) +} + +func ValidateSchema(data []byte, schema ContentValidator) error { + if schema == nil { + return nil + } + if err := schema.ValidateBytes(data); err != nil { + return fmt.Errorf("error validating data: %v; %s", err, stopValidateMessage) + } + return nil +} + +// URLVisitor downloads the contents of a URL, and if successful, returns +// an info object representing the downloaded object. +type URLVisitor struct { + URL *url.URL + *StreamVisitor + HttpAttemptCount int +} + +func (v *URLVisitor) Visit(fn VisitorFunc) error { + body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount) + if err != nil { + return err + } + defer body.Close() + v.StreamVisitor.Reader = body + return v.StreamVisitor.Visit(fn) +} + +// readHttpWithRetries tries to http.Get the v.URL retries times before giving up. +func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) { + var err error + if attempts <= 0 { + return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts) + } + for i := 0; i < attempts; i++ { + var ( + statusCode int + status string + body io.ReadCloser + ) + if i > 0 { + time.Sleep(duration) + } + + // Try to get the URL + statusCode, status, body, err = get(u) + + // Retry Errors + if err != nil { + continue + } + + if statusCode == http.StatusOK { + return body, nil + } + body.Close() + // Error - Set the error condition from the StatusCode + err = fmt.Errorf("unable to read URL %q, server reported %s, status code=%d", u, status, statusCode) + + if statusCode >= 500 && statusCode < 600 { + // Retry 500's + continue + } else { + // Don't retry other StatusCodes + break + } + } + return nil, err +} + +// httpget Defines function to retrieve a url and return the results. Exists for unit test stubbing. +type httpget func(url string) (int, string, io.ReadCloser, error) + +// httpgetImpl Implements a function to retrieve a url and return the results. +func httpgetImpl(url string) (int, string, io.ReadCloser, error) { + resp, err := http.Get(url) + if err != nil { + return 0, "", nil, err + } + return resp.StatusCode, resp.Status, resp.Body, nil +} + +// DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function +// passed to Visit. An error will terminate the visit. +type DecoratedVisitor struct { + visitor Visitor + decorators []VisitorFunc +} + +// NewDecoratedVisitor will create a visitor that invokes the provided visitor functions before +// the user supplied visitor function is invoked, giving them the opportunity to mutate the Info +// object or terminate early with an error. 
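+//
+// An illustrative sketch (v is a hypothetical Visitor): decorators defined
+// later in this file, such as SetNamespace and RetrieveLazy, match the
+// VisitorFunc signature and are typical arguments:
+//
+//	v = NewDecoratedVisitor(v, SetNamespace("default"), RetrieveLazy)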
+func NewDecoratedVisitor(v Visitor, fn ...VisitorFunc) Visitor { + if len(fn) == 0 { + return v + } + return DecoratedVisitor{v, fn} +} + +// Visit implements Visitor +func (v DecoratedVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + for i := range v.decorators { + if err := v.decorators[i](info, nil); err != nil { + return err + } + } + return fn(info, nil) + }) +} + +// ContinueOnErrorVisitor visits each item and, if an error occurs on +// any individual item, returns an aggregate error after all items +// are visited. +type ContinueOnErrorVisitor struct { + Visitor +} + +// Visit returns nil if no error occurs during traversal, a regular +// error if one occurs, or if multiple errors occur, an aggregate +// error. If the provided visitor fails on any individual item it +// will not prevent the remaining items from being visited. An error +// returned by the visitor directly may still result in some items +// not being visited. +func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error { + var errs []error + err := v.Visitor.Visit(func(info *Info, err error) error { + if err != nil { + errs = append(errs, err) + return nil + } + if err := fn(info, nil); err != nil { + errs = append(errs, err) + } + return nil + }) + if err != nil { + errs = append(errs, err) + } + if len(errs) == 1 { + return errs[0] + } + return utilerrors.NewAggregate(errs) +} + +// FlattenListVisitor flattens any objects that runtime.ExtractList recognizes as a list +// - has an "Items" public field that is a slice of runtime.Objects or objects satisfying +// that interface - into multiple Infos. Returns nil in the case of no errors. +// When an error is hit on sub items (for instance, if a List contains an object that does +// not have a registered client or resource), returns an aggregate error. +type FlattenListVisitor struct { + visitor Visitor + typer runtime.ObjectTyper + mapper *mapper +} + +// NewFlattenListVisitor creates a visitor that will expand list style runtime.Objects +// into individual items and then visit them individually. +func NewFlattenListVisitor(v Visitor, typer runtime.ObjectTyper, mapper *mapper) Visitor { + return FlattenListVisitor{v, typer, mapper} +} + +func (v FlattenListVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + if info.Object == nil { + return fn(info, nil) + } + if !meta.IsListType(info.Object) { + return fn(info, nil) + } + + items := []runtime.Object{} + itemsToProcess := []runtime.Object{info.Object} + + for i := 0; i < len(itemsToProcess); i++ { + currObj := itemsToProcess[i] + if !meta.IsListType(currObj) { + items = append(items, currObj) + continue + } + + currItems, err := meta.ExtractList(currObj) + if err != nil { + return err + } + if errs := runtime.DecodeList(currItems, v.mapper.decoder); len(errs) > 0 { + return utilerrors.NewAggregate(errs) + } + itemsToProcess = append(itemsToProcess, currItems...) 
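+			// Nested lists are pushed back onto the work queue, so arbitrarily
+			// deep List-of-List structures are expanded until only leaf
+			// objects remain in items.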
+ } + + // If we have a GroupVersionKind on the list, prioritize that when asking for info on the objects contained in the list + var preferredGVKs []schema.GroupVersionKind + if info.Mapping != nil && !info.Mapping.GroupVersionKind.Empty() { + preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind) + } + var errs []error + for i := range items { + item, err := v.mapper.infoForObject(items[i], v.typer, preferredGVKs) + if err != nil { + errs = append(errs, err) + continue + } + if len(info.ResourceVersion) != 0 { + item.ResourceVersion = info.ResourceVersion + } + // propagate list source to items source + if len(info.Source) != 0 { + item.Source = info.Source + } + if err := fn(item, nil); err != nil { + errs = append(errs, err) + } + } + return utilerrors.NewAggregate(errs) + }) +} + +func ignoreFile(path string, extensions []string) bool { + if len(extensions) == 0 { + return false + } + ext := filepath.Ext(path) + for _, s := range extensions { + if s == ext { + return false + } + } + return true +} + +// FileVisitorForSTDIN return a special FileVisitor just for STDIN +func FileVisitorForSTDIN(mapper *mapper, schema ContentValidator) Visitor { + return &FileVisitor{ + Path: constSTDINstr, + StreamVisitor: NewStreamVisitor(nil, mapper, constSTDINstr, schema), + } +} + +// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path. +// After FileVisitors open the files, they will pass an io.Reader to a StreamVisitor to do the reading. (stdin +// is also taken care of). Paths argument also accepts a single file, and will return a single visitor +func ExpandPathsToFileVisitors(mapper *mapper, paths string, recursive bool, extensions []string, schema ContentValidator) ([]Visitor, error) { + var visitors []Visitor + err := filepath.Walk(paths, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if fi.IsDir() { + if path != paths && !recursive { + return filepath.SkipDir + } + return nil + } + // Don't check extension if the filepath was passed explicitly + if path != paths && ignoreFile(path, extensions) { + return nil + } + + visitor := &FileVisitor{ + Path: path, + StreamVisitor: NewStreamVisitor(nil, mapper, path, schema), + } + + visitors = append(visitors, visitor) + return nil + }) + + if err != nil { + return nil, err + } + return visitors, nil +} + +// FileVisitor is wrapping around a StreamVisitor, to handle open/close files +type FileVisitor struct { + Path string + *StreamVisitor +} + +// Visit in a FileVisitor is just taking care of opening/closing files +func (v *FileVisitor) Visit(fn VisitorFunc) error { + var f *os.File + if v.Path == constSTDINstr { + f = os.Stdin + } else { + var err error + f, err = os.Open(v.Path) + if err != nil { + return err + } + defer f.Close() + } + + // TODO: Consider adding a flag to force to UTF16, apparently some + // Windows tools don't write the BOM + utf16bom := unicode.BOMOverride(unicode.UTF8.NewDecoder()) + v.StreamVisitor.Reader = transform.NewReader(f, utf16bom) + + return v.StreamVisitor.Visit(fn) +} + +// StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be +// visited once. +// TODO: depends on objects being in JSON format before being passed to decode - need to implement +// a stream decoder method on runtime.Codec to properly handle this. 
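+//
+// For illustration, a multi-document YAML stream such as
+//
+//	apiVersion: v1
+//	kind: ConfigMap
+//	---
+//	apiVersion: v1
+//	kind: Secret
+//
+// produces one decoded object, and therefore one Info, per document.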
+type StreamVisitor struct { + io.Reader + *mapper + + Source string + Schema ContentValidator +} + +// NewStreamVisitor is a helper function that is useful when we want to change the fields of the struct but keep calls the same. +func NewStreamVisitor(r io.Reader, mapper *mapper, source string, schema ContentValidator) *StreamVisitor { + return &StreamVisitor{ + Reader: r, + mapper: mapper, + Source: source, + Schema: schema, + } +} + +// Visit implements Visitor over a stream. StreamVisitor is able to distinct multiple resources in one stream. +func (v *StreamVisitor) Visit(fn VisitorFunc) error { + d := yaml.NewYAMLOrJSONDecoder(v.Reader, 4096) + for { + ext := runtime.RawExtension{} + if err := d.Decode(&ext); err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("error parsing %s: %v", v.Source, err) + } + // TODO: This needs to be able to handle object in other encodings and schemas. + ext.Raw = bytes.TrimSpace(ext.Raw) + if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { + continue + } + if err := ValidateSchema(ext.Raw, v.Schema); err != nil { + return fmt.Errorf("error validating %q: %v", v.Source, err) + } + info, err := v.infoForData(ext.Raw, v.Source) + if err != nil { + if fnErr := fn(info, err); fnErr != nil { + return fnErr + } + continue + } + if err := fn(info, nil); err != nil { + return err + } + } +} + +func UpdateObjectNamespace(info *Info, err error) error { + if err != nil { + return err + } + if info.Object != nil { + return metadataAccessor.SetNamespace(info.Object, info.Namespace) + } + return nil +} + +// FilterNamespace omits the namespace if the object is not namespace scoped +func FilterNamespace(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + info.Namespace = "" + UpdateObjectNamespace(info, nil) + } + return nil +} + +// SetNamespace ensures that every Info object visited will have a namespace +// set. If info.Object is set, it will be mutated as well. +func SetNamespace(namespace string) VisitorFunc { + return func(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + return nil + } + if len(info.Namespace) == 0 { + info.Namespace = namespace + UpdateObjectNamespace(info, nil) + } + return nil + } +} + +// RequireNamespace will either set a namespace if none is provided on the +// Info object, or if the namespace is set and does not match the provided +// value, returns an error. This is intended to guard against administrators +// accidentally operating on resources outside their namespace. +func RequireNamespace(namespace string) VisitorFunc { + return func(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + return nil + } + if len(info.Namespace) == 0 { + info.Namespace = namespace + UpdateObjectNamespace(info, nil) + return nil + } + if info.Namespace != namespace { + return fmt.Errorf("the namespace from the provided object %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", info.Namespace, namespace, info.Namespace) + } + return nil + } +} + +// RetrieveLatest updates the Object on each Info by invoking a standard client +// Get. 
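+// RetrieveLatest matches the VisitorFunc signature, so it can be installed
+// as a decorator via NewDecoratedVisitor.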
+func RetrieveLatest(info *Info, err error) error { + if err != nil { + return err + } + if meta.IsListType(info.Object) { + return fmt.Errorf("watch is only supported on individual resources and resource collections, but a list of resources is found") + } + if len(info.Name) == 0 { + return nil + } + if info.Namespaced() && len(info.Namespace) == 0 { + return fmt.Errorf("no namespace set on resource %s %q", info.Mapping.Resource, info.Name) + } + return info.Get() +} + +// RetrieveLazy updates the object if it has not been loaded yet. +func RetrieveLazy(info *Info, err error) error { + if err != nil { + return err + } + if info.Object == nil { + return info.Get() + } + return nil +} + +type FilterFunc func(info *Info, err error) (bool, error) + +type FilteredVisitor struct { + visitor Visitor + filters []FilterFunc +} + +func NewFilteredVisitor(v Visitor, fn ...FilterFunc) Visitor { + if len(fn) == 0 { + return v + } + return FilteredVisitor{v, fn} +} + +func (v FilteredVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + for _, filter := range v.filters { + ok, err := filter(info, nil) + if err != nil { + return err + } + if !ok { + return nil + } + } + return fn(info, nil) + }) +} + +func FilterByLabelSelector(s labels.Selector) FilterFunc { + return func(info *Info, err error) (bool, error) { + if err != nil { + return false, err + } + a, err := meta.Accessor(info.Object) + if err != nil { + return false, err + } + if !s.Matches(labels.Set(a.GetLabels())) { + return false, nil + } + return true, nil + } +} + +type InfoListVisitor []*Info + +func (infos InfoListVisitor) Visit(fn VisitorFunc) error { + var err error + for _, i := range infos { + err = fn(i, err) + } + return err +} diff --git a/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go new file mode 100644 index 000000000..158810ee5 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go @@ -0,0 +1,325 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package disk + +import ( + "errors" + "io" + "net/http" + "os" + "path/filepath" + "sync" + "time" + + openapi_v2 "github.com/google/gnostic-models/openapiv2" + "k8s.io/klog/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/openapi" + cachedopenapi "k8s.io/client-go/openapi/cached" + restclient "k8s.io/client-go/rest" +) + +// CachedDiscoveryClient implements the functions that discovery server-supported API groups, +// versions and resources. +type CachedDiscoveryClient struct { + delegate discovery.DiscoveryInterface + + // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. 
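+	// A hypothetical example value: ~/.kube/cache/discovery/example.com_6443.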
+ cacheDirectory string + + // ttl is how long the cache should be considered valid + ttl time.Duration + + // mutex protects the variables below + mutex sync.Mutex + + // ourFiles are all filenames of cache files created by this process + ourFiles map[string]struct{} + // invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called) + invalidated bool + // fresh is true if all used cache files were ours + fresh bool + + // caching openapi v3 client which wraps the delegate's client + openapiClient openapi.Client +} + +var _ discovery.CachedDiscoveryInterface = &CachedDiscoveryClient{} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") + cachedBytes, err := d.getCachedFile(filename) + // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. + if err == nil { + cachedResources := &metav1.APIResourceList{} + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { + klog.V(10).Infof("returning cached discovery info from %v", filename) + return cachedResources, nil + } + } + + liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + klog.V(3).Infof("skipped caching discovery info due to %v", err) + return liveResources, err + } + if liveResources == nil || len(liveResources.APIResources) == 0 { + klog.V(3).Infof("skipped caching discovery info, no resources found") + return liveResources, err + } + + if err := d.writeCachedFile(filename, liveResources); err != nil { + klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + } + + return liveResources, nil +} + +// ServerGroupsAndResources returns the supported groups and resources for all groups and versions. +func (d *CachedDiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return discovery.ServerGroupsAndResources(d) +} + +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. +func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { + filename := filepath.Join(d.cacheDirectory, "servergroups.json") + cachedBytes, err := d.getCachedFile(filename) + // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
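+	// A stale or unreadable cache entry simply falls through to the live
+	// ServerGroups call below.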
+ if err == nil { + cachedGroups := &metav1.APIGroupList{} + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { + klog.V(10).Infof("returning cached discovery info from %v", filename) + return cachedGroups, nil + } + } + + liveGroups, err := d.delegate.ServerGroups() + if err != nil { + klog.V(3).Infof("skipped caching discovery info due to %v", err) + return liveGroups, err + } + if liveGroups == nil || len(liveGroups.Groups) == 0 { + klog.V(3).Infof("skipped caching discovery info, no groups found") + return liveGroups, err + } + + if err := d.writeCachedFile(filename, liveGroups); err != nil { + klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + } + + return liveGroups, nil +} + +func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { + // after invalidation ignore cache files not created by this process + d.mutex.Lock() + _, ourFile := d.ourFiles[filename] + if d.invalidated && !ourFile { + d.mutex.Unlock() + return nil, errors.New("cache invalidated") + } + d.mutex.Unlock() + + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return nil, err + } + + if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { + return nil, errors.New("cache expired") + } + + // the cache is present and its valid. Try to read and use it. + cachedBytes, err := io.ReadAll(file) + if err != nil { + return nil, err + } + + d.mutex.Lock() + defer d.mutex.Unlock() + d.fresh = d.fresh && ourFile + + return cachedBytes, nil +} + +func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { + if err := os.MkdirAll(filepath.Dir(filename), 0750); err != nil { + return err + } + + bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) + if err != nil { + return err + } + + f, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)+".") + if err != nil { + return err + } + defer os.Remove(f.Name()) + _, err = f.Write(bytes) + if err != nil { + return err + } + + err = os.Chmod(f.Name(), 0660) + if err != nil { + return err + } + + name := f.Name() + err = f.Close() + if err != nil { + return err + } + + // atomic rename + d.mutex.Lock() + defer d.mutex.Unlock() + err = os.Rename(name, filename) + if err == nil { + d.ourFiles[filename] = struct{}{} + } + return err +} + +// RESTClient returns a RESTClient that is used to communicate with API server +// by this client implementation. +func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +// ServerPreferredResources returns the supported resources with the version preferred by the +// server. +func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredResources(d) +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources with the +// version preferred by the server. +func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredNamespacedResources(d) +} + +// ServerVersion retrieves and parses the server's version (git version). +func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +// OpenAPISchema retrieves and parses the swagger API schema the server supports. 
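+// Unlike the discovery documents above, the v2 schema is not cached on disk;
+// the call is delegated directly.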
+func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +// OpenAPIV3 retrieves and parses the OpenAPIV3 specs exposed by the server +func (d *CachedDiscoveryClient) OpenAPIV3() openapi.Client { + // Must take lock since Invalidate call may modify openapiClient + d.mutex.Lock() + defer d.mutex.Unlock() + + if d.openapiClient == nil { + // Delegate is discovery client created with special HTTP client which + // respects E-Tag cache responses to serve cache from disk. + d.openapiClient = cachedopenapi.NewClient(d.delegate.OpenAPIV3()) + } + + return d.openapiClient +} + +// Fresh is supposed to tell the caller whether or not to retry if the cache +// fails to find something (false = retry, true = no need to retry). +func (d *CachedDiscoveryClient) Fresh() bool { + d.mutex.Lock() + defer d.mutex.Unlock() + + return d.fresh +} + +// Invalidate enforces that no cached data is used in the future that is older than the current time. +func (d *CachedDiscoveryClient) Invalidate() { + d.mutex.Lock() + defer d.mutex.Unlock() + + d.ourFiles = map[string]struct{}{} + d.fresh = true + d.invalidated = true + d.openapiClient = nil + if ad, ok := d.delegate.(discovery.CachedDiscoveryInterface); ok { + ad.Invalidate() + } +} + +// WithLegacy returns current cached discovery client; +// current client does not support legacy-only discovery. +func (d *CachedDiscoveryClient) WithLegacy() discovery.DiscoveryInterface { + return d +} + +// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps +// the created client in a CachedDiscoveryClient. The provided configuration is updated with a +// custom transport that understands cache responses. +// We receive two distinct cache directories for now, in order to preserve old behavior +// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper, +// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing +// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not +// be updated with a roundtripper that understands cache responses. +// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory. +func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) { + if len(httpCacheDir) > 0 { + // update the given restconfig with a custom roundtripper that + // understands how to handle cache responses. + config = restclient.CopyConfig(config) + config.Wrap(func(rt http.RoundTripper) http.RoundTripper { + return newCacheRoundTripper(httpCacheDir, rt) + }) + } + + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + + // The delegate caches the discovery groups and resources (memcache). "ServerGroups", + // which usually only returns (and caches) the groups, can now store the resources as + // well if the server supports the newer aggregated discovery format. + return newCachedDiscoveryClient(memory.NewMemCacheClient(discoveryClient), discoveryCacheDir, ttl), nil +} + +// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. 
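+// The constructor below is unexported; external callers are expected to go
+// through NewCachedDiscoveryClientForConfig above.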
+func newCachedDiscoveryClient(delegate discovery.DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { + return &CachedDiscoveryClient{ + delegate: delegate, + cacheDirectory: cacheDirectory, + ttl: ttl, + ourFiles: map[string]struct{}{}, + fresh: true, + } +} diff --git a/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go b/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go new file mode 100644 index 000000000..f3a4b2947 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go @@ -0,0 +1,120 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package disk + +import ( + "bytes" + "crypto/sha256" + "fmt" + "net/http" + "os" + "path/filepath" + + "github.com/gregjones/httpcache" + "github.com/peterbourgon/diskv" + "k8s.io/klog/v2" +) + +type cacheRoundTripper struct { + rt *httpcache.Transport +} + +// newCacheRoundTripper creates a roundtripper that reads the ETag on +// response headers and send the If-None-Match header on subsequent +// corresponding requests. +func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { + d := diskv.New(diskv.Options{ + PathPerm: os.FileMode(0750), + FilePerm: os.FileMode(0660), + BasePath: cacheDir, + TempDir: filepath.Join(cacheDir, ".diskv-temp"), + }) + t := httpcache.NewTransport(&sumDiskCache{disk: d}) + t.Transport = rt + + return &cacheRoundTripper{rt: t} +} + +func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.rt.RoundTrip(req) +} + +func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := rt.rt.Transport.(canceler); ok { + cr.CancelRequest(req) + } else { + klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) + } +} + +func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } + +// A sumDiskCache is a cache backend for github.com/gregjones/httpcache. It is +// similar to httpcache's diskcache package, but uses SHA256 sums to ensure +// cache integrity at read time rather than fsyncing each cache entry to +// increase the likelihood they will be persisted at write time. This avoids +// significant performance degradation on MacOS. +// +// See https://github.com/kubernetes/kubernetes/issues/110753 for more. +type sumDiskCache struct { + disk *diskv.Diskv +} + +// Get the requested key from the cache on disk. If Get encounters an error, or +// the returned value is not a SHA256 sum followed by bytes with a matching +// checksum it will return false to indicate a cache miss. +func (c *sumDiskCache) Get(key string) ([]byte, bool) { + b, err := c.disk.Read(sanitize(key)) + if err != nil || len(b) < sha256.Size { + return []byte{}, false + } + + response := b[sha256.Size:] + want := b[:sha256.Size] // The first 32 bytes of the file should be the SHA256 sum. 
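+	// Per Set below, the on-disk layout is: SHA256(response) || response.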
+ got := sha256.Sum256(response) + if !bytes.Equal(want, got[:]) { + return []byte{}, false + } + + return response, true +} + +// Set writes the response to a file on disk. The filename will be the SHA256 +// sum of the key. The file will contain a SHA256 sum of the response bytes, +// followed by said response bytes. +func (c *sumDiskCache) Set(key string, response []byte) { + s := sha256.Sum256(response) + _ = c.disk.Write(sanitize(key), append(s[:], response...)) // Nothing we can do with this error. +} + +func (c *sumDiskCache) Delete(key string) { + _ = c.disk.Erase(sanitize(key)) // Nothing we can do with this error. +} + +// Sanitize an httpcache key such that it can be used as a diskv key, which must +// be a valid filename. The httpcache key will either be the requested URL (if +// the request method was GET) or " " for other methods, per the +// httpcache.cacheKey function. +func sanitize(key string) string { + // These keys are not sensitive. We use sha256 to avoid a (potentially + // malicious) collision causing the wrong cache data to be written or + // accessed. + return fmt.Sprintf("%x", sha256.Sum256([]byte(key))) +} diff --git a/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go new file mode 100644 index 000000000..3829b3cc0 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go @@ -0,0 +1,332 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memory + +import ( + "errors" + "fmt" + "sync" + "syscall" + + openapi_v2 "github.com/google/gnostic-models/openapiv2" + + errorsutil "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + "k8s.io/client-go/openapi" + cachedopenapi "k8s.io/client-go/openapi/cached" + restclient "k8s.io/client-go/rest" + "k8s.io/klog/v2" +) + +type cacheEntry struct { + resourceList *metav1.APIResourceList + err error +} + +// memCacheClient can Invalidate() to stay up-to-date with discovery +// information. +// +// TODO: Switch to a watch interface. Right now it will poll after each +// Invalidate() call. +type memCacheClient struct { + delegate discovery.DiscoveryInterface + + lock sync.RWMutex + groupToServerResources map[string]*cacheEntry + groupList *metav1.APIGroupList + cacheValid bool + openapiClient openapi.Client + receivedAggregatedDiscovery bool +} + +// Error Constants +var ( + ErrCacheNotFound = errors.New("not found") +) + +// Server returning empty ResourceList for Group/Version. 
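+// An illustrative message: "received empty response for: external.metrics.k8s.io/v1beta1".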
+type emptyResponseError struct { + gv string +} + +func (e *emptyResponseError) Error() string { + return fmt.Sprintf("received empty response for: %s", e.gv) +} + +var _ discovery.CachedDiscoveryInterface = &memCacheClient{} + +// isTransientConnectionError checks whether given error is "Connection refused" or +// "Connection reset" error which usually means that apiserver is temporarily +// unavailable. +func isTransientConnectionError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == syscall.ECONNREFUSED || errno == syscall.ECONNRESET + } + return false +} + +func isTransientError(err error) bool { + if isTransientConnectionError(err) { + return true + } + + if t, ok := err.(errorsutil.APIStatus); ok && t.Status().Code >= 500 { + return true + } + + return errorsutil.IsTooManyRequests(err) +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *memCacheClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + d.lock.Lock() + defer d.lock.Unlock() + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, err + } + } + cachedVal, ok := d.groupToServerResources[groupVersion] + if !ok { + return nil, ErrCacheNotFound + } + + if cachedVal.err != nil && isTransientError(cachedVal.err) { + r, err := d.serverResourcesForGroupVersion(groupVersion) + if err != nil { + // Don't log "empty response" as an error; it is a common response for metrics. + if _, emptyErr := err.(*emptyResponseError); emptyErr { + // Log at same verbosity as disk cache. + klog.V(3).Infof("%v", err) + } else { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", groupVersion, err)) + } + } + cachedVal = &cacheEntry{r, err} + d.groupToServerResources[groupVersion] = cachedVal + } + + return cachedVal.resourceList, cachedVal.err +} + +// ServerGroupsAndResources returns the groups and supported resources for all groups and versions. +func (d *memCacheClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return discovery.ServerGroupsAndResources(d) +} + +// GroupsAndMaybeResources returns the list of APIGroups, and possibly the map of group/version +// to resources. The returned groups will never be nil, but the resources map can be nil +// if there are no cached resources. +func (d *memCacheClient) GroupsAndMaybeResources() (*metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) { + d.lock.Lock() + defer d.lock.Unlock() + + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, nil, nil, err + } + } + // Build the resourceList from the cache? 
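+	// Resources and failed GroupVersions are populated only when they were
+	// obtained via aggregated discovery; otherwise callers fall back to
+	// per-GroupVersion requests.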
+ var resourcesMap map[schema.GroupVersion]*metav1.APIResourceList + var failedGVs map[schema.GroupVersion]error + if d.receivedAggregatedDiscovery && len(d.groupToServerResources) > 0 { + resourcesMap = map[schema.GroupVersion]*metav1.APIResourceList{} + failedGVs = map[schema.GroupVersion]error{} + for gv, cacheEntry := range d.groupToServerResources { + groupVersion, err := schema.ParseGroupVersion(gv) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to parse group version (%v): %v", gv, err) + } + if cacheEntry.err != nil { + failedGVs[groupVersion] = cacheEntry.err + } else { + resourcesMap[groupVersion] = cacheEntry.resourceList + } + } + } + return d.groupList, resourcesMap, failedGVs, nil +} + +func (d *memCacheClient) ServerGroups() (*metav1.APIGroupList, error) { + groups, _, _, err := d.GroupsAndMaybeResources() + if err != nil { + return nil, err + } + return groups, nil +} + +func (d *memCacheClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +func (d *memCacheClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredResources(d) +} + +func (d *memCacheClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredNamespacedResources(d) +} + +func (d *memCacheClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +func (d *memCacheClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +func (d *memCacheClient) OpenAPIV3() openapi.Client { + // Must take lock since Invalidate call may modify openapiClient + d.lock.Lock() + defer d.lock.Unlock() + + if d.openapiClient == nil { + d.openapiClient = cachedopenapi.NewClient(d.delegate.OpenAPIV3()) + } + + return d.openapiClient +} + +func (d *memCacheClient) Fresh() bool { + d.lock.RLock() + defer d.lock.RUnlock() + // Return whether the cache is populated at all. It is still possible that + // a single entry is missing due to transient errors and the attempt to read + // that entry will trigger retry. + return d.cacheValid +} + +// Invalidate enforces that no cached data that is older than the current time +// is used. +func (d *memCacheClient) Invalidate() { + d.lock.Lock() + defer d.lock.Unlock() + d.cacheValid = false + d.groupToServerResources = nil + d.groupList = nil + d.openapiClient = nil + d.receivedAggregatedDiscovery = false + if ad, ok := d.delegate.(discovery.CachedDiscoveryInterface); ok { + ad.Invalidate() + } +} + +// refreshLocked refreshes the state of cache. The caller must hold d.lock for +// writing. +func (d *memCacheClient) refreshLocked() error { + // TODO: Could this multiplicative set of calls be replaced by a single call + // to ServerResources? If it's possible for more than one resulting + // APIResourceList to have the same GroupVersion, the lists would need merged. + var gl *metav1.APIGroupList + var err error + + if ad, ok := d.delegate.(discovery.AggregatedDiscoveryInterface); ok { + var resources map[schema.GroupVersion]*metav1.APIResourceList + var failedGVs map[schema.GroupVersion]error + gl, resources, failedGVs, err = ad.GroupsAndMaybeResources() + if resources != nil && err == nil { + // Cache the resources. 
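+			// On this aggregated-discovery path a single request fills the
+			// whole cache; the per-GroupVersion fan-out below is the legacy
+			// fallback.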
+ d.groupToServerResources = map[string]*cacheEntry{} + d.groupList = gl + for gv, resources := range resources { + d.groupToServerResources[gv.String()] = &cacheEntry{resources, nil} + } + // Cache GroupVersion discovery errors + for gv, err := range failedGVs { + d.groupToServerResources[gv.String()] = &cacheEntry{nil, err} + } + d.receivedAggregatedDiscovery = true + d.cacheValid = true + return nil + } + } else { + gl, err = d.delegate.ServerGroups() + } + if err != nil || len(gl.Groups) == 0 { + utilruntime.HandleError(fmt.Errorf("couldn't get current server API group list: %v", err)) + return err + } + + wg := &sync.WaitGroup{} + resultLock := &sync.Mutex{} + rl := map[string]*cacheEntry{} + for _, g := range gl.Groups { + for _, v := range g.Versions { + gv := v.GroupVersion + wg.Add(1) + go func() { + defer wg.Done() + defer utilruntime.HandleCrash() + + r, err := d.serverResourcesForGroupVersion(gv) + if err != nil { + // Don't log "empty response" as an error; it is a common response for metrics. + if _, emptyErr := err.(*emptyResponseError); emptyErr { + // Log at same verbosity as disk cache. + klog.V(3).Infof("%v", err) + } else { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", gv, err)) + } + } + + resultLock.Lock() + defer resultLock.Unlock() + rl[gv] = &cacheEntry{r, err} + }() + } + } + wg.Wait() + + d.groupToServerResources, d.groupList = rl, gl + d.cacheValid = true + return nil +} + +func (d *memCacheClient) serverResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + r, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + return r, err + } + if len(r.APIResources) == 0 { + return r, &emptyResponseError{gv: groupVersion} + } + return r, nil +} + +// WithLegacy returns current memory-cached discovery client; +// current client does not support legacy-only discovery. +func (d *memCacheClient) WithLegacy() discovery.DiscoveryInterface { + return d +} + +// NewMemCacheClient creates a new CachedDiscoveryInterface which caches +// discovery information in memory and will stay up-to-date if Invalidate is +// called with regularity. +// +// NOTE: The client will NOT resort to live lookups on cache misses. +func NewMemCacheClient(delegate discovery.DiscoveryInterface) discovery.CachedDiscoveryInterface { + return &memCacheClient{ + delegate: delegate, + groupToServerResources: map[string]*cacheEntry{}, + receivedAggregatedDiscovery: false, + } +} diff --git a/vendor/k8s.io/client-go/openapi/cached/client.go b/vendor/k8s.io/client-go/openapi/cached/client.go new file mode 100644 index 000000000..17f63ed26 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/cached/client.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cached + +import ( + "sync" + + "k8s.io/client-go/openapi" +) + +type client struct { + delegate openapi.Client + + once sync.Once + result map[string]openapi.GroupVersion + err error +} + +func NewClient(other openapi.Client) openapi.Client { + return &client{ + delegate: other, + } +} + +func (c *client) Paths() (map[string]openapi.GroupVersion, error) { + c.once.Do(func() { + uncached, err := c.delegate.Paths() + if err != nil { + c.err = err + return + } + + result := make(map[string]openapi.GroupVersion, len(uncached)) + for k, v := range uncached { + result[k] = newGroupVersion(v) + } + c.result = result + }) + return c.result, c.err +} diff --git a/vendor/k8s.io/client-go/openapi/cached/groupversion.go b/vendor/k8s.io/client-go/openapi/cached/groupversion.go new file mode 100644 index 000000000..65a4189f7 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/cached/groupversion.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cached + +import ( + "sync" + + "k8s.io/client-go/openapi" +) + +type groupversion struct { + delegate openapi.GroupVersion + + lock sync.Mutex + docs map[string]docInfo +} + +type docInfo struct { + data []byte + err error +} + +func newGroupVersion(delegate openapi.GroupVersion) *groupversion { + return &groupversion{ + delegate: delegate, + } +} + +func (g *groupversion) Schema(contentType string) ([]byte, error) { + g.lock.Lock() + defer g.lock.Unlock() + + cachedInfo, ok := g.docs[contentType] + if !ok { + if g.docs == nil { + g.docs = make(map[string]docInfo) + } + + cachedInfo.data, cachedInfo.err = g.delegate.Schema(contentType) + g.docs[contentType] = cachedInfo + } + + return cachedInfo.data, cachedInfo.err +} diff --git a/vendor/k8s.io/client-go/openapi3/root.go b/vendor/k8s.io/client-go/openapi3/root.go new file mode 100644 index 000000000..4333e8628 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi3/root.go @@ -0,0 +1,182 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi3 + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/openapi" + "k8s.io/kube-openapi/pkg/spec3" +) + +// Root interface defines functions implemented against the root +// OpenAPI V3 document. The root OpenAPI V3 document maps the +// API Server relative url for all GroupVersions to the relative +// url for the OpenAPI relative url. 
Example for single GroupVersion +// apps/v1: +// +// "apis/apps/v1": { +// "ServerRelativeURL": "/openapi/v3/apis/apps/v1?hash=" +// } +type Root interface { + // GroupVersions returns every GroupVersion for which there is an + // OpenAPI V3 GroupVersion document. Returns an error for problems + // retrieving or parsing the OpenAPI V3 root document. + GroupVersions() ([]schema.GroupVersion, error) + // GVSpec returns the specification for all the resources in a + // GroupVersion as a pointer to a spec3.OpenAPI struct. + // Returns an error for problems retrieving or parsing the root + // document or GroupVersion OpenAPI V3 document. + GVSpec(gv schema.GroupVersion) (*spec3.OpenAPI, error) + // GVSpecAsMap returns the specification for all the resources in a + // GroupVersion as unstructured bytes. Returns an error for + // problems retrieving or parsing the root or GroupVersion + // OpenAPI V3 document. + GVSpecAsMap(gv schema.GroupVersion) (map[string]interface{}, error) +} + +// root implements the Root interface, and encapsulates the +// fields to retrieve, store the parsed OpenAPI V3 root document. +type root struct { + // OpenAPI client to retrieve the OpenAPI V3 documents. + client openapi.Client +} + +// Validate root implements the Root interface. +var _ Root = &root{} + +// NewRoot returns a structure implementing the Root interface, +// created with the passed rest client. +func NewRoot(client openapi.Client) Root { + return &root{client: client} +} + +func (r *root) GroupVersions() ([]schema.GroupVersion, error) { + paths, err := r.client.Paths() + if err != nil { + return nil, err + } + // Example GroupVersion API path: "apis/apps/v1" + gvs := make([]schema.GroupVersion, 0, len(paths)) + for gvAPIPath := range paths { + gv, err := pathToGroupVersion(gvAPIPath) + if err != nil { + // Ignore paths which do not parse to GroupVersion + continue + } + gvs = append(gvs, gv) + } + // Sort GroupVersions alphabetically + sort.Slice(gvs, func(i, j int) bool { + return gvs[i].String() < gvs[j].String() + }) + return gvs, nil +} + +func (r *root) GVSpec(gv schema.GroupVersion) (*spec3.OpenAPI, error) { + openAPISchemaBytes, err := r.retrieveGVBytes(gv) + if err != nil { + return nil, err + } + // Unmarshal the downloaded Group/Version bytes into the spec3.OpenAPI struct. + var parsedV3Schema spec3.OpenAPI + err = json.Unmarshal(openAPISchemaBytes, &parsedV3Schema) + return &parsedV3Schema, err +} + +func (r *root) GVSpecAsMap(gv schema.GroupVersion) (map[string]interface{}, error) { + gvOpenAPIBytes, err := r.retrieveGVBytes(gv) + if err != nil { + return nil, err + } + // GroupVersion bytes into unstructured map[string] -> empty interface. + var gvMap map[string]interface{} + err = json.Unmarshal(gvOpenAPIBytes, &gvMap) + return gvMap, err +} + +// retrieveGVBytes returns the schema for a passed GroupVersion as an +// unstructured slice of bytes or an error if there is a problem downloading +// or if the passed GroupVersion is not supported. +func (r *root) retrieveGVBytes(gv schema.GroupVersion) ([]byte, error) { + paths, err := r.client.Paths() + if err != nil { + return nil, err + } + apiPath := gvToAPIPath(gv) + gvOpenAPI, found := paths[apiPath] + if !found { + return nil, &GroupVersionNotFoundError{gv: gv} + } + return gvOpenAPI.Schema(runtime.ContentTypeJSON) +} + +// gvToAPIPath maps the passed GroupVersion to a relative api +// server url. Example: +// +// GroupVersion{Group: "apps", Version: "v1"} -> "apis/apps/v1". 
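+//
+// GroupVersion{Group: "", Version: "v1"} -> "api/v1".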
+func gvToAPIPath(gv schema.GroupVersion) string { + var resourcePath string + if len(gv.Group) == 0 { + resourcePath = fmt.Sprintf("api/%s", gv.Version) + } else { + resourcePath = fmt.Sprintf("apis/%s/%s", gv.Group, gv.Version) + } + return resourcePath +} + +// pathToGroupVersion is a helper function parsing the passed relative +// url into a GroupVersion. +// +// Example: apis/apps/v1 -> GroupVersion{Group: "apps", Version: "v1"} +// Example: api/v1 -> GroupVersion{Group: "", Version: "v1"} +func pathToGroupVersion(path string) (schema.GroupVersion, error) { + var gv schema.GroupVersion + parts := strings.Split(path, "/") + if len(parts) < 2 { + return gv, fmt.Errorf("Unable to parse api relative path: %s", path) + } + apiPrefix := parts[0] + if apiPrefix == "apis" { + // Example: apis/apps (without version) + if len(parts) < 3 { + return gv, fmt.Errorf("Group without Version not allowed") + } + gv.Group = parts[1] + gv.Version = parts[2] + } else if apiPrefix == "api" { + gv.Version = parts[1] + } else { + return gv, fmt.Errorf("Unable to parse api relative path: %s", path) + } + return gv, nil +} + +// Encapsulates GroupVersion not found as one of the paths +// at OpenAPI V3 endpoint. +type GroupVersionNotFoundError struct { + gv schema.GroupVersion +} + +func (r *GroupVersionNotFoundError) Error() string { + return fmt.Sprintf("GroupVersion (%v) not found as OpenAPI V3 path", r.gv) +} diff --git a/vendor/k8s.io/client-go/scale/client.go b/vendor/k8s.io/client-go/scale/client.go new file mode 100644 index 000000000..1306b37d9 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/client.go @@ -0,0 +1,238 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scale + +import ( + "context" + "fmt" + + autoscaling "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + restclient "k8s.io/client-go/rest" +) + +var scaleConverter = NewScaleConverter() +var codecs = serializer.NewCodecFactory(scaleConverter.Scheme()) +var parameterScheme = runtime.NewScheme() +var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) + +var versionV1 = schema.GroupVersion{Version: "v1"} + +func init() { + metav1.AddToGroupVersion(parameterScheme, versionV1) +} + +// scaleClient is an implementation of ScalesGetter +// which makes use of a RESTMapper and a generic REST +// client to support an discoverable resource. +// It behaves somewhat similarly to the dynamic ClientPool, +// but is more specifically scoped to Scale. 
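+//
+// An illustrative sketch (sg, ctx and gr are hypothetical names; sg is a
+// ScalesGetter built via NewForConfig below):
+//
+//	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
+//	s, err := sg.Scales("default").Get(ctx, gr, "nginx", metav1.GetOptions{})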
+type scaleClient struct { + mapper PreferredResourceMapper + + apiPathResolverFunc dynamic.APIPathResolverFunc + scaleKindResolver ScaleKindResolver + clientBase restclient.Interface +} + +// NewForConfig creates a new ScalesGetter which resolves kinds +// to resources using the given RESTMapper, and API paths using +// the given dynamic.APIPathResolverFunc. +func NewForConfig(cfg *restclient.Config, mapper PreferredResourceMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) (ScalesGetter, error) { + // so that the RESTClientFor doesn't complain + cfg.GroupVersion = &schema.GroupVersion{} + + cfg.NegotiatedSerializer = codecs.WithoutConversion() + if len(cfg.UserAgent) == 0 { + cfg.UserAgent = restclient.DefaultKubernetesUserAgent() + } + + client, err := restclient.RESTClientFor(cfg) + if err != nil { + return nil, err + } + + return New(client, mapper, resolver, scaleKindResolver), nil +} + +// New creates a new ScalesGetter using the given client to make requests. +// The GroupVersion on the client is ignored. +func New(baseClient restclient.Interface, mapper PreferredResourceMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) ScalesGetter { + return &scaleClient{ + mapper: mapper, + + apiPathResolverFunc: resolver, + scaleKindResolver: scaleKindResolver, + clientBase: baseClient, + } +} + +// apiPathFor returns the absolute api path for the given GroupVersion +func (c *scaleClient) apiPathFor(groupVer schema.GroupVersion) string { + // we need to set the API path based on GroupVersion (defaulting to the legacy path if none is set) + // TODO: we "cheat" here since the API path really only depends on group ATM, but this should + // *probably* take GroupVersionResource and not GroupVersionKind. + apiPath := c.apiPathResolverFunc(groupVer.WithKind("")) + if apiPath == "" { + apiPath = "/api" + } + + return restclient.DefaultVersionedAPIPath(apiPath, groupVer) +} + +// pathAndVersionFor returns the appropriate base path and the associated full GroupVersionResource +// for the given GroupResource +func (c *scaleClient) pathAndVersionFor(resource schema.GroupResource) (string, schema.GroupVersionResource, error) { + gvr, err := c.mapper.ResourceFor(resource.WithVersion("")) + if err != nil { + return "", gvr, fmt.Errorf("unable to get full preferred group-version-resource for %s: %v", resource.String(), err) + } + + groupVer := gvr.GroupVersion() + + return c.apiPathFor(groupVer), gvr, nil +} + +// namespacedScaleClient is an ScaleInterface for fetching +// Scales in a given namespace. +type namespacedScaleClient struct { + client *scaleClient + namespace string +} + +// convertToScale converts the response body to autoscaling/v1.Scale +func convertToScale(result *restclient.Result) (*autoscaling.Scale, error) { + scaleBytes, err := result.Raw() + if err != nil { + return nil, err + } + decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) 
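+	// The universal decoder accepts any registered external Scale version
+	// (e.g. autoscaling/v1 or extensions/v1beta1), so the raw bytes can be
+	// decoded without knowing in advance which version the endpoint returned.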
+ rawScaleObj, err := runtime.Decode(decoder, scaleBytes) + if err != nil { + return nil, err + } + + // convert whatever this is to autoscaling/v1.Scale + scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) + if err != nil { + return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) + } + + return scaleObj.(*autoscaling.Scale), nil +} + +func (c *scaleClient) Scales(namespace string) ScaleInterface { + return &namespacedScaleClient{ + client: c, + namespace: namespace, + } +} + +func (c *namespacedScaleClient) Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscaling.Scale, error) { + // Currently, a /scale endpoint can return different scale types. + // Until we have support for the alternative API representations proposal, + // we need to deal with accepting different API versions. + // In practice, this is autoscaling/v1.Scale and extensions/v1beta1.Scale + + path, gvr, err := c.client.pathAndVersionFor(resource) + if err != nil { + return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err) + } + + result := c.client.clientBase.Get(). + AbsPath(path). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(gvr.Resource). + Name(name). + SubResource("scale"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + + return convertToScale(&result) +} + +func (c *namespacedScaleClient) Update(ctx context.Context, resource schema.GroupResource, scale *autoscaling.Scale, opts metav1.UpdateOptions) (*autoscaling.Scale, error) { + path, gvr, err := c.client.pathAndVersionFor(resource) + if err != nil { + return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err) + } + + // Currently, a /scale endpoint can receive and return different scale types. + // Until we have support for the alternative API representations proposal, + // we need to deal with sending and accepting different API versions. + + // figure out what scale we actually need here + desiredGVK, err := c.client.scaleKindResolver.ScaleForResource(gvr) + if err != nil { + return nil, fmt.Errorf("could not find proper group-version for scale subresource of %s: %v", gvr.String(), err) + } + + // convert this to whatever this endpoint wants + scaleUpdate, err := scaleConverter.ConvertToVersion(scale, desiredGVK.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("could not convert scale update to external Scale: %v", err) + } + encoder := scaleConverter.codecs.LegacyCodec(desiredGVK.GroupVersion()) + scaleUpdateBytes, err := runtime.Encode(encoder, scaleUpdate) + if err != nil { + return nil, fmt.Errorf("could not encode scale update to external Scale: %v", err) + } + + result := c.client.clientBase.Put(). + AbsPath(path). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(gvr.Resource). + Name(scale.Name). + SubResource("scale"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Body(scaleUpdateBytes). 
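+		// The UpdateOptions are encoded as meta/v1 ("v1") query parameters,
+		// while the body carries the scale object serialized in the
+		// group-version this endpoint expects (desiredGVK above).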
+ Do(ctx) + if err := result.Error(); err != nil { + // propagate "raw" error from the API + // this allows callers to interpret underlying Reason field + // for example: errors.IsConflict(err) + return nil, err + } + + return convertToScale(&result) +} + +func (c *namespacedScaleClient) Patch(ctx context.Context, gvr schema.GroupVersionResource, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*autoscaling.Scale, error) { + groupVersion := gvr.GroupVersion() + result := c.client.clientBase.Patch(pt). + AbsPath(c.client.apiPathFor(groupVersion)). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(gvr.Resource). + Name(name). + SubResource("scale"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Body(data). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + + return convertToScale(&result) +} diff --git a/vendor/k8s.io/client-go/scale/doc.go b/vendor/k8s.io/client-go/scale/doc.go new file mode 100644 index 000000000..b6fa3f5f2 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package scale provides a polymorphic scale client capable of fetching +// and updating Scale for any resource which implements the `scale` subresource, +// as long as that subresource operates on a version of scale convertable to +// autoscaling.Scale. +package scale // import "k8s.io/client-go/scale" diff --git a/vendor/k8s.io/client-go/scale/interfaces.go b/vendor/k8s.io/client-go/scale/interfaces.go new file mode 100644 index 000000000..a7bb3e6cb --- /dev/null +++ b/vendor/k8s.io/client-go/scale/interfaces.go @@ -0,0 +1,47 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scale + +import ( + "context" + + autoscalingapi "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// ScalesGetter can produce a ScaleInterface +type ScalesGetter interface { + // Scales produces a ScaleInterface for a particular namespace. + // Set namespace to the empty string for non-namespaced resources. + Scales(namespace string) ScaleInterface +} + +// ScaleInterface can fetch and update scales for +// resources in a particular namespace which implement +// the scale subresource. +type ScaleInterface interface { + // Get fetches the scale of the given scalable resource. 
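+	//
+	// Illustrative call (with "sg" a ScalesGetter and "ctx" a context.Context):
+	//
+	//	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
+	//	sc, err := sg.Scales("default").Get(ctx, gr, "my-deployment", metav1.GetOptions{})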
+ Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscalingapi.Scale, error) + + // Update updates the scale of the given scalable resource. + Update(ctx context.Context, resource schema.GroupResource, scale *autoscalingapi.Scale, opts metav1.UpdateOptions) (*autoscalingapi.Scale, error) + + // Patch patches the scale of the given scalable resource. + Patch(ctx context.Context, gvr schema.GroupVersionResource, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*autoscalingapi.Scale, error) +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go new file mode 100644 index 000000000..16f29e2af --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package appsint contains the necessary scaffolding of the +// internal version of extensions as required by conversion logic. +// It doesn't have any of its own types -- it's just necessary to +// get the expected behavior out of runtime.Scheme.ConvertToVersion +// and associated methods. +package appsint diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/register.go b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go new file mode 100644 index 000000000..d3a76b518 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsint + +import ( + appsv1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + scalescheme "k8s.io/client-go/scale/scheme" +) + +// GroupName is the group name use in this package +const GroupName = appsv1beta2.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. 
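+	// Registering scalescheme.Scale under the internal version of the apps
+	// group is what allows runtime.Scheme.ConvertToVersion to route apps
+	// scale objects through the shared internal Scale type.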
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &scalescheme.Scale{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go new file mode 100644 index 000000000..f271c8259 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsv1beta1 + +import ( + "fmt" + + v1beta1 "k8s.io/api/apps/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + scheme "k8s.io/client-go/scale/scheme" +) + +func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go new file mode 100644 index 000000000..830619b44 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta1 + +package appsv1beta1 // import "k8s.io/client-go/scale/scheme/appsv1beta1" diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go new file mode 100644 index 000000000..f11fcbd00 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsv1beta1 + +import ( + appsapiv1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = appsapiv1beta1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &appsapiv1beta1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register() +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..d56861ead --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go @@ -0,0 +1,134 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. 
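+//
+// The generated functions below copy ObjectMeta and Replicas mechanically.
+// ScaleStatus is deliberately not generated: its selector representations
+// differ between the external and internal types (see the WARNING comments
+// further down), so the hand-written conversions from conversion.go are
+// registered via AddConversionFunc rather than AddGeneratedConversionFunc.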
+ +package appsv1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1beta1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Scale_To_scheme_Scale(a.(*v1beta1.Scale), b.(*scheme.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_Scale_To_v1beta1_Scale(a.(*scheme.Scale), b.(*v1beta1.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta1.ScaleSpec), b.(*scheme.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta1.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function. 
+func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. +func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. +func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go new file mode 100644 index 000000000..35d15c30d --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package appsv1beta2 + +import ( + "fmt" + + v1beta2 "k8s.io/api/apps/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + scheme "k8s.io/client-go/scale/scheme" +) + +func Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go new file mode 100644 index 000000000..c21a56d56 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta2 + +package appsv1beta2 // import "k8s.io/client-go/scale/scheme/appsv1beta2" diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go new file mode 100644 index 000000000..5e8a5d200 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsv1beta2 + +import ( + appsapiv1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = appsapiv1beta2.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &appsapiv1beta2.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register() +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go new file mode 100644 index 000000000..09c17420e --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go @@ -0,0 +1,134 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package appsv1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1beta2.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_Scale_To_scheme_Scale(a.(*v1beta2.Scale), b.(*scheme.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta2.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_Scale_To_v1beta2_Scale(a.(*scheme.Scale), b.(*v1beta2.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta2.ScaleSpec), b.(*scheme.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta2.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta2.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta2.ScaleStatus), b.(*scheme.ScaleStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta2_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta2_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta2_Scale(in, out, s) +} + +func autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. 
+func Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function. +func Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go new file mode 100644 index 000000000..36ef82b92 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscalingv1 + +import ( + "fmt" + + v1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + scheme "k8s.io/client-go/scale/scheme" +) + +func Convert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = "" + if in.Selector != nil { + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.Selector = selector.String() + } + + return nil +} + +func Convert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + if in.Selector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.Selector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go new file mode 100644 index 000000000..03684dd90 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/autoscaling/v1 + +package autoscalingv1 // import "k8s.io/client-go/scale/scheme/autoscalingv1" diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go new file mode 100644 index 000000000..4339376c9 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscalingv1 + +import ( + autoscalingapiv1 "k8s.io/api/autoscaling/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = autoscalingapiv1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &autoscalingapiv1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register() +} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go new file mode 100644 index 000000000..09e73584e --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go @@ -0,0 +1,133 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package autoscalingv1 + +import ( + v1 "k8s.io/api/autoscaling/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
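+//
+// For autoscaling/v1 the ScaleStatus conversions below are registered as
+// hand-written functions: the external Selector is a serialized label
+// selector string, while the internal type uses a *metav1.LabelSelector,
+// so conversion.go parses and prints it rather than copying fields.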
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Scale_To_scheme_Scale(a.(*v1.Scale), b.(*scheme.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_Scale_To_v1_Scale(a.(*scheme.Scale), b.(*v1.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1.ScaleSpec), b.(*scheme.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleSpec_To_v1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleStatus_To_v1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1.ScaleStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1.ScaleStatus), b.(*scheme.ScaleStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1_Scale(in, out, s) +} + +func autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. 
+func Convert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1_ScaleSpec is an autogenerated conversion function. +func Convert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in, out, s) +} + +func autoConvert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/doc.go b/vendor/k8s.io/client-go/scale/scheme/doc.go new file mode 100644 index 000000000..0203d6d5a --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package scheme contains a runtime.Scheme to be used for serializing +// and deserializing different versions of Scale, and for converting +// in between them. +package scheme diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go new file mode 100644 index 000000000..9aaac6086 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package extensionsint contains the necessary scaffolding of the +// internal version of extensions as required by conversion logic. +// It doesn't have any of its own types -- it's just necessary to +// get the expected behavior out of runtime.Scheme.ConvertToVersion +// and associated methods. 
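+//
+// Conceptually, the internal version acts as the hub of a hub-and-spoke
+// conversion graph: an extensions/v1beta1 Scale is first converted to the
+// internal Scale and from there to any other registered external version.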
+package extensionsint diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go new file mode 100644 index 000000000..570a8a54a --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensionsint + +import ( + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + scalescheme "k8s.io/client-go/scale/scheme" +) + +// GroupName is the group name use in this package +const GroupName = extensionsv1beta1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &scalescheme.Scale{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go new file mode 100644 index 000000000..821eb33d7 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package extensionsv1beta1 + +import ( + "fmt" + + v1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + scheme "k8s.io/client-go/scale/scheme" +) + +func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go new file mode 100644 index 000000000..1e719884f --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/extensions/v1beta1 + +package extensionsv1beta1 // import "k8s.io/client-go/scale/scheme/extensionsv1beta1" diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go new file mode 100644 index 000000000..248a00712 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensionsv1beta1 + +import ( + extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = extensionsapiv1beta1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &extensionsapiv1beta1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register() +} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..46b29f171 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go @@ -0,0 +1,134 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package extensionsv1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1beta1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Scale_To_scheme_Scale(a.(*v1beta1.Scale), b.(*scheme.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_Scale_To_v1beta1_Scale(a.(*scheme.Scale), b.(*v1beta1.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta1.ScaleSpec), b.(*scheme.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta1.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. 
+func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s)
+}
+
+func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function.
+func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
+	// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
+	return nil
+}
+
+func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/register.go b/vendor/k8s.io/client-go/scale/scheme/register.go
new file mode 100644
index 000000000..4339e6173
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = autoscalingv1.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme applies all the stored functions to the scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Scale{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/types.go b/vendor/k8s.io/client-go/scale/scheme/types.go new file mode 100644 index 000000000..5c5d0a6f2 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/types.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// This file contains our own "internal" version of scale that we use for conversions, +// since we can't use the main Kubernetes internal versions. + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector +} diff --git a/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go new file mode 100644 index 000000000..6ee3c2071 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go @@ -0,0 +1,92 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/scale/util.go b/vendor/k8s.io/client-go/scale/util.go new file mode 100644 index 000000000..2f43a7a79 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/util.go @@ -0,0 +1,197 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scale + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/discovery" + scalescheme "k8s.io/client-go/scale/scheme" + scaleappsint "k8s.io/client-go/scale/scheme/appsint" + scaleappsv1beta1 "k8s.io/client-go/scale/scheme/appsv1beta1" + scaleappsv1beta2 "k8s.io/client-go/scale/scheme/appsv1beta2" + scaleautoscaling "k8s.io/client-go/scale/scheme/autoscalingv1" + scaleextint "k8s.io/client-go/scale/scheme/extensionsint" + scaleext "k8s.io/client-go/scale/scheme/extensionsv1beta1" +) + +// PreferredResourceMapper determines the preferred version of a resource to scale +type PreferredResourceMapper interface { + // ResourceFor takes a partial resource and returns the preferred resource. + ResourceFor(resource schema.GroupVersionResource) (preferredResource schema.GroupVersionResource, err error) +} + +// Ensure a RESTMapper satisfies the PreferredResourceMapper interface +var _ PreferredResourceMapper = meta.RESTMapper(nil) + +// ScaleKindResolver knows about the relationship between +// resources and the GroupVersionKind of their scale subresources. +type ScaleKindResolver interface { + // ScaleForResource returns the GroupVersionKind of the + // scale subresource for the given GroupVersionResource. + ScaleForResource(resource schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error) +} + +// discoveryScaleResolver is a ScaleKindResolver that uses +// a DiscoveryInterface to associate resources with their +// scale-kinds +type discoveryScaleResolver struct { + discoveryClient discovery.ServerResourcesInterface +} + +func (r *discoveryScaleResolver) ScaleForResource(inputRes schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error) { + groupVerResources, err := r.discoveryClient.ServerResourcesForGroupVersion(inputRes.GroupVersion().String()) + if err != nil { + return schema.GroupVersionKind{}, fmt.Errorf("unable to fetch discovery information for %s: %v", inputRes.String(), err) + } + + for _, resource := range groupVerResources.APIResources { + resourceParts := strings.SplitN(resource.Name, "/", 2) + if len(resourceParts) != 2 || resourceParts[0] != inputRes.Resource || resourceParts[1] != "scale" { + // skip non-scale resources, or scales for resources that we're not looking for + continue + } + + scaleGV := inputRes.GroupVersion() + if resource.Group != "" && resource.Version != "" { + scaleGV = schema.GroupVersion{ + Group: resource.Group, + Version: resource.Version, + } + } + + return scaleGV.WithKind(resource.Kind), nil + } + + return schema.GroupVersionKind{}, fmt.Errorf("could not find scale subresource for %s in discovery information", inputRes.String()) +} + +// cachedScaleKindResolver is a ScaleKindResolver that caches results +// from another ScaleKindResolver, re-fetching on cache misses. 
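+// Consumers normally obtain one via NewDiscoveryScaleKindResolver below; a
+// rough usage sketch (assuming `dc` is a discovery client, e.g. from
+// clientset.Discovery(); the resource chosen here is illustrative):
+//
+//	resolver := NewDiscoveryScaleKindResolver(dc)
+//	gvk, err := resolver.ScaleForResource(schema.GroupVersionResource{
+//		Group: "apps", Version: "v1", Resource: "deployments",
+//	})
+//	// gvk is typically autoscaling/v1, Kind=Scale.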
+type cachedScaleKindResolver struct {
+	base ScaleKindResolver
+
+	cache map[schema.GroupVersionResource]schema.GroupVersionKind
+	mu    sync.RWMutex
+}
+
+func (r *cachedScaleKindResolver) ScaleForResource(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	r.mu.RLock()
+	gvk, isCached := r.cache[resource]
+	r.mu.RUnlock()
+	if isCached {
+		return gvk, nil
+	}
+
+	// we could have multiple fetches of the same resources, but that's probably
+	// better than limiting to only one reader at once (mu.Mutex),
+	// or blocking checks for other resources while we fetch
+	// (mu.Lock before fetch).
+	gvk, err := r.base.ScaleForResource(resource)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.cache[resource] = gvk
+
+	return gvk, nil
+}
+
+// NewDiscoveryScaleKindResolver creates a new ScaleKindResolver which uses information from the given
+// discovery client to resolve the correct Scale GroupVersionKind for different resources.
+func NewDiscoveryScaleKindResolver(client discovery.ServerResourcesInterface) ScaleKindResolver {
+	base := &discoveryScaleResolver{
+		discoveryClient: client,
+	}
+
+	return &cachedScaleKindResolver{
+		base:  base,
+		cache: make(map[schema.GroupVersionResource]schema.GroupVersionKind),
+	}
+}
+
+// ScaleConverter knows how to convert between external scale versions.
+type ScaleConverter struct {
+	scheme            *runtime.Scheme
+	codecs            serializer.CodecFactory
+	internalVersioner runtime.GroupVersioner
+}
+
+// NewScaleConverter creates a new ScaleConverter for converting between
+// Scales in autoscaling/v1 and extensions/v1beta1.
+func NewScaleConverter() *ScaleConverter {
+	scheme := runtime.NewScheme()
+	utilruntime.Must(scaleautoscaling.AddToScheme(scheme))
+	utilruntime.Must(scalescheme.AddToScheme(scheme))
+	utilruntime.Must(scaleext.AddToScheme(scheme))
+	utilruntime.Must(scaleextint.AddToScheme(scheme))
+	utilruntime.Must(scaleappsint.AddToScheme(scheme))
+	utilruntime.Must(scaleappsv1beta1.AddToScheme(scheme))
+	utilruntime.Must(scaleappsv1beta2.AddToScheme(scheme))
+
+	return &ScaleConverter{
+		scheme: scheme,
+		codecs: serializer.NewCodecFactory(scheme),
+		internalVersioner: runtime.NewMultiGroupVersioner(
+			scalescheme.SchemeGroupVersion,
+			schema.GroupKind{Group: scaleext.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleautoscaling.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleappsv1beta1.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleappsv1beta2.GroupName, Kind: "Scale"},
+		),
+	}
+}
+
+// Scheme returns the scheme used by this scale converter.
+func (c *ScaleConverter) Scheme() *runtime.Scheme {
+	return c.scheme
+}
+
+func (c *ScaleConverter) Codecs() serializer.CodecFactory {
+	return c.codecs
+}
+
+func (c *ScaleConverter) ScaleVersions() []schema.GroupVersion {
+	return []schema.GroupVersion{
+		scaleautoscaling.SchemeGroupVersion,
+		scalescheme.SchemeGroupVersion,
+		scaleext.SchemeGroupVersion,
+		scaleextint.SchemeGroupVersion,
+		scaleappsint.SchemeGroupVersion,
+		scaleappsv1beta1.SchemeGroupVersion,
+		scaleappsv1beta2.SchemeGroupVersion,
+	}
+}
+
+// ConvertToVersion converts the given *external* input object to the given *external* output group-version.
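+// A rough sketch (assuming `c` came from NewScaleConverter and `in` is an
+// *autoscalingv1.Scale; the target group-version is spelled out explicitly):
+//
+//	out, err := c.ConvertToVersion(in, schema.GroupVersion{Group: "extensions", Version: "v1beta1"})
+//	if err != nil {
+//		return err
+//	}
+//	scale := out.(*extsv1beta1.Scale) // extsv1beta1 = k8s.io/api/extensions/v1beta1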
+func (c *ScaleConverter) ConvertToVersion(in runtime.Object, outVersion schema.GroupVersion) (runtime.Object, error) { + scaleInt, err := c.scheme.ConvertToVersion(in, c.internalVersioner) + if err != nil { + return nil, err + } + + return c.scheme.ConvertToVersion(scaleInt, outVersion) +} diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go b/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go new file mode 100644 index 000000000..7cf29524c --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go @@ -0,0 +1,52 @@ +//This package is copied from Go library text/template. +//The original private functions indirect and printableValue +//are exported as public functions. +package template + +import ( + "fmt" + "reflect" +) + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// Indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func Indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Pointer || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// PrintableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func PrintableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Pointer { + v, _ = Indirect(v) // fmt.Fprint handles nil. + } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PointerTo(v.Type()).Implements(errorType) || reflect.PointerTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go b/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go new file mode 100644 index 000000000..f0c8e712c --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go @@ -0,0 +1,177 @@ +//This package is copied from Go library text/template. +//The original private functions eq, ge, gt, le, lt, and ne +//are exported as public functions. 
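+//
+//For example (a sketch using only helpers exported from this file):
+//
+//	eq, _ := Equal(2, 2)      // true
+//	lt, _ := Less(uint(1), 2) // true; mixed int/uint kinds are handled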
+package template
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	errBadComparisonType = errors.New("invalid type for comparison")
+	errBadComparison     = errors.New("incompatible types for comparison")
+	errNoComparison      = errors.New("missing argument for comparison")
+)
+
+type kind int
+
+const (
+	invalidKind kind = iota
+	boolKind
+	complexKind
+	intKind
+	floatKind
+	integerKind
+	stringKind
+	uintKind
+)
+
+func basicKind(v reflect.Value) (kind, error) {
+	switch v.Kind() {
+	case reflect.Bool:
+		return boolKind, nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return intKind, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return uintKind, nil
+	case reflect.Float32, reflect.Float64:
+		return floatKind, nil
+	case reflect.Complex64, reflect.Complex128:
+		return complexKind, nil
+	case reflect.String:
+		return stringKind, nil
+	}
+	return invalidKind, errBadComparisonType
+}
+
+// Equal evaluates the comparison a == b || a == c || ...
+func Equal(arg1 interface{}, arg2 ...interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	if len(arg2) == 0 {
+		return false, errNoComparison
+	}
+	for _, arg := range arg2 {
+		v2 := reflect.ValueOf(arg)
+		k2, err := basicKind(v2)
+		if err != nil {
+			return false, err
+		}
+		truth := false
+		if k1 != k2 {
+			// Special case: Can compare integer values regardless of type's sign.
+			switch {
+			case k1 == intKind && k2 == uintKind:
+				truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
+			case k1 == uintKind && k2 == intKind:
+				truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
+			default:
+				return false, errBadComparison
+			}
+		} else {
+			switch k1 {
+			case boolKind:
+				truth = v1.Bool() == v2.Bool()
+			case complexKind:
+				truth = v1.Complex() == v2.Complex()
+			case floatKind:
+				truth = v1.Float() == v2.Float()
+			case intKind:
+				truth = v1.Int() == v2.Int()
+			case stringKind:
+				truth = v1.String() == v2.String()
+			case uintKind:
+				truth = v1.Uint() == v2.Uint()
+			default:
+				panic("invalid kind")
+			}
+		}
+		if truth {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// NotEqual evaluates the comparison a != b.
+func NotEqual(arg1, arg2 interface{}) (bool, error) {
+	// != is the inverse of ==.
+	equal, err := Equal(arg1, arg2)
+	return !equal, err
+}
+
+// Less evaluates the comparison a < b.
+func Less(arg1, arg2 interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	v2 := reflect.ValueOf(arg2)
+	k2, err := basicKind(v2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = v1.Float() < v2.Float()
+		case intKind:
+			truth = v1.Int() < v2.Int()
+		case stringKind:
+			truth = v1.String() < v2.String()
+		case uintKind:
+			truth = v1.Uint() < v2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// LessEqual evaluates the comparison a <= b.
+func LessEqual(arg1, arg2 interface{}) (bool, error) {
+	// <= is < or ==.
+ lessThan, err := Less(arg1, arg2) + if lessThan || err != nil { + return lessThan, err + } + return Equal(arg1, arg2) +} + +// Greater evaluates the comparison a > b. +func Greater(arg1, arg2 interface{}) (bool, error) { + // > is the inverse of <=. + lessOrEqual, err := LessEqual(arg1, arg2) + if err != nil { + return false, err + } + return !lessOrEqual, nil +} + +// GreaterEqual evaluates the comparison a >= b. +func GreaterEqual(arg1, arg2 interface{}) (bool, error) { + // >= is the inverse of <. + lessThan, err := Less(arg1, arg2) + if err != nil { + return false, err + } + return !lessThan, nil +} diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS new file mode 100644 index 000000000..921ac2fa0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -0,0 +1,28 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt + - ncdc +reviewers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - justinsb + - soltysh + - jsafrane + - dims + - ingvagabund + - ncdc +emeritus_approvers: + - lavalamp diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go new file mode 100644 index 000000000..8a1104bde --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -0,0 +1,515 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/clock" +) + +// This file implements a low-level controller that is used in +// sharedIndexInformer, which is an implementation of +// SharedIndexInformer. Such informers, in turn, are key components +// in the high level controllers that form the backbone of the +// Kubernetes control plane. Look at those for examples, or the +// example in +// https://github.com/kubernetes/client-go/tree/master/examples/workqueue +// . + +// Config contains all the settings for one of these low-level controllers. +type Config struct { + // The queue for your objects - has to be a DeltaFIFO due to + // assumptions in the implementation. Your Process() function + // should accept the output of this Queue's Pop() method. + Queue + + // Something that can list and watch your objects. + ListerWatcher + + // Something that can process a popped Deltas. + Process ProcessFunc + + // ObjectType is an example object of the type this controller is + // expected to handle. + ObjectType runtime.Object + + // ObjectDescription is the description to use when logging type-specific information about this controller. + ObjectDescription string + + // FullResyncPeriod is the period at which ShouldResync is considered. 
+ FullResyncPeriod time.Duration + + // ShouldResync is periodically used by the reflector to determine + // whether to Resync the Queue. If ShouldResync is `nil` or + // returns true, it means the reflector should proceed with the + // resync. + ShouldResync ShouldResyncFunc + + // If true, when Process() returns an error, re-enqueue the object. + // TODO: add interface to let you inject a delay/backoff or drop + // the object completely if desired. Pass the object in + // question to this interface as a parameter. This is probably moot + // now that this functionality appears at a higher level. + RetryOnError bool + + // Called whenever the ListAndWatch drops the connection with an error. + WatchErrorHandler WatchErrorHandler + + // WatchListPageSize is the requested chunk size of initial and relist watch lists. + WatchListPageSize int64 +} + +// ShouldResyncFunc is a type of function that indicates if a reflector should perform a +// resync or not. It can be used by a shared informer to support multiple event handlers with custom +// resync periods. +type ShouldResyncFunc func() bool + +// ProcessFunc processes a single object. +type ProcessFunc func(obj interface{}, isInInitialList bool) error + +// `*controller` implements Controller +type controller struct { + config Config + reflector *Reflector + reflectorMutex sync.RWMutex + clock clock.Clock +} + +// Controller is a low-level controller that is parameterized by a +// Config and used in sharedIndexInformer. +type Controller interface { + // Run does two things. One is to construct and run a Reflector + // to pump objects/notifications from the Config's ListerWatcher + // to the Config's Queue and possibly invoke the occasional Resync + // on that Queue. The other is to repeatedly Pop from the Queue + // and process with the Config's ProcessFunc. Both of these + // continue until `stopCh` is closed. + Run(stopCh <-chan struct{}) + + // HasSynced delegates to the Config's Queue + HasSynced() bool + + // LastSyncResourceVersion delegates to the Reflector when there + // is one, otherwise returns the empty string + LastSyncResourceVersion() string +} + +// New makes a new Controller from the given Config. +func New(c *Config) Controller { + ctlr := &controller{ + config: *c, + clock: &clock.RealClock{}, + } + return ctlr +} + +// Run begins processing items, and will continue until a value is sent down stopCh or it is closed. +// It's an error to call Run more than once. +// Run blocks; call via go. 
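+//
+// A typical invocation looks roughly like this (sketch; `cfg` is a *Config
+// wired up as in newInformer at the bottom of this file):
+//
+//	ctrl := New(cfg)
+//	stopCh := make(chan struct{})
+//	go ctrl.Run(stopCh)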
+func (c *controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + go func() { + <-stopCh + c.config.Queue.Close() + }() + r := NewReflectorWithOptions( + c.config.ListerWatcher, + c.config.ObjectType, + c.config.Queue, + ReflectorOptions{ + ResyncPeriod: c.config.FullResyncPeriod, + TypeDescription: c.config.ObjectDescription, + Clock: c.clock, + }, + ) + r.ShouldResync = c.config.ShouldResync + r.WatchListPageSize = c.config.WatchListPageSize + if c.config.WatchErrorHandler != nil { + r.watchErrorHandler = c.config.WatchErrorHandler + } + + c.reflectorMutex.Lock() + c.reflector = r + c.reflectorMutex.Unlock() + + var wg wait.Group + + wg.StartWithChannel(stopCh, r.Run) + + wait.Until(c.processLoop, time.Second, stopCh) + wg.Wait() +} + +// Returns true once this controller has completed an initial resource listing +func (c *controller) HasSynced() bool { + return c.config.Queue.HasSynced() +} + +func (c *controller) LastSyncResourceVersion() string { + c.reflectorMutex.RLock() + defer c.reflectorMutex.RUnlock() + if c.reflector == nil { + return "" + } + return c.reflector.LastSyncResourceVersion() +} + +// processLoop drains the work queue. +// TODO: Consider doing the processing in parallel. This will require a little thought +// to make sure that we don't end up processing the same object multiple times +// concurrently. +// +// TODO: Plumb through the stopCh here (and down to the queue) so that this can +// actually exit when the controller is stopped. Or just give up on this stuff +// ever being stoppable. Converting this whole package to use Context would +// also be helpful. +func (c *controller) processLoop() { + for { + obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) + if err != nil { + if err == ErrFIFOClosed { + return + } + if c.config.RetryOnError { + // This is the safe way to re-enqueue. + c.config.Queue.AddIfNotPresent(obj) + } + } + } +} + +// ResourceEventHandler can handle notifications for events that +// happen to a resource. The events are informational only, so you +// can't return an error. The handlers MUST NOT modify the objects +// received; this concerns not only the top level of structure but all +// the data structures reachable from it. +// - OnAdd is called when an object is added. +// - OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. +// - OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type DeletedFinalStateUnknown. This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. +type ResourceEventHandler interface { + OnAdd(obj interface{}, isInInitialList bool) + OnUpdate(oldObj, newObj interface{}) + OnDelete(obj interface{}) +} + +// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or +// as few of the notification functions as you want while still implementing +// ResourceEventHandler. This adapter does not remove the prohibition against +// modifying the objects. +// +// See ResourceEventHandlerDetailedFuncs if your use needs to propagate +// HasSynced. 
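+//
+// A minimal sketch (the handler bodies are illustrative):
+//
+//	handler := ResourceEventHandlerFuncs{
+//		AddFunc:    func(obj interface{}) { fmt.Println("added:", obj) },
+//		UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("updated:", newObj) },
+//		DeleteFunc: func(obj interface{}) { fmt.Println("deleted:", obj) },
+//	}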
+type ResourceEventHandlerFuncs struct { + AddFunc func(obj interface{}) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) { + if r.AddFunc != nil { + r.AddFunc(obj) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + +// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs +// except its AddFunc accepts the isInInitialList parameter, for propagating +// HasSynced. +type ResourceEventHandlerDetailedFuncs struct { + AddFunc func(obj interface{}, isInInitialList bool) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) { + if r.AddFunc != nil { + r.AddFunc(obj, isInInitialList) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + +// FilteringResourceEventHandler applies the provided filter to all events coming +// in, ensuring the appropriate nested handler method is invoked. An object +// that starts passing the filter after an update is considered an add, and an +// object that stops passing the filter after an update is considered a delete. +// Like the handlers, the filter MUST NOT modify the objects it is given. +type FilteringResourceEventHandler struct { + FilterFunc func(obj interface{}) bool + Handler ResourceEventHandler +} + +// OnAdd calls the nested handler only if the filter succeeds +func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnAdd(obj, isInInitialList) +} + +// OnUpdate ensures the proper handler is called depending on whether the filter matches +func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { + newer := r.FilterFunc(newObj) + older := r.FilterFunc(oldObj) + switch { + case newer && older: + r.Handler.OnUpdate(oldObj, newObj) + case newer && !older: + r.Handler.OnAdd(newObj, false) + case !newer && older: + r.Handler.OnDelete(oldObj) + default: + // do nothing + } +} + +// OnDelete calls the nested handler only if the filter succeeds +func (r FilteringResourceEventHandler) OnDelete(obj interface{}) { + if !r.FilterFunc(obj) { + return + } + r.Handler.OnDelete(obj) +} + +// DeletionHandlingMetaNamespaceKeyFunc checks for +// DeletedFinalStateUnknown objects before calling +// MetaNamespaceKeyFunc. +func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return MetaNamespaceKeyFunc(obj) +} + +// NewInformer returns a Store and a controller for populating the store +// while also providing event notifications. 
You should only use the returned
+// Store for Get/List operations; Add/Modify/Deletes will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+//   - lw is list and watch functions for the source of the resource you want to
+//     be informed of.
+//   - objType is an object of the type that you expect to receive.
+//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//     calls, even if nothing changed). Otherwise, re-list will be delayed as
+//     long as possible (until the upstream source closes the watch or times out,
+//     or you stop the controller).
+//   - h is the object you want notifications sent to.
+func NewInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+) (Store, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
+}
+
+// NewIndexerInformer returns an Indexer and a Controller for populating the index
+// while also providing event notifications. You should only use the returned
+// Index for Get/List operations; Add/Modify/Deletes will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+//   - lw is list and watch functions for the source of the resource you want to
+//     be informed of.
+//   - objType is an object of the type that you expect to receive.
+//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//     calls, even if nothing changed). Otherwise, re-list will be delayed as
+//     long as possible (until the upstream source closes the watch or times out,
+//     or you stop the controller).
+//   - h is the object you want notifications sent to.
+//   - indexers is the indexer for the received object type.
+func NewIndexerInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	indexers Indexers,
+) (Indexer, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
+}
+
+// NewTransformingInformer returns a Store and a controller for populating
+// the store while also providing event notifications. You should only use
+// the returned Store for Get/List operations; Add/Modify/Deletes will cause
+// the event notifications to be faulty.
+// The given transform function will be called on all objects before they are
+// put into the Store and corresponding Add/Modify/Delete handlers will
+// be invoked for them.
+func NewTransformingInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	transformer TransformFunc,
+) (Store, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
+}
+
+// NewTransformingIndexerInformer returns an Indexer and a controller for
+// populating the index while also providing event notifications. You should
+// only use the returned Index for Get/List operations; Add/Modify/Deletes
+// will cause the event notifications to be faulty.
+// The given transform function will be called on all objects before they are
+// put into the Index and corresponding Add/Modify/Delete handlers will
+// be invoked for them.
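+//
+// A rough wiring sketch (assuming `lw` lists/watches Pods, `handler` is a
+// ResourceEventHandler, and `stopCh` is a stop channel; the transform drops
+// managed fields to save memory):
+//
+//	indexer, ctrl := NewTransformingIndexerInformer(lw, &v1.Pod{}, 0, handler,
+//		Indexers{NamespaceIndex: MetaNamespaceIndexFunc},
+//		func(obj interface{}) (interface{}, error) {
+//			if pod, ok := obj.(*v1.Pod); ok {
+//				pod.ManagedFields = nil
+//			}
+//			return obj, nil
+//		})
+//	go ctrl.Run(stopCh)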
+func NewTransformingIndexerInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + indexers Indexers, + transformer TransformFunc, +) (Indexer, Controller) { + // This will hold the client state, as we know it. + clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) +} + +// Multiplexes updates in the form of a list of Deltas into a Store, and informs +// a given handler of events OnUpdate, OnAdd, OnDelete +func processDeltas( + // Object which receives event notifications from the given deltas + handler ResourceEventHandler, + clientState Store, + deltas Deltas, + isInInitialList bool, +) error { + // from oldest to newest + for _, d := range deltas { + obj := d.Object + + switch d.Type { + case Sync, Replaced, Added, Updated: + if old, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + handler.OnUpdate(old, obj) + } else { + if err := clientState.Add(obj); err != nil { + return err + } + handler.OnAdd(obj, isInInitialList) + } + case Deleted: + if err := clientState.Delete(obj); err != nil { + return err + } + handler.OnDelete(obj) + } + } + return nil +} + +// newInformer returns a controller for populating the store while also +// providing event notifications. +// +// Parameters +// - lw is list and watch functions for the source of the resource you want to +// be informed of. +// - objType is an object of the type that you expect to receive. +// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// - h is the object you want notifications sent to. +// - clientState is the store you want to populate +func newInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + clientState Store, + transformer TransformFunc, +) Controller { + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: clientState, + EmitDeltaTypeReplaced: true, + Transformer: transformer, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}, isInInitialList bool) error { + if deltas, ok := obj.(Deltas); ok { + return processDeltas(h, clientState, deltas, isInInitialList) + } + return errors.New("object given as Process argument is not Deltas") + }, + } + return New(cfg) +} diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go new file mode 100644 index 000000000..7160bb1ee --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -0,0 +1,803 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +// DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are +// optional. +type DeltaFIFOOptions struct { + + // KeyFunction is used to figure out what key an object should have. (It's + // exposed in the returned DeltaFIFO's KeyOf() method, with additional + // handling around deleted objects and queue state). + // Optional, the default is MetaNamespaceKeyFunc. + KeyFunction KeyFunc + + // KnownObjects is expected to return a list of keys that the consumer of + // this queue "knows about". It is used to decide which items are missing + // when Replace() is called; 'Deleted' deltas are produced for the missing items. + // KnownObjects may be nil if you can tolerate missing deletions on Replace(). + KnownObjects KeyListerGetter + + // EmitDeltaTypeReplaced indicates that the queue consumer + // understands the Replaced DeltaType. Before the `Replaced` event type was + // added, calls to Replace() were handled the same as Sync(). For + // backwards-compatibility purposes, this is false by default. + // When true, `Replaced` events will be sent for items passed to a Replace() call. + // When false, `Sync` events will be sent instead. + EmitDeltaTypeReplaced bool + + // If set, will be called for objects before enqueueing them. Please + // see the comment on TransformFunc for details. + Transformer TransformFunc +} + +// DeltaFIFO is like FIFO, but differs in two ways. One is that the +// accumulator associated with a given object's key is not that object +// but rather a Deltas, which is a slice of Delta values for that +// object. Applying an object to a Deltas means to append a Delta +// except when the potentially appended Delta is a Deleted and the +// Deltas already ends with a Deleted. In that case the Deltas does +// not grow, although the terminal Deleted will be replaced by the new +// Deleted if the older Deleted's object is a +// DeletedFinalStateUnknown. +// +// The other difference is that DeltaFIFO has two additional ways that +// an object can be applied to an accumulator: Replaced and Sync. +// If EmitDeltaTypeReplaced is not set to true, Sync will be used in +// replace events for backwards compatibility. Sync is used for periodic +// resync events. +// +// DeltaFIFO is a producer-consumer queue, where a Reflector is +// intended to be the producer, and the consumer is whatever calls +// the Pop() method. +// +// DeltaFIFO solves this use case: +// - You want to process every object change (delta) at most once. +// - When you process an object, you want to see everything +// that's happened to it since you last processed it. +// - You want to process the deletion of some of the objects. +// - You might want to periodically reprocess objects. +// +// DeltaFIFO's Pop(), Get(), and GetByKey() methods return +// interface{} to satisfy the Store/Queue interfaces, but they +// will always return an object of type Deltas. List() returns +// the newest object from each accumulator in the FIFO. 
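+//
+// A consumer typically drains the queue with Pop (sketch; error handling elided):
+//
+//	for {
+//		_, err := fifo.Pop(func(obj interface{}, isInInitialList bool) error {
+//			for _, d := range obj.(Deltas) {
+//				// react to d.Type and d.Object here
+//			}
+//			return nil
+//		})
+//		if err == ErrFIFOClosed {
+//			return
+//		}
+//	}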
+// +// A DeltaFIFO's knownObjects KeyListerGetter provides the abilities +// to list Store keys and to get objects by Store key. The objects in +// question are called "known objects" and this set of objects +// modifies the behavior of the Delete, Replace, and Resync methods +// (each in a different way). +// +// A note on threading: If you call Pop() in parallel from multiple +// threads, you could end up with multiple threads processing slightly +// different versions of the same object. +type DeltaFIFO struct { + // lock/cond protects access to 'items' and 'queue'. + lock sync.RWMutex + cond sync.Cond + + // `items` maps a key to a Deltas. + // Each such Deltas has at least one Delta. + items map[string]Deltas + + // `queue` maintains FIFO order of keys for consumption in Pop(). + // There are no duplicates in `queue`. + // A key is in `queue` if and only if it is in `items`. + queue []string + + // populated is true if the first batch of items inserted by Replace() has been populated + // or Delete/Add/Update/AddIfNotPresent was called first. + populated bool + // initialPopulationCount is the number of items inserted by the first call of Replace() + initialPopulationCount int + + // keyFunc is used to make the key used for queued item + // insertion and retrieval, and should be deterministic. + keyFunc KeyFunc + + // knownObjects list keys that are "known" --- affecting Delete(), + // Replace(), and Resync() + knownObjects KeyListerGetter + + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRUD operations. + closed bool + + // emitDeltaTypeReplaced is whether to emit the Replaced or Sync + // DeltaType when Replace() is called (to preserve backwards compat). + emitDeltaTypeReplaced bool + + // Called with every object if non-nil. + transformer TransformFunc +} + +// TransformFunc allows for transforming an object before it will be processed. +// TransformFunc (similarly to ResourceEventHandler functions) should be able +// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown. +// +// New in v1.27: In such cases, the contained object will already have gone +// through the transform object separately (when it was added / updated prior +// to the delete), so the TransformFunc can likely safely ignore such objects +// (i.e., just return the input object). +// +// The most common usage pattern is to clean-up some parts of the object to +// reduce component memory usage if a given component doesn't care about them. +// +// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc +// sees the object before any other actor, and it is now safe to mutate the +// object in place instead of making a copy. +// +// Note that TransformFunc is called while inserting objects into the +// notification queue and is therefore extremely performance sensitive; please +// do not do anything that will take a long time. +type TransformFunc func(interface{}) (interface{}, error) + +// DeltaType is the type of a change (addition, deletion, etc) +type DeltaType string + +// Change type definition +const ( + Added DeltaType = "Added" + Updated DeltaType = "Updated" + Deleted DeltaType = "Deleted" + // Replaced is emitted when we encountered watch errors and had to do a + // relist. We don't know if the replaced object has changed. + // + // NOTE: Previous versions of DeltaFIFO would use Sync for Replace events + // as well. 
Hence, Replaced is only emitted when the option + // EmitDeltaTypeReplaced is true. + Replaced DeltaType = "Replaced" + // Sync is for synthetic events during a periodic resync. + Sync DeltaType = "Sync" +) + +// Delta is a member of Deltas (a list of Delta objects) which +// in its turn is the type stored by a DeltaFIFO. It tells you what +// change happened, and the object's state after* that change. +// +// [*] Unless the change is a deletion, and then you'll get the final +// state of the object before it was deleted. +type Delta struct { + Type DeltaType + Object interface{} +} + +// Deltas is a list of one or more 'Delta's to an individual object. +// The oldest delta is at index 0, the newest delta is the last one. +type Deltas []Delta + +// NewDeltaFIFO returns a Queue which can be used to process changes to items. +// +// keyFunc is used to figure out what key an object should have. (It is +// exposed in the returned DeltaFIFO's KeyOf() method, with additional handling +// around deleted objects and queue state). +// +// 'knownObjects' may be supplied to modify the behavior of Delete, +// Replace, and Resync. It may be nil if you do not need those +// modifications. +// +// TODO: consider merging keyLister with this object, tracking a list of +// "known" keys when Pop() is called. Have to think about how that +// affects error retrying. +// +// NOTE: It is possible to misuse this and cause a race when using an +// external known object source. +// Whether there is a potential race depends on how the consumer +// modifies knownObjects. In Pop(), process function is called under +// lock, so it is safe to update data structures in it that need to be +// in sync with the queue (e.g. knownObjects). +// +// Example: +// In case of sharedIndexInformer being a consumer +// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), +// there is no race as knownObjects (s.indexer) is modified safely +// under DeltaFIFO's lock. The only exceptions are GetStore() and +// GetIndexer() methods, which expose ways to modify the underlying +// storage. Currently these two methods are used for creating Lister +// and internal tests. +// +// Also see the comment on DeltaFIFO. +// +// Warning: This constructs a DeltaFIFO that does not differentiate between +// events caused by a call to Replace (e.g., from a relist, which may +// contain object updates), and synthetic events caused by a periodic resync +// (which just emit the existing object). See https://issue.k8s.io/86015 for details. +// +// Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., EmitDeltaTypeReplaced: true})` +// instead to receive a `Replaced` event depending on the type. +// +// Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects}) +func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO { + return NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KeyFunction: keyFunc, + KnownObjects: knownObjects, + }) +} + +// NewDeltaFIFOWithOptions returns a Queue which can be used to process changes to +// items. See also the comment on DeltaFIFO. 
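+//
+// For example (sketch; `store` is an Indexer or any other KeyListerGetter
+// holding the consumer's known objects):
+//
+//	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+//		KnownObjects:          store,
+//		EmitDeltaTypeReplaced: true,
+//	})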
+func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { + if opts.KeyFunction == nil { + opts.KeyFunction = MetaNamespaceKeyFunc + } + + f := &DeltaFIFO{ + items: map[string]Deltas{}, + queue: []string{}, + keyFunc: opts.KeyFunction, + knownObjects: opts.KnownObjects, + + emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced, + transformer: opts.Transformer, + } + f.cond.L = &f.lock + return f +} + +var ( + _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue +) + +var ( + // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas + // object with zero length is encountered (should be impossible, + // but included for completeness). + ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") +) + +// Close the queue. +func (f *DeltaFIFO) Close() { + f.lock.Lock() + defer f.lock.Unlock() + f.closed = true + f.cond.Broadcast() +} + +// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or +// DeletedFinalStateUnknown objects. +func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { + if d, ok := obj.(Deltas); ok { + if len(d) == 0 { + return "", KeyError{obj, ErrZeroLengthDeltasObject} + } + obj = d.Newest().Object + } + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return f.keyFunc(obj) +} + +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, +// or the first batch of items inserted by Replace() has been popped. +func (f *DeltaFIFO) HasSynced() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *DeltaFIFO) hasSynced_locked() bool { + return f.populated && f.initialPopulationCount == 0 +} + +// Add inserts an item, and puts it in the queue. The item is only enqueued +// if it doesn't already exist in the set. +func (f *DeltaFIFO) Add(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Added, obj) +} + +// Update is just like Add, but makes an Updated Delta. +func (f *DeltaFIFO) Update(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Updated, obj) +} + +// Delete is just like Add, but makes a Deleted Delta. If the given +// object does not already exist, it will be ignored. (It may have +// already been deleted by a Replace (re-list), for example.) In this +// method `f.knownObjects`, if not nil, provides (via GetByKey) +// _additional_ objects that are considered to already exist. +func (f *DeltaFIFO) Delete(obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if f.knownObjects == nil { + if _, exists := f.items[id]; !exists { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. + return nil + } + } else { + // We only want to skip the "deletion" action if the object doesn't + // exist in knownObjects and it doesn't have corresponding item in items. + // Note that even if there is a "deletion" action in items, we can ignore it, + // because it will be deduped automatically in "queueActionLocked" + _, exists, err := f.knownObjects.GetByKey(id) + _, itemsExist := f.items[id] + if err == nil && !exists && !itemsExist { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. 
+			return nil
+		}
+	}
+
+	// exist in items and/or KnownObjects
+	return f.queueActionLocked(Deleted, obj)
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
+// present in the set, it is neither enqueued nor added to the set.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+//
+// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is
+// different from the Add/Update/Delete functions.
+func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
+	deltas, ok := obj.(Deltas)
+	if !ok {
+		return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
+	}
+	id, err := f.KeyOf(deltas)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.addIfNotPresent(id, deltas)
+	return nil
+}
+
+// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller
+// already holds the fifo lock.
+func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) {
+	f.populated = true
+	if _, exists := f.items[id]; exists {
+		return
+	}
+
+	f.queue = append(f.queue, id)
+	f.items[id] = deltas
+	f.cond.Broadcast()
+}
+
+// re-listing and watching can deliver the same update multiple times in any
+// order. This will combine the most recent two deltas if they are the same.
+func dedupDeltas(deltas Deltas) Deltas {
+	n := len(deltas)
+	if n < 2 {
+		return deltas
+	}
+	a := &deltas[n-1]
+	b := &deltas[n-2]
+	if out := isDup(a, b); out != nil {
+		deltas[n-2] = *out
+		return deltas[:n-1]
+	}
+	return deltas
+}
+
+// If a & b represent the same event, returns the delta that ought to be kept.
+// Otherwise, returns nil.
+// TODO: is there anything other than deletions that need deduping?
+func isDup(a, b *Delta) *Delta {
+	if out := isDeletionDup(a, b); out != nil {
+		return out
+	}
+	// TODO: Detect other duplicate situations? Are there any?
+	return nil
+}
+
+// keep the one with the most information if both are deletions.
+func isDeletionDup(a, b *Delta) *Delta {
+	if b.Type != Deleted || a.Type != Deleted {
+		return nil
+	}
+	// Do more sophisticated checks, or is this sufficient?
+	if _, ok := b.Object.(DeletedFinalStateUnknown); ok {
+		return a
+	}
+	return b
+}
+
+// queueActionLocked appends to the delta list for the object.
+// Caller must lock first.
+func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
+	id, err := f.KeyOf(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+
+	// Every object comes through this code path once, so this is a good
+	// place to call the transform func. If obj is a
+	// DeletedFinalStateUnknown tombstone, then the contained inner object
+	// will already have gone through the transformer, but we document that
+	// this can happen. In cases involving Replace(), such an object can
+	// come through multiple times.
+	if f.transformer != nil {
+		var err error
+		obj, err = f.transformer(obj)
+		if err != nil {
+			return err
+		}
+	}
+
+	oldDeltas := f.items[id]
+	newDeltas := append(oldDeltas, Delta{actionType, obj})
+	newDeltas = dedupDeltas(newDeltas)
+
+	if len(newDeltas) > 0 {
+		if _, exists := f.items[id]; !exists {
+			f.queue = append(f.queue, id)
+		}
+		f.items[id] = newDeltas
+		f.cond.Broadcast()
+	} else {
+		// This never happens, because dedupDeltas never returns an empty list
+		// when given a non-empty list (as it is here). 
+ // If somehow it happens anyway, deal with it but complain. + if oldDeltas == nil { + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj) + return nil + } + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj) + f.items[id] = newDeltas + return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj) + } + return nil +} + +// List returns a list of all the items; it returns the object +// from the most recent Delta. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) List() []interface{} { + f.lock.RLock() + defer f.lock.RUnlock() + return f.listLocked() +} + +func (f *DeltaFIFO) listLocked() []interface{} { + list := make([]interface{}, 0, len(f.items)) + for _, item := range f.items { + list = append(list, item.Newest().Object) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the FIFO. +func (f *DeltaFIFO) ListKeys() []string { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]string, 0, len(f.queue)) + for _, key := range f.queue { + list = append(list, key) + } + return list +} + +// Get returns the complete list of deltas for the requested item, +// or sets exists=false. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := f.KeyOf(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return f.GetByKey(key) +} + +// GetByKey returns the complete list of deltas for the requested item, +// setting exists=false if that list is empty. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) { + f.lock.RLock() + defer f.lock.RUnlock() + d, exists := f.items[key] + if exists { + // Copy item's slice so operations on this slice + // won't interfere with the object we return. + d = copyDeltas(d) + } + return d, exists, nil +} + +// IsClosed checks if the queue is closed +func (f *DeltaFIFO) IsClosed() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.closed +} + +// Pop blocks until the queue has some items, and then returns one. If +// multiple items are ready, they are returned in the order in which they were +// added/updated. The item is removed from the queue (and the store) before it +// is returned, so if you don't successfully process it, you need to add it back +// with AddIfNotPresent(). +// process function is called under lock, so it is safe to update data structures +// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc +// may return an instance of ErrRequeue with a nested error to indicate the current +// item should be requeued (equivalent to calling AddIfNotPresent under the lock). +// process should avoid expensive I/O operation so that other queue operations, i.e. +// Add() and Get(), won't be blocked for too long. +// +// Pop returns a 'Deltas', which has a complete list of all the things +// that happened to the object (deltas) while it was sitting in the queue. 
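+//
+// For illustration only (editor's sketch): a typical Pop loop over the
+// popped Deltas; the `handle` function is an assumed placeholder.
+//
+//	obj, err := fifo.Pop(func(obj interface{}, isInInitialList bool) error {
+//		for _, d := range obj.(Deltas) { // oldest delta first
+//			if tombstone, ok := d.Object.(DeletedFinalStateUnknown); ok {
+//				_ = tombstone.Obj // may be stale; the delete was missed
+//				continue
+//			}
+//			if err := handle(d.Type, d.Object); err != nil {
+//				return err
+//			}
+//		}
+//		return nil
+//	})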
+func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	for {
+		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
+			// When Close() is called, f.closed is set and the condition is broadcast,
+			// which causes this loop to continue and return from Pop().
+			if f.closed {
+				return nil, ErrFIFOClosed
+			}
+
+			f.cond.Wait()
+		}
+		isInInitialList := !f.hasSynced_locked()
+		id := f.queue[0]
+		f.queue = f.queue[1:]
+		depth := len(f.queue)
+		if f.initialPopulationCount > 0 {
+			f.initialPopulationCount--
+		}
+		item, ok := f.items[id]
+		if !ok {
+			// This should never happen
+			klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id)
+			continue
+		}
+		delete(f.items, id)
+		// Only log traces if the queue depth is greater than 10 and it takes more than
+		// 100 milliseconds to process one item from the queue.
+		// Queue depth never goes high because processing an item locks the queue,
+		// and new items can't be added until processing finishes.
+		// https://github.com/kubernetes/kubernetes/issues/103789
+		if depth > 10 {
+			trace := utiltrace.New("DeltaFIFO Pop Process",
+				utiltrace.Field{Key: "ID", Value: id},
+				utiltrace.Field{Key: "Depth", Value: depth},
+				utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"})
+			defer trace.LogIfLong(100 * time.Millisecond)
+		}
+		err := process(item, isInInitialList)
+		if e, ok := err.(ErrRequeue); ok {
+			f.addIfNotPresent(id, item)
+			err = e.Err
+		}
+		// Don't need to copyDeltas here, because we're transferring
+		// ownership to the caller.
+		return item, err
+	}
+}
+
+// Replace atomically does two things: (1) it adds the given objects
+// using the Sync or Replace DeltaType and then (2) it does some deletions.
+// In particular: for every pre-existing key K that is not the key of
+// an object in `list` there is the effect of
+// `Delete(DeletedFinalStateUnknown{K, O})` where O is the latest known
+// object of K. The pre-existing keys are those in the union set of the keys in
+// `f.items` and `f.knownObjects` (if not nil). The last known object for key K is
+// the one present in the last delta in `f.items`. If there is no delta for K
+// in `f.items`, it is the object in `f.knownObjects`.
+func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	keys := make(sets.String, len(list))
+
+	// keep backwards compat for old clients
+	action := Sync
+	if f.emitDeltaTypeReplaced {
+		action = Replaced
+	}
+
+	// Add Sync/Replaced action for each new item.
+	for _, item := range list {
+		key, err := f.KeyOf(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		keys.Insert(key)
+		if err := f.queueActionLocked(action, item); err != nil {
+			return fmt.Errorf("couldn't enqueue object: %v", err)
+		}
+	}
+
+	// Do deletion detection against objects in the queue
+	queuedDeletions := 0
+	for k, oldItem := range f.items {
+		if keys.Has(k) {
+			continue
+		}
+		// Delete pre-existing items not in the new list.
+		// This could happen if a watch deletion event was missed while
+		// disconnected from the apiserver. 
+		var deletedObj interface{}
+		if n := oldItem.Newest(); n != nil {
+			deletedObj = n.Object
+
+			// if the previous object is a DeletedFinalStateUnknown, we have to extract the actual Object
+			if d, ok := deletedObj.(DeletedFinalStateUnknown); ok {
+				deletedObj = d.Obj
+			}
+		}
+		queuedDeletions++
+		if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+			return err
+		}
+	}
+
+	if f.knownObjects != nil {
+		// Detect deletions for objects not present in the queue, but present in KnownObjects
+		knownKeys := f.knownObjects.ListKeys()
+		for _, k := range knownKeys {
+			if keys.Has(k) {
+				continue
+			}
+			if len(f.items[k]) > 0 {
+				continue
+			}
+
+			deletedObj, exists, err := f.knownObjects.GetByKey(k)
+			if err != nil {
+				deletedObj = nil
+				klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
+			} else if !exists {
+				deletedObj = nil
+				klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+			}
+			queuedDeletions++
+			if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+				return err
+			}
+		}
+	}
+
+	if !f.populated {
+		f.populated = true
+		f.initialPopulationCount = keys.Len() + queuedDeletions
+	}
+
+	return nil
+}
+
+// Resync adds, with a Sync type of Delta, every object listed by
+// `f.knownObjects` whose key is not already queued for processing.
+// If `f.knownObjects` is `nil` then Resync does nothing.
+func (f *DeltaFIFO) Resync() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.knownObjects == nil {
+		return nil
+	}
+
+	keys := f.knownObjects.ListKeys()
+	for _, k := range keys {
+		if err := f.syncKeyLocked(k); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (f *DeltaFIFO) syncKeyLocked(key string) error {
+	obj, exists, err := f.knownObjects.GetByKey(key)
+	if err != nil {
+		klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
+		return nil
+	} else if !exists {
+		klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
+		return nil
+	}
+
+	// If we are doing Resync() and there is already an event queued for that object,
+	// we ignore the Resync for it. This is to avoid the race, in which the resync
+	// comes with the previous value of object (since queueing an event for the object
+	// doesn't trigger changing the underlying store).
+	id, err := f.KeyOf(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	if len(f.items[id]) > 0 {
+		return nil
+	}
+
+	if err := f.queueActionLocked(Sync, obj); err != nil {
+		return fmt.Errorf("couldn't queue object: %v", err)
+	}
+	return nil
+}
+
+// A KeyListerGetter is anything that knows how to list its keys and look up by key.
+type KeyListerGetter interface {
+	KeyLister
+	KeyGetter
+}
+
+// A KeyLister is anything that knows how to list its keys.
+type KeyLister interface {
+	ListKeys() []string
+}
+
+// A KeyGetter is anything that knows how to get the value stored under a given key.
+type KeyGetter interface {
+	// GetByKey returns the value associated with the key, or sets exists=false.
+	GetByKey(key string) (value interface{}, exists bool, err error)
+}
+
+// Oldest is a convenience function that returns the oldest delta, or
+// nil if there are no deltas. 
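+//
+// For illustration only (editor's sketch): consumers can either replay a
+// Deltas slice in order or inspect only its endpoints:
+//
+//	for _, d := range deltas { // index 0 is the oldest
+//		fmt.Println(d.Type)
+//	}
+//	first, last := deltas.Oldest(), deltas.Newest() // nil when deltas is empty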
+func (d Deltas) Oldest() *Delta { + if len(d) > 0 { + return &d[0] + } + return nil +} + +// Newest is a convenience function that returns the newest delta, or +// nil if there are no deltas. +func (d Deltas) Newest() *Delta { + if n := len(d); n > 0 { + return &d[n-1] + } + return nil +} + +// copyDeltas returns a shallow copy of d; that is, it copies the slice but not +// the objects in the slice. This allows Get/List to return an object that we +// know won't be clobbered by a subsequent modifications. +func copyDeltas(d Deltas) Deltas { + d2 := make(Deltas, len(d)) + copy(d2, d) + return d2 +} + +// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where an object +// was deleted but the watch deletion event was missed while disconnected from +// apiserver. In this case we don't know the final "resting" state of the object, so +// there's a chance the included `Obj` is stale. +type DeletedFinalStateUnknown struct { + Key string + Obj interface{} +} diff --git a/vendor/k8s.io/client-go/tools/cache/doc.go b/vendor/k8s.io/client-go/tools/cache/doc.go new file mode 100644 index 000000000..56b61d300 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache is a client-side caching mechanism. It is useful for +// reducing the number of server calls you'd otherwise need to make. +// Reflector watches a server and updates a Store. Two stores are provided; +// one that simply caches objects (for example, to allow a scheduler to +// list currently available nodes), and one that additionally acts as +// a FIFO queue (for example, to allow a scheduler to process incoming +// pods). +package cache // import "k8s.io/client-go/tools/cache" diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go new file mode 100644 index 000000000..813916ebf --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -0,0 +1,214 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "k8s.io/utils/clock" +) + +// ExpirationCache implements the store interface +// 1. All entries are automatically time stamped on insert +// a. The key is computed based off the original item/keyFunc +// b. The value inserted under that key is the timestamped item +// 2. Expiration happens lazily on read based on the expiration policy +// a. 
No item can be inserted into the store while we're expiring
+//	*any* item in the cache.
+// 3. Time-stamps are stripped off unexpired entries before return
+//
+// Note that the ExpirationCache is inherently slower than a normal
+// threadSafeStore because it takes a write lock every time it checks if
+// an item has expired.
+type ExpirationCache struct {
+	cacheStorage     ThreadSafeStore
+	keyFunc          KeyFunc
+	clock            clock.Clock
+	expirationPolicy ExpirationPolicy
+	// expirationLock is a write lock used to guarantee that we don't clobber
+	// newly inserted objects because of a stale expiration timestamp comparison
+	expirationLock sync.Mutex
+}
+
+// ExpirationPolicy dictates when an object expires. Currently only abstracted out
+// so unit tests don't rely on the system clock.
+type ExpirationPolicy interface {
+	IsExpired(obj *TimestampedEntry) bool
+}
+
+// TTLPolicy implements a TTL-based ExpirationPolicy.
+type TTLPolicy struct {
+	// >0: Expire entries with an age > ttl
+	// <=0: Don't expire any entry
+	TTL time.Duration
+
+	// Clock used to calculate ttl expiration
+	Clock clock.Clock
+}
+
+// IsExpired returns true if the given object is older than the ttl, or it can't
+// determine its age.
+func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool {
+	return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL
+}
+
+// TimestampedEntry is the only type allowed in an ExpirationCache.
+// Keep in mind that it is not safe to share timestamps between computers.
+// Behavior may be inconsistent if you get a timestamp from the API Server and
+// use it on the client machine as part of your ExpirationCache.
+type TimestampedEntry struct {
+	Obj       interface{}
+	Timestamp time.Time
+	key       string
+}
+
+// getTimestampedEntry returns the TimestampedEntry stored under the given key.
+func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) {
+	item, _ := c.cacheStorage.Get(key)
+	if tsEntry, ok := item.(*TimestampedEntry); ok {
+		return tsEntry, true
+	}
+	return nil, false
+}
+
+// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't
+// already expired. It holds a write lock across deletion.
+func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
+	// Prevent all inserts from the time we deem an item as "expired" to when we
+	// delete it, so an un-expired item doesn't sneak in under the same key, just
+	// before the Delete.
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
+	timestampedItem, exists := c.getTimestampedEntry(key)
+	if !exists {
+		return nil, false
+	}
+	if c.expirationPolicy.IsExpired(timestampedItem) {
+		c.cacheStorage.Delete(key)
+		return nil, false
+	}
+	return timestampedItem.Obj, true
+}
+
+// GetByKey returns the item stored under the key, or sets exists=false.
+func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) {
+	obj, exists := c.getOrExpire(key)
+	return obj, exists, nil
+}
+
+// Get returns unexpired items. It purges the cache of expired items in the
+// process.
+func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	obj, exists := c.getOrExpire(key)
+	return obj, exists, nil
+}
+
+// List retrieves a list of unexpired items. It purges the cache of expired
+// items in the process. 
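+//
+// For illustration only (editor's sketch): a 30-second TTL store built from
+// the constructors at the end of this file; `pod` is an assumed example
+// object.
+//
+//	store := NewTTLStore(MetaNamespaceKeyFunc, 30*time.Second)
+//	_ = store.Add(pod)
+//	obj, exists, _ := store.Get(pod) // exists == false once the entry is 30s old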
+func (c *ExpirationCache) List() []interface{} { + items := c.cacheStorage.List() + + list := make([]interface{}, 0, len(items)) + for _, item := range items { + key := item.(*TimestampedEntry).key + if obj, exists := c.getOrExpire(key); exists { + list = append(list, obj) + } + } + return list +} + +// ListKeys returns a list of all keys in the expiration cache. +func (c *ExpirationCache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// Add timestamps an item and inserts it into the cache, overwriting entries +// that might exist under the same key. +func (c *ExpirationCache) Add(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + + c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) + return nil +} + +// Update has not been implemented yet for lack of a use case, so this method +// simply calls `Add`. This effectively refreshes the timestamp. +func (c *ExpirationCache) Update(obj interface{}) error { + return c.Add(obj) +} + +// Delete removes an item from the cache. +func (c *ExpirationCache) Delete(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + c.cacheStorage.Delete(key) + return nil +} + +// Replace will convert all items in the given list to TimestampedEntries +// before attempting the replace operation. The replace operation will +// delete the contents of the ExpirationCache `c`. +func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { + items := make(map[string]interface{}, len(list)) + ts := c.clock.Now() + for _, item := range list { + key, err := c.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = &TimestampedEntry{item, ts, key} + } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + c.cacheStorage.Replace(items, resourceVersion) + return nil +} + +// Resync is a no-op for one of these +func (c *ExpirationCache) Resync() error { + return nil +} + +// NewTTLStore creates and returns a ExpirationCache with a TTLPolicy +func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { + return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}}) +} + +// NewExpirationStore creates and returns a ExpirationCache for a given policy +func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store { + return &ExpirationCache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + clock: clock.RealClock{}, + expirationPolicy: expirationPolicy, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go new file mode 100644 index 000000000..a16f4735e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -0,0 +1,57 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cache
+
+import (
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/utils/clock"
+)
+
+type fakeThreadSafeMap struct {
+	ThreadSafeStore
+	deletedKeys chan<- string
+}
+
+func (c *fakeThreadSafeMap) Delete(key string) {
+	if c.deletedKeys != nil {
+		c.ThreadSafeStore.Delete(key)
+		c.deletedKeys <- key
+	}
+}
+
+// FakeExpirationPolicy keeps the list of keys which never expire.
+type FakeExpirationPolicy struct {
+	NeverExpire     sets.String
+	RetrieveKeyFunc KeyFunc
+}
+
+// IsExpired checks whether the given object has expired.
+func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
+	key, _ := p.RetrieveKeyFunc(obj)
+	return !p.NeverExpire.Has(key)
+}
+
+// NewFakeExpirationStore creates a new instance for the ExpirationCache.
+func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
+	cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
+	return &ExpirationCache{
+		cacheStorage:     &fakeThreadSafeMap{cacheStorage, deletedKeys},
+		keyFunc:          keyFunc,
+		clock:            cacheClock,
+		expirationPolicy: expirationPolicy,
+	}
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
new file mode 100644
index 000000000..462d22660
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+// FakeCustomStore lets you define custom functions for store operations. 
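+//
+// For illustration only (editor's sketch): a test double that records Adds;
+// every function left nil falls back to a no-op.
+//
+//	var added []interface{}
+//	store := &FakeCustomStore{
+//		AddFunc: func(obj interface{}) error {
+//			added = append(added, obj)
+//			return nil
+//		},
+//	}
+//	_ = store.Add("x") // recorded; Delete, List, etc. do nothing here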
+type FakeCustomStore struct { + AddFunc func(obj interface{}) error + UpdateFunc func(obj interface{}) error + DeleteFunc func(obj interface{}) error + ListFunc func() []interface{} + ListKeysFunc func() []string + GetFunc func(obj interface{}) (item interface{}, exists bool, err error) + GetByKeyFunc func(key string) (item interface{}, exists bool, err error) + ReplaceFunc func(list []interface{}, resourceVersion string) error + ResyncFunc func() error +} + +// Add calls the custom Add function if defined +func (f *FakeCustomStore) Add(obj interface{}) error { + if f.AddFunc != nil { + return f.AddFunc(obj) + } + return nil +} + +// Update calls the custom Update function if defined +func (f *FakeCustomStore) Update(obj interface{}) error { + if f.UpdateFunc != nil { + return f.UpdateFunc(obj) + } + return nil +} + +// Delete calls the custom Delete function if defined +func (f *FakeCustomStore) Delete(obj interface{}) error { + if f.DeleteFunc != nil { + return f.DeleteFunc(obj) + } + return nil +} + +// List calls the custom List function if defined +func (f *FakeCustomStore) List() []interface{} { + if f.ListFunc != nil { + return f.ListFunc() + } + return nil +} + +// ListKeys calls the custom ListKeys function if defined +func (f *FakeCustomStore) ListKeys() []string { + if f.ListKeysFunc != nil { + return f.ListKeysFunc() + } + return nil +} + +// Get calls the custom Get function if defined +func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) { + if f.GetFunc != nil { + return f.GetFunc(obj) + } + return nil, false, nil +} + +// GetByKey calls the custom GetByKey function if defined +func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) { + if f.GetByKeyFunc != nil { + return f.GetByKeyFunc(key) + } + return nil, false, nil +} + +// Replace calls the custom Replace function if defined +func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error { + if f.ReplaceFunc != nil { + return f.ReplaceFunc(list, resourceVersion) + } + return nil +} + +// Resync calls the custom Resync function if defined +func (f *FakeCustomStore) Resync() error { + if f.ResyncFunc != nil { + return f.ResyncFunc() + } + return nil +} diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go new file mode 100644 index 000000000..dd13c4ea7 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -0,0 +1,382 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// PopProcessFunc is passed to Pop() method of Queue interface. +// It is supposed to process the accumulator popped from the queue. +type PopProcessFunc func(obj interface{}, isInInitialList bool) error + +// ErrRequeue may be returned by a PopProcessFunc to safely requeue +// the current item. The value of Err will be returned from Pop. 
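+//
+// For illustration only (editor's sketch): requeueing on a transient failure;
+// `processItem` is an assumed placeholder.
+//
+//	_, err := queue.Pop(func(obj interface{}, isInInitialList bool) error {
+//		if err := processItem(obj); err != nil {
+//			return ErrRequeue{Err: err} // obj goes back via AddIfNotPresent
+//		}
+//		return nil
+//	})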
+type ErrRequeue struct {
+	// Err is returned by the Pop function
+	Err error
+}
+
+// ErrFIFOClosed is returned when the FIFO is closed.
+var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue")
+
+func (e ErrRequeue) Error() string {
+	if e.Err == nil {
+		return "the popped item should be requeued without returning an error"
+	}
+	return e.Err.Error()
+}
+
+// Queue extends Store with a collection of Store keys to "process".
+// Every Add, Update, or Delete may put the object's key in that collection.
+// A Queue has a way to derive the corresponding key given an accumulator.
+// A Queue can be accessed concurrently from multiple goroutines.
+// A Queue can be "closed", after which Pop operations return an error.
+type Queue interface {
+	Store
+
+	// Pop blocks until there is at least one key to process or the
+	// Queue is closed. In the latter case Pop returns with an error.
+	// In the former case Pop atomically picks one key to process,
+	// removes that (key, accumulator) association from the Store, and
+	// processes the accumulator. Pop returns the accumulator that
+	// was processed and the result of processing. The PopProcessFunc
+	// may return an ErrRequeue{inner} and in this case Pop will (a)
+	// return that (key, accumulator) association to the Queue as part
+	// of the atomic processing and (b) return the inner error from
+	// Pop.
+	Pop(PopProcessFunc) (interface{}, error)
+
+	// AddIfNotPresent puts the given accumulator into the Queue (in
+	// association with the accumulator's key) if and only if that key
+	// is not already associated with a non-empty accumulator.
+	AddIfNotPresent(interface{}) error
+
+	// HasSynced returns true if the first batch of keys have all been
+	// popped. The first batch of keys are those of the first Replace
+	// operation if that happened before any Add, AddIfNotPresent,
+	// Update, or Delete; otherwise the first batch is empty.
+	HasSynced() bool
+
+	// Close the queue
+	Close()
+}
+
+// Pop is a helper function for popping from Queue.
+// WARNING: Do NOT use this function in non-test code to avoid races
+// unless you really really really really know what you are doing.
+//
+// NOTE: This function is deprecated and may be removed in the future without
+// additional warning.
+func Pop(queue Queue) interface{} {
+	var result interface{}
+	queue.Pop(func(obj interface{}, isInInitialList bool) error {
+		result = obj
+		return nil
+	})
+	return result
+}
+
+// FIFO is a Queue in which (a) each accumulator is simply the most
+// recently provided object and (b) the collection of keys to process
+// is a FIFO. The accumulators all start out empty, and deleting an
+// object from its accumulator empties the accumulator. The Resync
+// operation is a no-op.
+//
+// Thus: if multiple adds/updates of a single object happen while that
+// object's key is in the queue before it has been processed then it
+// will only be processed once, and when it is processed the most
+// recent version will be processed. This can't be done with a channel.
+//
+// FIFO solves this use case:
+//   - You want to process every object (exactly) once.
+//   - You want to process the most recent version of the object when you process it.
+//   - You do not want to process deleted objects, they should be removed from the queue.
+//   - You do not want to periodically reprocess objects.
+//
+// Compare with DeltaFIFO for other use cases. 
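+//
+// For illustration only (editor's sketch): two Adds under one key coalesce,
+// so Pop sees only the latest object; `podV1`/`podV2` are assumed examples
+// sharing a key.
+//
+//	f := NewFIFO(MetaNamespaceKeyFunc)
+//	_ = f.Add(podV1)
+//	_ = f.Add(podV2) // same key: overwrites, no second queue entry
+//	latest := Pop(f) // test-only helper above; yields podV2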
+type FIFO struct { + lock sync.RWMutex + cond sync.Cond + // We depend on the property that every key in `items` is also in `queue` + items map[string]interface{} + queue []string + + // populated is true if the first batch of items inserted by Replace() has been populated + // or Delete/Add/Update was called first. + populated bool + // initialPopulationCount is the number of items inserted by the first call of Replace() + initialPopulationCount int + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + + // Indication the queue is closed. + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRUD operations. + closed bool +} + +var ( + _ = Queue(&FIFO{}) // FIFO is a Queue +) + +// Close the queue. +func (f *FIFO) Close() { + f.lock.Lock() + defer f.lock.Unlock() + f.closed = true + f.cond.Broadcast() +} + +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, +// or the first batch of items inserted by Replace() has been popped. +func (f *FIFO) HasSynced() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *FIFO) hasSynced_locked() bool { + return f.populated && f.initialPopulationCount == 0 +} + +// Add inserts an item, and puts it in the queue. The item is only enqueued +// if it doesn't already exist in the set. +func (f *FIFO) Add(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if _, exists := f.items[id]; !exists { + f.queue = append(f.queue, id) + } + f.items[id] = obj + f.cond.Broadcast() + return nil +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already +// present in the set, it is neither enqueued nor added to the set. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +func (f *FIFO) AddIfNotPresent(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.addIfNotPresent(id, obj) + return nil +} + +// addIfNotPresent assumes the fifo lock is already held and adds the provided +// item to the queue under id if it does not already exist. +func (f *FIFO) addIfNotPresent(id string, obj interface{}) { + f.populated = true + if _, exists := f.items[id]; exists { + return + } + + f.queue = append(f.queue, id) + f.items[id] = obj + f.cond.Broadcast() +} + +// Update is the same as Add in this implementation. +func (f *FIFO) Update(obj interface{}) error { + return f.Add(obj) +} + +// Delete removes an item. It doesn't add it to the queue, because +// this implementation assumes the consumer only cares about the objects, +// not the order in which they were created/added. +func (f *FIFO) Delete(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + delete(f.items, id) + return err +} + +// List returns a list of all the items. 
+func (f *FIFO) List() []interface{} {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]interface{}, 0, len(f.items))
+	for _, item := range f.items {
+		list = append(list, item)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the FIFO.
+func (f *FIFO) ListKeys() []string {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]string, 0, len(f.items))
+	for key := range f.items {
+		list = append(list, key)
+	}
+	return list
+}
+
+// Get returns the requested item, or sets exists=false.
+func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	key, err := f.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return f.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or sets exists=false.
+func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	item, exists = f.items[key]
+	return item, exists, nil
+}
+
+// IsClosed checks if the queue is closed
+func (f *FIFO) IsClosed() bool {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	return f.closed
+}
+
+// Pop waits until an item is ready and processes it. If multiple items are
+// ready, they are returned in the order in which they were added/updated.
+// The item is removed from the queue (and the store) before it is processed,
+// so if you don't successfully process it, it should be added back with
+// AddIfNotPresent(). process function is called under lock, so it is safe to
+// update data structures in it that need to be in sync with the queue.
+func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	for {
+		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
+			// When Close() is called, f.closed is set and the condition is broadcast,
+			// which causes this loop to continue and return from Pop().
+			if f.closed {
+				return nil, ErrFIFOClosed
+			}
+
+			f.cond.Wait()
+		}
+		isInInitialList := !f.hasSynced_locked()
+		id := f.queue[0]
+		f.queue = f.queue[1:]
+		if f.initialPopulationCount > 0 {
+			f.initialPopulationCount--
+		}
+		item, ok := f.items[id]
+		if !ok {
+			// Item may have been deleted subsequently.
+			continue
+		}
+		delete(f.items, id)
+		err := process(item, isInInitialList)
+		if e, ok := err.(ErrRequeue); ok {
+			f.addIfNotPresent(id, item)
+			err = e.Err
+		}
+		return item, err
+	}
+}
+
+// Replace will delete the contents of 'f', using instead the given map.
+// 'f' takes ownership of the map, you should not reference the map again
+// after calling this function. f's queue is reset, too; upon return, it
+// will contain the items in the map, in no particular order.
+func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
+	items := make(map[string]interface{}, len(list))
+	for _, item := range list {
+		key, err := f.keyFunc(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		items[key] = item
+	}
+
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if !f.populated {
+		f.populated = true
+		f.initialPopulationCount = len(items)
+	}
+
+	f.items = items
+	f.queue = f.queue[:0]
+	for id := range items {
+		f.queue = append(f.queue, id)
+	}
+	if len(f.queue) > 0 {
+		f.cond.Broadcast()
+	}
+	return nil
+}
+
+// Resync will ensure that every object in the Store has its key in the queue.
+// This should be a no-op, because that property is maintained by all operations. 
+func (f *FIFO) Resync() error { + f.lock.Lock() + defer f.lock.Unlock() + + inQueue := sets.NewString() + for _, id := range f.queue { + inQueue.Insert(id) + } + for id := range f.items { + if !inQueue.Has(id) { + f.queue = append(f.queue, id) + } + } + if len(f.queue) > 0 { + f.cond.Broadcast() + } + return nil +} + +// NewFIFO returns a Store which can be used to queue up items to +// process. +func NewFIFO(keyFunc KeyFunc) *FIFO { + f := &FIFO{ + items: map[string]interface{}{}, + queue: []string{}, + keyFunc: keyFunc, + } + f.cond.L = &f.lock + return f +} diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go new file mode 100644 index 000000000..819325e9e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -0,0 +1,322 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file implements a heap data structure. + +package cache + +import ( + "container/heap" + "fmt" + "sync" +) + +const ( + closedMsg = "heap is closed" +) + +// LessFunc is used to compare two objects in the heap. +type LessFunc func(interface{}, interface{}) bool + +type heapItem struct { + obj interface{} // The object which is stored in the heap. + index int // The index of the object's key in the Heap.queue. +} + +type itemKeyValue struct { + key string + obj interface{} +} + +// heapData is an internal struct that implements the standard heap interface +// and keeps the data stored in the heap. +type heapData struct { + // items is a map from key of the objects to the objects and their index. + // We depend on the property that items in the map are in the queue and vice versa. + items map[string]*heapItem + // queue implements a heap data structure and keeps the order of elements + // according to the heap invariant. The queue keeps the keys of objects stored + // in "items". + queue []string + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + // lessFunc is used to compare two objects in the heap. + lessFunc LessFunc +} + +var ( + _ = heap.Interface(&heapData{}) // heapData is a standard heap +) + +// Less compares two objects and returns true if the first one should go +// in front of the second one in the heap. +func (h *heapData) Less(i, j int) bool { + if i > len(h.queue) || j > len(h.queue) { + return false + } + itemi, ok := h.items[h.queue[i]] + if !ok { + return false + } + itemj, ok := h.items[h.queue[j]] + if !ok { + return false + } + return h.lessFunc(itemi.obj, itemj.obj) +} + +// Len returns the number of items in the Heap. +func (h *heapData) Len() int { return len(h.queue) } + +// Swap implements swapping of two elements in the heap. This is a part of standard +// heap interface and should never be called directly. 
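+//
+// For illustration only (editor's sketch): the ordering maintained by Swap
+// and the other heap.Interface methods comes entirely from the user-supplied
+// lessFunc, e.g. an earliest-deadline ordering over an assumed deadline()
+// helper:
+//
+//	h := NewHeap(MetaNamespaceKeyFunc, func(a, b interface{}) bool {
+//		return deadline(a).Before(deadline(b))
+//	})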
+func (h *heapData) Swap(i, j int) {
+	h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
+	item := h.items[h.queue[i]]
+	item.index = i
+	item = h.items[h.queue[j]]
+	item.index = j
+}
+
+// Push is supposed to be called by heap.Push only.
+func (h *heapData) Push(kv interface{}) {
+	keyValue := kv.(*itemKeyValue)
+	n := len(h.queue)
+	h.items[keyValue.key] = &heapItem{keyValue.obj, n}
+	h.queue = append(h.queue, keyValue.key)
+}
+
+// Pop is supposed to be called by heap.Pop only.
+func (h *heapData) Pop() interface{} {
+	key := h.queue[len(h.queue)-1]
+	h.queue = h.queue[0 : len(h.queue)-1]
+	item, ok := h.items[key]
+	if !ok {
+		// This is an error
+		return nil
+	}
+	delete(h.items, key)
+	return item.obj
+}
+
+// Heap is a thread-safe producer/consumer queue that implements a heap data structure.
+// It can be used to implement priority queues and similar data structures.
+type Heap struct {
+	lock sync.RWMutex
+	cond sync.Cond
+
+	// data stores objects and has a queue that keeps their ordering according
+	// to the heap invariant.
+	data *heapData
+
+	// closed indicates that the queue is closed.
+	// It is mainly used to let Pop() exit its control loop while waiting for an item.
+	closed bool
+}
+
+// Close closes the Heap and signals condition variables that may be waiting to pop
+// items from the heap.
+func (h *Heap) Close() {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	h.closed = true
+	h.cond.Broadcast()
+}
+
+// Add inserts an item, and puts it in the queue. The item is updated if it
+// already exists.
+func (h *Heap) Add(obj interface{}) error {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	if _, exists := h.data.items[key]; exists {
+		h.data.items[key].obj = obj
+		heap.Fix(h.data, h.data.items[key].index)
+	} else {
+		h.addIfNotPresentLocked(key, obj)
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// BulkAdd adds all the items in the list to the queue and then signals the condition
+// variable. It is useful when the caller would like to add all of the items
+// to the queue before the consumer starts processing them.
+func (h *Heap) BulkAdd(list []interface{}) error {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	for _, obj := range list {
+		key, err := h.data.keyFunc(obj)
+		if err != nil {
+			return KeyError{obj, err}
+		}
+		if _, exists := h.data.items[key]; exists {
+			h.data.items[key].obj = obj
+			heap.Fix(h.data, h.data.items[key].index)
+		} else {
+			h.addIfNotPresentLocked(key, obj)
+		}
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
+// the key is present in the map, no change is made to the item.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+func (h *Heap) AddIfNotPresent(obj interface{}) error {
+	id, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	h.addIfNotPresentLocked(id, obj)
+	h.cond.Broadcast()
+	return nil
+}
+
+// addIfNotPresentLocked assumes the lock is already held and adds the provided
+// item to the queue if it does not already exist. 
+func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { + if _, exists := h.data.items[key]; exists { + return + } + heap.Push(h.data, &itemKeyValue{key, obj}) +} + +// Update is the same as Add in this implementation. When the item does not +// exist, it is added. +func (h *Heap) Update(obj interface{}) error { + return h.Add(obj) +} + +// Delete removes an item. +func (h *Heap) Delete(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if item, ok := h.data.items[key]; ok { + heap.Remove(h.data, item.index) + return nil + } + return fmt.Errorf("object not found") +} + +// Pop waits until an item is ready. If multiple items are +// ready, they are returned in the order given by Heap.data.lessFunc. +func (h *Heap) Pop() (interface{}, error) { + h.lock.Lock() + defer h.lock.Unlock() + for len(h.data.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the h.closed is set and the condition is broadcast, + // which causes this loop to continue and return from the Pop(). + if h.closed { + return nil, fmt.Errorf("heap is closed") + } + h.cond.Wait() + } + obj := heap.Pop(h.data) + if obj == nil { + return nil, fmt.Errorf("object was removed from heap data") + } + + return obj, nil +} + +// List returns a list of all the items. +func (h *Heap) List() []interface{} { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]interface{}, 0, len(h.data.items)) + for _, item := range h.data.items { + list = append(list, item.obj) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently in the Heap. +func (h *Heap) ListKeys() []string { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]string, 0, len(h.data.items)) + for key := range h.data.items { + list = append(list, key) + } + return list +} + +// Get returns the requested item, or sets exists=false. +func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { + key, err := h.data.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return h.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. +func (h *Heap) GetByKey(key string) (interface{}, bool, error) { + h.lock.RLock() + defer h.lock.RUnlock() + item, exists := h.data.items[key] + if !exists { + return nil, false, nil + } + return item.obj, true, nil +} + +// IsClosed returns true if the queue is closed. +func (h *Heap) IsClosed() bool { + h.lock.RLock() + defer h.lock.RUnlock() + return h.closed +} + +// NewHeap returns a Heap which can be used to queue up items to process. +func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { + h := &Heap{ + data: &heapData{ + items: map[string]*heapItem{}, + queue: []string{}, + keyFunc: keyFn, + lessFunc: lessFn, + }, + } + h.cond.L = &h.lock + return h +} diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go new file mode 100644 index 000000000..b78d3086b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -0,0 +1,101 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Indexer extends Store with multiple indices and restricts each +// accumulator to simply hold the current object (and be empty after +// Delete). +// +// There are three kinds of strings here: +// 1. a storage key, as defined in the Store interface, +// 2. a name of an index, and +// 3. an "indexed value", which is produced by an IndexFunc and +// can be a field value or any other string computed from the object. +type Indexer interface { + Store + // Index returns the stored objects whose set of indexed values + // intersects the set of indexed values of the given object, for + // the named index + Index(indexName string, obj interface{}) ([]interface{}, error) + // IndexKeys returns the storage keys of the stored objects whose + // set of indexed values for the named index includes the given + // indexed value + IndexKeys(indexName, indexedValue string) ([]string, error) + // ListIndexFuncValues returns all the indexed values of the given index + ListIndexFuncValues(indexName string) []string + // ByIndex returns the stored objects whose set of indexed values + // for the named index includes the given indexed value + ByIndex(indexName, indexedValue string) ([]interface{}, error) + // GetIndexers return the indexers + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(newIndexers Indexers) error +} + +// IndexFunc knows how to compute the set of indexed values for an object. +type IndexFunc func(obj interface{}) ([]string, error) + +// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns +// unique values for every object. This conversion can create errors when more than one key is found. You +// should prefer to make proper key and index functions. +func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { + return func(obj interface{}) (string, error) { + indexKeys, err := indexFunc(obj) + if err != nil { + return "", err + } + if len(indexKeys) > 1 { + return "", fmt.Errorf("too many keys: %v", indexKeys) + } + if len(indexKeys) == 0 { + return "", fmt.Errorf("unexpected empty indexKeys") + } + return indexKeys[0], nil + } +} + +const ( + // NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field. 
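+	//
+	// For illustration only (editor's sketch): querying this index on an
+	// Indexer constructed with MetaNamespaceIndexFunc (defined below):
+	//
+	//	indexer := NewIndexer(MetaNamespaceKeyFunc,
+	//		Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
+	//	objs, _ := indexer.ByIndex(NamespaceIndex, "kube-system")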
+	NamespaceIndex string = "namespace"
+)
+
+// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace
+func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return []string{""}, fmt.Errorf("object has no meta: %v", err)
+	}
+	return []string{meta.GetNamespace()}, nil
+}
+
+// Index maps the indexed value to a set of keys in the store that match on that value
+type Index map[string]sets.String
+
+// Indexers maps a name to an IndexFunc
+type Indexers map[string]IndexFunc
+
+// Indices maps a name to an Index
+type Indices map[string]Index
diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go
new file mode 100644
index 000000000..420ca7b2a
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/listers.go
@@ -0,0 +1,169 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// AppendFunc is used to add a matching item to whatever list the caller is using
+type AppendFunc func(interface{})
+
+// ListAll calls appendFn with each value retrieved from store which matches the selector.
+func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
+	selectAll := selector.Empty()
+	for _, m := range store.List() {
+		if selectAll {
+			// Avoid computing labels of the objects to speed up common flows
+			// of listing all objects.
+			appendFn(m)
+			continue
+		}
+		metadata, err := meta.Accessor(m)
+		if err != nil {
+			return err
+		}
+		if selector.Matches(labels.Set(metadata.GetLabels())) {
+			appendFn(m)
+		}
+	}
+	return nil
+}
+
+// ListAllByNamespace lists the items in the given namespace from the Indexer.
+func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
+	if namespace == metav1.NamespaceAll {
+		return ListAll(indexer, selector, appendFn)
+	}
+
+	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
+	if err != nil {
+		// Ignore error; do slow search without index.
+		klog.Warningf("can not retrieve list of objects using index : %v", err)
+		for _, m := range indexer.List() {
+			metadata, err := meta.Accessor(m)
+			if err != nil {
+				return err
+			}
+			if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) {
+				appendFn(m)
+			}
+
+		}
+		return nil
+	}
+
+	selectAll := selector.Empty()
+	for _, m := range items {
+		if selectAll {
+			// Avoid computing labels of the objects to speed up common flows
+			// of listing all objects. 
+ appendFn(m) + continue + } + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + + return nil +} + +// GenericLister is a lister skin on a generic Indexer +type GenericLister interface { + // List will return all objects across namespaces + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve assuming that name==key + Get(name string) (runtime.Object, error) + // ByNamespace will give you a GenericNamespaceLister for one namespace + ByNamespace(namespace string) GenericNamespaceLister +} + +// GenericNamespaceLister is a lister skin on a generic Indexer +type GenericNamespaceLister interface { + // List will return all objects in this namespace + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve by namespace and name + Get(name string) (runtime.Object, error) +} + +// NewGenericLister creates a new instance for the genericLister. +func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { + return &genericLister{indexer: indexer, resource: resource} +} + +type genericLister struct { + indexer Indexer + resource schema.GroupResource +} + +func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister { + return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource} +} + +func (s *genericLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} + +type genericNamespaceLister struct { + indexer Indexer + namespace string + resource schema.GroupResource +} + +func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go new file mode 100644 index 000000000..10b7e6512 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -0,0 +1,112 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cache
+
+import (
+	"context"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/watch"
+	restclient "k8s.io/client-go/rest"
+)
+
+// Lister is any object that knows how to perform an initial list.
+type Lister interface {
+	// List should return a list type object; the Items field will be extracted, and the
+	// ResourceVersion field will be used to start the watch in the right place.
+	List(options metav1.ListOptions) (runtime.Object, error)
+}
+
+// Watcher is any object that knows how to start a watch on a resource.
+type Watcher interface {
+	// Watch should begin a watch at the specified version.
+	Watch(options metav1.ListOptions) (watch.Interface, error)
+}
+
+// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
+type ListerWatcher interface {
+	Lister
+	Watcher
+}
+
+// ListFunc knows how to list resources
+type ListFunc func(options metav1.ListOptions) (runtime.Object, error)
+
+// WatchFunc knows how to watch resources
+type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
+
+// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
+// It is a convenience function for users of NewReflector, etc.
+// ListFunc and WatchFunc must not be nil.
+type ListWatch struct {
+	ListFunc  ListFunc
+	WatchFunc WatchFunc
+	// DisableChunking requests no chunking for this list watcher.
+	DisableChunking bool
+}
+
+// Getter interface knows how to access Get method from RESTClient.
+type Getter interface {
+	Get() *restclient.Request
+}
+
+// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
+func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
+	optionsModifier := func(options *metav1.ListOptions) {
+		options.FieldSelector = fieldSelector.String()
+	}
+	return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier)
+}
+
+// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier.
+// Option modifier is a function that takes a ListOptions and modifies the consumed ListOptions. Provide a customized modifier
+// function to apply modifications to the ListOptions with a field selector, a label selector, or any other desired options.
+func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch {
+	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
+		optionsModifier(&options)
+		return c.Get().
+			Namespace(namespace).
+			Resource(resource).
+			VersionedParams(&options, metav1.ParameterCodec).
+			Do(context.TODO()).
+			Get()
+	}
+	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
+		options.Watch = true
+		optionsModifier(&options)
+		return c.Get().
+			Namespace(namespace).
+			Resource(resource).
+			VersionedParams(&options, metav1.ParameterCodec).
+			Watch(context.TODO())
+	}
+	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
+}
+
+// List a set of apiserver resources
+func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
+	// ListWatch is used in Reflector, which already supports pagination.
+	// Don't paginate here to avoid duplication.
+	return lw.ListFunc(options)
+}
+
+// Watch a set of apiserver resources
+func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
+	return lw.WatchFunc(options)
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go
new file mode 100644
index 000000000..c6f953d8e
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"strconv"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilcache "k8s.io/apimachinery/pkg/util/cache"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// MutationCache is able to take the result of update operations and store them in an LRU
+// that can be used to provide a more current view of a requested object. It requires interpreting
+// resourceVersions for comparisons.
+// Implementations must be thread-safe.
+// TODO find a way to layer this into an informer/lister
+type MutationCache interface {
+	GetByKey(key string) (interface{}, bool, error)
+	ByIndex(indexName, indexKey string) ([]interface{}, error)
+	Mutation(interface{})
+}
+
+// ResourceVersionComparator is able to compare object versions.
+type ResourceVersionComparator interface {
+	CompareResourceVersion(lhs, rhs runtime.Object) int
+}
+
+// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to
+// deal with objects that have a resource version that:
+//
+// - is an integer
+// - increases when updated
+// - is comparable across the same resource in a namespace
+//
+// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
+// remains in the mutation cache before it is removed.
+//
+// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
+// in the underlying store. This is only safe if your use of the cache can handle mutation entries
+// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
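+//
+// A minimal usage sketch (variable names are illustrative, not part of the API):
+//
+//	mc := NewIntegerResourceVersionMutationCache(store, indexer, time.Minute, false)
+//	mc.Mutation(justWrittenObj)                // record an object we just wrote
+//	obj, exists, err := mc.GetByKey("ns/name") // returns the newer of store and cache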
+func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
+	return &mutationCache{
+		backingCache:  backingCache,
+		indexer:       indexer,
+		mutationCache: utilcache.NewLRUExpireCache(100),
+		comparator:    etcdObjectVersioner{},
+		ttl:           ttl,
+		includeAdds:   includeAdds,
+	}
+}
+
+// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out;
+// because you can't distinguish between "didn't observe create" and "was deleted after create",
+// if the key is missing from the backing cache we always report it as missing.
+type mutationCache struct {
+	lock          sync.Mutex
+	backingCache  Store
+	indexer       Indexer
+	mutationCache *utilcache.LRUExpireCache
+	includeAdds   bool
+	ttl           time.Duration
+
+	comparator ResourceVersionComparator
+}
+
+// GetByKey is never guaranteed to return the value set in Mutation. It could be paged out, it could
+// be older than another copy, the backingCache may be more recent, or you might have written twice into the same key.
+// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache.
+func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	obj, exists, err := c.backingCache.GetByKey(key)
+	if err != nil {
+		return nil, false, err
+	}
+	if !exists {
+		if !c.includeAdds {
+			// we can't distinguish between "didn't observe create" and "was deleted after create", so
+			// if the key is missing, we always return it as missing
+			return nil, false, nil
+		}
+		obj, exists = c.mutationCache.Get(key)
+		if !exists {
+			return nil, false, nil
+		}
+	}
+	objRuntime, ok := obj.(runtime.Object)
+	if !ok {
+		return obj, true, nil
+	}
+	return c.newerObject(key, objRuntime), true, nil
+}
+
+// ByIndex returns the newer objects that match the provided index and indexer key.
+// Will return an error if no indexer was provided.
+func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.indexer == nil {
+		return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
+	}
+	keys, err := c.indexer.IndexKeys(name, indexKey)
+	if err != nil {
+		return nil, err
+	}
+	var items []interface{}
+	keySet := sets.NewString()
+	for _, key := range keys {
+		keySet.Insert(key)
+		obj, exists, err := c.indexer.GetByKey(key)
+		if err != nil {
+			return nil, err
+		}
+		if !exists {
+			continue
+		}
+		if objRuntime, ok := obj.(runtime.Object); ok {
+			items = append(items, c.newerObject(key, objRuntime))
+		} else {
+			items = append(items, obj)
+		}
+	}
+
+	if c.includeAdds {
+		fn := c.indexer.GetIndexers()[name]
+		// Keys() returns the keys oldest to newest, so a full traversal does not alter the LRU behavior
+		for _, key := range c.mutationCache.Keys() {
+			updated, ok := c.mutationCache.Get(key)
+			if !ok {
+				continue
+			}
+			if keySet.Has(key.(string)) {
+				continue
+			}
+			elements, err := fn(updated)
+			if err != nil {
+				klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
+				continue
+			}
+			for _, inIndex := range elements {
+				if inIndex != indexKey {
+					continue
+				}
+				items = append(items, updated)
+				break
+			}
+		}
+	}
+
+	return items, nil
+}
+
+// newerObject checks the mutation cache for a newer object and returns one if found. If the
+// mutated object is older than the backing object, it is removed from the cache. Must be
+// called while the lock is held.
+func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
+	mutatedObj, exists := c.mutationCache.Get(key)
+	if !exists {
+		return backing
+	}
+	mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
+	if !ok {
+		return backing
+	}
+	if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
+		c.mutationCache.Remove(key)
+		return backing
+	}
+	return mutatedObjRuntime
+}
+
+// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
+// copy. If you call Mutation twice with the same object on different threads, one will win, but it's not defined
+// which one. This doesn't affect correctness, since the GetByKey guarantee of "later of these two caches" is
+// preserved, but you may not get the version of the object you want. The object you get is only guaranteed to be
+// "one that was valid at some point in time", not "the one that I want".
+func (c *mutationCache) Mutation(obj interface{}) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
+	if err != nil {
+		// this is a "nice to have", so failures shouldn't do anything weird
+		utilruntime.HandleError(err)
+		return
+	}
+
+	if objRuntime, ok := obj.(runtime.Object); ok {
+		if mutatedObj, exists := c.mutationCache.Get(key); exists {
+			if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok {
+				if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 {
+					return
+				}
+			}
+		}
+	}
+	c.mutationCache.Add(key, obj, c.ttl)
+}
+
+// etcdObjectVersioner implements versioning and extracting etcd node information
+// for objects that have an embedded ObjectMeta or ListMeta field.
+type etcdObjectVersioner struct{}
+
+// ObjectResourceVersion implements Versioner
+func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return 0, err
+	}
+	version := accessor.GetResourceVersion()
+	if len(version) == 0 {
+		return 0, nil
+	}
+	return strconv.ParseUint(version, 10, 64)
+}
+
+// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings,
+// but etcd resource versions are special: they're actually ints, so we can easily compare them.
+func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int {
+	lhsVersion, err := a.ObjectResourceVersion(lhs)
+	if err != nil {
+		// coder error
+		panic(err)
+	}
+	rhsVersion, err := a.ObjectResourceVersion(rhs)
+	if err != nil {
+		// coder error
+		panic(err)
+	}
+
+	if lhsVersion == rhsVersion {
+		return 0
+	}
+	if lhsVersion < rhsVersion {
+		return -1
+	}
+
+	return 1
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
new file mode 100644
index 000000000..b37537cbd
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cache + +import ( + "fmt" + "os" + "reflect" + "strconv" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +var mutationDetectionEnabled = false + +func init() { + mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) +} + +// MutationDetector is able to monitor objects for mutation within a limited window of time +type MutationDetector interface { + // AddObject adds the given object to the set being monitored for a while from now + AddObject(obj interface{}) + + // Run starts the monitoring and does not return until the monitoring is stopped. + Run(stopCh <-chan struct{}) +} + +// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector. +func NewCacheMutationDetector(name string) MutationDetector { + if !mutationDetectionEnabled { + return dummyMutationDetector{} + } + klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") + return &defaultCacheMutationDetector{name: name, period: 1 * time.Second, retainDuration: 2 * time.Minute} +} + +type dummyMutationDetector struct{} + +func (dummyMutationDetector) Run(stopCh <-chan struct{}) { +} +func (dummyMutationDetector) AddObject(obj interface{}) { +} + +// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated +// It has a list of cached objects and their copies. I haven't thought of a way +// to see WHO is mutating it, just that it's getting mutated. +type defaultCacheMutationDetector struct { + name string + period time.Duration + + // compareLock ensures only a single call to CompareObjects runs at a time + compareObjectsLock sync.Mutex + + // addLock guards addedObjs between AddObject and CompareObjects + addedObjsLock sync.Mutex + addedObjs []cacheObj + + cachedObjs []cacheObj + + retainDuration time.Duration + lastRotated time.Time + retainedCachedObjs []cacheObj + + // failureFunc is injectable for unit testing. If you don't have it, the process will panic. + // This panic is intentional, since turning on this detection indicates you want a strong + // failure signal. This failure is effectively a p0 bug and you can't trust process results + // after a mutation anyway. + failureFunc func(message string) +} + +// cacheObj holds the actual object and a copy +type cacheObj struct { + cached interface{} + copied interface{} +} + +func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { + // we DON'T want protection from panics. If we're running this code, we want to die + for { + if d.lastRotated.IsZero() { + d.lastRotated = time.Now() + } else if time.Since(d.lastRotated) > d.retainDuration { + d.retainedCachedObjs = d.cachedObjs + d.cachedObjs = nil + d.lastRotated = time.Now() + } + + d.CompareObjects() + + select { + case <-stopCh: + return + case <-time.After(d.period): + } + } +} + +// AddObject makes a deep copy of the object for later comparison. 
It only works on runtime.Object +// but that covers the vast majority of our cached objects +func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { + if _, ok := obj.(DeletedFinalStateUnknown); ok { + return + } + if obj, ok := obj.(runtime.Object); ok { + copiedObj := obj.DeepCopyObject() + + d.addedObjsLock.Lock() + defer d.addedObjsLock.Unlock() + d.addedObjs = append(d.addedObjs, cacheObj{cached: obj, copied: copiedObj}) + } +} + +func (d *defaultCacheMutationDetector) CompareObjects() { + d.compareObjectsLock.Lock() + defer d.compareObjectsLock.Unlock() + + // move addedObjs into cachedObjs under lock + // this keeps the critical section small to avoid blocking AddObject while we compare cachedObjs + d.addedObjsLock.Lock() + d.cachedObjs = append(d.cachedObjs, d.addedObjs...) + d.addedObjs = nil + d.addedObjsLock.Unlock() + + altered := false + for i, obj := range d.cachedObjs { + if !reflect.DeepEqual(obj.cached, obj.copied) { + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) + altered = true + } + } + for i, obj := range d.retainedCachedObjs { + if !reflect.DeepEqual(obj.cached, obj.copied) { + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) + altered = true + } + } + + if altered { + msg := fmt.Sprintf("cache %s modified", d.name) + if d.failureFunc != nil { + d.failureFunc(msg) + return + } + panic(msg) + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/object-names.go b/vendor/k8s.io/client-go/tools/cache/object-names.go new file mode 100644 index 000000000..aa8dbb199 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/object-names.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "k8s.io/apimachinery/pkg/types" +) + +// ObjectName is a reference to an object of some implicit kind +type ObjectName struct { + Namespace string + Name string +} + +// NewObjectName constructs a new one +func NewObjectName(namespace, name string) ObjectName { + return ObjectName{Namespace: namespace, Name: name} +} + +// Parts is the inverse of the constructor +func (objName ObjectName) Parts() (namespace, name string) { + return objName.Namespace, objName.Name +} + +// String returns the standard string encoding, +// which is designed to match the historical behavior of MetaNamespaceKeyFunc. +// Note this behavior is different from the String method of types.NamespacedName. 
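+//
+// For example (illustrative values):
+//
+//	NewObjectName("kube-system", "coredns").String() // "kube-system/coredns"
+//	NewObjectName("", "node-1").String()             // "node-1"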
+func (objName ObjectName) String() string { + if len(objName.Namespace) > 0 { + return objName.Namespace + "/" + objName.Name + } + return objName.Name +} + +// ParseObjectName tries to parse the standard encoding +func ParseObjectName(str string) (ObjectName, error) { + var objName ObjectName + var err error + objName.Namespace, objName.Name, err = SplitMetaNamespaceKey(str) + return objName, err +} + +// NamespacedNameAsObjectName rebrands the given NamespacedName as an ObjectName +func NamespacedNameAsObjectName(nn types.NamespacedName) ObjectName { + return NewObjectName(nn.Namespace, nn.Name) +} + +// AsNamespacedName rebrands as a NamespacedName +func (objName ObjectName) AsNamespacedName() types.NamespacedName { + return types.NamespacedName{Namespace: objName.Namespace, Name: objName.Name} +} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go new file mode 100644 index 000000000..c1ea13de5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -0,0 +1,912 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "errors" + "fmt" + "io" + "math/rand" + "os" + "reflect" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/naming" + utilnet "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/pager" + "k8s.io/klog/v2" + "k8s.io/utils/clock" + "k8s.io/utils/pointer" + "k8s.io/utils/trace" +) + +const defaultExpectedTypeName = "" + +// Reflector watches a specified resource and causes all changes to be reflected in the given store. +type Reflector struct { + // name identifies this reflector. By default it will be a file:line if possible. + name string + // The name of the type we expect to place in the store. The name + // will be the stringification of expectedGVK if provided, and the + // stringification of expectedType otherwise. It is for display + // only, and should not be used for parsing or comparison. + typeDescription string + // An example object of the type we expect to place in the store. + // Only the type needs to be right, except that when that is + // `unstructured.Unstructured` the object's `"apiVersion"` and + // `"kind"` must also be right. + expectedType reflect.Type + // The GVK of the object we expect to place in the store if unstructured. + expectedGVK *schema.GroupVersionKind + // The destination to sync up with the watch source + store Store + // listerWatcher is used to perform lists and watches. 
+	listerWatcher ListerWatcher
+	// backoff manages backoff of ListWatch
+	backoffManager wait.BackoffManager
+	resyncPeriod   time.Duration
+	// clock allows tests to manipulate time
+	clock clock.Clock
+	// paginatedResult defines whether pagination should be forced for list calls.
+	// It is set based on the result of the initial list call.
+	paginatedResult bool
+	// lastSyncResourceVersion is the resource version token last
+	// observed when doing a sync with the underlying store
+	// it is thread safe, but not synchronized with the underlying store
+	lastSyncResourceVersion string
+	// isLastSyncResourceVersionUnavailable is true if the previous list or watch request with
+	// lastSyncResourceVersion failed with an "expired" or "too large resource version" error.
+	isLastSyncResourceVersionUnavailable bool
+	// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
+	lastSyncResourceVersionMutex sync.RWMutex
+	// Called whenever the ListAndWatch drops the connection with an error.
+	watchErrorHandler WatchErrorHandler
+	// WatchListPageSize is the requested chunk size of initial and resync watch lists.
+	// If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data
+	// (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0")
+	// it will turn off pagination to allow serving them from watch cache.
+	// NOTE: It should be used carefully as paginated lists are always served directly from
+	// etcd, which is significantly less efficient and may lead to serious performance and
+	// scalability problems.
+	WatchListPageSize int64
+	// ShouldResync is invoked periodically, and whenever it returns `true` the Store's Resync operation is invoked.
+	ShouldResync func() bool
+	// MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch.
+	MaxInternalErrorRetryDuration time.Duration
+	// UseWatchList, if turned on, instructs the reflector to open a stream to bring data from the API server.
+	// Streaming has the primary advantage of using fewer server resources to fetch data.
+	//
+	// The old behaviour establishes a LIST request which gets data in chunks.
+	// Paginated list is less efficient and, depending on the actual size of objects,
+	// might result in an increased memory consumption of the APIServer.
+	//
+	// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details
+	UseWatchList bool
+}
+
+// ResourceVersionUpdater is an interface that allows store implementation to
+// track the current resource version of the reflector. This is especially
+// important if storage bookmarks are enabled.
+type ResourceVersionUpdater interface {
+	// UpdateResourceVersion is called each time current resource version of the reflector
+	// is updated.
+	UpdateResourceVersion(resourceVersion string)
+}
+
+// The WatchErrorHandler is called whenever ListAndWatch drops the
+// connection with an error. After calling this handler, the informer
+// will backoff and retry.
+//
+// The default implementation looks at the error type and tries to log
+// the error message at an appropriate level.
+//
+// Implementations of this handler may display the error message in other
+// ways. Implementations should return quickly - any expensive processing
+// should be offloaded.
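+//
+// A sketch of a handler that counts failures before delegating to the default
+// behavior (the counter is hypothetical):
+//
+//	var watchFailures atomic.Int64
+//	handler := WatchErrorHandler(func(r *Reflector, err error) {
+//		watchFailures.Add(1)
+//		DefaultWatchErrorHandler(r, err)
+//	})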
+type WatchErrorHandler func(r *Reflector, err error)
+
+// DefaultWatchErrorHandler is the default implementation of WatchErrorHandler
+func DefaultWatchErrorHandler(r *Reflector, err error) {
+	switch {
+	case isExpiredError(err):
+		// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
+		// has a semantic that it returns data at least as fresh as provided RV.
+		// So first try to LIST with setting RV to resource version of last observed object.
+		klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err)
+	case err == io.EOF:
+		// watch closed normally
+	case err == io.ErrUnexpectedEOF:
+		klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err)
+	default:
+		utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err))
+	}
+}
+
+var (
+	// We try to spread the load on apiserver by setting timeouts for
+	// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
+	minWatchTimeout = 5 * time.Minute
+)
+
+// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector.
+// The indexer is configured to key on namespace.
+func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
+	indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
+	reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
+	return indexer, reflector
+}
+
+// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack
+// that is outside this package. See NewReflectorWithOptions for further information.
+func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+	return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod})
+}
+
+// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further
+// information.
+func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+	return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod})
+}
+
+// ReflectorOptions configures a Reflector.
+type ReflectorOptions struct {
+	// Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line
+	// in the call stack that is outside this package.
+	Name string
+
+	// TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted
+	// using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is
+	// "". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields
+	// are set, the type description is the string encoding of those. Otherwise, the type description is set to the
+	// Go type of expectedType.
+	TypeDescription string
+
+	// ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0
+	// (do not resync).
+	ResyncPeriod time.Duration
+
+	// Clock allows tests to control time. If unset, it defaults to clock.RealClock{}.
+	Clock clock.Clock
+}
+
+// NewReflectorWithOptions creates a new Reflector object which will keep the
+// given store up to date with the server's contents for the given
+// resource. Reflector promises to only put things in the store that
+// have the type of expectedType, unless expectedType is nil. If
+// resyncPeriod is non-zero, then the reflector will periodically
+// consult its ShouldResync function to determine whether to invoke
+// the Store's Resync operation; `ShouldResync==nil` means always
+// "yes". This enables you to use reflectors to periodically process
+// everything as well as incrementally processing the things that
+// change.
+func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector {
+	reflectorClock := options.Clock
+	if reflectorClock == nil {
+		reflectorClock = clock.RealClock{}
+	}
+	r := &Reflector{
+		name:            options.Name,
+		resyncPeriod:    options.ResyncPeriod,
+		typeDescription: options.TypeDescription,
+		listerWatcher:   lw,
+		store:           store,
+		// We used to make the call every 1sec (1 QPS); the goal here is to achieve ~98% traffic reduction when
+		// the API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
+		// 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
+		backoffManager:    wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
+		clock:             reflectorClock,
+		watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler),
+		expectedType:      reflect.TypeOf(expectedType),
+	}
+
+	if r.name == "" {
+		r.name = naming.GetNameFromCallsite(internalPackages...)
+	}
+
+	if r.typeDescription == "" {
+		r.typeDescription = getTypeDescriptionFromObject(expectedType)
+	}
+
+	if r.expectedGVK == nil {
+		r.expectedGVK = getExpectedGVKFromObject(expectedType)
+	}
+
+	if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 {
+		r.UseWatchList = true
+	}
+
+	return r
+}
+
+func getTypeDescriptionFromObject(expectedType interface{}) string {
+	if expectedType == nil {
+		return defaultExpectedTypeName
+	}
+
+	reflectDescription := reflect.TypeOf(expectedType).String()
+
+	obj, ok := expectedType.(*unstructured.Unstructured)
+	if !ok {
+		return reflectDescription
+	}
+
+	gvk := obj.GroupVersionKind()
+	if gvk.Empty() {
+		return reflectDescription
+	}
+
+	return gvk.String()
+}
+
+func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind {
+	obj, ok := expectedType.(*unstructured.Unstructured)
+	if !ok {
+		return nil
+	}
+
+	gvk := obj.GroupVersionKind()
+	if gvk.Empty() {
+		return nil
+	}
+
+	return &gvk
+}
+
+// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
+// call chains to NewReflector, so they'd be low entropy names for reflectors
+var internalPackages = []string{"client-go/tools/cache/"}
+
+// Run repeatedly uses the reflector's ListAndWatch to fetch all the
+// objects and subsequent deltas.
+// Run will exit when stopCh is closed.
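+//
+// A minimal usage sketch (assumes lw is a ListerWatcher for v1.Pod, with
+// k8s.io/api/core/v1 imported; names are illustrative):
+//
+//	store := NewStore(MetaNamespaceKeyFunc)
+//	r := NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
+//	stopCh := make(chan struct{})
+//	go r.Run(stopCh)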
+func (r *Reflector) Run(stopCh <-chan struct{}) {
+	klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name)
+	wait.BackoffUntil(func() {
+		if err := r.ListAndWatch(stopCh); err != nil {
+			r.watchErrorHandler(r, err)
+		}
+	}, r.backoffManager, true, stopCh)
+	klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name)
+}
+
+var (
+	// nothing will ever be sent down this channel
+	neverExitWatch <-chan time.Time = make(chan time.Time)
+
+	// Used to indicate that watching stopped because of a signal from the stop
+	// channel passed in from a client of the reflector.
+	errorStopRequested = errors.New("stop requested")
+)
+
+// resyncChan returns a channel which will receive something when a resync is
+// required, and a cleanup function.
+func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
+	if r.resyncPeriod == 0 {
+		return neverExitWatch, func() bool { return false }
+	}
+	// The cleanup function is required: imagine the scenario where watches
+	// always fail so we end up listing frequently. Then, if we don't
+	// manually stop the timer, we could end up with many timers active
+	// concurrently.
+	t := r.clock.NewTimer(r.resyncPeriod)
+	return t.C(), t.Stop
+}
+
+// ListAndWatch first lists all items and gets the resource version at the moment of the call,
+// and then uses the resource version to watch.
+// It returns an error if ListAndWatch didn't even try to initialize the watch.
+func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
+	klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name)
+	var err error
+	var w watch.Interface
+	fallbackToList := !r.UseWatchList
+
+	if r.UseWatchList {
+		w, err = r.watchList(stopCh)
+		if w == nil && err == nil {
+			// stopCh was closed
+			return nil
+		}
+		if err != nil {
+			klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err)
+			fallbackToList = true
+			// ensure that we won't accidentally pass some garbage down the watch.
+			w = nil
+		}
+	}
+
+	if fallbackToList {
+		err = r.list(stopCh)
+		if err != nil {
+			return err
+		}
+	}
+
+	klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name)
+
+	resyncerrc := make(chan error, 1)
+	cancelCh := make(chan struct{})
+	defer close(cancelCh)
+	go r.startResync(stopCh, cancelCh, resyncerrc)
+	return r.watch(w, stopCh, resyncerrc)
+}
+
+// startResync periodically calls r.store.Resync() method.
+// Note that this method is blocking and should be
+// called in a separate goroutine.
+func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) {
+	resyncCh, cleanup := r.resyncChan()
+	defer func() {
+		cleanup() // Call the last one written into cleanup
+	}()
+	for {
+		select {
+		case <-resyncCh:
+		case <-stopCh:
+			return
+		case <-cancelCh:
+			return
+		}
+		if r.ShouldResync == nil || r.ShouldResync() {
+			klog.V(4).Infof("%s: forcing resync", r.name)
+			if err := r.store.Resync(); err != nil {
+				resyncerrc <- err
+				return
+			}
+		}
+		cleanup()
+		resyncCh, cleanup = r.resyncChan()
+	}
+}
+
+// watch simply starts a watch request with the server.
+func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error {
+	var err error
+	retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock)
+
+	for {
+		// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
+		select {
+		case <-stopCh:
+			// we can only end up here when the stopCh
+			// was closed after a successful watchlist or list request
+			if w != nil {
+				w.Stop()
+			}
+			return nil
+		default:
+		}
+
+		// start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent
+		start := r.clock.Now()
+
+		if w == nil {
+			timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
+			options := metav1.ListOptions{
+				ResourceVersion: r.LastSyncResourceVersion(),
+				// We want to avoid situations of hanging watchers. Stop any watchers that do not
+				// receive any events within the timeout window.
+				TimeoutSeconds: &timeoutSeconds,
+				// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
+				// Reflector doesn't assume bookmarks are returned at all (if the server does not support
+				// watch bookmarks, it will ignore this field).
+				AllowWatchBookmarks: true,
+			}
+
+			w, err = r.listerWatcher.Watch(options)
+			if err != nil {
+				if canRetry := isWatchErrorRetriable(err); canRetry {
+					klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err)
+					select {
+					case <-stopCh:
+						return nil
+					case <-r.backoffManager.Backoff().C():
+						continue
+					}
+				}
+				return err
+			}
+		}
+
+		err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh)
+		// Ensure that watch will not be reused across iterations.
+		w.Stop()
+		w = nil
+		retry.After(err)
+		if err != nil {
+			if err != errorStopRequested {
+				switch {
+				case isExpiredError(err):
+					// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
+					// has a semantic that it returns data at least as fresh as provided RV.
+					// So first try to LIST with setting RV to resource version of last observed object.
+					klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err)
+				case apierrors.IsTooManyRequests(err):
+					klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription)
+					select {
+					case <-stopCh:
+						return nil
+					case <-r.backoffManager.Backoff().C():
+						continue
+					}
+				case apierrors.IsInternalError(err) && retry.ShouldRetry():
+					klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err)
+					continue
+				default:
+					klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err)
+				}
+			}
+			return nil
+		}
+	}
+}
+
+// list simply lists all items and records a resource version obtained from the server at the moment of the call.
+// The resource version can be used for further progress notification (a.k.a. watch).
+func (r *Reflector) list(stopCh <-chan struct{}) error {
+	var resourceVersion string
+	options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
+
+	initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name})
+	defer initTrace.LogIfLong(10 * time.Second)
+	var list runtime.Object
+	var paginatedResult bool
+	var err error
+	listCh := make(chan struct{}, 1)
+	panicCh := make(chan interface{}, 1)
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				panicCh <- r
+			}
+		}()
+		// Attempt to gather the list in chunks if supported by listerWatcher; if not, the first
+		// list request will return the full response.
+		pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
+			return r.listerWatcher.List(opts)
+		}))
+		switch {
+		case r.WatchListPageSize != 0:
+			pager.PageSize = r.WatchListPageSize
+		case r.paginatedResult:
+			// We got a paginated result initially. Assume this resource and server honor
+			// paging requests (i.e. watch cache is probably disabled) and leave the default
+			// pager size set.
+		case options.ResourceVersion != "" && options.ResourceVersion != "0":
+			// User didn't explicitly request pagination.
+			//
+			// With ResourceVersion != "", we have a possibility to list from watch cache,
+			// but we do that (for ResourceVersion != "0") only if Limit is unset.
+			// To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
+			// switch off pagination to force listing from watch cache (if enabled).
+			// With the existing semantic of RV (result is at least as fresh as provided RV),
+			// this is correct and doesn't lead to going back in time.
+			//
+			// We also don't turn off pagination for ResourceVersion="0", since watch cache
+			// is ignoring Limit in that case anyway, and if watch cache is not enabled
+			// we don't introduce regression.
+			pager.PageSize = 0
+		}
+
+		list, paginatedResult, err = pager.ListWithAlloc(context.Background(), options)
+		if isExpiredError(err) || isTooLargeResourceVersionError(err) {
+			r.setIsLastSyncResourceVersionUnavailable(true)
+			// Retry immediately if the resource version used to list is unavailable.
+			// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
+			// continuation pages, but the pager might not be enabled, the full list might fail because the
+			// resource version it is listing at is expired or the cache may not yet be synced to the provided
+			// resource version. So we need to fall back to resourceVersion="" in all such cases to recover
+			// and ensure the reflector makes forward progress.
+			list, paginatedResult, err = pager.ListWithAlloc(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
+		}
+		close(listCh)
+	}()
+	select {
+	case <-stopCh:
+		return nil
+	case r := <-panicCh:
+		panic(r)
+	case <-listCh:
+	}
+	initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
+	if err != nil {
+		klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err)
+		return fmt.Errorf("failed to list %v: %w", r.typeDescription, err)
+	}
+
+	// We check if the list was paginated and if so set the paginatedResult based on that.
+	// However, we want to do that only for the initial list (which is the only case
+	// when we set ResourceVersion="0"). The reasoning behind it is that later, in some
+	// situations we may force listing directly from etcd (by setting ResourceVersion="")
+	// which will return paginated result, even if watch cache is enabled. However, in
+	// that case, we still want to prefer sending requests to watch cache if possible.
+	//
+	// A paginated result returned for a request with ResourceVersion="0" means that the watch
+	// cache is disabled and there are a lot of objects of a given type. In such a case,
+	// there is no need to prefer listing from watch cache.
+	if options.ResourceVersion == "0" && paginatedResult {
+		r.paginatedResult = true
+	}
+
+	r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
+	listMetaInterface, err := meta.ListAccessor(list)
+	if err != nil {
+		return fmt.Errorf("unable to understand list result %#v: %v", list, err)
+	}
+	resourceVersion = listMetaInterface.GetResourceVersion()
+	initTrace.Step("Resource version extracted")
+	items, err := meta.ExtractListWithAlloc(list)
+	if err != nil {
+		return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
+	}
+	initTrace.Step("Objects extracted")
+	if err := r.syncWith(items, resourceVersion); err != nil {
+		return fmt.Errorf("unable to sync list result: %v", err)
+	}
+	initTrace.Step("SyncWith done")
+	r.setLastSyncResourceVersion(resourceVersion)
+	initTrace.Step("Resource version updated")
+	return nil
+}
+
+// watchList establishes a stream to get a consistent snapshot of data
+// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal
+//
+// case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan)
+// Establishes a consistent stream with the server.
+// That means the returned data is consistent, as if served directly from etcd via a quorum read.
+// It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion.
+// It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion.
+// After receiving a "Bookmark" event the reflector is considered to be synchronized.
+// It replaces its internal store with the collected items and
+// reuses the current watch requests for getting further events.
+//
+// case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan)
+// Establishes a stream with the server at the provided resource version.
+// To establish the initial state the server begins with synthetic "Added" events.
+// It ends with a synthetic "Bookmark" event containing the provided or newer resource version.
+// After receiving a "Bookmark" event the reflector is considered to be synchronized.
+// It replaces its internal store with the collected items and
+// reuses the current watch requests for getting further events.
+func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
+	var w watch.Interface
+	var err error
+	var temporaryStore Store
+	var resourceVersion string
+	// TODO(#115478): see if this function could be turned
+	// into a method and see if error handling
+	// could be unified with the r.watch method
+	isErrorRetriableWithSideEffectsFn := func(err error) bool {
+		if canRetry := isWatchErrorRetriable(err); canRetry {
+			klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err)
+			<-r.backoffManager.Backoff().C()
+			return true
+		}
+		if isExpiredError(err) || isTooLargeResourceVersionError(err) {
+			// we tried to re-establish a watch request but the provided RV
+			// has either expired or it is greater than the server knows about.
+ // In that case we reset the RV and + // try to get a consistent snapshot from the watch cache (case 1) + r.setIsLastSyncResourceVersionUnavailable(true) + return true + } + return false + } + + initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + for { + select { + case <-stopCh: + return nil, nil + default: + } + + resourceVersion = "" + lastKnownRV := r.rewatchResourceVersion() + temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + // TODO(#115478): large "list", slow clients, slow network, p&f + // might slow down streaming and eventually fail. + // maybe in such a case we should retry with an increased timeout? + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: lastKnownRV, + AllowWatchBookmarks: true, + SendInitialEvents: pointer.Bool(true), + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + TimeoutSeconds: &timeoutSeconds, + } + start := r.clock.Now() + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + bookmarkReceived := pointer.Bool(false) + err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + func(rv string) { resourceVersion = rv }, + bookmarkReceived, + r.clock, make(chan error), stopCh) + if err != nil { + w.Stop() // stop and retry with clean state + if err == errorStopRequested { + return nil, nil + } + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + if *bookmarkReceived { + break + } + } + // We successfully got initial state from watch-list confirmed by the + // "k8s.io/initial-events-end" bookmark. + initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) + r.setIsLastSyncResourceVersionUnavailable(false) + + // we utilize the temporaryStore to ensure independence from the current store implementation. + // as of today, the store is implemented as a queue and will be drained by the higher-level + // component as soon as it finishes replacing the content. + checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore) + + if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + + return w, nil +} + +// syncWith replaces the store's items with the given list. 
+func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { + found := make([]interface{}, 0, len(items)) + for _, item := range items { + found = append(found, item) + } + return r.store.Replace(found, resourceVersion) +} + +// watchHandler watches w and sets setLastSyncResourceVersion +func watchHandler(start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + exitOnInitialEventsEndBookmark *bool, + clock clock.Clock, + errc chan error, + stopCh <-chan struct{}, +) error { + eventCount := 0 + if exitOnInitialEventsEndBookmark != nil { + // set it to false just in case somebody + // made it positive + *exitOnInitialEventsEndBookmark = false + } + +loop: + for { + select { + case <-stopCh: + return errorStopRequested + case err := <-errc: + return err + case event, ok := <-w.ResultChan(): + if !ok { + break loop + } + if event.Type == watch.Error { + return apierrors.FromObject(event.Object) + } + if expectedType != nil { + if e, a := expectedType, reflect.TypeOf(event.Object); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", name, e, a)) + continue + } + } + if expectedGVK != nil { + if e, a := *expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", name, e, a)) + continue + } + } + meta, err := meta.Accessor(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) + continue + } + resourceVersion := meta.GetResourceVersion() + switch event.Type { + case watch.Added: + err := store.Add(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", name, event.Object, err)) + } + case watch.Modified: + err := store.Update(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", name, event.Object, err)) + } + case watch.Deleted: + // TODO: Will any consumers need access to the "last known + // state", which is passed in event.Object? If so, may need + // to change this. 
+			err := store.Delete(event.Object)
+			if err != nil {
+				utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", name, event.Object, err))
+			}
+		case watch.Bookmark:
+			// A `Bookmark` means watch has synced here; just update the resourceVersion.
+			if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" {
+				if exitOnInitialEventsEndBookmark != nil {
+					*exitOnInitialEventsEndBookmark = true
+				}
+			}
+		default:
+			utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
+		}
+		setLastSyncResourceVersion(resourceVersion)
+		if rvu, ok := store.(ResourceVersionUpdater); ok {
+			rvu.UpdateResourceVersion(resourceVersion)
+		}
+		eventCount++
+		if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark {
+			watchDuration := clock.Since(start)
+			klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration)
+			return nil
+		}
+		}
+	}
+
+	watchDuration := clock.Since(start)
+	if watchDuration < 1*time.Second && eventCount == 0 {
+		return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name)
+	}
+	klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount)
+	return nil
+}
+
+// LastSyncResourceVersion is the resource version observed during the last sync with the underlying store.
+// The value returned is not synchronized with access to the underlying store and is not thread-safe.
+func (r *Reflector) LastSyncResourceVersion() string {
+	r.lastSyncResourceVersionMutex.RLock()
+	defer r.lastSyncResourceVersionMutex.RUnlock()
+	return r.lastSyncResourceVersion
+}
+
+func (r *Reflector) setLastSyncResourceVersion(v string) {
+	r.lastSyncResourceVersionMutex.Lock()
+	defer r.lastSyncResourceVersionMutex.Unlock()
+	r.lastSyncResourceVersion = v
+}
+
+// relistResourceVersion determines the resource version the reflector should list or relist from.
+// Returns either the lastSyncResourceVersion so that this reflector will relist with a resource
+// version no older than what has already been observed in relist results or watch events, or, if the last relist resulted
+// in an HTTP 410 (Gone) status code, returns "" so that the relist will use the latest resource version available in
+// etcd via a quorum read.
+func (r *Reflector) relistResourceVersion() string {
+	r.lastSyncResourceVersionMutex.RLock()
+	defer r.lastSyncResourceVersionMutex.RUnlock()
+
+	if r.isLastSyncResourceVersionUnavailable {
+		// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
+		// if the lastSyncResourceVersion is unavailable, we set ResourceVersion="" and list again to re-establish reflector
+		// to the latest available ResourceVersion, using a consistent read from etcd.
+		return ""
+	}
+	if r.lastSyncResourceVersion == "" {
+		// For performance reasons, initial list performed by reflector uses "0" as resource version to allow it to
+		// be served from the watch cache if it is enabled.
+		return "0"
+	}
+	return r.lastSyncResourceVersion
+}
+
+// rewatchResourceVersion determines the resource version the reflector should start streaming from.
+func (r *Reflector) rewatchResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + if r.isLastSyncResourceVersionUnavailable { + // initial stream should return data at the most recent resource version. + // the returned data must be consistent i.e. as if served from etcd via a quorum read + return "" + } + return r.lastSyncResourceVersion +} + +// setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned +// "expired" or "too large resource version" error. +func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) { + r.lastSyncResourceVersionMutex.Lock() + defer r.lastSyncResourceVersionMutex.Unlock() + r.isLastSyncResourceVersionUnavailable = isUnavailable +} + +func isExpiredError(err error) bool { + // In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and + // apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent + // and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrors.IsGone + // check when we fully drop support for Kubernetes 1.17 servers from reflectors. + return apierrors.IsResourceExpired(err) || apierrors.IsGone(err) +} + +func isTooLargeResourceVersionError(err error) bool { + if apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) { + return true + } + // In Kubernetes 1.17.0-1.18.5, the api server doesn't set the error status cause to + // metav1.CauseTypeResourceVersionTooLarge to indicate that the requested minimum resource + // version is larger than the largest currently available resource version. To ensure backward + // compatibility with these server versions we also need to detect the error based on the content + // of the error message field. + if !apierrors.IsTimeout(err) { + return false + } + apierr, ok := err.(apierrors.APIStatus) + if !ok || apierr == nil || apierr.Status().Details == nil { + return false + } + for _, cause := range apierr.Status().Details.Causes { + // Matches the message returned by api server 1.17.0-1.18.5 for this error condition + if cause.Message == "Too large resource version" { + return true + } + } + + // Matches the message returned by api server before 1.17.0 + if strings.Contains(apierr.Status().Message, "Too large resource version") { + return true + } + + return false +} + +// isWatchErrorRetriable determines if it is safe to retry +// a watch error retrieved from the server. +func isWatchErrorRetriable(err error) bool { + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. + // If that's the case begin exponentially backing off and resend watch request. + // Do the same for "429" errors. + if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { + return true + } + return false +} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go new file mode 100644 index 000000000..aa3027d71 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -0,0 +1,119 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "os" + "sort" + "strconv" + "time" + + "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +var dataConsistencyDetectionEnabled = false + +func init() { + dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) +} + +// checkWatchListConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { + if !dataConsistencyDetectionEnabled { + return + } + checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store) +} + +// checkWatchListConsistency exists solely for testing purposes. +// we cannot use checkWatchListConsistencyIfRequested because +// it is guarded by an environmental variable. +// we cannot manipulate the environmental variable because +// it will affect other tests in this package. 
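+//
+// For reference, the production check above is enabled by setting the
+// environment variable before the process starts, e.g. in a CI job
+// (illustrative only):
+//
+//	KUBE_WATCHLIST_INCONSISTENCY_DETECTOR=true go test ./...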
+func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) {
+	klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity)
+	opts := metav1.ListOptions{
+		ResourceVersion:      lastSyncedResourceVersion,
+		ResourceVersionMatch: metav1.ResourceVersionMatchExact,
+	}
+	var list runtime.Object
+	err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) {
+		list, err = listerWatcher.List(opts)
+		if err != nil {
+			// the consistency check will only be enabled in the CI
+			// and LIST calls in general will be retried by the client-go library
+			// if we fail simply log and retry
+			klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err)
+			return false, nil
+		}
+		return true, nil
+	})
+	if err != nil {
+		klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err)
+		return
+	}
+
+	rawListItems, err := meta.ExtractListWithAlloc(list)
+	if err != nil {
+		panic(err) // this should never happen
+	}
+
+	listItems := toMetaObjectSliceOrDie(rawListItems)
+	storeItems := toMetaObjectSliceOrDie(store.List())
+
+	sort.Sort(byUID(listItems))
+	sort.Sort(byUID(storeItems))
+
+	if !cmp.Equal(listItems, storeItems) {
+		klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems))
+		msg := "data inconsistency detected for the watch-list feature, panicking!"
+		panic(msg)
+	}
+}
+
+type byUID []metav1.Object
+
+func (a byUID) Len() int           { return len(a) }
+func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() }
+func (a byUID) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object {
+	result := make([]metav1.Object, len(s))
+	for i, v := range s {
+		m, err := meta.Accessor(v)
+		if err != nil {
+			panic(err)
+		}
+		result[i] = m
+	}
+	return result
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
new file mode 100644
index 000000000..5c00115f5
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file provides abstractions for setting the provider (e.g., prometheus)
+// of metrics.
+
+package cache
+
+import (
+	"sync"
+)
+
+// GaugeMetric represents a single numerical value that can arbitrarily go up
+// and down.
+type GaugeMetric interface {
+	Set(float64)
+}
+
+// CounterMetric represents a single numerical value that only ever
+// goes up.
+type CounterMetric interface {
+	Inc()
+}
+
+// SummaryMetric captures individual observations.
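+//
+// These metric interfaces are deliberately tiny so that third-party metric
+// types can satisfy them directly. As a hedged sketch (assuming the Prometheus
+// client library, github.com/prometheus/client_golang), a provider could
+// return Prometheus primitives from the MetricsProvider methods defined below,
+// since prometheus.Counter provides Inc(), prometheus.Gauge provides
+// Set(float64), and prometheus.Summary provides Observe(float64):
+//
+//	func (promProvider) NewListsMetric(name string) CounterMetric {
+//		return prometheus.NewCounter(prometheus.CounterOpts{Name: name})
+//	}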
+type SummaryMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Observe(float64) {} +func (noopMetric) Set(float64) {} + +// MetricsProvider generates various metrics used by the reflector. +type MetricsProvider interface { + NewListsMetric(name string) CounterMetric + NewListDurationMetric(name string) SummaryMetric + NewItemsInListMetric(name string) SummaryMetric + + NewWatchesMetric(name string) CounterMetric + NewShortWatchesMetric(name string) CounterMetric + NewWatchDurationMetric(name string) SummaryMetric + NewItemsInWatchMetric(name string) SummaryMetric + + NewLastResourceVersionMetric(name string) GaugeMetric +} + +type noopMetricsProvider struct{} + +func (noopMetricsProvider) NewListsMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric { + return noopMetric{} +} + +var metricsFactory = struct { + metricsProvider MetricsProvider + setProviders sync.Once +}{ + metricsProvider: noopMetricsProvider{}, +} + +// SetReflectorMetricsProvider sets the metrics provider +func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { + metricsFactory.setProviders.Do(func() { + metricsFactory.metricsProvider = metricsProvider + }) +} diff --git a/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go b/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go new file mode 100644 index 000000000..8201fb153 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "k8s.io/utils/clock" + "time" +) + +type RetryWithDeadline interface { + After(error) + ShouldRetry() bool +} + +type retryWithDeadlineImpl struct { + firstErrorTime time.Time + lastErrorTime time.Time + maxRetryDuration time.Duration + minResetPeriod time.Duration + isRetryable func(error) bool + clock clock.Clock +} + +func NewRetryWithDeadline(maxRetryDuration, minResetPeriod time.Duration, isRetryable func(error) bool, clock clock.Clock) RetryWithDeadline { + return &retryWithDeadlineImpl{ + firstErrorTime: time.Time{}, + lastErrorTime: time.Time{}, + maxRetryDuration: maxRetryDuration, + minResetPeriod: minResetPeriod, + isRetryable: isRetryable, + clock: clock, + } +} + +func (r *retryWithDeadlineImpl) reset() { + r.firstErrorTime = time.Time{} + r.lastErrorTime = time.Time{} +} + +func (r *retryWithDeadlineImpl) After(err error) { + if r.isRetryable(err) { + if r.clock.Now().Sub(r.lastErrorTime) >= r.minResetPeriod { + r.reset() + } + + if r.firstErrorTime.IsZero() { + r.firstErrorTime = r.clock.Now() + } + r.lastErrorTime = r.clock.Now() + } +} + +func (r *retryWithDeadlineImpl) ShouldRetry() bool { + if r.maxRetryDuration <= time.Duration(0) { + return false + } + + if r.clock.Now().Sub(r.firstErrorTime) <= r.maxRetryDuration { + return true + } + + r.reset() + return false +} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go new file mode 100644 index 000000000..b3f37431d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -0,0 +1,1012 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache/synctrack" + "k8s.io/utils/buffer" + "k8s.io/utils/clock" + + "k8s.io/klog/v2" +) + +// SharedInformer provides eventually consistent linkage of its +// clients to the authoritative state of a given collection of +// objects. An object is identified by its API group, kind/resource, +// namespace (if any), and name; the `ObjectMeta.UID` is not part of +// an object's ID as far as this contract is concerned. One +// SharedInformer provides linkage to objects of a particular API +// group and kind/resource. The linked object collection of a +// SharedInformer may be further restricted to one namespace (if +// applicable) and/or by label selector and/or field selector. +// +// The authoritative state of an object is what apiservers provide +// access to, and an object goes through a strict sequence of states. +// An object state is either (1) present with a ResourceVersion and +// other appropriate content or (2) "absent". 
+// +// A SharedInformer maintains a local cache --- exposed by GetStore(), +// by GetIndexer() in the case of an indexed informer, and possibly by +// machinery involved in creating and/or accessing the informer --- of +// the state of each relevant object. This cache is eventually +// consistent with the authoritative state. This means that, unless +// prevented by persistent communication problems, if ever a +// particular object ID X is authoritatively associated with a state S +// then for every SharedInformer I whose collection includes (X, S) +// eventually either (1) I's cache associates X with S or a later +// state of X, (2) I is stopped, or (3) the authoritative state +// service for X terminates. To be formally complete, we say that the +// absent state meets any restriction by label selector or field +// selector. +// +// For a given informer and relevant object ID X, the sequence of +// states that appears in the informer's cache is a subsequence of the +// states authoritatively associated with X. That is, some states +// might never appear in the cache but ordering among the appearing +// states is correct. Note, however, that there is no promise about +// ordering between states seen for different objects. +// +// The local cache starts out empty, and gets populated and updated +// during `Run()`. +// +// As a simple example, if a collection of objects is henceforth +// unchanging, a SharedInformer is created that links to that +// collection, and that SharedInformer is `Run()` then that +// SharedInformer's cache eventually holds an exact copy of that +// collection (unless it is stopped too soon, the authoritative state +// service ends, or communication problems between the two +// persistently thwart achievement). +// +// As another simple example, if the local cache ever holds a +// non-absent state for some object ID and the object is eventually +// removed from the authoritative state then eventually the object is +// removed from the local cache (unless the SharedInformer is stopped +// too soon, the authoritative state service ends, or communication +// problems persistently thwart the desired result). +// +// The keys in the Store are of the form namespace/name for namespaced +// objects, and are simply the name for non-namespaced objects. +// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for +// a given object, and `SplitMetaNamespaceKey(key)` to split a key +// into its constituent parts. +// +// Every query against the local cache is answered entirely from one +// snapshot of the cache's state. Thus, the result of a `List` call +// will not contain two entries with the same namespace and name. +// +// A client is identified here by a ResourceEventHandler. For every +// update to the SharedInformer's local cache and for every client +// added before `Run()`, eventually either the SharedInformer is +// stopped or the client is notified of the update. A client added +// after `Run()` starts gets a startup batch of notifications of +// additions of the objects existing in the cache at the time that +// client was added; also, for every update to the SharedInformer's +// local cache after that client was added, eventually either the +// SharedInformer is stopped or that client is notified of that +// update. Client notifications happen after the corresponding cache +// update and, in the case of a SharedIndexInformer, after the +// corresponding index updates. 
It is possible that additional cache +// and index updates happen before such a prescribed notification. +// For a given SharedInformer and client, the notifications are +// delivered sequentially. For a given SharedInformer, client, and +// object ID, the notifications are delivered in order. Because +// `ObjectMeta.UID` has no role in identifying objects, it is possible +// that when (1) object O1 with ID (e.g. namespace and name) X and +// `ObjectMeta.UID` U1 in the SharedInformer's local cache is deleted +// and later (2) another object O2 with ID X and ObjectMeta.UID U2 is +// created the informer's clients are not notified of (1) and (2) but +// rather are notified only of an update from O1 to O2. Clients that +// need to detect such cases might do so by comparing the `ObjectMeta.UID` +// field of the old and the new object in the code that handles update +// notifications (i.e. `OnUpdate` method of ResourceEventHandler). +// +// A client must process each notification promptly; a SharedInformer +// is not engineered to deal well with a large backlog of +// notifications to deliver. Lengthy processing should be passed off +// to something else, for example through a +// `client-go/util/workqueue`. +// +// A delete notification exposes the last locally known non-absent +// state, except that its ResourceVersion is replaced with a +// ResourceVersion in which the object is actually absent. +type SharedInformer interface { + // AddEventHandler adds an event handler to the shared informer using + // the shared informer's resync period. Events to a single handler are + // delivered sequentially, but there is no coordination between + // different handlers. + // It returns a registration handle for the handler that can be used to + // remove the handler again, or to tell if the handler is synced (has + // seen every item in the initial list). + AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) + // AddEventHandlerWithResyncPeriod adds an event handler to the + // shared informer with the requested resync period; zero means + // this handler does not care about resyncs. The resync operation + // consists of delivering to the handler an update notification + // for every object in the informer's local cache; it does not add + // any interactions with the authoritative storage. Some + // informers do no resyncs at all, not even for handlers added + // with a non-zero resyncPeriod. For an informer that does + // resyncs, and for each handler that requests resyncs, that + // informer develops a nominal resync period that is no shorter + // than the requested period but may be longer. The actual time + // between any two resyncs may be longer than the nominal period + // because the implementation takes time to do work and there may + // be competing load and scheduling noise. + // It returns a registration handle for the handler that can be used to remove + // the handler again and an error if the handler cannot be added. + AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) + // RemoveEventHandler removes a formerly added event handler given by + // its registration handle. + // This function is guaranteed to be idempotent, and thread-safe. + RemoveEventHandler(handle ResourceEventHandlerRegistration) error + // GetStore returns the informer's local cache as a Store. 
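+	//
+	// A minimal usage sketch (illustrative; `informer` and `stopCh` are
+	// assumed to be created elsewhere):
+	//
+	//	go informer.Run(stopCh)
+	//	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
+	//		return
+	//	}
+	//	obj, exists, err := informer.GetStore().GetByKey("kube-system/etcd-main")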
+ GetStore() Store + // GetController is deprecated, it does nothing useful + GetController() Controller + // Run starts and runs the shared informer, returning after it stops. + // The informer will be stopped when stopCh is closed. + Run(stopCh <-chan struct{}) + // HasSynced returns true if the shared informer's store has been + // informed by at least one full LIST of the authoritative state + // of the informer's object collection. This is unrelated to "resync". + // + // Note that this doesn't tell you if an individual handler is synced!! + // For that, please call HasSynced on the handle returned by + // AddEventHandler. + HasSynced() bool + // LastSyncResourceVersion is the resource version observed when last synced with the underlying + // store. The value returned is not synchronized with access to the underlying store and is not + // thread-safe. + LastSyncResourceVersion() string + + // The WatchErrorHandler is called whenever ListAndWatch drops the + // connection with an error. After calling this handler, the informer + // will backoff and retry. + // + // The default implementation looks at the error type and tries to log + // the error message at an appropriate level. + // + // There's only one handler, so if you call this multiple times, last one + // wins; calling after the informer has been started returns an error. + // + // The handler is intended for visibility, not to e.g. pause the consumers. + // The handler should return quickly - any expensive processing should be + // offloaded. + SetWatchErrorHandler(handler WatchErrorHandler) error + + // The TransformFunc is called for each object which is about to be stored. + // + // This function is intended for you to take the opportunity to + // remove, transform, or normalize fields. One use case is to strip unused + // metadata fields out of objects to save on RAM cost. + // + // Must be set before starting the informer. + // + // Please see the comment on TransformFunc for more details. + SetTransform(handler TransformFunc) error + + // IsStopped reports whether the informer has already been stopped. + // Adding event handlers to already stopped informers is not possible. + // An informer already stopped will never be started again. + IsStopped() bool +} + +// Opaque interface representing the registration of ResourceEventHandler for +// a SharedInformer. Must be supplied back to the same SharedInformer's +// `RemoveEventHandler` to unregister the handlers. +// +// Also used to tell if the handler is synced (has had all items in the initial +// list delivered). +type ResourceEventHandlerRegistration interface { + // HasSynced reports if both the parent has synced and all pre-sync + // events have been delivered. + HasSynced() bool +} + +// SharedIndexInformer provides add and get Indexers ability based on SharedInformer. +type SharedIndexInformer interface { + SharedInformer + // AddIndexers add indexers to the informer before it starts. + AddIndexers(indexers Indexers) error + GetIndexer() Indexer +} + +// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details. +func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer { + return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{}) +} + +// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See +// NewSharedIndexInformerWithOptions for full details. 
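+//
+// A construction sketch (illustrative; assumes a typed clientset `client` and
+// the usual v1, metav1, and fields imports):
+//
+//	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
+//	informer := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second,
+//		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})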
+func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
+	return NewSharedIndexInformerWithOptions(
+		lw,
+		exampleObject,
+		SharedIndexInformerOptions{
+			ResyncPeriod: defaultEventHandlerResyncPeriod,
+			Indexers:     indexers,
+		},
+	)
+}
+
+// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher.
+// The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each
+// handler with a non-zero requested resync period, whether added
+// before or after the informer starts, the nominal resync period is
+// the requested resync period rounded up to a multiple of the
+// informer's resync checking period. Such an informer's resync
+// checking period is established when the informer starts running,
+// and is the maximum of (a) the minimum of the resync periods
+// requested before the informer starts and the
+// options.ResyncPeriod given here and (b) the constant
+// `minimumResyncPeriod` defined in this file.
+func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer {
+	realClock := &clock.RealClock{}
+
+	return &sharedIndexInformer{
+		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers),
+		processor:                       &sharedProcessor{clock: realClock},
+		listerWatcher:                   lw,
+		objectType:                      exampleObject,
+		objectDescription:               options.ObjectDescription,
+		resyncCheckPeriod:               options.ResyncPeriod,
+		defaultEventHandlerResyncPeriod: options.ResyncPeriod,
+		clock:                           realClock,
+		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
+	}
+}
+
+// SharedIndexInformerOptions configures a sharedIndexInformer.
+type SharedIndexInformerOptions struct {
+	// ResyncPeriod is the default event handler resync period and resync check
+	// period. If unset/unspecified, these are defaulted to 0 (do not resync).
+	ResyncPeriod time.Duration
+
+	// Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured.
+	Indexers Indexers
+
+	// ObjectDescription is the sharedIndexInformer's object description. This is passed through to the
+	// underlying Reflector's type description.
+	ObjectDescription string
+}
+
+// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
+type InformerSynced func() bool
+
+const (
+	// syncedPollPeriod controls how often you look at the status of your sync funcs
+	syncedPollPeriod = 100 * time.Millisecond
+
+	// initialBufferSize is the initial number of event notifications that can be buffered.
+	initialBufferSize = 1024
+)
+
+// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages
+// indicating that the caller identified by name is waiting for syncs, followed by
+// either a successful or failed sync.
+func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
+	klog.Infof("Waiting for caches to sync for %s", controllerName)
+
+	if !WaitForCacheSync(stopCh, cacheSyncs...) {
+		utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName))
+		return false
+	}
+
+	klog.Infof("Caches are synced for %s", controllerName)
+	return true
+}
+
+// WaitForCacheSync waits for caches to populate.
It returns true if it was successful, false +// if the controller should shutdown +// callers should prefer WaitForNamedCacheSync() +func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + err := wait.PollImmediateUntil(syncedPollPeriod, + func() (bool, error) { + for _, syncFunc := range cacheSyncs { + if !syncFunc() { + return false, nil + } + } + return true, nil + }, + stopCh) + if err != nil { + return false + } + + return true +} + +// `*sharedIndexInformer` implements SharedIndexInformer and has three +// main components. One is an indexed local cache, `indexer Indexer`. +// The second main component is a Controller that pulls +// objects/notifications using the ListerWatcher and pushes them into +// a DeltaFIFO --- whose knownObjects is the informer's local cache +// --- while concurrently Popping Deltas values from that fifo and +// processing them with `sharedIndexInformer::HandleDeltas`. Each +// invocation of HandleDeltas, which is done with the fifo's lock +// held, processes each Delta in turn. For each Delta this both +// updates the local cache and stuffs the relevant notification into +// the sharedProcessor. The third main component is that +// sharedProcessor, which is responsible for relaying those +// notifications to each of the informer's clients. +type sharedIndexInformer struct { + indexer Indexer + controller Controller + + processor *sharedProcessor + cacheMutationDetector MutationDetector + + listerWatcher ListerWatcher + + // objectType is an example object of the type this informer is expected to handle. If set, an event + // with an object with a mismatching type is dropped instead of being delivered to listeners. + objectType runtime.Object + + // objectDescription is the description of this informer's objects. This typically defaults to + objectDescription string + + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call + // shouldResync to check if any of our listeners need a resync. + resyncCheckPeriod time.Duration + // defaultEventHandlerResyncPeriod is the default resync period for any handlers added via + // AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default + // value). + defaultEventHandlerResyncPeriod time.Duration + // clock allows for testability + clock clock.Clock + + started, stopped bool + startedLock sync.Mutex + + // blockDeltas gives a way to stop all event distribution so that a late event handler + // can safely join the shared informer. + blockDeltas sync.Mutex + + // Called whenever the ListAndWatch drops the connection with an error. + watchErrorHandler WatchErrorHandler + + transform TransformFunc +} + +// dummyController hides the fact that a SharedInformer is different from a dedicated one +// where a caller can `Run`. The run method is disconnected in this case, because higher +// level logic will decide when to start the SharedInformer and related controller. +// Because returning information back is always asynchronous, the legacy callers shouldn't +// notice any change in behavior. 
+type dummyController struct { + informer *sharedIndexInformer +} + +func (v *dummyController) Run(stopCh <-chan struct{}) { +} + +func (v *dummyController) HasSynced() bool { + return v.informer.HasSynced() +} + +func (v *dummyController) LastSyncResourceVersion() string { + return "" +} + +type updateNotification struct { + oldObj interface{} + newObj interface{} +} + +type addNotification struct { + newObj interface{} + isInInitialList bool +} + +type deleteNotification struct { + oldObj interface{} +} + +func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + s.watchErrorHandler = handler + return nil +} + +func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + s.transform = handler + return nil +} + +func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + if s.HasStarted() { + klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed") + return + } + + func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: s.indexer, + EmitDeltaTypeReplaced: true, + Transformer: s.transform, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + ObjectDescription: s.objectDescription, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, + + Process: s.HandleDeltas, + WatchErrorHandler: s.watchErrorHandler, + } + + s.controller = New(cfg) + s.controller.(*controller).clock = s.clock + s.started = true + }() + + // Separate stop channel because Processor should be stopped strictly after controller + processorStopCh := make(chan struct{}) + var wg wait.Group + defer wg.Wait() // Wait for Processor to stop + defer close(processorStopCh) // Tell Processor to stop + wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run) + wg.StartWithChannel(processorStopCh, s.processor.run) + + defer func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + s.stopped = true // Don't want any new listeners + }() + s.controller.Run(stopCh) +} + +func (s *sharedIndexInformer) HasStarted() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.started +} + +func (s *sharedIndexInformer) HasSynced() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return false + } + return s.controller.HasSynced() +} + +func (s *sharedIndexInformer) LastSyncResourceVersion() string { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return "" + } + return s.controller.LastSyncResourceVersion() +} + +func (s *sharedIndexInformer) GetStore() Store { + return s.indexer +} + +func (s *sharedIndexInformer) GetIndexer() Indexer { + return s.indexer +} + +func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + return s.indexer.AddIndexers(indexers) +} + +func (s *sharedIndexInformer) GetController() Controller { + return &dummyController{informer: s} +} + +func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) 
(ResourceEventHandlerRegistration, error) { + return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) +} + +func determineResyncPeriod(desired, check time.Duration) time.Duration { + if desired == 0 { + return desired + } + if check == 0 { + klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + return 0 + } + if desired < check { + klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + return check + } + return desired +} + +const minimumResyncPeriod = 1 * time.Second + +func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.stopped { + return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler) + } + + if resyncPeriod > 0 { + if resyncPeriod < minimumResyncPeriod { + klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod) + resyncPeriod = minimumResyncPeriod + } + + if resyncPeriod < s.resyncCheckPeriod { + if s.started { + klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + resyncPeriod = s.resyncCheckPeriod + } else { + // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update + // resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners + // accordingly + s.resyncCheckPeriod = resyncPeriod + s.processor.resyncCheckPeriodChanged(resyncPeriod) + } + } + } + + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced) + + if !s.started { + return s.processor.addListener(listener), nil + } + + // in order to safely join, we have to + // 1. stop sending add/update/delete notifications + // 2. do a list against the store + // 3. send synthetic "Add" events to the new handler + // 4. unblock + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + handle := s.processor.addListener(listener) + for _, item := range s.indexer.List() { + // Note that we enqueue these notifications with the lock held + // and before returning the handle. That means there is never a + // chance for anyone to call the handle's HasSynced method in a + // state when it would falsely return true (i.e., when the + // shared informer is synced but it has not observed an Add + // with isInitialList being true, nor when the thread + // processing notifications somehow goes faster than this + // thread adding them and the counter is temporarily zero). 
+		listener.add(addNotification{newObj: item, isInInitialList: true})
+	}
+	return handle, nil
+}
+
+func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error {
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+
+	if deltas, ok := obj.(Deltas); ok {
+		return processDeltas(s, s.indexer, deltas, isInInitialList)
+	}
+	return errors.New("object given as Process argument is not Deltas")
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) {
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.cacheMutationDetector.AddObject(obj)
+	s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false)
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnUpdate(old, new interface{}) {
+	isSync := false
+
+	// If this is a Sync event, isSync should be true.
+	// If this is a Replaced event, isSync is true if the resource version is
+	// unchanged: an unchanged RV means the event carries no new state, so it
+	// is treated like a Sync.
+
+	if accessor, err := meta.Accessor(new); err == nil {
+		if oldAccessor, err := meta.Accessor(old); err == nil {
+			// Events that didn't change resourceVersion are treated as resync events
+			// and only propagated to listeners that requested resync
+			isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
+		}
+	}
+
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.cacheMutationDetector.AddObject(new)
+	s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync)
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnDelete(old interface{}) {
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.processor.distribute(deleteNotification{oldObj: old}, false)
+}
+
+// IsStopped reports whether the informer has already been stopped
+func (s *sharedIndexInformer) IsStopped() bool {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+	return s.stopped
+}
+
+func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	// in order to safely remove, we have to
+	// 1. stop sending add/update/delete notifications
+	// 2. remove and stop listener
+	// 3. unblock
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+	return s.processor.removeListener(handle)
+}
+
+// sharedProcessor has a collection of processorListener and can
+// distribute a notification object to its listeners. There are two
+// kinds of distribute operations. The sync distributions go to a
+// subset of the listeners that (a) is recomputed in the occasional
+// calls to shouldResync and (b) every listener is initially put in.
+// The non-sync distributions go to every listener.
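+//
+// For example (illustrative), given two handlers registered as
+//
+//	informer.AddEventHandlerWithResyncPeriod(h1, 0)           // never resyncs
+//	informer.AddEventHandlerWithResyncPeriod(h2, time.Minute) // resyncs
+//
+// a sync distribution triggered by a resync reaches only h2, while a non-sync
+// distribution caused by a regular add/update/delete reaches both handlers.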
+type sharedProcessor struct {
+	listenersStarted bool
+	listenersLock    sync.RWMutex
+	// Map from listeners to whether or not they are currently syncing
+	listeners map[*processorListener]bool
+	clock     clock.Clock
+	wg        wait.Group
+}
+
+func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener {
+	p.listenersLock.RLock()
+	defer p.listenersLock.RUnlock()
+
+	if p.listeners == nil {
+		return nil
+	}
+
+	if result, ok := registration.(*processorListener); ok {
+		if _, exists := p.listeners[result]; exists {
+			return result
+		}
+	}
+
+	return nil
+}
+
+func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration {
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+
+	if p.listeners == nil {
+		p.listeners = make(map[*processorListener]bool)
+	}
+
+	p.listeners[listener] = true
+
+	if p.listenersStarted {
+		p.wg.Start(listener.run)
+		p.wg.Start(listener.pop)
+	}
+
+	return listener
+}
+
+func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error {
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+
+	listener, ok := handle.(*processorListener)
+	if !ok {
+		return fmt.Errorf("invalid key type %T", handle)
+	} else if p.listeners == nil {
+		// No listeners are registered, do nothing
+		return nil
+	} else if _, exists := p.listeners[listener]; !exists {
+		// Listener is not registered, just do nothing
+		return nil
+	}
+
+	delete(p.listeners, listener)
+
+	if p.listenersStarted {
+		close(listener.addCh)
+	}
+
+	return nil
+}
+
+func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
+	p.listenersLock.RLock()
+	defer p.listenersLock.RUnlock()
+
+	for listener, isSyncing := range p.listeners {
+		switch {
+		case !sync:
+			// non-sync messages are delivered to every listener
+			listener.add(obj)
+		case isSyncing:
+			// sync messages are delivered to every syncing listener
+			listener.add(obj)
+		default:
+			// skipping a sync obj for a non-syncing listener
+		}
+	}
+}
+
+func (p *sharedProcessor) run(stopCh <-chan struct{}) {
+	func() {
+		p.listenersLock.RLock()
+		defer p.listenersLock.RUnlock()
+		for listener := range p.listeners {
+			p.wg.Start(listener.run)
+			p.wg.Start(listener.pop)
+		}
+		p.listenersStarted = true
+	}()
+	<-stopCh
+
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+	for listener := range p.listeners {
+		close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
+	}
+
+	// Wipe out list of listeners since they are now closed
+	// (processorListener cannot be re-used)
+	p.listeners = nil
+
+	// Reset to false since no listeners are running
+	p.listenersStarted = false
+
+	p.wg.Wait() // Wait for all .pop() and .run() to stop
+}
+
+// shouldResync queries every listener to determine if any of them need a resync, based on each
+// listener's resyncPeriod.
+func (p *sharedProcessor) shouldResync() bool {
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+
+	resyncNeeded := false
+	now := p.clock.Now()
+	for listener := range p.listeners {
+		// need to loop through all the listeners to see if they need to resync so we can prepare any
+		// listeners that are going to be resyncing.
+ shouldResync := listener.shouldResync(now) + p.listeners[listener] = shouldResync + + if shouldResync { + resyncNeeded = true + listener.determineNextResync(now) + } + } + return resyncNeeded +} + +func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + for listener := range p.listeners { + resyncPeriod := determineResyncPeriod( + listener.requestedResyncPeriod, resyncCheckPeriod) + listener.setResyncPeriod(resyncPeriod) + } +} + +// processorListener relays notifications from a sharedProcessor to +// one ResourceEventHandler --- using two goroutines, two unbuffered +// channels, and an unbounded ring buffer. The `add(notification)` +// function sends the given notification to `addCh`. One goroutine +// runs `pop()`, which pumps notifications from `addCh` to `nextCh` +// using storage in the ring buffer while `nextCh` is not keeping up. +// Another goroutine runs `run()`, which receives notifications from +// `nextCh` and synchronously invokes the appropriate handler method. +// +// processorListener also keeps track of the adjusted requested resync +// period of the listener. +type processorListener struct { + nextCh chan interface{} + addCh chan interface{} + + handler ResourceEventHandler + + syncTracker *synctrack.SingleFileTracker + + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. + // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. + // TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better. + pendingNotifications buffer.RingGrowing + + // requestedResyncPeriod is how frequently the listener wants a + // full resync from the shared informer, but modified by two + // adjustments. One is imposing a lower bound, + // `minimumResyncPeriod`. The other is another lower bound, the + // sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only + // in AddEventHandlerWithResyncPeriod invocations made after the + // sharedIndexInformer starts and (b) only if the informer does + // resyncs at all. + requestedResyncPeriod time.Duration + // resyncPeriod is the threshold that will be used in the logic + // for this listener. This value differs from + // requestedResyncPeriod only when the sharedIndexInformer does + // not do resyncs, in which case the value here is zero. The + // actual time between resyncs depends on when the + // sharedProcessor's `shouldResync` function is invoked and when + // the sharedIndexInformer processes `Sync` type Delta objects. + resyncPeriod time.Duration + // nextResync is the earliest time the listener should get a full resync + nextResync time.Time + // resyncLock guards access to resyncPeriod and nextResync + resyncLock sync.Mutex +} + +// HasSynced returns true if the source informer has synced, and all +// corresponding events have been delivered. 
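+//
+// This is what backs the HasSynced method of the registration handle returned
+// by AddEventHandler, so callers can wait for per-handler delivery, e.g.
+// (illustrative):
+//
+//	reg, _ := informer.AddEventHandler(handler)
+//	cache.WaitForCacheSync(stopCh, reg.HasSynced)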
+func (p *processorListener) HasSynced() bool {
+	return p.syncTracker.HasSynced()
+}
+
+func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener {
+	ret := &processorListener{
+		nextCh:                make(chan interface{}),
+		addCh:                 make(chan interface{}),
+		handler:               handler,
+		syncTracker:           &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced},
+		pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
+		requestedResyncPeriod: requestedResyncPeriod,
+		resyncPeriod:          resyncPeriod,
+	}
+
+	ret.determineNextResync(now)
+
+	return ret
+}
+
+func (p *processorListener) add(notification interface{}) {
+	if a, ok := notification.(addNotification); ok && a.isInInitialList {
+		p.syncTracker.Start()
+	}
+	p.addCh <- notification
+}
+
+func (p *processorListener) pop() {
+	defer utilruntime.HandleCrash()
+	defer close(p.nextCh) // Tell .run() to stop
+
+	var nextCh chan<- interface{}
+	var notification interface{}
+	for {
+		select {
+		case nextCh <- notification:
+			// Notification dispatched
+			var ok bool
+			notification, ok = p.pendingNotifications.ReadOne()
+			if !ok { // Nothing to pop
+				nextCh = nil // Disable this select case
+			}
+		case notificationToAdd, ok := <-p.addCh:
+			if !ok {
+				return
+			}
+			if notification == nil { // No notification to pop (and pendingNotifications is empty)
+				// Optimize the case - skip adding to pendingNotifications
+				notification = notificationToAdd
+				nextCh = p.nextCh
+			} else { // There is already a notification waiting to be dispatched
+				p.pendingNotifications.WriteOne(notificationToAdd)
+			}
+		}
+	}
+}
+
+func (p *processorListener) run() {
+	// this call blocks until the channel is closed. When a panic happens during the notification
+	// we will catch it, **the offending item will be skipped!**, and after a short delay (one second)
+	// the next notification will be attempted. This is usually better than the alternative of never
+	// delivering again.
+	stopCh := make(chan struct{})
+	wait.Until(func() {
+		for next := range p.nextCh {
+			switch notification := next.(type) {
+			case updateNotification:
+				p.handler.OnUpdate(notification.oldObj, notification.newObj)
+			case addNotification:
+				p.handler.OnAdd(notification.newObj, notification.isInInitialList)
+				if notification.isInInitialList {
+					p.syncTracker.Finished()
+				}
+			case deleteNotification:
+				p.handler.OnDelete(notification.oldObj)
+			default:
+				utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
+			}
+		}
+		// the only way to get here is if the p.nextCh is empty and closed
+		close(stopCh)
+	}, 1*time.Second, stopCh)
+}
+
+// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
+// this always returns false.
+func (p *processorListener) shouldResync(now time.Time) bool { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + if p.resyncPeriod == 0 { + return false + } + + return now.After(p.nextResync) || now.Equal(p.nextResync) +} + +func (p *processorListener) determineNextResync(now time.Time) { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + p.nextResync = now.Add(p.resyncPeriod) +} + +func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + p.resyncPeriod = resyncPeriod +} diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go new file mode 100644 index 000000000..5cc3f42ec --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -0,0 +1,295 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Store is a generic object storage and processing interface. A +// Store holds a map from string keys to accumulators, and has +// operations to add, update, and delete a given object to/from the +// accumulator currently associated with a given key. A Store also +// knows how to extract the key from a given object, so many operations +// are given only the object. +// +// In the simplest Store implementations each accumulator is simply +// the last given object, or empty after Delete, and thus the Store's +// behavior is simple storage. +// +// Reflector knows how to watch a server and update a Store. This +// package provides a variety of implementations of Store. +type Store interface { + + // Add adds the given object to the accumulator associated with the given object's key + Add(obj interface{}) error + + // Update updates the given object in the accumulator associated with the given object's key + Update(obj interface{}) error + + // Delete deletes the given object from the accumulator associated with the given object's key + Delete(obj interface{}) error + + // List returns a list of all the currently non-empty accumulators + List() []interface{} + + // ListKeys returns a list of all the keys currently associated with non-empty accumulators + ListKeys() []string + + // Get returns the accumulator associated with the given object's key + Get(obj interface{}) (item interface{}, exists bool, err error) + + // GetByKey returns the accumulator associated with the given key + GetByKey(key string) (item interface{}, exists bool, err error) + + // Replace will delete the contents of the store, using instead the + // given list. Store takes ownership of the list, you should not reference + // it after calling this function. + Replace([]interface{}, string) error + + // Resync is meaningless in the terms appearing here but has + // meaning in some implementations that have non-trivial + // additional behavior (e.g., DeltaFIFO). 
+	Resync() error
+}
+
+// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
+type KeyFunc func(obj interface{}) (string, error)
+
+// KeyError will be returned any time a KeyFunc gives an error; it includes the object
+// at fault.
+type KeyError struct {
+	Obj interface{}
+	Err error
+}
+
+// Error gives a human-readable description of the error.
+func (k KeyError) Error() string {
+	return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
+}
+
+// Unwrap implements errors.Unwrap
+func (k KeyError) Unwrap() error {
+	return k.Err
+}
+
+// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
+// the object but not the object itself.
+type ExplicitKey string
+
+// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make
+// keys for API objects which implement meta.Interface.
+// The key uses the format <namespace>/<name> unless <namespace> is empty, then
+// it's just <name>.
+//
+// Clients that want a structured alternative can use ObjectToName or MetaObjectToName.
+// Note: this would not be a client that wants a key for a Store because those are
+// necessarily strings.
+//
+// TODO maybe some day?: change Store to be keyed differently
+func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
+	if key, ok := obj.(ExplicitKey); ok {
+		return string(key), nil
+	}
+	objName, err := ObjectToName(obj)
+	if err != nil {
+		return "", err
+	}
+	return objName.String(), nil
+}
+
+// ObjectToName returns the structured name for the given object,
+// if indeed it can be viewed as a metav1.Object.
+func ObjectToName(obj interface{}) (ObjectName, error) {
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return ObjectName{}, fmt.Errorf("object has no meta: %v", err)
+	}
+	return MetaObjectToName(meta), nil
+}
+
+// MetaObjectToName returns the structured name for the given object
+func MetaObjectToName(obj metav1.Object) ObjectName {
+	if len(obj.GetNamespace()) > 0 {
+		return ObjectName{Namespace: obj.GetNamespace(), Name: obj.GetName()}
+	}
+	return ObjectName{Namespace: "", Name: obj.GetName()}
+}
+
+// SplitMetaNamespaceKey returns the namespace and name that
+// MetaNamespaceKeyFunc encoded into key.
+//
+// TODO: replace key-as-string with a key-as-struct so that this
+// packing/unpacking won't be necessary.
+func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
+	parts := strings.Split(key, "/")
+	switch len(parts) {
+	case 1:
+		// name only, no namespace
+		return "", parts[0], nil
+	case 2:
+		// namespace and name
+		return parts[0], parts[1], nil
+	}
+
+	return "", "", fmt.Errorf("unexpected key format: %q", key)
+}
+
+// `*cache` implements Indexer in terms of a ThreadSafeStore and an
+// associated KeyFunc.
+type cache struct {
+	// cacheStorage bears the burden of thread safety for the cache
+	cacheStorage ThreadSafeStore
+	// keyFunc is used to make the key for objects stored in and retrieved from items, and
+	// should be deterministic.
+	keyFunc KeyFunc
+}
+
+var _ Store = &cache{}
+
+// Add inserts an item into the cache.
+func (c *cache) Add(obj interface{}) error {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Add(key, obj)
+	return nil
+}
+
+// Update sets an item in the cache to its updated state.
+func (c *cache) Update(obj interface{}) error {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Update(key, obj)
+	return nil
+}
+
+// Delete removes an item from the cache.
+func (c *cache) Delete(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Delete(key) + return nil +} + +// List returns a list of all the items. +// List is completely threadsafe as long as you treat all items as immutable. +func (c *cache) List() []interface{} { + return c.cacheStorage.List() +} + +// ListKeys returns a list of all the keys of the objects currently +// in the cache. +func (c *cache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// GetIndexers returns the indexers of cache +func (c *cache) GetIndexers() Indexers { + return c.cacheStorage.GetIndexers() +} + +// Index returns a list of items that match on the index function +// Index is thread-safe so long as you treat all items as immutable +func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) { + return c.cacheStorage.Index(indexName, obj) +} + +// IndexKeys returns the storage keys of the stored objects whose set of +// indexed values for the named index includes the given indexed value. +// The returned keys are suitable to pass to GetByKey(). +func (c *cache) IndexKeys(indexName, indexedValue string) ([]string, error) { + return c.cacheStorage.IndexKeys(indexName, indexedValue) +} + +// ListIndexFuncValues returns the list of generated values of an Index func +func (c *cache) ListIndexFuncValues(indexName string) []string { + return c.cacheStorage.ListIndexFuncValues(indexName) +} + +// ByIndex returns the stored objects whose set of indexed values +// for the named index includes the given indexed value. +func (c *cache) ByIndex(indexName, indexedValue string) ([]interface{}, error) { + return c.cacheStorage.ByIndex(indexName, indexedValue) +} + +func (c *cache) AddIndexers(newIndexers Indexers) error { + return c.cacheStorage.AddIndexers(newIndexers) +} + +// Get returns the requested item, or sets exists=false. +// Get is completely threadsafe as long as you treat all items as immutable. +func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := c.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return c.GetByKey(key) +} + +// GetByKey returns the request item, or exists=false. +// GetByKey is completely threadsafe as long as you treat all items as immutable. +func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) { + item, exists = c.cacheStorage.Get(key) + return item, exists, nil +} + +// Replace will delete the contents of 'c', using instead the given list. +// 'c' takes ownership of the list, you should not reference the list again +// after calling this function. +func (c *cache) Replace(list []interface{}, resourceVersion string) error { + items := make(map[string]interface{}, len(list)) + for _, item := range list { + key, err := c.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = item + } + c.cacheStorage.Replace(items, resourceVersion) + return nil +} + +// Resync is meaningless for one of these +func (c *cache) Resync() error { + return nil +} + +// NewStore returns a Store implemented simply with a map and a lock. +func NewStore(keyFunc KeyFunc) Store { + return &cache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + } +} + +// NewIndexer returns an Indexer implemented simply with a map and a lock. 
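For orientation, here is a minimal usage sketch of the `Store`/`cache` implementation vendored above, keyed by `MetaNamespaceKeyFunc`. The namespace and pod names are invented for illustration; nothing in this block is part of the patch itself:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// NewStore returns the map-backed cache defined above; keys take the
	// "namespace/name" form produced by MetaNamespaceKeyFunc.
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Namespace: "shoot--test", Name: "etcd-main-0", // made-up names
	}}
	_ = store.Add(pod)

	// GetByKey uses the same key encoding.
	if obj, exists, _ := store.GetByKey("shoot--test/etcd-main-0"); exists {
		fmt.Println("found:", obj.(*corev1.Pod).Name)
	}

	// SplitMetaNamespaceKey reverses the encoding.
	ns, name, _ := cache.SplitMetaNamespaceKey("shoot--test/etcd-main-0")
	fmt.Println(ns, name) // shoot--test etcd-main-0
}
```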
+func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer { + return &cache{ + cacheStorage: NewThreadSafeStore(indexers, Indices{}), + keyFunc: keyFunc, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go new file mode 100644 index 000000000..ce51da9af --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synctrack + +import ( + "sync" + "sync/atomic" +) + +// Lazy defers the computation of `Evaluate` to when it is necessary. It is +// possible that Evaluate will be called in parallel from multiple goroutines. +type Lazy[T any] struct { + Evaluate func() (T, error) + + cache atomic.Pointer[cacheEntry[T]] +} + +type cacheEntry[T any] struct { + eval func() (T, error) + lock sync.RWMutex + result *T +} + +func (e *cacheEntry[T]) get() (T, error) { + if cur := func() *T { + e.lock.RLock() + defer e.lock.RUnlock() + return e.result + }(); cur != nil { + return *cur, nil + } + + e.lock.Lock() + defer e.lock.Unlock() + if e.result != nil { + return *e.result, nil + } + r, err := e.eval() + if err == nil { + e.result = &r + } + return r, err +} + +func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] { + return &cacheEntry[T]{eval: z.Evaluate} +} + +// Notify should be called when something has changed necessitating a new call +// to Evaluate. +func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) } + +// Get should be called to get the current result of a call to Evaluate. If the +// current cached value is stale (due to a call to Notify), then Evaluate will +// be called synchronously. If subsequent calls to Get happen (without another +// Notify), they will all wait for the same return value. +// +// Error returns are not cached and will cause multiple calls to evaluate! +func (z *Lazy[T]) Get() (T, error) { + e := z.cache.Load() + if e == nil { + // Since we don't force a constructor, nil is a possible value. + // If multiple Gets race to set this, the swap makes sure only + // one wins. + z.cache.CompareAndSwap(nil, z.newCacheEntry()) + e = z.cache.Load() + } + return e.get() +} diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go new file mode 100644 index 000000000..3fa2beb6b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go @@ -0,0 +1,120 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package synctrack contains utilities for helping controllers track whether +// they are "synced" or not, that is, whether they have processed all items +// from the informer's initial list. +package synctrack + +import ( + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// AsyncTracker helps propagate HasSynced in the face of multiple worker threads. +type AsyncTracker[T comparable] struct { + UpstreamHasSynced func() bool + + lock sync.Mutex + waiting sets.Set[T] +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *AsyncTracker[T]) Start(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting == nil { + t.waiting = sets.New[T](key) + } else { + t.waiting.Insert(key) + } +} + +// Finished should be called when finished processing a key which was part of +// the initial list. Since keys are tracked individually, nothing bad happens +// if you call Finished without a corresponding call to Start. This makes it +// easier to use this in combination with e.g. queues which don't make it easy +// to plumb through the isInInitialList boolean. +func (t *AsyncTracker[T]) Finished(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting != nil { + t.waiting.Delete(key) + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *AsyncTracker[T]) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we can't hold our lock while + // waiting on that or a user is likely to get a deadlock. + if !t.UpstreamHasSynced() { + return false + } + t.lock.Lock() + defer t.lock.Unlock() + return t.waiting.Len() == 0 +} + +// SingleFileTracker helps propagate HasSynced when events are processed in +// order (i.e. via a queue). +type SingleFileTracker struct { + // Important: count is used with atomic operations so it must be 64-bit + // aligned, otherwise atomic operations will panic. Having it at the top of + // the struct will guarantee that, even on 32-bit arches. + // See https://pkg.go.dev/sync/atomic#pkg-note-BUG for more information. + count int64 + + UpstreamHasSynced func() bool +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *SingleFileTracker) Start() { + atomic.AddInt64(&t.count, 1) +} + +// Finished should be called when finished processing a key which was part of +// the initial list. You must never call Finished() before (or without) its +// corresponding Start(), that is a logic error that could cause HasSynced to +// return a wrong value. To help you notice this should it happen, Finished() +// will panic if the internal counter goes negative. +func (t *SingleFileTracker) Finished() { + result := atomic.AddInt64(&t.count, -1) + if result < 0 { + panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value") + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. 
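The `synctrack` helpers are informer-internal plumbing, but the `Start`/`Finished` contract is easiest to see in isolation. A minimal sketch under the assumption of a single, ordered worker; the `UpstreamHasSynced` closure merely stands in for an informer's `HasSynced`:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache/synctrack"
)

func main() {
	upstreamSynced := false
	tracker := &synctrack.SingleFileTracker{
		// Stand-in for informer.HasSynced in this sketch.
		UpstreamHasSynced: func() bool { return upstreamSynced },
	}

	// One Start per key delivered from the initial list...
	tracker.Start()
	tracker.Start()
	upstreamSynced = true // upstream has delivered the whole initial list

	fmt.Println(tracker.HasSynced()) // false: two keys still in flight

	// ...and exactly one Finished per Start once each key is handled.
	tracker.Finished()
	tracker.Finished()
	fmt.Println(tracker.HasSynced()) // true
}
```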
This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *SingleFileTracker) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we don't want to then act on a + // stale count value. + if !t.UpstreamHasSynced() { + return false + } + return atomic.LoadInt64(&t.count) <= 0 +} diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go new file mode 100644 index 000000000..145e93ee5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -0,0 +1,363 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// ThreadSafeStore is an interface that allows concurrent indexed +// access to a storage backend. It is like Indexer but does not +// (necessarily) know how to extract the Store key from a given +// object. +// +// TL;DR caveats: you must not modify anything returned by Get or List as it will break +// the indexing feature in addition to not being thread safe. +// +// The guarantees of thread safety provided by List/Get are only valid if the caller +// treats returned items as read-only. For example, a pointer inserted in the store +// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get` +// on the same key and modify the pointer in a non-thread-safe way. Also note that +// modifying objects stored by the indexers (if any) will *not* automatically lead +// to a re-index. So it's not a good idea to directly modify the objects returned by +// Get/List, in general. +type ThreadSafeStore interface { + Add(key string, obj interface{}) + Update(key string, obj interface{}) + Delete(key string) + Get(key string) (item interface{}, exists bool) + List() []interface{} + ListKeys() []string + Replace(map[string]interface{}, string) + Index(indexName string, obj interface{}) ([]interface{}, error) + IndexKeys(indexName, indexedValue string) ([]string, error) + ListIndexFuncValues(name string) []string + ByIndex(indexName, indexedValue string) ([]interface{}, error) + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. 
+ AddIndexers(newIndexers Indexers) error + // Resync is a no-op and is deprecated + Resync() error +} + +// storeIndex implements the indexing functionality for Store interface +type storeIndex struct { + // indexers maps a name to an IndexFunc + indexers Indexers + // indices maps a name to an Index + indices Indices +} + +func (i *storeIndex) reset() { + i.indices = Indices{} +} + +func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + indexedValues, err := indexFunc(obj) + if err != nil { + return nil, err + } + index := i.indices[indexName] + + var storeKeySet sets.String + if len(indexedValues) == 1 { + // In majority of cases, there is exactly one value matching. + // Optimize the most common path - deduping is not needed here. + storeKeySet = index[indexedValues[0]] + } else { + // Need to de-dupe the return list. + // Since multiple keys are allowed, this can happen. + storeKeySet = sets.String{} + for _, indexedValue := range indexedValues { + for key := range index[indexedValue] { + storeKeySet.Insert(key) + } + } + } + + return storeKeySet, nil +} + +func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := i.indices[indexName] + return index[indexedValue], nil +} + +func (i *storeIndex) getIndexValues(indexName string) []string { + index := i.indices[indexName] + names := make([]string, 0, len(index)) + for key := range index { + names = append(names, key) + } + return names +} + +func (i *storeIndex) addIndexers(newIndexers Indexers) error { + oldKeys := sets.StringKeySet(i.indexers) + newKeys := sets.StringKeySet(newIndexers) + + if oldKeys.HasAny(newKeys.List()...) 
{ + return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) + } + + for k, v := range newIndexers { + i.indexers[k] = v + } + return nil +} + +// updateIndices modifies the objects location in the managed indexes: +// - for create you must provide only the newObj +// - for update you must provide both the oldObj and the newObj +// - for delete you must provide only the oldObj +// updateIndices must be called from a function that already has a lock on the cache +func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) { + var oldIndexValues, indexValues []string + var err error + for name, indexFunc := range i.indexers { + if oldObj != nil { + oldIndexValues, err = indexFunc(oldObj) + } else { + oldIndexValues = oldIndexValues[:0] + } + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + + if newObj != nil { + indexValues, err = indexFunc(newObj) + } else { + indexValues = indexValues[:0] + } + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + + index := i.indices[name] + if index == nil { + index = Index{} + i.indices[name] = index + } + + if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { + // We optimize for the most common case where indexFunc returns a single value which has not been changed + continue + } + + for _, value := range oldIndexValues { + i.deleteKeyFromIndex(key, value, index) + } + for _, value := range indexValues { + i.addKeyToIndex(key, value, index) + } + } +} + +func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) { + set := index[indexValue] + if set == nil { + set = sets.String{} + index[indexValue] = set + } + set.Insert(key) +} + +func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) { + set := index[indexValue] + if set == nil { + return + } + set.Delete(key) + // If we don't delete the set when zero, indices with high cardinality + // short lived resources can cause memory to increase over time from + // unused empty sets. See `kubernetes/kubernetes/issues/84959`. + if len(set) == 0 { + delete(index, indexValue) + } +} + +// threadSafeMap implements ThreadSafeStore +type threadSafeMap struct { + lock sync.RWMutex + items map[string]interface{} + + // index implements the indexing functionality + index *storeIndex +} + +func (c *threadSafeMap) Add(key string, obj interface{}) { + c.Update(key, obj) +} + +func (c *threadSafeMap) Update(key string, obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + oldObject := c.items[key] + c.items[key] = obj + c.index.updateIndices(oldObject, obj, key) +} + +func (c *threadSafeMap) Delete(key string) { + c.lock.Lock() + defer c.lock.Unlock() + if obj, exists := c.items[key]; exists { + c.index.updateIndices(obj, nil, key) + delete(c.items, key) + } +} + +func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) { + c.lock.RLock() + defer c.lock.RUnlock() + item, exists = c.items[key] + return item, exists +} + +func (c *threadSafeMap) List() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]interface{}, 0, len(c.items)) + for _, item := range c.items { + list = append(list, item) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the threadSafeMap. 
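The `storeIndex` bookkeeping above surfaces through the public `Indexer` API. A sketch with a hypothetical `byApp` label index; the index name, labels, and object names are invented for illustration:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// A custom IndexFunc; "byApp" is a made-up index name for this sketch.
	byApp := func(obj interface{}) ([]string, error) {
		return []string{obj.(*corev1.Pod).Labels["app"]}, nil
	}

	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc,
		cache.Indexers{"byApp": byApp})

	_ = indexer.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "etcd-0", Labels: map[string]string{"app": "etcd"},
	}})
	_ = indexer.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "backup-0", Labels: map[string]string{"app": "backup-restore"},
	}})

	// ByIndex consults the Index maintained by updateIndices above.
	etcds, _ := indexer.ByIndex("byApp", "etcd")
	fmt.Println(len(etcds)) // 1

	// IndexKeys returns store keys suitable for GetByKey.
	keys, _ := indexer.IndexKeys("byApp", "backup-restore")
	fmt.Println(keys) // [backup-0]
}
```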
+func (c *threadSafeMap) ListKeys() []string { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]string, 0, len(c.items)) + for key := range c.items { + list = append(list, key) + } + return list +} + +func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) { + c.lock.Lock() + defer c.lock.Unlock() + c.items = items + + // rebuild any index + c.index.reset() + for key, item := range c.items { + c.index.updateIndices(nil, item, key) + } +} + +// Index returns a list of items that match the given object on the index function. +// Index is thread-safe so long as you treat all items as immutable. +func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + storeKeySet, err := c.index.getKeysFromIndex(indexName, obj) + if err != nil { + return nil, err + } + + list := make([]interface{}, 0, storeKeySet.Len()) + for storeKey := range storeKeySet { + list = append(list, c.items[storeKey]) + } + return list, nil +} + +// ByIndex returns a list of the items whose indexed values in the given index include the given indexed value +func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + set, err := c.index.getKeysByIndex(indexName, indexedValue) + if err != nil { + return nil, err + } + list := make([]interface{}, 0, set.Len()) + for key := range set { + list = append(list, c.items[key]) + } + + return list, nil +} + +// IndexKeys returns a list of the Store keys of the objects whose indexed values in the given index include the given indexed value. +// IndexKeys is thread-safe so long as you treat all items as immutable. +func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + set, err := c.index.getKeysByIndex(indexName, indexedValue) + if err != nil { + return nil, err + } + return set.List(), nil +} + +func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.index.getIndexValues(indexName) +} + +func (c *threadSafeMap) GetIndexers() Indexers { + return c.index.indexers +} + +func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { + c.lock.Lock() + defer c.lock.Unlock() + + if len(c.items) > 0 { + return fmt.Errorf("cannot add indexers to running index") + } + + return c.index.addIndexers(newIndexers) +} + +func (c *threadSafeMap) Resync() error { + // Nothing to do + return nil +} + +// NewThreadSafeStore creates a new instance of ThreadSafeStore. +func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { + return &threadSafeMap{ + items: map[string]interface{}{}, + index: &storeIndex{ + indexers: indexers, + indices: indices, + }, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go new file mode 100644 index 000000000..220845dd3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/undelta_store.go @@ -0,0 +1,89 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +// UndeltaStore listens to incremental updates and sends complete state on every change. +// It implements the Store interface so that it can receive a stream of mirrored objects +// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change +// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc. +// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results +// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values. +// PushFunc should be thread safe. +type UndeltaStore struct { + Store + PushFunc func([]interface{}) +} + +// Assert that it implements the Store interface. +var _ Store = &UndeltaStore{} + +// Add inserts an object into the store and sends complete state by calling PushFunc. +// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods. +// In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc} +// and the List. So, the following can happen, resulting in two identical calls to PushFunc. +// time thread 1 thread 2 +// 0 UndeltaStore.Add(a) +// 1 UndeltaStore.Add(b) +// 2 Store.Add(a) +// 3 Store.Add(b) +// 4 Store.List() -> [a,b] +// 5 Store.List() -> [a,b] +func (u *UndeltaStore) Add(obj interface{}) error { + if err := u.Store.Add(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc. +func (u *UndeltaStore) Update(obj interface{}) error { + if err := u.Store.Update(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +// Delete removes an item from the cache and sends complete state by calling PushFunc. +func (u *UndeltaStore) Delete(obj interface{}) error { + if err := u.Store.Delete(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +// Replace will delete the contents of current store, using instead the given list. +// 'u' takes ownership of the list, you should not reference the list again +// after calling this function. +// The new contents complete state will be sent by calling PushFunc after replacement. +func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error { + if err := u.Store.Replace(list, resourceVersion); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +// NewUndeltaStore returns an UndeltaStore implemented with a Store. +func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore { + return &UndeltaStore{ + Store: NewStore(keyFunc), + PushFunc: pushFunc, + } +} diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go new file mode 100644 index 000000000..3c77cc37f --- /dev/null +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -0,0 +1,289 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pager + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +const defaultPageSize = 500 +const defaultPageBufferSize = 10 + +// ListPageFunc returns a list object for the given list options. +type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) + +// SimplePageFunc adapts a context-less list function into one that accepts a context. +func SimplePageFunc(fn func(opts metav1.ListOptions) (runtime.Object, error)) ListPageFunc { + return func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + return fn(opts) + } +} + +// ListPager assists client code in breaking large list queries into multiple +// smaller chunks of PageSize or smaller. PageFn is expected to accept a +// metav1.ListOptions that supports paging and return a list. The pager does +// not alter the field or label selectors on the initial options list. +type ListPager struct { + PageSize int64 + PageFn ListPageFunc + + FullListIfExpired bool + + // Number of pages to buffer + PageBufferSize int32 +} + +// New creates a new pager from the provided pager function using the default +// options. It will fall back to a full list if an expiration error is encountered +// as a last resort. +func New(fn ListPageFunc) *ListPager { + return &ListPager{ + PageSize: defaultPageSize, + PageFn: fn, + FullListIfExpired: true, + PageBufferSize: defaultPageBufferSize, + } +} + +// TODO: introduce other types of paging functions - such as those that retrieve from a list +// of namespaces. + +// List returns a single list object, but attempts to retrieve smaller chunks from the +// server to reduce the impact on the server. If the chunk attempt fails, it will load +// the full list instead. The Limit field on options, if unset, will default to the page size. +// +// If items in the returned list are retained for different durations, and you want to avoid +// retaining the whole slice returned by p.PageFn as long as any item is referenced, +// use ListWithAlloc instead. +func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { + return p.list(ctx, options, false) +} + +// ListWithAlloc works like List, but avoids retaining references to the items slice returned by p.PageFn. +// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn. +// +// If the items in the returned list are not retained, or are retained for the same duration, use List instead for memory efficiency. 
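A hedged sketch of consuming `ListPager` against a typed clientset; the clientset `cs` and the choice of pods in the default namespace are assumptions for illustration, not anything this patch does:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

func listAllPods(ctx context.Context, cs kubernetes.Interface) error {
	p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return cs.CoreV1().Pods(metav1.NamespaceDefault).List(ctx, opts)
	})
	p.PageSize = 100 // chunk size per request; the default is 500

	// EachListItem streams items page by page; Continue tokens are handled
	// internally, and an expired token on a later page falls back to a full
	// list because FullListIfExpired defaults to true in New().
	return p.EachListItem(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
		fmt.Println(obj.(*corev1.Pod).Name)
		return nil
	})
}
```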
+func (p *ListPager) ListWithAlloc(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { + return p.list(ctx, options, true) +} + +func (p *ListPager) list(ctx context.Context, options metav1.ListOptions, allocNew bool) (runtime.Object, bool, error) { + if options.Limit == 0 { + options.Limit = p.PageSize + } + requestedResourceVersion := options.ResourceVersion + requestedResourceVersionMatch := options.ResourceVersionMatch + var list *metainternalversion.List + paginatedResult := false + + for { + select { + case <-ctx.Done(): + return nil, paginatedResult, ctx.Err() + default: + } + + obj, err := p.PageFn(ctx, options) + if err != nil { + // Only fallback to full list if an "Expired" errors is returned, FullListIfExpired is true, and + // the "Expired" error occurred in page 2 or later (since full list is intended to prevent a pager.List from + // failing when the resource versions is established by the first page request falls out of the compaction + // during the subsequent list requests). + if !errors.IsResourceExpired(err) || !p.FullListIfExpired || options.Continue == "" { + return nil, paginatedResult, err + } + // the list expired while we were processing, fall back to a full list at + // the requested ResourceVersion. + options.Limit = 0 + options.Continue = "" + options.ResourceVersion = requestedResourceVersion + options.ResourceVersionMatch = requestedResourceVersionMatch + result, err := p.PageFn(ctx, options) + return result, paginatedResult, err + } + m, err := meta.ListAccessor(obj) + if err != nil { + return nil, paginatedResult, fmt.Errorf("returned object must be a list: %v", err) + } + + // exit early and return the object we got if we haven't processed any pages + if len(m.GetContinue()) == 0 && list == nil { + return obj, paginatedResult, nil + } + + // initialize the list and fill its contents + if list == nil { + list = &metainternalversion.List{Items: make([]runtime.Object, 0, options.Limit+1)} + list.ResourceVersion = m.GetResourceVersion() + list.SelfLink = m.GetSelfLink() + } + eachListItemFunc := meta.EachListItem + if allocNew { + eachListItemFunc = meta.EachListItemWithAlloc + } + if err := eachListItemFunc(obj, func(obj runtime.Object) error { + list.Items = append(list.Items, obj) + return nil + }); err != nil { + return nil, paginatedResult, err + } + + // if we have no more items, return the list + if len(m.GetContinue()) == 0 { + return list, paginatedResult, nil + } + + // set the next loop up + options.Continue = m.GetContinue() + // Clear the ResourceVersion(Match) on the subsequent List calls to avoid the + // `specifying resource version is not allowed when using continue` error. + // See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143. + options.ResourceVersion = "" + options.ResourceVersionMatch = "" + // At this point, result is already paginated. + paginatedResult = true + } +} + +// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If +// fn returns an error, processing stops and that error is returned. If fn does not return an error, +// any error encountered while retrieving the list from the server is returned. If the context +// cancels or times out, the context error is returned. Since the list is retrieved in paginated +// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list +// requests exceed the expiration limit of the apiserver being called. 
+// +// Items are retrieved in chunks from the server to reduce the impact on the server with up to +// ListPager.PageBufferSize chunks buffered concurrently in the background. +// +// If items passed to fn are retained for different durations, and you want to avoid +// retaining the whole slice returned by p.PageFn as long as any item is referenced, +// use EachListItemWithAlloc instead. +func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItem(obj, fn) + }) +} + +// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice returned by p.PageFn. +// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn. +// +// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. +func (p *ListPager) EachListItemWithAlloc(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItemWithAlloc(obj, fn) + }) +} + +// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on +// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does +// not return an error, any error encountered while retrieving the list from the server is +// returned. If the context cancels or times out, the context error is returned. Since the list is +// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if +// the pagination list requests exceed the expiration limit of the apiserver being called. +// +// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background. +func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if p.PageBufferSize < 0 { + return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize) + } + + // Ensure background goroutine is stopped if this call exits before all list items are + // processed. Cancelation error from this deferred cancel call is never returned to caller; + // either the list result has already been sent to bgResultC or the fn error is returned and + // the cancelation error is discarded. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + chunkC := make(chan runtime.Object, p.PageBufferSize) + bgResultC := make(chan error, 1) + go func() { + defer utilruntime.HandleCrash() + + var err error + defer func() { + close(chunkC) + bgResultC <- err + }() + err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error { + select { + case chunkC <- chunk: // buffer the chunk, this can block + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }() + + for o := range chunkC { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + err := fn(o) + if err != nil { + return err // any fn error should be returned immediately + } + } + // promote the results of our background goroutine to the foreground + return <-bgResultC +} + +// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list +// chunk. If fn returns an error, processing stops and that error is returned. 
If fn does not return +// an error, any error encountered while retrieving the list from the server is returned. If the +// context cancels or times out, the context error is returned. Since the list is retrieved in +// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the +// pagination list requests exceed the expiration limit of the apiserver being called. +func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if options.Limit == 0 { + options.Limit = p.PageSize + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + obj, err := p.PageFn(ctx, options) + if err != nil { + return err + } + m, err := meta.ListAccessor(obj) + if err != nil { + return fmt.Errorf("returned object must be a list: %v", err) + } + if err := fn(obj); err != nil { + return err + } + // if we have no more items, return. + if len(m.GetContinue()) == 0 { + return nil + } + // set the next loop up + options.Continue = m.GetContinue() + } +} diff --git a/vendor/k8s.io/client-go/tools/watch/informerwatcher.go b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go new file mode 100644 index 000000000..5e6aad5cf --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +func newEventProcessor(out chan<- watch.Event) *eventProcessor { + return &eventProcessor{ + out: out, + cond: sync.NewCond(&sync.Mutex{}), + done: make(chan struct{}), + } +} + +// eventProcessor buffers events and writes them to an out chan when a reader +// is waiting. Because of the requirement to buffer events, it synchronizes +// input with a condition, and synchronizes output with a channels. It needs to +// be able to yield while both waiting on an input condition and while blocked +// on writing to the output channel. 
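For context, the event processor below backs `NewIndexerInformerWatcher` (defined further down in this file). A rough usage sketch, assuming a clientset `cs`; the resource choice and shutdown handling are illustrative only:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

func streamPodEvents(cs kubernetes.Interface) {
	// The same kind of ListerWatcher an informer would use internally.
	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(),
		"pods", corev1.NamespaceAll, fields.Everything())

	_, _, w, done := watchtools.NewIndexerInformerWatcher(lw, &corev1.Pod{})
	defer func() { <-done }() // wait for the informer goroutine to fully stop
	defer w.Stop()

	// Yields synthetic Added events for the initial list, then live changes;
	// in real code something would eventually call w.Stop() to end this loop.
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type, ev.Object.(*corev1.Pod).Name)
	}
}
```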
+type eventProcessor struct { + out chan<- watch.Event + + cond *sync.Cond + buff []watch.Event + + done chan struct{} +} + +func (e *eventProcessor) run() { + for { + batch := e.takeBatch() + e.writeBatch(batch) + if e.stopped() { + return + } + } +} + +func (e *eventProcessor) takeBatch() []watch.Event { + e.cond.L.Lock() + defer e.cond.L.Unlock() + + for len(e.buff) == 0 && !e.stopped() { + e.cond.Wait() + } + + batch := e.buff + e.buff = nil + return batch +} + +func (e *eventProcessor) writeBatch(events []watch.Event) { + for _, event := range events { + select { + case e.out <- event: + case <-e.done: + return + } + } +} + +func (e *eventProcessor) push(event watch.Event) { + e.cond.L.Lock() + defer e.cond.L.Unlock() + defer e.cond.Signal() + e.buff = append(e.buff, event) +} + +func (e *eventProcessor) stopped() bool { + select { + case <-e.done: + return true + default: + return false + } +} + +func (e *eventProcessor) stop() { + close(e.done) + e.cond.Signal() +} + +// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface +// so you can use it anywhere where you'd have used a regular Watcher returned from Watch method. +// it also returns a channel you can use to wait for the informers to fully shutdown. +func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface, <-chan struct{}) { + ch := make(chan watch.Event) + w := watch.NewProxyWatcher(ch) + e := newEventProcessor(ch) + + indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + e.push(watch.Event{ + Type: watch.Added, + Object: obj.(runtime.Object), + }) + }, + UpdateFunc: func(old, new interface{}) { + e.push(watch.Event{ + Type: watch.Modified, + Object: new.(runtime.Object), + }) + }, + DeleteFunc: func(obj interface{}) { + staleObj, stale := obj.(cache.DeletedFinalStateUnknown) + if stale { + // We have no means of passing the additional information down using + // watch API based on watch.Event but the caller can filter such + // objects by checking if metadata.deletionTimestamp is set + obj = staleObj.Obj + } + + e.push(watch.Event{ + Type: watch.Deleted, + Object: obj.(runtime.Object), + }) + }, + }, cache.Indexers{}) + + go e.run() + + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer e.stop() + informer.Run(w.StopChan()) + }() + + return indexer, informer, w, doneCh +} diff --git a/vendor/k8s.io/client-go/tools/watch/retrywatcher.go b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go new file mode 100644 index 000000000..d81dc4357 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go @@ -0,0 +1,295 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watch + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/dump" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +// resourceVersionGetter is an interface used to get resource version from events. +// We can't reuse an interface from meta otherwise it would be a cyclic dependency and we need just this one method +type resourceVersionGetter interface { + GetResourceVersion() string +} + +// RetryWatcher will make sure that in case the underlying watcher is closed (e.g. due to API timeout or etcd timeout) +// it will get restarted from the last point without the consumer even knowing about it. +// RetryWatcher does that by inspecting events and keeping track of resourceVersion. +// Especially useful when using watch.UntilWithoutRetry where premature termination is causing issues and flakes. +// Please note that this is not resilient to etcd cache not having the resource version anymore - you would need to +// use Informers for that. +type RetryWatcher struct { + lastResourceVersion string + watcherClient cache.Watcher + resultChan chan watch.Event + stopChan chan struct{} + doneChan chan struct{} + minRestartDelay time.Duration +} + +// NewRetryWatcher creates a new RetryWatcher. +// It will make sure that watches gets restarted in case of recoverable errors. +// The initialResourceVersion will be given to watch method when first called. +func NewRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher) (*RetryWatcher, error) { + return newRetryWatcher(initialResourceVersion, watcherClient, 1*time.Second) +} + +func newRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher, minRestartDelay time.Duration) (*RetryWatcher, error) { + switch initialResourceVersion { + case "", "0": + // TODO: revisit this if we ever get WATCH v2 where it means start "now" + // without doing the synthetic list of objects at the beginning (see #74022) + return nil, fmt.Errorf("initial RV %q is not supported due to issues with underlying WATCH", initialResourceVersion) + default: + break + } + + rw := &RetryWatcher{ + lastResourceVersion: initialResourceVersion, + watcherClient: watcherClient, + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + resultChan: make(chan watch.Event, 0), + minRestartDelay: minRestartDelay, + } + + go rw.receive() + return rw, nil +} + +func (rw *RetryWatcher) send(event watch.Event) bool { + // Writing to an unbuffered channel is blocking operation + // and we need to check if stop wasn't requested while doing so. + select { + case rw.resultChan <- event: + return true + case <-rw.stopChan: + return false + } +} + +// doReceive returns true when it is done, false otherwise. +// If it is not done the second return value holds the time to wait before calling it again. +func (rw *RetryWatcher) doReceive() (bool, time.Duration) { + watcher, err := rw.watcherClient.Watch(metav1.ListOptions{ + ResourceVersion: rw.lastResourceVersion, + AllowWatchBookmarks: true, + }) + // We are very unlikely to hit EOF here since we are just establishing the call, + // but it may happen that the apiserver is just shutting down (e.g. 
being restarted) + // This is consistent with how it is handled for informers + switch err { + case nil: + break + + case io.EOF: + // watch closed normally + return false, 0 + + case io.ErrUnexpectedEOF: + klog.V(1).InfoS("Watch closed with unexpected EOF", "err", err) + return false, 0 + + default: + msg := "Watch failed" + if net.IsProbableEOF(err) || net.IsTimeout(err) { + klog.V(5).InfoS(msg, "err", err) + // Retry + return false, 0 + } + + klog.ErrorS(err, msg) + // Retry + return false, 0 + } + + if watcher == nil { + klog.ErrorS(nil, "Watch returned nil watcher") + // Retry + return false, 0 + } + + ch := watcher.ResultChan() + defer watcher.Stop() + + for { + select { + case <-rw.stopChan: + klog.V(4).InfoS("Stopping RetryWatcher.") + return true, 0 + case event, ok := <-ch: + if !ok { + klog.V(4).InfoS("Failed to get event! Re-creating the watcher.", "resourceVersion", rw.lastResourceVersion) + return false, 0 + } + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark: + metaObject, ok := event.Object.(resourceVersionGetter) + if !ok { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(errors.New("retryWatcher: doesn't support resourceVersion")).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + resourceVersion := metaObject.GetResourceVersion() + if resourceVersion == "" { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher: object %#v doesn't support resourceVersion", event.Object)).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + // All is fine; send the non-bookmark events and update resource version. + if event.Type != watch.Bookmark { + ok = rw.send(event) + if !ok { + return true, 0 + } + } + rw.lastResourceVersion = resourceVersion + + continue + + case watch.Error: + // This round trip allows us to handle unstructured status + errObject := apierrors.FromObject(event.Object) + statusErr, ok := errObject.(*apierrors.StatusError) + if !ok { + klog.Error(fmt.Sprintf("Received an error which is not *metav1.Status but %s", dump.Pretty(event.Object))) + // Retry unknown errors + return false, 0 + } + + status := statusErr.ErrStatus + + statusDelay := time.Duration(0) + if status.Details != nil { + statusDelay = time.Duration(status.Details.RetryAfterSeconds) * time.Second + } + + switch status.Code { + case http.StatusGone: + // Never retry RV too old errors + _ = rw.send(event) + return true, 0 + + case http.StatusGatewayTimeout, http.StatusInternalServerError: + // Retry + return false, statusDelay + + default: + // We retry by default. RetryWatcher is meant to proceed unless it is certain + // that it can't. If we are not certain, we proceed with retry and leave it + // up to the user to timeout if needed. + + // Log here so we have a record of hitting the unexpected error + // and we can whitelist some error codes if we missed any that are expected. 
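A sketch of driving `RetryWatcher` directly; the clientset `cs`, the pods resource, and the idea of exiting on a `Deleted` event are all assumptions for illustration:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

func watchUntilGone(ctx context.Context, cs kubernetes.Interface, rv string) error {
	// rv must be a concrete resourceVersion (not "" or "0"), e.g. taken from
	// a prior List call; NewRetryWatcher rejects the empty/zero forms above.
	rw, err := watchtools.NewRetryWatcher(rv, &cache.ListWatch{
		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
			return cs.CoreV1().Pods(metav1.NamespaceDefault).Watch(ctx, opts)
		},
	})
	if err != nil {
		return err
	}
	defer rw.Stop()

	// The result channel survives dropped connections: doReceive re-establishes
	// the watch from lastResourceVersion without the consumer noticing.
	for ev := range rw.ResultChan() {
		fmt.Println(ev.Type)
		if ev.Type == watch.Deleted {
			return nil // illustrative exit condition
		}
	}
	return nil
}
```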
+ klog.V(5).Info(fmt.Sprintf("Retrying after unexpected error: %s", dump.Pretty(event.Object))) + + // Retry + return false, statusDelay + } + + default: + klog.Errorf("Failed to recognize Event type %q", event.Type) + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher failed to recognize Event type %q", event.Type)).ErrStatus, + }) + // We are unable to restart the watch and have to stop the loop or this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + } + } +} + +// receive reads the result from a watcher, restarting it if necessary. +func (rw *RetryWatcher) receive() { + defer close(rw.doneChan) + defer close(rw.resultChan) + + klog.V(4).Info("Starting RetryWatcher.") + defer klog.V(4).Info("Stopping RetryWatcher.") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-rw.stopChan: + cancel() + return + case <-ctx.Done(): + return + } + }() + + // We use non sliding until so we don't introduce delays on happy path when WATCH call + // timeouts or gets closed and we need to reestablish it while also avoiding hot loops. + wait.NonSlidingUntilWithContext(ctx, func(ctx context.Context) { + done, retryAfter := rw.doReceive() + if done { + cancel() + return + } + + timer := time.NewTimer(retryAfter) + select { + case <-ctx.Done(): + timer.Stop() + return + case <-timer.C: + } + + klog.V(4).Infof("Restarting RetryWatcher at RV=%q", rw.lastResourceVersion) + }, rw.minRestartDelay) +} + +// ResultChan implements Interface. +func (rw *RetryWatcher) ResultChan() <-chan watch.Event { + return rw.resultChan +} + +// Stop implements Interface. +func (rw *RetryWatcher) Stop() { + close(rw.stopChan) +} + +// Done allows the caller to be notified when Retry watcher stops. +func (rw *RetryWatcher) Done() <-chan struct{} { + return rw.doneChan +} diff --git a/vendor/k8s.io/client-go/tools/watch/until.go b/vendor/k8s.io/client-go/tools/watch/until.go new file mode 100644 index 000000000..a2474556b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/until.go @@ -0,0 +1,168 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition failed or detected an error state. +type PreconditionFunc func(store cache.Store) (bool, error) + +// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition cannot be checked and should terminate. 
In general, it is better to define +// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed +// from false to true). +type ConditionFunc func(event watch.Event) (bool, error) + +// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry. +var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout") + +// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last watch +// encountered. The first condition that returns an error terminates the watch (and the event is also returned). +// If no event has been received, the returned event will be nil. +// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition. +// Waits until context deadline or until context is canceled. +// +// Warning: Unless you have a very specific use case (probably a special Watcher) don't use this function!!! +// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' error. +// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below, +// Warning: solving such issues. +// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone. +func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) { + ch := watcher.ResultChan() + defer watcher.Stop() + var lastEvent *watch.Event + for _, condition := range conditions { + // check the next condition against the previous event and short circuit waiting for the next watch + if lastEvent != nil { + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + continue + } + } + ConditionSucceeded: + for { + select { + case event, ok := <-ch: + if !ok { + return lastEvent, ErrWatchClosed + } + lastEvent = &event + + done, err := condition(event) + if err != nil { + return lastEvent, err + } + if done { + break ConditionSucceeded + } + + case <-ctx.Done(): + return lastEvent, wait.ErrWaitTimeout + } + } + } + return lastEvent, nil +} + +// Until wraps the watcherClient's watch function with RetryWatcher making sure that watcher gets restarted in case of errors. +// The initialResourceVersion will be given to watch method when first called. It shall not be "" or "0" +// given the underlying WATCH call issues (#74022). +// Remaining behaviour is identical to function UntilWithoutRetry. (See above.) +// Until can deal with API timeouts and lost connections. +// It guarantees you to see all events and in the order they happened. +// Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case. +// (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing +// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.) +// +// The most frequent usage for Until would be a test where you want to verify exact order of events ("edges"). +func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) { + w, err := NewRetryWatcher(initialResourceVersion, watcherClient) + if err != nil { + return nil, err + } + + return UntilWithoutRetry(ctx, w, conditions...) 
+} + +// UntilWithSync creates an informer from lw, optionally checks precondition when the store is synced, +// and watches the output until each provided condition succeeds, in a way that is identical +// to function UntilWithoutRetry. (See above.) +// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'. +// It is the only function that can recover from 'Resource version too old', Until and UntilWithoutRetry will +// just fail in that case. On the other hand it can't provide you with guarantees as strong as using simple +// Watch method with Until. It can skip some intermediate events in case of watch function failing but it will +// re-list to recover and you always get an event, if there has been a change, after recovery. +// Also with the current implementation based on DeltaFIFO, order of the events you receive is guaranteed only for +// particular object, not between more of them even it's the same resource. +// The most frequent usage would be a command that needs to watch the "state of the world" and should't fail, like: +// waiting for object reaching a state, "small" controllers, ... +func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) { + indexer, informer, watcher, done := NewIndexerInformerWatcher(lw, objType) + // We need to wait for the internal informers to fully stop so it's easier to reason about + // and it works with non-thread safe clients. + defer func() { <-done }() + // Proxy watcher can be stopped multiple times so it's fine to use defer here to cover alternative branches and + // let UntilWithoutRetry to stop it + defer watcher.Stop() + + if precondition != nil { + if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { + return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %w", ctx.Err()) + } + + done, err := precondition(indexer) + if err != nil { + return nil, err + } + + if done { + return nil, nil + } + } + + return UntilWithoutRetry(ctx, watcher, conditions...) +} + +// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration. +func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + if timeout < 0 { + // This should be handled in validation + klog.Errorf("Timeout for context shall not be negative!") + timeout = 0 + } + + if timeout == 0 { + return context.WithCancel(parent) + } + + return context.WithTimeout(parent, timeout) +} diff --git a/vendor/k8s.io/client-go/util/jsonpath/doc.go b/vendor/k8s.io/client-go/util/jsonpath/doc.go new file mode 100644 index 000000000..0effb15c4 --- /dev/null +++ b/vendor/k8s.io/client-go/util/jsonpath/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package jsonpath is a template engine using jsonpath syntax, +// which can be seen at http://goessner.net/articles/JsonPath/. 
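`UntilWithSync` is the natural fit for e2e-style "wait until the pod is Running" helpers, since it recovers from lost connections and "resource version too old". A hedged sketch only; the function name, resource, and readiness condition are illustrative and not taken from this patch:

```go
package main

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPodRunning blocks until the named pod reaches PodRunning, surviving
// watch restarts via UntilWithSync's informer-backed re-listing.
func waitForPodRunning(cs kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	// ContextWithOptionalTimeout treats 0 as "no timeout", per the helper above.
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()

	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", namespace,
		fields.OneTermEqualSelector("metadata.name", name))

	_, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil,
		func(ev watch.Event) (bool, error) {
			pod, ok := ev.Object.(*corev1.Pod)
			if !ok {
				return false, nil
			}
			return pod.Status.Phase == corev1.PodRunning, nil
		})
	return err
}
```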
+// In addition, it has {range} {end} function to iterate list and slice. +package jsonpath // import "k8s.io/client-go/util/jsonpath" diff --git a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go new file mode 100644 index 000000000..86a3d6dde --- /dev/null +++ b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go @@ -0,0 +1,582 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + + "k8s.io/client-go/third_party/forked/golang/template" +) + +type JSONPath struct { + name string + parser *Parser + beginRange int + inRange int + endRange int + + lastEndNode *Node + + allowMissingKeys bool + outputJSON bool +} + +// New creates a new JSONPath with the given name. +func New(name string) *JSONPath { + return &JSONPath{ + name: name, + beginRange: 0, + inRange: 0, + endRange: 0, + } +} + +// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key +// cannot be located, or simply an empty result. The receiver is returned for chaining. +func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath { + j.allowMissingKeys = allow + return j +} + +// Parse parses the given template and returns an error. +func (j *JSONPath) Parse(text string) error { + var err error + j.parser, err = Parse(j.name, text) + return err +} + +// Execute binds data into the template and writes the result. +func (j *JSONPath) Execute(wr io.Writer, data interface{}) error { + fullResults, err := j.FindResults(data) + if err != nil { + return err + } + for ix := range fullResults { + if err := j.PrintResults(wr, fullResults[ix]); err != nil { + return err + } + } + return nil +} + +func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { + if j.parser == nil { + return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name) + } + + cur := []reflect.Value{reflect.ValueOf(data)} + nodes := j.parser.Root.Nodes + fullResult := [][]reflect.Value{} + for i := 0; i < len(nodes); i++ { + node := nodes[i] + results, err := j.walk(cur, node) + if err != nil { + return nil, err + } + + // encounter an end node, break the current block + if j.endRange > 0 && j.endRange <= j.inRange { + j.endRange-- + j.lastEndNode = &nodes[i] + break + } + // encounter a range node, start a range loop + if j.beginRange > 0 { + j.beginRange-- + j.inRange++ + if len(results) > 0 { + for _, value := range results { + j.parser.Root.Nodes = nodes[i+1:] + nextResults, err := j.FindResults(value.Interface()) + if err != nil { + return nil, err + } + fullResult = append(fullResult, nextResults...)
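+ // Illustrative note (not upstream text): the recursive FindResults call above replays the +// nodes that follow the range identifier once for each value produced by the range expression, +// so the per-iteration results accumulate in fullResult in input order.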
+ } + } else { + // If the range has no results, we still need to process the nodes within the range + // so the position will advance to the end node + j.parser.Root.Nodes = nodes[i+1:] + _, err := j.FindResults(nil) + if err != nil { + return nil, err + } + } + j.inRange-- + + // Fast forward to resume processing after the most recent end node that was encountered + for k := i + 1; k < len(nodes); k++ { + if &nodes[k] == j.lastEndNode { + i = k + break + } + } + continue + } + fullResult = append(fullResult, results) + } + return fullResult, nil +} + +// EnableJSONOutput changes the PrintResults behavior to return a JSON array of results +func (j *JSONPath) EnableJSONOutput(v bool) { + j.outputJSON = v +} + +// PrintResults writes the results into writer +func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error { + if j.outputJSON { + // convert the []reflect.Value to something that json + // will be able to marshal + r := make([]interface{}, 0, len(results)) + for i := range results { + r = append(r, results[i].Interface()) + } + results = []reflect.Value{reflect.ValueOf(r)} + } + for i, r := range results { + var text []byte + var err error + outputJSON := true + kind := r.Kind() + if kind == reflect.Interface { + kind = r.Elem().Kind() + } + switch kind { + case reflect.Map: + case reflect.Array: + case reflect.Slice: + case reflect.Struct: + default: + outputJSON = false + } + switch { + case outputJSON || j.outputJSON: + if j.outputJSON { + text, err = json.MarshalIndent(r.Interface(), "", " ") + text = append(text, '\n') + } else { + text, err = json.Marshal(r.Interface()) + } + default: + text, err = j.evalToText(r) + } + if err != nil { + return err + } + if i != len(results)-1 { + text = append(text, ' ') + } + if _, err = wr.Write(text); err != nil { + return err + } + } + + return nil + +} + +// walk visits tree rooted at the given node in DFS order +func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) { + switch node := node.(type) { + case *ListNode: + return j.evalList(value, node) + case *TextNode: + return []reflect.Value{reflect.ValueOf(node.Text)}, nil + case *FieldNode: + return j.evalField(value, node) + case *ArrayNode: + return j.evalArray(value, node) + case *FilterNode: + return j.evalFilter(value, node) + case *IntNode: + return j.evalInt(value, node) + case *BoolNode: + return j.evalBool(value, node) + case *FloatNode: + return j.evalFloat(value, node) + case *WildcardNode: + return j.evalWildcard(value, node) + case *RecursiveNode: + return j.evalRecursive(value, node) + case *UnionNode: + return j.evalUnion(value, node) + case *IdentifierNode: + return j.evalIdentifier(value, node) + default: + return value, fmt.Errorf("unexpected Node %v", node) + } +} + +// evalInt evaluates IntNode +func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalFloat evaluates FloatNode +func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalBool evaluates BoolNode +func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = 
reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalList evaluates ListNode +func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) { + var err error + curValue := value + for _, node := range node.Nodes { + curValue, err = j.walk(curValue, node) + if err != nil { + return curValue, err + } + } + return curValue, nil +} + +// evalIdentifier evaluates IdentifierNode +func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) { + results := []reflect.Value{} + switch node.Name { + case "range": + j.beginRange++ + results = input + case "end": + if j.inRange > 0 { + j.endRange++ + } else { + return results, fmt.Errorf("not in range, nothing to end") + } + default: + return input, fmt.Errorf("unrecognized identifier %v", node.Name) + } + return results, nil +} + +// evalArray evaluates ArrayNode +func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, value := range input { + + value, isNil := template.Indirect(value) + if isNil { + continue + } + if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { + return input, fmt.Errorf("%v is not array or slice", value.Type()) + } + params := node.Params + if !params[0].Known { + params[0].Value = 0 + } + if params[0].Value < 0 { + params[0].Value += value.Len() + } + if !params[1].Known { + params[1].Value = value.Len() + } + + if params[1].Value < 0 || (params[1].Value == 0 && params[1].Derived) { + params[1].Value += value.Len() + } + sliceLength := value.Len() + if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through. + if params[0].Value >= sliceLength || params[0].Value < 0 { + return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength) + } + if params[1].Value > sliceLength || params[1].Value < 0 { + return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength) + } + if params[0].Value > params[1].Value { + return input, fmt.Errorf("starting index %d is greater than ending index %d", params[0].Value, params[1].Value) + } + } else { + return result, nil + } + + value = value.Slice(params[0].Value, params[1].Value) + + step := 1 + if params[2].Known { + if params[2].Value <= 0 { + return input, fmt.Errorf("step must be > 0") + } + step = params[2].Value + } + for i := 0; i < value.Len(); i += step { + result = append(result, value.Index(i)) + } + } + return result, nil +} + +// evalUnion evaluates UnionNode +func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, listNode := range node.Nodes { + temp, err := j.evalList(input, listNode) + if err != nil { + return input, err + } + result = append(result, temp...) 
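+ // Illustrative note (not upstream text): results from each comma-separated sub-path are +// appended in lexical order.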
+ } + return result, nil +} + +func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) { + t := value.Type() + var inlineValue *reflect.Value + for ix := 0; ix < t.NumField(); ix++ { + f := t.Field(ix) + jsonTag := f.Tag.Get("json") + parts := strings.Split(jsonTag, ",") + if len(parts) == 0 { + continue + } + if parts[0] == node.Value { + return value.Field(ix), nil + } + if len(parts[0]) == 0 { + val := value.Field(ix) + inlineValue = &val + } + } + if inlineValue != nil { + if inlineValue.Kind() == reflect.Struct { + // handle 'inline' + match, err := j.findFieldInValue(inlineValue, node) + if err != nil { + return reflect.Value{}, err + } + if match.IsValid() { + return match, nil + } + } + } + return value.FieldByName(node.Value), nil +} + +// evalField evaluates field of struct or key of map. +func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) { + results := []reflect.Value{} + // If there's no input, there's no output + if len(input) == 0 { + return results, nil + } + for _, value := range input { + var result reflect.Value + value, isNil := template.Indirect(value) + if isNil { + continue + } + + if value.Kind() == reflect.Struct { + var err error + if result, err = j.findFieldInValue(&value, node); err != nil { + return nil, err + } + } else if value.Kind() == reflect.Map { + mapKeyType := value.Type().Key() + nodeValue := reflect.ValueOf(node.Value) + // node value type must be convertible to map key type + if !nodeValue.Type().ConvertibleTo(mapKeyType) { + return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType) + } + result = value.MapIndex(nodeValue.Convert(mapKeyType)) + } + if result.IsValid() { + results = append(results, result) + } + } + if len(results) == 0 { + if j.allowMissingKeys { + return results, nil + } + return results, fmt.Errorf("%s is not found", node.Value) + } + return results, nil +} + +// evalWildcard extracts all contents of the given value +func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + value, isNil := template.Indirect(value) + if isNil { + continue + } + + kind := value.Kind() + if kind == reflect.Struct { + for i := 0; i < value.NumField(); i++ { + results = append(results, value.Field(i)) + } + } else if kind == reflect.Map { + for _, key := range value.MapKeys() { + results = append(results, value.MapIndex(key)) + } + } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { + for i := 0; i < value.Len(); i++ { + results = append(results, value.Index(i)) + } + } + } + return results, nil +} + +// evalRecursive visits the given value recursively and pushes all of them to result +func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, value := range input { + results := []reflect.Value{} + value, isNil := template.Indirect(value) + if isNil { + continue + } + + kind := value.Kind() + if kind == reflect.Struct { + for i := 0; i < value.NumField(); i++ { + results = append(results, value.Field(i)) + } + } else if kind == reflect.Map { + for _, key := range value.MapKeys() { + results = append(results, value.MapIndex(key)) + } + } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { + for i := 0; i < value.Len(); i++ { + results = append(results, value.Index(i)) + } + } + if len(results) 
!= 0 { + result = append(result, value) + output, err := j.evalRecursive(results, node) + if err != nil { + return result, err + } + result = append(result, output...) + } + } + return result, nil +} + +// evalFilter filters array according to FilterNode +func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + value, _ = template.Indirect(value) + + if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { + return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value) + } + for i := 0; i < value.Len(); i++ { + temp := []reflect.Value{value.Index(i)} + lefts, err := j.evalList(temp, node.Left) + + //case exists + if node.Operator == "exists" { + if len(lefts) > 0 { + results = append(results, value.Index(i)) + } + continue + } + + if err != nil { + return input, err + } + + var left, right interface{} + switch { + case len(lefts) == 0: + continue + case len(lefts) > 1: + return input, fmt.Errorf("can only compare one element at a time") + } + left = lefts[0].Interface() + + rights, err := j.evalList(temp, node.Right) + if err != nil { + return input, err + } + switch { + case len(rights) == 0: + continue + case len(rights) > 1: + return input, fmt.Errorf("can only compare one element at a time") + } + right = rights[0].Interface() + + pass := false + switch node.Operator { + case "<": + pass, err = template.Less(left, right) + case ">": + pass, err = template.Greater(left, right) + case "==": + pass, err = template.Equal(left, right) + case "!=": + pass, err = template.NotEqual(left, right) + case "<=": + pass, err = template.LessEqual(left, right) + case ">=": + pass, err = template.GreaterEqual(left, right) + default: + return results, fmt.Errorf("unrecognized filter operator %s", node.Operator) + } + if err != nil { + return results, err + } + if pass { + results = append(results, value.Index(i)) + } + } + } + return results, nil +} + +// evalToText translates reflect value to corresponding text +func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { + iface, ok := template.PrintableValue(v) + if !ok { + return nil, fmt.Errorf("can't print type %s", v.Type()) + } + if iface == nil { + return []byte("null"), nil + } + var buffer bytes.Buffer + fmt.Fprint(&buffer, iface) + return buffer.Bytes(), nil +} diff --git a/vendor/k8s.io/client-go/util/jsonpath/node.go b/vendor/k8s.io/client-go/util/jsonpath/node.go new file mode 100644 index 000000000..83abe8b03 --- /dev/null +++ b/vendor/k8s.io/client-go/util/jsonpath/node.go @@ -0,0 +1,256 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import "fmt" + +// NodeType identifies the type of a parse tree node. 
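+// Every concrete node below embeds NodeType, which supplies the Type method required by the +// Node interface.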
+type NodeType int + +// Type returns itself and provides an easy default implementation +func (t NodeType) Type() NodeType { + return t +} + +func (t NodeType) String() string { + return NodeTypeName[t] +} + +const ( + NodeText NodeType = iota + NodeArray + NodeList + NodeField + NodeIdentifier + NodeFilter + NodeInt + NodeFloat + NodeWildcard + NodeRecursive + NodeUnion + NodeBool +) + +var NodeTypeName = map[NodeType]string{ + NodeText: "NodeText", + NodeArray: "NodeArray", + NodeList: "NodeList", + NodeField: "NodeField", + NodeIdentifier: "NodeIdentifier", + NodeFilter: "NodeFilter", + NodeInt: "NodeInt", + NodeFloat: "NodeFloat", + NodeWildcard: "NodeWildcard", + NodeRecursive: "NodeRecursive", + NodeUnion: "NodeUnion", + NodeBool: "NodeBool", +} + +type Node interface { + Type() NodeType + String() string +} + +// ListNode holds a sequence of nodes. +type ListNode struct { + NodeType + Nodes []Node // The element nodes in lexical order. +} + +func newList() *ListNode { + return &ListNode{NodeType: NodeList} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) String() string { + return l.Type().String() +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Text string // The text; may span newlines. +} + +func newText(text string) *TextNode { + return &TextNode{NodeType: NodeText, Text: text} +} + +func (t *TextNode) String() string { + return fmt.Sprintf("%s: %s", t.Type(), t.Text) +} + +// FieldNode holds field of struct +type FieldNode struct { + NodeType + Value string +} + +func newField(value string) *FieldNode { + return &FieldNode{NodeType: NodeField, Value: value} +} + +func (f *FieldNode) String() string { + return fmt.Sprintf("%s: %s", f.Type(), f.Value) +} + +// IdentifierNode holds an identifier +type IdentifierNode struct { + NodeType + Name string +} + +func newIdentifier(value string) *IdentifierNode { + return &IdentifierNode{ + NodeType: NodeIdentifier, + Name: value, + } +} + +func (f *IdentifierNode) String() string { + return fmt.Sprintf("%s: %s", f.Type(), f.Name) +} + +// ParamsEntry holds param information for ArrayNode +type ParamsEntry struct { + Value int + Known bool // whether the value is known when parse it + Derived bool +} + +// ArrayNode holds start, end, step information for array index selection +type ArrayNode struct { + NodeType + Params [3]ParamsEntry // start, end, step +} + +func newArray(params [3]ParamsEntry) *ArrayNode { + return &ArrayNode{ + NodeType: NodeArray, + Params: params, + } +} + +func (a *ArrayNode) String() string { + return fmt.Sprintf("%s: %v", a.Type(), a.Params) +} + +// FilterNode holds operand and operator information for filter +type FilterNode struct { + NodeType + Left *ListNode + Right *ListNode + Operator string +} + +func newFilter(left, right *ListNode, operator string) *FilterNode { + return &FilterNode{ + NodeType: NodeFilter, + Left: left, + Right: right, + Operator: operator, + } +} + +func (f *FilterNode) String() string { + return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right) +} + +// IntNode holds integer value +type IntNode struct { + NodeType + Value int +} + +func newInt(num int) *IntNode { + return &IntNode{NodeType: NodeInt, Value: num} +} + +func (i *IntNode) String() string { + return fmt.Sprintf("%s: %d", i.Type(), i.Value) +} + +// FloatNode holds float value +type FloatNode struct { + NodeType + Value float64 +} + +func newFloat(num float64) *FloatNode { + return &FloatNode{NodeType: NodeFloat, Value: num} +} + 
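+// Illustrative sketch (editor's note, not upstream code): parsing the template "{.items[0]}" +// yields a Root ListNode holding one inner ListNode whose children are FieldNode("items") +// followed by an ArrayNode whose Params are [{Value:0 Known:true} {Value:1 Known:true Derived:true} {Known:false}], +// i.e. start index 0, derived end index 1, and an unspecified step.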
+func (i *FloatNode) String() string { + return fmt.Sprintf("%s: %f", i.Type(), i.Value) +} + +// WildcardNode means a wildcard +type WildcardNode struct { + NodeType +} + +func newWildcard() *WildcardNode { + return &WildcardNode{NodeType: NodeWildcard} +} + +func (i *WildcardNode) String() string { + return i.Type().String() +} + +// RecursiveNode means a recursive descent operator +type RecursiveNode struct { + NodeType +} + +func newRecursive() *RecursiveNode { + return &RecursiveNode{NodeType: NodeRecursive} +} + +func (r *RecursiveNode) String() string { + return r.Type().String() +} + +// UnionNode is union of ListNode +type UnionNode struct { + NodeType + Nodes []*ListNode +} + +func newUnion(nodes []*ListNode) *UnionNode { + return &UnionNode{NodeType: NodeUnion, Nodes: nodes} +} + +func (u *UnionNode) String() string { + return u.Type().String() +} + +// BoolNode holds bool value +type BoolNode struct { + NodeType + Value bool +} + +func newBool(value bool) *BoolNode { + return &BoolNode{NodeType: NodeBool, Value: value} +} + +func (b *BoolNode) String() string { + return fmt.Sprintf("%s: %t", b.Type(), b.Value) +} diff --git a/vendor/k8s.io/client-go/util/jsonpath/parser.go b/vendor/k8s.io/client-go/util/jsonpath/parser.go new file mode 100644 index 000000000..40bab188d --- /dev/null +++ b/vendor/k8s.io/client-go/util/jsonpath/parser.go @@ -0,0 +1,527 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +const eof = -1 + +const ( + leftDelim = "{" + rightDelim = "}" +) + +type Parser struct { + Name string + Root *ListNode + input string + pos int + start int + width int +} + +var ( + ErrSyntax = errors.New("invalid syntax") + dictKeyRex = regexp.MustCompile(`^'([^']*)'$`) + sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:-?[\d]*)?$`) +) + +// Parse parsed the given text and return a node Parser. +// If an error is encountered, parsing stops and an empty +// Parser is returned with the error +func Parse(name, text string) (*Parser, error) { + p := NewParser(name) + err := p.Parse(text) + if err != nil { + p = nil + } + return p, err +} + +func NewParser(name string) *Parser { + return &Parser{ + Name: name, + } +} + +// parseAction parsed the expression inside delimiter +func parseAction(name, text string) (*Parser, error) { + p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim)) + // when error happens, p will be nil, so we need to return here + if err != nil { + return p, err + } + p.Root = p.Root.Nodes[0].(*ListNode) + return p, nil +} + +func (p *Parser) Parse(text string) error { + p.input = text + p.Root = newList() + p.pos = 0 + return p.parseText(p.Root) +} + +// consumeText return the parsed text since last cosumeText +func (p *Parser) consumeText() string { + value := p.input[p.start:p.pos] + p.start = p.pos + return value +} + +// next returns the next rune in the input. 
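+// It also records the decoded rune's byte width in p.width so that backup can step back exactly one rune.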
+func (p *Parser) next() rune { + if p.pos >= len(p.input) { + p.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(p.input[p.pos:]) + p.width = w + p.pos += p.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (p *Parser) peek() rune { + r := p.next() + p.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (p *Parser) backup() { + p.pos -= p.width +} + +func (p *Parser) parseText(cur *ListNode) error { + for { + if strings.HasPrefix(p.input[p.pos:], leftDelim) { + if p.pos > p.start { + cur.append(newText(p.consumeText())) + } + return p.parseLeftDelim(cur) + } + if p.next() == eof { + break + } + } + // Correctly reached EOF. + if p.pos > p.start { + cur.append(newText(p.consumeText())) + } + return nil +} + +// parseLeftDelim scans the left delimiter, which is known to be present. +func (p *Parser) parseLeftDelim(cur *ListNode) error { + p.pos += len(leftDelim) + p.consumeText() + newNode := newList() + cur.append(newNode) + cur = newNode + return p.parseInsideAction(cur) +} + +func (p *Parser) parseInsideAction(cur *ListNode) error { + prefixMap := map[string]func(*ListNode) error{ + rightDelim: p.parseRightDelim, + "[?(": p.parseFilter, + "..": p.parseRecursive, + } + for prefix, parseFunc := range prefixMap { + if strings.HasPrefix(p.input[p.pos:], prefix) { + return parseFunc(cur) + } + } + + switch r := p.next(); { + case r == eof || isEndOfLine(r): + return fmt.Errorf("unclosed action") + case r == ' ': + p.consumeText() + case r == '@' || r == '$': //the current object, just pass it + p.consumeText() + case r == '[': + return p.parseArray(cur) + case r == '"' || r == '\'': + return p.parseQuote(cur, r) + case r == '.': + return p.parseField(cur) + case r == '+' || r == '-' || unicode.IsDigit(r): + p.backup() + return p.parseNumber(cur) + case isAlphaNumeric(r): + p.backup() + return p.parseIdentifier(cur) + default: + return fmt.Errorf("unrecognized character in action: %#U", r) + } + return p.parseInsideAction(cur) +} + +// parseRightDelim scans the right delimiter, which is known to be present. +func (p *Parser) parseRightDelim(cur *ListNode) error { + p.pos += len(rightDelim) + p.consumeText() + return p.parseText(p.Root) +} + +// parseIdentifier scans build-in keywords, like "range" "end" +func (p *Parser) parseIdentifier(cur *ListNode) error { + var r rune + for { + r = p.next() + if isTerminator(r) { + p.backup() + break + } + } + value := p.consumeText() + + if isBool(value) { + v, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("can not parse bool '%s': %s", value, err.Error()) + } + + cur.append(newBool(v)) + } else { + cur.append(newIdentifier(value)) + } + + return p.parseInsideAction(cur) +} + +// parseRecursive scans the recursive descent operator .. +func (p *Parser) parseRecursive(cur *ListNode) error { + if lastIndex := len(cur.Nodes) - 1; lastIndex >= 0 && cur.Nodes[lastIndex].Type() == NodeRecursive { + return fmt.Errorf("invalid multiple recursive descent") + } + p.pos += len("..") + p.consumeText() + cur.append(newRecursive()) + if r := p.peek(); isAlphaNumeric(r) { + return p.parseField(cur) + } + return p.parseInsideAction(cur) +} + +// parseNumber scans number +func (p *Parser) parseNumber(cur *ListNode) error { + r := p.peek() + if r == '+' || r == '-' { + p.next() + } + for { + r = p.next() + if r != '.' 
&& !unicode.IsDigit(r) { + p.backup() + break + } + } + value := p.consumeText() + i, err := strconv.Atoi(value) + if err == nil { + cur.append(newInt(i)) + return p.parseInsideAction(cur) + } + d, err := strconv.ParseFloat(value, 64) + if err == nil { + cur.append(newFloat(d)) + return p.parseInsideAction(cur) + } + return fmt.Errorf("cannot parse number %s", value) +} + +// parseArray scans array index selection +func (p *Parser) parseArray(cur *ListNode) error { +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated array") + case ']': + break Loop + } + } + text := p.consumeText() + text = text[1 : len(text)-1] + if text == "*" { + text = ":" + } + + //union operator + strs := strings.Split(text, ",") + if len(strs) > 1 { + union := []*ListNode{} + for _, str := range strs { + parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " "))) + if err != nil { + return err + } + union = append(union, parser.Root) + } + cur.append(newUnion(union)) + return p.parseInsideAction(cur) + } + + // dict key + value := dictKeyRex.FindStringSubmatch(text) + if value != nil { + parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) + if err != nil { + return err + } + for _, node := range parser.Root.Nodes { + cur.append(node) + } + return p.parseInsideAction(cur) + } + + //slice operator + value = sliceOperatorRex.FindStringSubmatch(text) + if value == nil { + return fmt.Errorf("invalid array index %s", text) + } + value = value[1:] + params := [3]ParamsEntry{} + for i := 0; i < 3; i++ { + if value[i] != "" { + if i > 0 { + value[i] = value[i][1:] + } + if i > 0 && value[i] == "" { + params[i].Known = false + } else { + var err error + params[i].Known = true + params[i].Value, err = strconv.Atoi(value[i]) + if err != nil { + return fmt.Errorf("array index %s is not a number", value[i]) + } + } + } else { + if i == 1 { + params[i].Known = true + params[i].Value = params[0].Value + 1 + params[i].Derived = true + } else { + params[i].Known = false + params[i].Value = 0 + } + } + } + cur.append(newArray(params)) + return p.parseInsideAction(cur) +} + +// parseFilter scans filter inside array selection +func (p *Parser) parseFilter(cur *ListNode) error { + p.pos += len("[?(") + p.consumeText() + begin := false + end := false + var pair rune + +Loop: + for { + r := p.next() + switch r { + case eof, '\n': + return fmt.Errorf("unterminated filter") + case '"', '\'': + if begin == false { + //save the paired rune + begin = true + pair = r + continue + } + //only add when met paired rune + if p.input[p.pos-2] != '\\' && r == pair { + end = true + } + case ')': + //in rightParser below quotes only appear zero or once + //and must be paired at the beginning and end + if begin == end { + break Loop + } + } + } + if p.next() != ']' { + return fmt.Errorf("unclosed array expect ]") + } + reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`) + text := p.consumeText() + text = text[:len(text)-2] + value := reg.FindStringSubmatch(text) + if value == nil { + parser, err := parseAction("text", text) + if err != nil { + return err + } + cur.append(newFilter(parser.Root, newList(), "exists")) + } else { + leftParser, err := parseAction("left", value[1]) + if err != nil { + return err + } + rightParser, err := parseAction("right", value[3]) + if err != nil { + return err + } + cur.append(newFilter(leftParser.Root, rightParser.Root, value[2])) + } + return p.parseInsideAction(cur) +} + +// parseQuote unquotes string inside double or single quote +func (p 
*Parser) parseQuote(cur *ListNode, end rune) error { +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated quoted string") + case end: + //if it's not escape break the Loop + if p.input[p.pos-2] != '\\' { + break Loop + } + } + } + value := p.consumeText() + s, err := UnquoteExtend(value) + if err != nil { + return fmt.Errorf("unquote string %s error %v", value, err) + } + cur.append(newText(s)) + return p.parseInsideAction(cur) +} + +// parseField scans a field until a terminator +func (p *Parser) parseField(cur *ListNode) error { + p.consumeText() + for p.advance() { + } + value := p.consumeText() + if value == "*" { + cur.append(newWildcard()) + } else { + cur.append(newField(strings.Replace(value, "\\", "", -1))) + } + return p.parseInsideAction(cur) +} + +// advance scans until next non-escaped terminator +func (p *Parser) advance() bool { + r := p.next() + if r == '\\' { + p.next() + } else if isTerminator(r) { + p.backup() + return false + } + return true +} + +// isTerminator reports whether the input is at valid termination character to appear after an identifier. +func isTerminator(r rune) bool { + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '[', ']', '$', '@', '{', '}': + return true + } + return false +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} + +// isBool reports whether s is a boolean value. +func isBool(s string) bool { + return s == "true" || s == "false" +} + +// UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string +func UnquoteExtend(s string) (string, error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' && quote != '\'' { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + return s, nil + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := strconv.UnquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + } + return string(buf), nil +} + +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} diff --git a/vendor/k8s.io/component-base/LICENSE b/vendor/k8s.io/component-base/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/component-base/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/component-base/version/OWNERS b/vendor/k8s.io/component-base/version/OWNERS new file mode 100644 index 000000000..08c7aea9c --- /dev/null +++ b/vendor/k8s.io/component-base/version/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Currently assigned this directory to sig-api-machinery since this is +# an interface to the version definition in "k8s.io/apimachinery/pkg/version", +# and also to sig-release since this version information is set up for +# each release. + +approvers: + - sig-api-machinery-api-approvers + - release-engineering-approvers +reviewers: + - sig-api-machinery-api-reviewers + - release-managers +labels: + - sig/api-machinery + - sig/release diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go new file mode 100644 index 000000000..b753b7d19 --- /dev/null +++ b/vendor/k8s.io/component-base/version/base.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Base version information. +// +// This is the fallback data used when version information from git is not +// provided via go ldflags. It provides an approximation of the Kubernetes +// version for ad-hoc builds (e.g. `go build`) that cannot get the version +// information from git. +// +// If you are looking at these fields in the git tree, they look +// strange. They are modified on the fly by the build process. The +// in-tree values are dummy values used for "git archive", which also +// works for GitHub tar downloads. +// +// When releasing a new Kubernetes version, this file is updated by +// build/mark_new_version.sh to reflect the new version, and then a +// git annotated tag (using format vX.Y where X == Major version and Y +// == Minor version) is created to point to the commit that updates +// component-base/version/base.go +var ( + // TODO: Deprecate gitMajor and gitMinor, use only gitVersion + // instead. First step in deprecation, keep the fields but make + // them irrelevant. (Next we'll take it out, which may muck with + // scripts consuming the kubectl version output - but most of + // these should be looking at gitVersion already anyways.) + gitMajor string // major version, always numeric + gitMinor string // minor version, numeric possibly followed by "+" + + // semantic version, derived by build scripts (see + // https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md + // for a detailed discussion of this field) + // + // TODO: This field is still called "gitVersion" for legacy + // reasons. For prerelease versions, the build metadata on the + // semantic version is a git hash, but the version itself is no + // longer the direct output of "git describe", but a slight + // translation to be semver compliant. + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. 
See also https://git-scm.com/docs/gitattributes + gitVersion = "v0.0.0-master+$Format:%H$" + gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState = "" // state of git tree, either "clean" or "dirty" + + buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +) diff --git a/vendor/k8s.io/component-base/version/dynamic.go b/vendor/k8s.io/component-base/version/dynamic.go new file mode 100644 index 000000000..46ade9f5e --- /dev/null +++ b/vendor/k8s.io/component-base/version/dynamic.go @@ -0,0 +1,77 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "sync/atomic" + + utilversion "k8s.io/apimachinery/pkg/util/version" +) + +var dynamicGitVersion atomic.Value + +func init() { + // initialize to static gitVersion + dynamicGitVersion.Store(gitVersion) +} + +// SetDynamicVersion overrides the version returned as the GitVersion from Get(). +// The specified version must be non-empty, a valid semantic version, and must +// match the major/minor/patch version of the default gitVersion. +func SetDynamicVersion(dynamicVersion string) error { + if err := ValidateDynamicVersion(dynamicVersion); err != nil { + return err + } + dynamicGitVersion.Store(dynamicVersion) + return nil +} + +// ValidateDynamicVersion ensures the given version is non-empty, a valid semantic version, +// and matched the major/minor/patch version of the default gitVersion. +func ValidateDynamicVersion(dynamicVersion string) error { + return validateDynamicVersion(dynamicVersion, gitVersion) +} + +func validateDynamicVersion(dynamicVersion, defaultVersion string) error { + if len(dynamicVersion) == 0 { + return fmt.Errorf("version must not be empty") + } + if dynamicVersion == defaultVersion { + // allow no-op + return nil + } + vRuntime, err := utilversion.ParseSemantic(dynamicVersion) + if err != nil { + return err + } + // must match major/minor/patch of default version + var vDefault *utilversion.Version + if defaultVersion == "v0.0.0-master+$Format:%H$" { + // special-case the placeholder value which doesn't parse as a semantic version + vDefault, err = utilversion.ParseSemantic("v0.0.0-master") + } else { + vDefault, err = utilversion.ParseSemantic(defaultVersion) + } + if err != nil { + return err + } + if vRuntime.Major() != vDefault.Major() || vRuntime.Minor() != vDefault.Minor() || vRuntime.Patch() != vDefault.Patch() { + return fmt.Errorf("version %q must match major/minor/patch of default version %q", dynamicVersion, defaultVersion) + } + return nil +} diff --git a/vendor/k8s.io/component-base/version/version.go b/vendor/k8s.io/component-base/version/version.go new file mode 100644 index 000000000..1d268d4c6 --- /dev/null +++ b/vendor/k8s.io/component-base/version/version.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() apimachineryversion.Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in ./base.go + return apimachineryversion.Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: dynamicGitVersion.Load().(string), + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} diff --git a/vendor/k8s.io/helm/pkg/chartutil/capabilities.go b/vendor/k8s.io/helm/pkg/chartutil/capabilities.go deleted file mode 100644 index a6808c702..000000000 --- a/vendor/k8s.io/helm/pkg/chartutil/capabilities.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -//go:generate go run generator/capabilities_default_versions_generate.go - -package chartutil - -import ( - "fmt" - "runtime" - - "k8s.io/apimachinery/pkg/version" - tversion "k8s.io/helm/pkg/proto/hapi/version" -) - -var ( - // DefaultVersionSet is the default version set in included in Kubernetes for workloads - // Default versions as of Kubernetes 1.14 - DefaultVersionSet = NewVersionSet(defaultVersions()...) - - // DefaultKubeVersion is the default kubernetes version - DefaultKubeVersion = &version.Info{ - Major: "1", - Minor: "14", - GitVersion: "v1.14.0", - GoVersion: runtime.Version(), - Compiler: runtime.Compiler, - Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), - } -) - -// Capabilities describes the capabilities of the Kubernetes cluster that Tiller is attached to. -type Capabilities struct { - // APIVersions list of all supported API versions - APIVersions VersionSet - // KubeVersion is the Kubernetes version - KubeVersion *version.Info - // TillerVersion is the Tiller version - // - // This always comes from pkg/version.GetVersionProto(). - TillerVersion *tversion.Version -} - -// VersionSet is a set of Kubernetes API versions. -type VersionSet map[string]interface{} - -// NewVersionSet creates a new version set from a list of strings. -func NewVersionSet(apiVersions ...string) VersionSet { - vs := VersionSet{} - for _, v := range apiVersions { - vs[v] = struct{}{} - } - return vs -} - -// Has returns true if the version string is in the set. 
-// -// vs.Has("extensions/v1beta1") -func (v VersionSet) Has(apiVersion string) bool { - _, ok := v[apiVersion] - return ok -} diff --git a/vendor/k8s.io/helm/pkg/chartutil/capabilities_versions_generated.go b/vendor/k8s.io/helm/pkg/chartutil/capabilities_versions_generated.go deleted file mode 100644 index 370ad216c..000000000 --- a/vendor/k8s.io/helm/pkg/chartutil/capabilities_versions_generated.go +++ /dev/null @@ -1,576 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by capabilities_default_versions_generate.go; DO NOT EDIT. - -package chartutil - -func defaultVersions() []string { - return []string{ - "__internal", - "__internal/WatchEvent", - "admissionregistration.k8s.io/__internal", - "admissionregistration.k8s.io/__internal/WatchEvent", - "admissionregistration.k8s.io/v1beta1", - "admissionregistration.k8s.io/v1beta1/CreateOptions", - "admissionregistration.k8s.io/v1beta1/DeleteOptions", - "admissionregistration.k8s.io/v1beta1/ExportOptions", - "admissionregistration.k8s.io/v1beta1/GetOptions", - "admissionregistration.k8s.io/v1beta1/ListOptions", - "admissionregistration.k8s.io/v1beta1/MutatingWebhookConfiguration", - "admissionregistration.k8s.io/v1beta1/MutatingWebhookConfigurationList", - "admissionregistration.k8s.io/v1beta1/PatchOptions", - "admissionregistration.k8s.io/v1beta1/UpdateOptions", - "admissionregistration.k8s.io/v1beta1/ValidatingWebhookConfiguration", - "admissionregistration.k8s.io/v1beta1/ValidatingWebhookConfigurationList", - "admissionregistration.k8s.io/v1beta1/WatchEvent", - "apps/__internal", - "apps/__internal/WatchEvent", - "apps/v1", - "apps/v1/ControllerRevision", - "apps/v1/ControllerRevisionList", - "apps/v1/CreateOptions", - "apps/v1/DaemonSet", - "apps/v1/DaemonSetList", - "apps/v1/DeleteOptions", - "apps/v1/Deployment", - "apps/v1/DeploymentList", - "apps/v1/ExportOptions", - "apps/v1/GetOptions", - "apps/v1/ListOptions", - "apps/v1/PatchOptions", - "apps/v1/ReplicaSet", - "apps/v1/ReplicaSetList", - "apps/v1/StatefulSet", - "apps/v1/StatefulSetList", - "apps/v1/UpdateOptions", - "apps/v1/WatchEvent", - "apps/v1beta1", - "apps/v1beta1/ControllerRevision", - "apps/v1beta1/ControllerRevisionList", - "apps/v1beta1/CreateOptions", - "apps/v1beta1/DeleteOptions", - "apps/v1beta1/Deployment", - "apps/v1beta1/DeploymentList", - "apps/v1beta1/DeploymentRollback", - "apps/v1beta1/ExportOptions", - "apps/v1beta1/GetOptions", - "apps/v1beta1/ListOptions", - "apps/v1beta1/PatchOptions", - "apps/v1beta1/Scale", - "apps/v1beta1/StatefulSet", - "apps/v1beta1/StatefulSetList", - "apps/v1beta1/UpdateOptions", - "apps/v1beta1/WatchEvent", - "apps/v1beta2", - "apps/v1beta2/ControllerRevision", - "apps/v1beta2/ControllerRevisionList", - "apps/v1beta2/CreateOptions", - "apps/v1beta2/DaemonSet", - "apps/v1beta2/DaemonSetList", - "apps/v1beta2/DeleteOptions", - "apps/v1beta2/Deployment", - "apps/v1beta2/DeploymentList", - "apps/v1beta2/ExportOptions", - "apps/v1beta2/GetOptions", - "apps/v1beta2/ListOptions", - 
"apps/v1beta2/PatchOptions", - "apps/v1beta2/ReplicaSet", - "apps/v1beta2/ReplicaSetList", - "apps/v1beta2/Scale", - "apps/v1beta2/StatefulSet", - "apps/v1beta2/StatefulSetList", - "apps/v1beta2/UpdateOptions", - "apps/v1beta2/WatchEvent", - "auditregistration.k8s.io/__internal", - "auditregistration.k8s.io/__internal/WatchEvent", - "auditregistration.k8s.io/v1alpha1", - "auditregistration.k8s.io/v1alpha1/AuditSink", - "auditregistration.k8s.io/v1alpha1/AuditSinkList", - "auditregistration.k8s.io/v1alpha1/CreateOptions", - "auditregistration.k8s.io/v1alpha1/DeleteOptions", - "auditregistration.k8s.io/v1alpha1/ExportOptions", - "auditregistration.k8s.io/v1alpha1/GetOptions", - "auditregistration.k8s.io/v1alpha1/ListOptions", - "auditregistration.k8s.io/v1alpha1/PatchOptions", - "auditregistration.k8s.io/v1alpha1/UpdateOptions", - "auditregistration.k8s.io/v1alpha1/WatchEvent", - "authentication.k8s.io/__internal", - "authentication.k8s.io/__internal/WatchEvent", - "authentication.k8s.io/v1", - "authentication.k8s.io/v1/CreateOptions", - "authentication.k8s.io/v1/DeleteOptions", - "authentication.k8s.io/v1/ExportOptions", - "authentication.k8s.io/v1/GetOptions", - "authentication.k8s.io/v1/ListOptions", - "authentication.k8s.io/v1/PatchOptions", - "authentication.k8s.io/v1/TokenRequest", - "authentication.k8s.io/v1/TokenReview", - "authentication.k8s.io/v1/UpdateOptions", - "authentication.k8s.io/v1/WatchEvent", - "authentication.k8s.io/v1beta1", - "authentication.k8s.io/v1beta1/CreateOptions", - "authentication.k8s.io/v1beta1/DeleteOptions", - "authentication.k8s.io/v1beta1/ExportOptions", - "authentication.k8s.io/v1beta1/GetOptions", - "authentication.k8s.io/v1beta1/ListOptions", - "authentication.k8s.io/v1beta1/PatchOptions", - "authentication.k8s.io/v1beta1/TokenReview", - "authentication.k8s.io/v1beta1/UpdateOptions", - "authentication.k8s.io/v1beta1/WatchEvent", - "authorization.k8s.io/__internal", - "authorization.k8s.io/__internal/WatchEvent", - "authorization.k8s.io/v1", - "authorization.k8s.io/v1/CreateOptions", - "authorization.k8s.io/v1/DeleteOptions", - "authorization.k8s.io/v1/ExportOptions", - "authorization.k8s.io/v1/GetOptions", - "authorization.k8s.io/v1/ListOptions", - "authorization.k8s.io/v1/LocalSubjectAccessReview", - "authorization.k8s.io/v1/PatchOptions", - "authorization.k8s.io/v1/SelfSubjectAccessReview", - "authorization.k8s.io/v1/SelfSubjectRulesReview", - "authorization.k8s.io/v1/SubjectAccessReview", - "authorization.k8s.io/v1/UpdateOptions", - "authorization.k8s.io/v1/WatchEvent", - "authorization.k8s.io/v1beta1", - "authorization.k8s.io/v1beta1/CreateOptions", - "authorization.k8s.io/v1beta1/DeleteOptions", - "authorization.k8s.io/v1beta1/ExportOptions", - "authorization.k8s.io/v1beta1/GetOptions", - "authorization.k8s.io/v1beta1/ListOptions", - "authorization.k8s.io/v1beta1/LocalSubjectAccessReview", - "authorization.k8s.io/v1beta1/PatchOptions", - "authorization.k8s.io/v1beta1/SelfSubjectAccessReview", - "authorization.k8s.io/v1beta1/SelfSubjectRulesReview", - "authorization.k8s.io/v1beta1/SubjectAccessReview", - "authorization.k8s.io/v1beta1/UpdateOptions", - "authorization.k8s.io/v1beta1/WatchEvent", - "autoscaling/__internal", - "autoscaling/__internal/WatchEvent", - "autoscaling/v1", - "autoscaling/v1/CreateOptions", - "autoscaling/v1/DeleteOptions", - "autoscaling/v1/ExportOptions", - "autoscaling/v1/GetOptions", - "autoscaling/v1/HorizontalPodAutoscaler", - "autoscaling/v1/HorizontalPodAutoscalerList", - "autoscaling/v1/ListOptions", - 
"autoscaling/v1/PatchOptions", - "autoscaling/v1/Scale", - "autoscaling/v1/UpdateOptions", - "autoscaling/v1/WatchEvent", - "autoscaling/v2beta1", - "autoscaling/v2beta1/CreateOptions", - "autoscaling/v2beta1/DeleteOptions", - "autoscaling/v2beta1/ExportOptions", - "autoscaling/v2beta1/GetOptions", - "autoscaling/v2beta1/HorizontalPodAutoscaler", - "autoscaling/v2beta1/HorizontalPodAutoscalerList", - "autoscaling/v2beta1/ListOptions", - "autoscaling/v2beta1/PatchOptions", - "autoscaling/v2beta1/UpdateOptions", - "autoscaling/v2beta1/WatchEvent", - "autoscaling/v2beta2", - "autoscaling/v2beta2/CreateOptions", - "autoscaling/v2beta2/DeleteOptions", - "autoscaling/v2beta2/ExportOptions", - "autoscaling/v2beta2/GetOptions", - "autoscaling/v2beta2/HorizontalPodAutoscaler", - "autoscaling/v2beta2/HorizontalPodAutoscalerList", - "autoscaling/v2beta2/ListOptions", - "autoscaling/v2beta2/PatchOptions", - "autoscaling/v2beta2/UpdateOptions", - "autoscaling/v2beta2/WatchEvent", - "batch/__internal", - "batch/__internal/WatchEvent", - "batch/v1", - "batch/v1/CreateOptions", - "batch/v1/DeleteOptions", - "batch/v1/ExportOptions", - "batch/v1/GetOptions", - "batch/v1/Job", - "batch/v1/JobList", - "batch/v1/ListOptions", - "batch/v1/PatchOptions", - "batch/v1/UpdateOptions", - "batch/v1/WatchEvent", - "batch/v1beta1", - "batch/v1beta1/CreateOptions", - "batch/v1beta1/CronJob", - "batch/v1beta1/CronJobList", - "batch/v1beta1/DeleteOptions", - "batch/v1beta1/ExportOptions", - "batch/v1beta1/GetOptions", - "batch/v1beta1/JobTemplate", - "batch/v1beta1/ListOptions", - "batch/v1beta1/PatchOptions", - "batch/v1beta1/UpdateOptions", - "batch/v1beta1/WatchEvent", - "batch/v2alpha1", - "batch/v2alpha1/CreateOptions", - "batch/v2alpha1/CronJob", - "batch/v2alpha1/CronJobList", - "batch/v2alpha1/DeleteOptions", - "batch/v2alpha1/ExportOptions", - "batch/v2alpha1/GetOptions", - "batch/v2alpha1/JobTemplate", - "batch/v2alpha1/ListOptions", - "batch/v2alpha1/PatchOptions", - "batch/v2alpha1/UpdateOptions", - "batch/v2alpha1/WatchEvent", - "certificates.k8s.io/__internal", - "certificates.k8s.io/__internal/WatchEvent", - "certificates.k8s.io/v1beta1", - "certificates.k8s.io/v1beta1/CertificateSigningRequest", - "certificates.k8s.io/v1beta1/CertificateSigningRequestList", - "certificates.k8s.io/v1beta1/CreateOptions", - "certificates.k8s.io/v1beta1/DeleteOptions", - "certificates.k8s.io/v1beta1/ExportOptions", - "certificates.k8s.io/v1beta1/GetOptions", - "certificates.k8s.io/v1beta1/ListOptions", - "certificates.k8s.io/v1beta1/PatchOptions", - "certificates.k8s.io/v1beta1/UpdateOptions", - "certificates.k8s.io/v1beta1/WatchEvent", - "coordination.k8s.io/__internal", - "coordination.k8s.io/__internal/WatchEvent", - "coordination.k8s.io/v1", - "coordination.k8s.io/v1/CreateOptions", - "coordination.k8s.io/v1/DeleteOptions", - "coordination.k8s.io/v1/ExportOptions", - "coordination.k8s.io/v1/GetOptions", - "coordination.k8s.io/v1/Lease", - "coordination.k8s.io/v1/LeaseList", - "coordination.k8s.io/v1/ListOptions", - "coordination.k8s.io/v1/PatchOptions", - "coordination.k8s.io/v1/UpdateOptions", - "coordination.k8s.io/v1/WatchEvent", - "coordination.k8s.io/v1beta1", - "coordination.k8s.io/v1beta1/CreateOptions", - "coordination.k8s.io/v1beta1/DeleteOptions", - "coordination.k8s.io/v1beta1/ExportOptions", - "coordination.k8s.io/v1beta1/GetOptions", - "coordination.k8s.io/v1beta1/Lease", - "coordination.k8s.io/v1beta1/LeaseList", - "coordination.k8s.io/v1beta1/ListOptions", - "coordination.k8s.io/v1beta1/PatchOptions", - 
"coordination.k8s.io/v1beta1/UpdateOptions", - "coordination.k8s.io/v1beta1/WatchEvent", - "events.k8s.io/__internal", - "events.k8s.io/__internal/WatchEvent", - "events.k8s.io/v1beta1", - "events.k8s.io/v1beta1/CreateOptions", - "events.k8s.io/v1beta1/DeleteOptions", - "events.k8s.io/v1beta1/Event", - "events.k8s.io/v1beta1/EventList", - "events.k8s.io/v1beta1/ExportOptions", - "events.k8s.io/v1beta1/GetOptions", - "events.k8s.io/v1beta1/ListOptions", - "events.k8s.io/v1beta1/PatchOptions", - "events.k8s.io/v1beta1/UpdateOptions", - "events.k8s.io/v1beta1/WatchEvent", - "extensions/__internal", - "extensions/__internal/WatchEvent", - "extensions/v1beta1", - "extensions/v1beta1/CreateOptions", - "extensions/v1beta1/DaemonSet", - "extensions/v1beta1/DaemonSetList", - "extensions/v1beta1/DeleteOptions", - "extensions/v1beta1/Deployment", - "extensions/v1beta1/DeploymentList", - "extensions/v1beta1/DeploymentRollback", - "extensions/v1beta1/ExportOptions", - "extensions/v1beta1/GetOptions", - "extensions/v1beta1/Ingress", - "extensions/v1beta1/IngressList", - "extensions/v1beta1/ListOptions", - "extensions/v1beta1/NetworkPolicy", - "extensions/v1beta1/NetworkPolicyList", - "extensions/v1beta1/PatchOptions", - "extensions/v1beta1/PodSecurityPolicy", - "extensions/v1beta1/PodSecurityPolicyList", - "extensions/v1beta1/ReplicaSet", - "extensions/v1beta1/ReplicaSetList", - "extensions/v1beta1/ReplicationControllerDummy", - "extensions/v1beta1/Scale", - "extensions/v1beta1/UpdateOptions", - "extensions/v1beta1/WatchEvent", - "networking.k8s.io/__internal", - "networking.k8s.io/__internal/WatchEvent", - "networking.k8s.io/v1", - "networking.k8s.io/v1/CreateOptions", - "networking.k8s.io/v1/DeleteOptions", - "networking.k8s.io/v1/ExportOptions", - "networking.k8s.io/v1/GetOptions", - "networking.k8s.io/v1/ListOptions", - "networking.k8s.io/v1/NetworkPolicy", - "networking.k8s.io/v1/NetworkPolicyList", - "networking.k8s.io/v1/PatchOptions", - "networking.k8s.io/v1/UpdateOptions", - "networking.k8s.io/v1/WatchEvent", - "networking.k8s.io/v1beta1", - "networking.k8s.io/v1beta1/CreateOptions", - "networking.k8s.io/v1beta1/DeleteOptions", - "networking.k8s.io/v1beta1/ExportOptions", - "networking.k8s.io/v1beta1/GetOptions", - "networking.k8s.io/v1beta1/Ingress", - "networking.k8s.io/v1beta1/IngressList", - "networking.k8s.io/v1beta1/ListOptions", - "networking.k8s.io/v1beta1/PatchOptions", - "networking.k8s.io/v1beta1/UpdateOptions", - "networking.k8s.io/v1beta1/WatchEvent", - "node.k8s.io/__internal", - "node.k8s.io/__internal/WatchEvent", - "node.k8s.io/v1alpha1", - "node.k8s.io/v1alpha1/CreateOptions", - "node.k8s.io/v1alpha1/DeleteOptions", - "node.k8s.io/v1alpha1/ExportOptions", - "node.k8s.io/v1alpha1/GetOptions", - "node.k8s.io/v1alpha1/ListOptions", - "node.k8s.io/v1alpha1/PatchOptions", - "node.k8s.io/v1alpha1/RuntimeClass", - "node.k8s.io/v1alpha1/RuntimeClassList", - "node.k8s.io/v1alpha1/UpdateOptions", - "node.k8s.io/v1alpha1/WatchEvent", - "node.k8s.io/v1beta1", - "node.k8s.io/v1beta1/CreateOptions", - "node.k8s.io/v1beta1/DeleteOptions", - "node.k8s.io/v1beta1/ExportOptions", - "node.k8s.io/v1beta1/GetOptions", - "node.k8s.io/v1beta1/ListOptions", - "node.k8s.io/v1beta1/PatchOptions", - "node.k8s.io/v1beta1/RuntimeClass", - "node.k8s.io/v1beta1/RuntimeClassList", - "node.k8s.io/v1beta1/UpdateOptions", - "node.k8s.io/v1beta1/WatchEvent", - "policy/__internal", - "policy/__internal/WatchEvent", - "policy/v1beta1", - "policy/v1beta1/CreateOptions", - "policy/v1beta1/DeleteOptions", - 
"policy/v1beta1/Eviction", - "policy/v1beta1/ExportOptions", - "policy/v1beta1/GetOptions", - "policy/v1beta1/ListOptions", - "policy/v1beta1/PatchOptions", - "policy/v1beta1/PodDisruptionBudget", - "policy/v1beta1/PodDisruptionBudgetList", - "policy/v1beta1/PodSecurityPolicy", - "policy/v1beta1/PodSecurityPolicyList", - "policy/v1beta1/UpdateOptions", - "policy/v1beta1/WatchEvent", - "rbac.authorization.k8s.io/__internal", - "rbac.authorization.k8s.io/__internal/WatchEvent", - "rbac.authorization.k8s.io/v1", - "rbac.authorization.k8s.io/v1/ClusterRole", - "rbac.authorization.k8s.io/v1/ClusterRoleBinding", - "rbac.authorization.k8s.io/v1/ClusterRoleBindingList", - "rbac.authorization.k8s.io/v1/ClusterRoleList", - "rbac.authorization.k8s.io/v1/CreateOptions", - "rbac.authorization.k8s.io/v1/DeleteOptions", - "rbac.authorization.k8s.io/v1/ExportOptions", - "rbac.authorization.k8s.io/v1/GetOptions", - "rbac.authorization.k8s.io/v1/ListOptions", - "rbac.authorization.k8s.io/v1/PatchOptions", - "rbac.authorization.k8s.io/v1/Role", - "rbac.authorization.k8s.io/v1/RoleBinding", - "rbac.authorization.k8s.io/v1/RoleBindingList", - "rbac.authorization.k8s.io/v1/RoleList", - "rbac.authorization.k8s.io/v1/UpdateOptions", - "rbac.authorization.k8s.io/v1/WatchEvent", - "rbac.authorization.k8s.io/v1alpha1", - "rbac.authorization.k8s.io/v1alpha1/ClusterRole", - "rbac.authorization.k8s.io/v1alpha1/ClusterRoleBinding", - "rbac.authorization.k8s.io/v1alpha1/ClusterRoleBindingList", - "rbac.authorization.k8s.io/v1alpha1/ClusterRoleList", - "rbac.authorization.k8s.io/v1alpha1/CreateOptions", - "rbac.authorization.k8s.io/v1alpha1/DeleteOptions", - "rbac.authorization.k8s.io/v1alpha1/ExportOptions", - "rbac.authorization.k8s.io/v1alpha1/GetOptions", - "rbac.authorization.k8s.io/v1alpha1/ListOptions", - "rbac.authorization.k8s.io/v1alpha1/PatchOptions", - "rbac.authorization.k8s.io/v1alpha1/Role", - "rbac.authorization.k8s.io/v1alpha1/RoleBinding", - "rbac.authorization.k8s.io/v1alpha1/RoleBindingList", - "rbac.authorization.k8s.io/v1alpha1/RoleList", - "rbac.authorization.k8s.io/v1alpha1/UpdateOptions", - "rbac.authorization.k8s.io/v1alpha1/WatchEvent", - "rbac.authorization.k8s.io/v1beta1", - "rbac.authorization.k8s.io/v1beta1/ClusterRole", - "rbac.authorization.k8s.io/v1beta1/ClusterRoleBinding", - "rbac.authorization.k8s.io/v1beta1/ClusterRoleBindingList", - "rbac.authorization.k8s.io/v1beta1/ClusterRoleList", - "rbac.authorization.k8s.io/v1beta1/CreateOptions", - "rbac.authorization.k8s.io/v1beta1/DeleteOptions", - "rbac.authorization.k8s.io/v1beta1/ExportOptions", - "rbac.authorization.k8s.io/v1beta1/GetOptions", - "rbac.authorization.k8s.io/v1beta1/ListOptions", - "rbac.authorization.k8s.io/v1beta1/PatchOptions", - "rbac.authorization.k8s.io/v1beta1/Role", - "rbac.authorization.k8s.io/v1beta1/RoleBinding", - "rbac.authorization.k8s.io/v1beta1/RoleBindingList", - "rbac.authorization.k8s.io/v1beta1/RoleList", - "rbac.authorization.k8s.io/v1beta1/UpdateOptions", - "rbac.authorization.k8s.io/v1beta1/WatchEvent", - "scheduling.k8s.io/__internal", - "scheduling.k8s.io/__internal/WatchEvent", - "scheduling.k8s.io/v1", - "scheduling.k8s.io/v1/CreateOptions", - "scheduling.k8s.io/v1/DeleteOptions", - "scheduling.k8s.io/v1/ExportOptions", - "scheduling.k8s.io/v1/GetOptions", - "scheduling.k8s.io/v1/ListOptions", - "scheduling.k8s.io/v1/PatchOptions", - "scheduling.k8s.io/v1/PriorityClass", - "scheduling.k8s.io/v1/PriorityClassList", - "scheduling.k8s.io/v1/UpdateOptions", - "scheduling.k8s.io/v1/WatchEvent", - 
"scheduling.k8s.io/v1alpha1", - "scheduling.k8s.io/v1alpha1/CreateOptions", - "scheduling.k8s.io/v1alpha1/DeleteOptions", - "scheduling.k8s.io/v1alpha1/ExportOptions", - "scheduling.k8s.io/v1alpha1/GetOptions", - "scheduling.k8s.io/v1alpha1/ListOptions", - "scheduling.k8s.io/v1alpha1/PatchOptions", - "scheduling.k8s.io/v1alpha1/PriorityClass", - "scheduling.k8s.io/v1alpha1/PriorityClassList", - "scheduling.k8s.io/v1alpha1/UpdateOptions", - "scheduling.k8s.io/v1alpha1/WatchEvent", - "scheduling.k8s.io/v1beta1", - "scheduling.k8s.io/v1beta1/CreateOptions", - "scheduling.k8s.io/v1beta1/DeleteOptions", - "scheduling.k8s.io/v1beta1/ExportOptions", - "scheduling.k8s.io/v1beta1/GetOptions", - "scheduling.k8s.io/v1beta1/ListOptions", - "scheduling.k8s.io/v1beta1/PatchOptions", - "scheduling.k8s.io/v1beta1/PriorityClass", - "scheduling.k8s.io/v1beta1/PriorityClassList", - "scheduling.k8s.io/v1beta1/UpdateOptions", - "scheduling.k8s.io/v1beta1/WatchEvent", - "settings.k8s.io/__internal", - "settings.k8s.io/__internal/WatchEvent", - "settings.k8s.io/v1alpha1", - "settings.k8s.io/v1alpha1/CreateOptions", - "settings.k8s.io/v1alpha1/DeleteOptions", - "settings.k8s.io/v1alpha1/ExportOptions", - "settings.k8s.io/v1alpha1/GetOptions", - "settings.k8s.io/v1alpha1/ListOptions", - "settings.k8s.io/v1alpha1/PatchOptions", - "settings.k8s.io/v1alpha1/PodPreset", - "settings.k8s.io/v1alpha1/PodPresetList", - "settings.k8s.io/v1alpha1/UpdateOptions", - "settings.k8s.io/v1alpha1/WatchEvent", - "storage.k8s.io/__internal", - "storage.k8s.io/__internal/WatchEvent", - "storage.k8s.io/v1", - "storage.k8s.io/v1/CreateOptions", - "storage.k8s.io/v1/DeleteOptions", - "storage.k8s.io/v1/ExportOptions", - "storage.k8s.io/v1/GetOptions", - "storage.k8s.io/v1/ListOptions", - "storage.k8s.io/v1/PatchOptions", - "storage.k8s.io/v1/StorageClass", - "storage.k8s.io/v1/StorageClassList", - "storage.k8s.io/v1/UpdateOptions", - "storage.k8s.io/v1/VolumeAttachment", - "storage.k8s.io/v1/VolumeAttachmentList", - "storage.k8s.io/v1/WatchEvent", - "storage.k8s.io/v1alpha1", - "storage.k8s.io/v1alpha1/CreateOptions", - "storage.k8s.io/v1alpha1/DeleteOptions", - "storage.k8s.io/v1alpha1/ExportOptions", - "storage.k8s.io/v1alpha1/GetOptions", - "storage.k8s.io/v1alpha1/ListOptions", - "storage.k8s.io/v1alpha1/PatchOptions", - "storage.k8s.io/v1alpha1/UpdateOptions", - "storage.k8s.io/v1alpha1/VolumeAttachment", - "storage.k8s.io/v1alpha1/VolumeAttachmentList", - "storage.k8s.io/v1alpha1/WatchEvent", - "storage.k8s.io/v1beta1", - "storage.k8s.io/v1beta1/CSIDriver", - "storage.k8s.io/v1beta1/CSIDriverList", - "storage.k8s.io/v1beta1/CSINode", - "storage.k8s.io/v1beta1/CSINodeList", - "storage.k8s.io/v1beta1/CreateOptions", - "storage.k8s.io/v1beta1/DeleteOptions", - "storage.k8s.io/v1beta1/ExportOptions", - "storage.k8s.io/v1beta1/GetOptions", - "storage.k8s.io/v1beta1/ListOptions", - "storage.k8s.io/v1beta1/PatchOptions", - "storage.k8s.io/v1beta1/StorageClass", - "storage.k8s.io/v1beta1/StorageClassList", - "storage.k8s.io/v1beta1/UpdateOptions", - "storage.k8s.io/v1beta1/VolumeAttachment", - "storage.k8s.io/v1beta1/VolumeAttachmentList", - "storage.k8s.io/v1beta1/WatchEvent", - "v1", - "v1/APIGroup", - "v1/APIGroupList", - "v1/APIResourceList", - "v1/APIVersions", - "v1/Binding", - "v1/ComponentStatus", - "v1/ComponentStatusList", - "v1/ConfigMap", - "v1/ConfigMapList", - "v1/CreateOptions", - "v1/DeleteOptions", - "v1/Endpoints", - "v1/EndpointsList", - "v1/Event", - "v1/EventList", - "v1/ExportOptions", - "v1/GetOptions", - 
"v1/LimitRange", - "v1/LimitRangeList", - "v1/List", - "v1/ListOptions", - "v1/Namespace", - "v1/NamespaceList", - "v1/Node", - "v1/NodeList", - "v1/NodeProxyOptions", - "v1/PatchOptions", - "v1/PersistentVolume", - "v1/PersistentVolumeClaim", - "v1/PersistentVolumeClaimList", - "v1/PersistentVolumeList", - "v1/Pod", - "v1/PodAttachOptions", - "v1/PodExecOptions", - "v1/PodList", - "v1/PodLogOptions", - "v1/PodPortForwardOptions", - "v1/PodProxyOptions", - "v1/PodStatusResult", - "v1/PodTemplate", - "v1/PodTemplateList", - "v1/RangeAllocation", - "v1/ReplicationController", - "v1/ReplicationControllerList", - "v1/ResourceQuota", - "v1/ResourceQuotaList", - "v1/Secret", - "v1/SecretList", - "v1/SerializedReference", - "v1/Service", - "v1/ServiceAccount", - "v1/ServiceAccountList", - "v1/ServiceList", - "v1/ServiceProxyOptions", - "v1/Status", - "v1/UpdateOptions", - "v1/WatchEvent", - } -} diff --git a/vendor/k8s.io/helm/pkg/chartutil/create.go b/vendor/k8s.io/helm/pkg/chartutil/create.go deleted file mode 100644 index d8480f54f..000000000 --- a/vendor/k8s.io/helm/pkg/chartutil/create.go +++ /dev/null @@ -1,509 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "k8s.io/helm/pkg/proto/hapi/chart" -) - -const ( - // ChartfileName is the default Chart file name. - ChartfileName = "Chart.yaml" - // ValuesfileName is the default values file name. - ValuesfileName = "values.yaml" - // TemplatesDir is the relative directory name for templates. - TemplatesDir = "templates" - // ChartsDir is the relative directory name for charts dependencies. - ChartsDir = "charts" - // IgnorefileName is the name of the Helm ignore file. - IgnorefileName = ".helmignore" - // IngressFileName is the name of the example ingress file. - IngressFileName = "ingress.yaml" - // DeploymentName is the name of the example deployment file. - DeploymentName = "deployment.yaml" - // ServiceName is the name of the example service file. - ServiceName = "service.yaml" - // ServiceAccountName is the name of the example serviceaccount file. - ServiceAccountName = "serviceaccount.yaml" - // NotesName is the name of the example NOTES.txt file. - NotesName = "NOTES.txt" - // HelpersName is the name of the example helpers file. - HelpersName = "_helpers.tpl" - // TemplatesTestsDir is the relative directory name for templates tests. - TemplatesTestsDir = "templates/tests" - // TestConnectionName is the name of the example connection test file. - TestConnectionName = "test-connection.yaml" -) - -const defaultValues = `# Default values for %s. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -replicaCount: 1 - -image: - repository: nginx - tag: stable - pullPolicy: IfNotPresent - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: "" - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 80 - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: [] - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -nodeSelector: {} - -tolerations: [] - -affinity: {} -` - -const defaultIgnore = `# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ -` - -const defaultIngress = `{{- if .Values.ingress.enabled -}} -{{- $fullName := include ".fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: -{{ include ".labels" . | indent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: -{{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - backend: - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} -{{- end }} -` - -const defaultDeployment = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include ".fullname" . }} - labels: -{{ include ".labels" . | indent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include ".name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app.kubernetes.io/name: {{ include ".name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ template ".serviceAccountName" . 
}} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 80 - protocol: TCP - livenessProbe: - httpGet: - path: / - port: http - readinessProbe: - httpGet: - path: / - port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -` - -const defaultService = `apiVersion: v1 -kind: Service -metadata: - name: {{ include ".fullname" . }} - labels: -{{ include ".labels" . | indent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: {{ include ".name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} -` -const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template ".serviceAccountName" . }} - labels: -{{ include ".labels" . | indent 4 }} -{{- end -}} -` - -const defaultNotes = `1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include ".fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl port-forward $POD_NAME 8080:80 -{{- end }} -` - -const defaultHelpers = `{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define ".name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define ".fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define ".chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define ".labels" -}} -app.kubernetes.io/name: {{ include ".name" . }} -helm.sh/chart: {{ include ".chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define ".serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include ".fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} -` - -const defaultTestConnection = `apiVersion: v1 -kind: Pod -metadata: - name: "{{ include ".fullname" . }}-test-connection" - labels: -{{ include ".labels" . | indent 4 }} - annotations: - "helm.sh/hook": test-success -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include ".fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never -` - -// CreateFrom creates a new chart, but scaffolds it from the src chart. -func CreateFrom(chartfile *chart.Metadata, dest string, src string) error { - schart, err := Load(src) - if err != nil { - return fmt.Errorf("could not load %s: %s", src, err) - } - - schart.Metadata = chartfile - - var updatedTemplates []*chart.Template - - for _, template := range schart.Templates { - newData := Transform(string(template.Data), "", schart.Metadata.Name) - updatedTemplates = append(updatedTemplates, &chart.Template{Name: template.Name, Data: newData}) - } - - schart.Templates = updatedTemplates - if schart.Values != nil { - schart.Values = &chart.Config{Raw: string(Transform(schart.Values.Raw, "", schart.Metadata.Name))} - } - return SaveDir(schart, dest) -} - -// Create creates a new chart in a directory. -// -// Inside of dir, this will create a directory based on the name of -// chartfile.Name. It will then write the Chart.yaml into this directory and -// create the (empty) appropriate directories. -// -// The returned string will point to the newly created directory. It will be -// an absolute path, even if the provided base directory was relative. -// -// If dir does not exist, this will return an error. -// If Chart.yaml or any directories cannot be created, this will return an -// error. In such a case, this will attempt to clean up by removing the -// new chart directory. 
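The v2 scaffolder that follows has a close counterpart in the Helm v3 module. A minimal sketch, assuming the upstream `helm.sh/helm/v3/pkg/chartutil` signature `Create(name, dir string) (string, error)` — verify against the vendored copy before relying on it:

    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/chartutil"
    )

    func main() {
        // Unlike the v2 Create below, v3 takes the chart name directly
        // rather than a *chart.Metadata, and scaffolds <dir>/<name>.
        path, err := chartutil.Create("mychart", ".")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("chart scaffolded at", path)
    }
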
-func Create(chartfile *chart.Metadata, dir string) (string, error) { - path, err := filepath.Abs(dir) - if err != nil { - return path, err - } - - if fi, err := os.Stat(path); err != nil { - return path, err - } else if !fi.IsDir() { - return path, fmt.Errorf("no such directory %s", path) - } - - n := chartfile.Name - cdir := filepath.Join(path, n) - if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() { - return cdir, fmt.Errorf("file %s already exists and is not a directory", cdir) - } - if err := os.MkdirAll(cdir, 0755); err != nil { - return cdir, err - } - - cf := filepath.Join(cdir, ChartfileName) - if _, err := os.Stat(cf); err != nil { - if err := SaveChartfile(cf, chartfile); err != nil { - return cdir, err - } - } - - for _, d := range []string{TemplatesDir, TemplatesTestsDir, ChartsDir} { - if err := os.MkdirAll(filepath.Join(cdir, d), 0755); err != nil { - return cdir, err - } - } - - files := []struct { - path string - content []byte - }{ - { - // values.yaml - path: filepath.Join(cdir, ValuesfileName), - content: []byte(fmt.Sprintf(defaultValues, chartfile.Name)), - }, - { - // .helmignore - path: filepath.Join(cdir, IgnorefileName), - content: []byte(defaultIgnore), - }, - { - // ingress.yaml - path: filepath.Join(cdir, TemplatesDir, IngressFileName), - content: Transform(defaultIngress, "", chartfile.Name), - }, - { - // deployment.yaml - path: filepath.Join(cdir, TemplatesDir, DeploymentName), - content: Transform(defaultDeployment, "", chartfile.Name), - }, - { - // service.yaml - path: filepath.Join(cdir, TemplatesDir, ServiceName), - content: Transform(defaultService, "", chartfile.Name), - }, - { - // serviceaccount.yaml - path: filepath.Join(cdir, TemplatesDir, ServiceAccountName), - content: Transform(defaultServiceAccount, "", chartfile.Name), - }, - { - // NOTES.txt - path: filepath.Join(cdir, TemplatesDir, NotesName), - content: Transform(defaultNotes, "", chartfile.Name), - }, - { - // _helpers.tpl - path: filepath.Join(cdir, TemplatesDir, HelpersName), - content: Transform(defaultHelpers, "", chartfile.Name), - }, - { - // test-connection.yaml - path: filepath.Join(cdir, TemplatesTestsDir, TestConnectionName), - content: Transform(defaultTestConnection, "", chartfile.Name), - }, - } - - for _, file := range files { - if _, err := os.Stat(file.path); err == nil { - // File exists and is okay. Skip it. - continue - } - if err := ioutil.WriteFile(file.path, file.content, 0644); err != nil { - return cdir, err - } - } - return cdir, nil -} diff --git a/vendor/k8s.io/helm/pkg/chartutil/files.go b/vendor/k8s.io/helm/pkg/chartutil/files.go deleted file mode 100644 index 36391c3e8..000000000 --- a/vendor/k8s.io/helm/pkg/chartutil/files.go +++ /dev/null @@ -1,236 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package chartutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "path" - "strings" - - "github.com/ghodss/yaml" - - "github.com/BurntSushi/toml" - "github.com/gobwas/glob" - "github.com/golang/protobuf/ptypes/any" -) - -// Files is a map of files in a chart that can be accessed from a template. -type Files map[string][]byte - -// NewFiles creates a new Files from chart files. -// Given an []*any.Any (the format for files in a chart.Chart), extract a map of files. -func NewFiles(from []*any.Any) Files { - files := map[string][]byte{} - for _, f := range from { - files[f.TypeUrl] = f.Value - } - return files -} - -// GetBytes gets a file by path. -// -// The returned data is raw. In a template context, this is identical to calling -// {{index .Files $path}}. -// -// This is intended to be accessed from within a template, so a missed key returns -// an empty []byte. -func (f Files) GetBytes(name string) []byte { - v, ok := f[name] - if !ok { - return []byte{} - } - return v -} - -// Get returns a string representation of the given file. -// -// Fetch the contents of a file as a string. It is designed to be called in a -// template. -// -// {{.Files.Get "foo"}} -func (f Files) Get(name string) string { - return string(f.GetBytes(name)) -} - -// Glob takes a glob pattern and returns another files object only containing -// matched files. -// -// This is designed to be called from a template. -// -// {{ range $name, $content := .Files.Glob("foo/**") }} -// {{ $name }}: | -// {{ .Files.Get($name) | indent 4 }}{{ end }} -func (f Files) Glob(pattern string) Files { - g, err := glob.Compile(pattern, '/') - if err != nil { - g, _ = glob.Compile("**") - } - - nf := NewFiles(nil) - for name, contents := range f { - if g.Match(name) { - nf[name] = contents - } - } - - return nf -} - -// AsConfig turns a Files group and flattens it to a YAML map suitable for -// including in the 'data' section of a Kubernetes ConfigMap definition. -// Duplicate keys will be overwritten, so be aware that your file names -// (regardless of path) should be unique. -// -// This is designed to be called from a template, and will return empty string -// (via ToYaml function) if it cannot be serialized to YAML, or if the Files -// object is nil. -// -// The output will not be indented, so you will want to pipe this to the -// 'indent' template function. -// -// data: -// {{ .Files.Glob("config/**").AsConfig() | indent 4 }} -func (f Files) AsConfig() string { - if f == nil { - return "" - } - - m := map[string]string{} - - // Explicitly convert to strings, and file names - for k, v := range f { - m[path.Base(k)] = string(v) - } - - return ToYaml(m) -} - -// AsSecrets returns the base64-encoded value of a Files object suitable for -// including in the 'data' section of a Kubernetes Secret definition. -// Duplicate keys will be overwritten, so be aware that your file names -// (regardless of path) should be unique. -// -// This is designed to be called from a template, and will return empty string -// (via ToYaml function) if it cannot be serialized to YAML, or if the Files -// object is nil. -// -// The output will not be indented, so you will want to pipe this to the -// 'indent' template function. 
-// -// data: -// {{ .Files.Glob("secrets/*").AsSecrets() }} -func (f Files) AsSecrets() string { - if f == nil { - return "" - } - - m := map[string]string{} - - for k, v := range f { - m[path.Base(k)] = base64.StdEncoding.EncodeToString(v) - } - - return ToYaml(m) -} - -// Lines returns each line of a named file (split by "\n") as a slice, so it can -// be ranged over in your templates. -// -// This is designed to be called from a template. -// -// {{ range .Files.Lines "foo/bar.html" }} -// {{ . }}{{ end }} -func (f Files) Lines(path string) []string { - if f == nil || f[path] == nil { - return []string{} - } - - return strings.Split(string(f[path]), "\n") -} - -// ToYaml takes an interface, marshals it to yaml, and returns a string. It will -// always return a string, even on marshal error (empty string). -// -// This is designed to be called from a template. -func ToYaml(v interface{}) string { - data, err := yaml.Marshal(v) - if err != nil { - // Swallow errors inside of a template. - return "" - } - return string(data) -} - -// FromYaml converts a YAML document into a map[string]interface{}. -// -// This is not a general-purpose YAML parser, and will not parse all valid -// YAML documents. Additionally, because its intended use is within templates -// it tolerates errors. It will insert the returned error message string into -// m["Error"] in the returned map. -func FromYaml(str string) map[string]interface{} { - m := map[string]interface{}{} - - if err := yaml.Unmarshal([]byte(str), &m); err != nil { - m["Error"] = err.Error() - } - return m -} - -// ToToml takes an interface, marshals it to toml, and returns a string. It will -// always return a string, even on marshal error (empty string). -// -// This is designed to be called from a template. -func ToToml(v interface{}) string { - b := bytes.NewBuffer(nil) - e := toml.NewEncoder(b) - err := e.Encode(v) - if err != nil { - return err.Error() - } - return b.String() -} - -// ToJson takes an interface, marshals it to json, and returns a string. It will -// always return a string, even on marshal error (empty string). -// -// This is designed to be called from a template. -// TODO: change the function signature in Helm 3 -func ToJson(v interface{}) string { // nolint - data, err := json.Marshal(v) - if err != nil { - // Swallow errors inside of a template. - return "" - } - return string(data) -} - -// FromJson converts a JSON document into a map[string]interface{}. -// -// This is not a general-purpose JSON parser, and will not parse all valid -// JSON documents. Additionally, because its intended use is within templates -// it tolerates errors. It will insert the returned error message string into -// m["Error"] in the returned map. -// TODO: change the function signature in Helm 3 -func FromJson(str string) map[string]interface{} { // nolint - m := map[string]interface{}{} - - if err := json.Unmarshal([]byte(str), &m); err != nil { - m["Error"] = err.Error() - } - return m -} diff --git a/vendor/k8s.io/helm/pkg/chartutil/load.go b/vendor/k8s.io/helm/pkg/chartutil/load.go deleted file mode 100644 index 13724671a..000000000 --- a/vendor/k8s.io/helm/pkg/chartutil/load.go +++ /dev/null @@ -1,378 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "path" - "path/filepath" - "regexp" - "strings" - - "github.com/golang/protobuf/ptypes/any" - - "k8s.io/helm/pkg/ignore" - "k8s.io/helm/pkg/proto/hapi/chart" - "k8s.io/helm/pkg/sympath" -) - -// Load takes a string name, tries to resolve it to a file or directory, and then loads it. -// -// This is the preferred way to load a chart. It will discover the chart encoding -// and hand off to the appropriate chart reader. -// -// If a .helmignore file is present, the directory loader will skip loading any files -// matching it. But .helmignore is not evaluated when reading out of an archive. -func Load(name string) (*chart.Chart, error) { - name = filepath.FromSlash(name) - fi, err := os.Stat(name) - if err != nil { - return nil, err - } - if fi.IsDir() { - if validChart, err := IsChartDir(name); !validChart { - return nil, err - } - return LoadDir(name) - } - return LoadFile(name) -} - -// BufferedFile represents an archive file buffered for later processing. -type BufferedFile struct { - Name string - Data []byte -} - -var drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:/`) - -// loadArchiveFiles loads files out of an archive -func loadArchiveFiles(in io.Reader) ([]*BufferedFile, error) { - unzipped, err := gzip.NewReader(in) - if err != nil { - return nil, err - } - defer unzipped.Close() - - files := []*BufferedFile{} - tr := tar.NewReader(unzipped) - for { - b := bytes.NewBuffer(nil) - hd, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - - if hd.FileInfo().IsDir() { - // Use this instead of hd.Typeflag because we don't have to do any - // inference chasing. - continue - } - - switch hd.Typeflag { - // We don't want to process these extension header files. - case tar.TypeXGlobalHeader, tar.TypeXHeader: - continue - } - - // Archive could contain \ if generated on Windows - delimiter := "/" - if strings.ContainsRune(hd.Name, '\\') { - delimiter = "\\" - } - - parts := strings.Split(hd.Name, delimiter) - n := strings.Join(parts[1:], delimiter) - - // Normalize the path to the / delimiter - n = strings.Replace(n, delimiter, "/", -1) - - if path.IsAbs(n) { - return nil, errors.New("chart illegally contains absolute paths") - } - - n = path.Clean(n) - if n == "." { - // In this case, the original path was relative when it should have been absolute. - return nil, fmt.Errorf("chart illegally contains content outside the base directory: %q", hd.Name) - } - if strings.HasPrefix(n, "..") { - return nil, errors.New("chart illegally references parent directory") - } - - // In some particularly arcane acts of path creativity, it is possible to intermix - // UNIX and Windows style paths in such a way that you produce a result of the form - // c:/foo even after all the built-in absolute path checks. So we explicitly check - // for this condition. 
- if drivePathPattern.MatchString(n) { - return nil, errors.New("chart contains illegally named files") - } - - if parts[0] == "Chart.yaml" { - return nil, errors.New("chart yaml not in base directory") - } - - if _, err := io.Copy(b, tr); err != nil { - return files, err - } - - files = append(files, &BufferedFile{Name: n, Data: b.Bytes()}) - b.Reset() - } - - if len(files) == 0 { - return nil, errors.New("no files in chart archive") - } - return files, nil -} - -// LoadArchive loads from a reader containing a compressed tar archive. -func LoadArchive(in io.Reader) (*chart.Chart, error) { - files, err := loadArchiveFiles(in) - if err != nil { - return nil, err - } - return LoadFiles(files) -} - -// LoadFiles loads from in-memory files. -func LoadFiles(files []*BufferedFile) (*chart.Chart, error) { - c := &chart.Chart{} - subcharts := map[string][]*BufferedFile{} - - for _, f := range files { - if f.Name == "Chart.yaml" { - m, err := UnmarshalChartfile(f.Data) - if err != nil { - return c, err - } - c.Metadata = m - var apiVersion = c.Metadata.ApiVersion - if apiVersion != "" && apiVersion != ApiVersionV1 { - return c, fmt.Errorf("apiVersion '%s' is not valid. The value must be \"v1\"", apiVersion) - } - } else if f.Name == "values.toml" { - return c, errors.New("values.toml is illegal as of 2.0.0-alpha.2") - } else if f.Name == "values.yaml" { - c.Values = &chart.Config{Raw: string(f.Data)} - } else if strings.HasPrefix(f.Name, "templates/") { - c.Templates = append(c.Templates, &chart.Template{Name: f.Name, Data: f.Data}) - } else if strings.HasPrefix(f.Name, "charts/") { - if filepath.Ext(f.Name) == ".prov" { - c.Files = append(c.Files, &any.Any{TypeUrl: f.Name, Value: f.Data}) - continue - } - cname := strings.TrimPrefix(f.Name, "charts/") - if strings.IndexAny(cname, "._") == 0 { - // Ignore charts/ that start with . or _. - continue - } - parts := strings.SplitN(cname, "/", 2) - scname := parts[0] - subcharts[scname] = append(subcharts[scname], &BufferedFile{Name: cname, Data: f.Data}) - } else { - c.Files = append(c.Files, &any.Any{TypeUrl: f.Name, Value: f.Data}) - } - } - - // Ensure that we got a Chart.yaml file - if c.Metadata == nil { - return c, errors.New("chart metadata (Chart.yaml) missing") - } - if c.Metadata.Name == "" { - return c, errors.New("invalid chart (Chart.yaml): name must not be empty") - } - - for n, files := range subcharts { - var sc *chart.Chart - var err error - if strings.IndexAny(n, "_.") == 0 { - continue - } else if filepath.Ext(n) == ".tgz" { - file := files[0] - if file.Name != n { - return c, fmt.Errorf("error unpacking tar in %s: expected %s, got %s", c.Metadata.Name, n, file.Name) - } - // Untar the chart and add to c.Dependencies - b := bytes.NewBuffer(file.Data) - sc, err = LoadArchive(b) - } else { - // We have to trim the prefix off of every file, and ignore any file - // that is in charts/, but isn't actually a chart. - buff := make([]*BufferedFile, 0, len(files)) - for _, f := range files { - parts := strings.SplitN(f.Name, "/", 2) - if len(parts) < 2 { - continue - } - f.Name = parts[1] - buff = append(buff, f) - } - sc, err = LoadFiles(buff) - } - - if err != nil { - return c, fmt.Errorf("error unpacking %s in %s: %s", n, c.Metadata.Name, err) - } - - c.Dependencies = append(c.Dependencies, sc) - } - - return c, nil -} - -// LoadFile loads from an archive file. 
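The LoadFile/LoadDir entry points below are replaced in Helm v3 by the chart loader package. A minimal sketch, assuming `helm.sh/helm/v3/pkg/chart/loader` and a hypothetical chart path:

    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/chart/loader"
    )

    func main() {
        // loader.Load resolves its argument to either a chart directory
        // or a .tgz archive, mirroring the v2 Load dispatch shown above.
        c, err := loader.Load("./mychart")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(c.Metadata.Name, c.Metadata.Version)
    }
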
-func LoadFile(name string) (*chart.Chart, error) { - if fi, err := os.Stat(name); err != nil { - return nil, err - } else if fi.IsDir() { - return nil, errors.New("cannot load a directory") - } - - raw, err := os.Open(name) - if err != nil { - return nil, err - } - defer raw.Close() - - err = ensureArchive(name, raw) - if err != nil { - return nil, err - } - - c, err := LoadArchive(raw) - if err != nil { - if err == gzip.ErrHeader { - return nil, fmt.Errorf("file '%s' does not appear to be a valid chart file (details: %s)", name, err) - } - } - return c, err -} - -// ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive. -// -// Sometimes users will provide a values.yaml for an argument where a chart is expected. One common occurrence -// of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error -// if we didn't check for this. -func ensureArchive(name string, raw *os.File) error { - defer raw.Seek(0, 0) // reset read offset to allow archive loading to proceed. - - // Check the file format to give us a chance to provide the user with more actionable feedback. - buffer := make([]byte, 512) - _, err := raw.Read(buffer) - if err != nil && err != io.EOF { - return fmt.Errorf("file '%s' cannot be read: %s", name, err) - } - if contentType := http.DetectContentType(buffer); contentType != "application/x-gzip" { - // TODO: Is there a way to reliably test if a file content is YAML? ghodss/yaml accepts a wide - // variety of content (Makefile, .zshrc) as valid YAML without errors. - - // Wrong content type. Let's check if it's yaml and give an extra hint? - if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") { - return fmt.Errorf("file '%s' seems to be a YAML file, but I expected a gzipped archive", name) - } - return fmt.Errorf("file '%s' does not appear to be a gzipped archive; got '%s'", name, contentType) - } - return nil -} - -// LoadDir loads from a directory. -// -// This loads charts only from directories. -func LoadDir(dir string) (*chart.Chart, error) { - topdir, err := filepath.Abs(dir) - if err != nil { - return nil, err - } - - // Just used for errors. - c := &chart.Chart{} - - rules := ignore.Empty() - ifile := filepath.Join(topdir, ignore.HelmIgnore) - if _, err := os.Stat(ifile); err == nil { - r, err := ignore.ParseFile(ifile) - if err != nil { - return c, err - } - rules = r - } - rules.AddDefaults() - - files := []*BufferedFile{} - topdir += string(filepath.Separator) - - walk := func(name string, fi os.FileInfo, err error) error { - n := strings.TrimPrefix(name, topdir) - if n == "" { - // No need to process top level. Avoid bug with helmignore .* matching - // empty names. See issue 1779. - return nil - } - - // Normalize to / since it will also work on Windows - n = filepath.ToSlash(n) - - if err != nil { - return err - } - if fi.IsDir() { - // Directory-based ignore rules should involve skipping the entire - // contents of that directory. - if rules.Ignore(n, fi) { - return filepath.SkipDir - } - return nil - } - - // If a .helmignore file matches, skip this file. - if rules.Ignore(n, fi) { - return nil - } - - // Irregular files include devices, sockets, and other uses of files that - // are not regular files. In Go they have a file mode type bit set. - // See https://golang.org/pkg/os/#FileMode for examples. 
- if !fi.Mode().IsRegular() { - return fmt.Errorf("cannot load irregular file %s as it has file mode type bits set", name) - } - - data, err := ioutil.ReadFile(name) - if err != nil { - return fmt.Errorf("error reading %s: %s", n, err) - } - - files = append(files, &BufferedFile{Name: n, Data: data}) - return nil - } - if err = sympath.Walk(topdir, walk); err != nil { - return c, err - } - - return LoadFiles(files) -} diff --git a/vendor/k8s.io/helm/pkg/chartutil/requirements.go b/vendor/k8s.io/helm/pkg/chartutil/requirements.go deleted file mode 100644 index 0bd16d2eb..000000000 --- a/vendor/k8s.io/helm/pkg/chartutil/requirements.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "errors" - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/ghodss/yaml" - "k8s.io/helm/pkg/proto/hapi/chart" - "k8s.io/helm/pkg/version" -) - -const ( - requirementsName = "requirements.yaml" - lockfileName = "requirements.lock" -) - -var ( - // ErrRequirementsNotFound indicates that a requirements.yaml is not found. - ErrRequirementsNotFound = errors.New(requirementsName + " not found") - // ErrLockfileNotFound indicates that a requirements.lock is not found. - ErrLockfileNotFound = errors.New(lockfileName + " not found") -) - -// Dependency describes a chart upon which another chart depends. -// -// Dependencies can be used to express developer intent, or to capture the state -// of a chart. -type Dependency struct { - // Name is the name of the dependency. - // - // This must match the name in the dependency's Chart.yaml. - Name string `json:"name"` - // Version is the version (range) of this chart. - // - // A lock file will always produce a single version, while a dependency - // may contain a semantic version range. - Version string `json:"version,omitempty"` - // The URL to the repository. - // - // Appending `index.yaml` to this string should result in a URL that can be - // used to fetch the repository index. - Repository string `json:"repository"` - // A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled ) - Condition string `json:"condition,omitempty"` - // Tags can be used to group charts for enabling/disabling together - Tags []string `json:"tags,omitempty"` - // Enabled bool determines if chart should be loaded - Enabled bool `json:"enabled,omitempty"` - // ImportValues holds the mapping of source values to parent key to be imported. Each item can be a - // string or pair of child/parent sublist items. - ImportValues []interface{} `json:"import-values,omitempty"` - // Alias usable alias to be used for the chart - Alias string `json:"alias,omitempty"` -} - -// ErrNoRequirementsFile to detect error condition -type ErrNoRequirementsFile error - -// Requirements is a list of requirements for a chart. -// -// Requirements are charts upon which this chart depends. This expresses -// developer intent. 
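The `json` struct tags above are what let these types round-trip through ghodss/yaml, which converts YAML to JSON before unmarshalling. A self-contained sketch using trimmed-down copies of the Dependency/Requirements fields (chart names hypothetical):

    package main

    import (
        "fmt"
        "log"

        "github.com/ghodss/yaml"
    )

    // Trimmed-down copies of the Dependency/Requirements types above.
    type dependency struct {
        Name      string   `json:"name"`
        Version   string   `json:"version,omitempty"`
        Condition string   `json:"condition,omitempty"`
        Tags      []string `json:"tags,omitempty"`
    }

    type requirements struct {
        Dependencies []*dependency `json:"dependencies"`
    }

    func main() {
        raw := []byte("dependencies:\n" +
            "- name: etcd\n" +
            "  version: \">=1.0.0\"\n" +
            "  condition: etcd.enabled\n" +
            "  tags: [backend]\n")
        r := &requirements{}
        if err := yaml.Unmarshal(raw, r); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s gated by condition %q\n",
            r.Dependencies[0].Name, r.Dependencies[0].Condition)
    }
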
-type Requirements struct { - Dependencies []*Dependency `json:"dependencies"` -} - -// RequirementsLock is a lock file for requirements. -// -// It represents the state that the dependencies should be in. -type RequirementsLock struct { - // Generated is the date the lock file was last generated. - Generated time.Time `json:"generated"` - // Digest is a hash of the requirements file used to generate it. - Digest string `json:"digest"` - // Dependencies is the list of dependencies that this lock file has locked. - Dependencies []*Dependency `json:"dependencies"` -} - -// LoadRequirements loads a requirements file from an in-memory chart. -func LoadRequirements(c *chart.Chart) (*Requirements, error) { - var data []byte - for _, f := range c.Files { - if f.TypeUrl == requirementsName { - data = f.Value - } - } - if len(data) == 0 { - return nil, ErrRequirementsNotFound - } - r := &Requirements{} - return r, yaml.Unmarshal(data, r) -} - -// LoadRequirementsLock loads a requirements lock file. -func LoadRequirementsLock(c *chart.Chart) (*RequirementsLock, error) { - var data []byte - for _, f := range c.Files { - if f.TypeUrl == lockfileName { - data = f.Value - } - } - if len(data) == 0 { - return nil, ErrLockfileNotFound - } - r := &RequirementsLock{} - return r, yaml.Unmarshal(data, r) -} - -// ProcessRequirementsConditions disables charts based on condition path value in values -func ProcessRequirementsConditions(reqs *Requirements, cvals Values, cpath string) { - var cond string - var conds []string - if reqs == nil || len(reqs.Dependencies) == 0 { - return - } - for _, r := range reqs.Dependencies { - var hasTrue, hasFalse bool - cond = string(r.Condition) - // check for list - if len(cond) > 0 { - if strings.Contains(cond, ",") { - conds = strings.Split(strings.TrimSpace(cond), ",") - } else { - conds = []string{strings.TrimSpace(cond)} - } - for _, c := range conds { - if len(c) > 0 { - // retrieve value - vv, err := cvals.PathValue(cpath + c) - if err == nil { - // if not bool, warn - if bv, ok := vv.(bool); ok { - if bv { - hasTrue = true - } else { - hasFalse = true - } - } else { - log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) - } - } else if _, ok := err.(ErrNoValue); !ok { - // this is a real error - log.Printf("Warning: PathValue returned error %v", err) - - } - if vv != nil { - // got first value, break loop - break - } - } - } - if !hasTrue && hasFalse { - r.Enabled = false - } else if hasTrue { - r.Enabled = true - - } - } - - } - -} - -// ProcessRequirementsTags disables charts based on tags in values -func ProcessRequirementsTags(reqs *Requirements, cvals Values) { - vt, err := cvals.Table("tags") - if err != nil { - return - - } - if reqs == nil || len(reqs.Dependencies) == 0 { - return - } - for _, r := range reqs.Dependencies { - if len(r.Tags) > 0 { - tags := r.Tags - - var hasTrue, hasFalse bool - for _, k := range tags { - if b, ok := vt[k]; ok { - // if not bool, warn - if bv, ok := b.(bool); ok { - if bv { - hasTrue = true - } else { - hasFalse = true - } - } else { - log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name) - } - } - } - if !hasTrue && hasFalse { - r.Enabled = false - } else if hasTrue || !hasTrue && !hasFalse { - r.Enabled = true - - } - - } - } - -} - -// Validate alias names against this regexp -var aliasRegexp = regexp.MustCompile("^[a-zA-Z0-9-_]+$") - -func getAliasDependency(charts []*chart.Chart, aliasChart *Dependency) *chart.Chart { - var chartFound chart.Chart - for _, 
existingChart := range charts { - if existingChart == nil { - continue - } - if existingChart.Metadata == nil { - continue - } - if existingChart.Metadata.Name != aliasChart.Name { - continue - } - if !version.IsCompatibleRange(aliasChart.Version, existingChart.Metadata.Version) { - continue - } - chartFound = *existingChart - newMetadata := *existingChart.Metadata - if aliasChart.Alias != "" { - // Make sure Alias is well-formed - if !aliasRegexp.MatchString(aliasChart.Alias) { - fmt.Printf("Invalid alias in dependency %q. Skipping.", aliasChart.Name) - continue - } - newMetadata.Name = aliasChart.Alias - } - chartFound.Metadata = &newMetadata - return &chartFound - } - return nil -} - -// ProcessRequirementsEnabled removes disabled charts from dependencies -func ProcessRequirementsEnabled(c *chart.Chart, v *chart.Config) error { - return doProcessRequirementsEnabled(c, v, "") -} - -func doProcessRequirementsEnabled(c *chart.Chart, v *chart.Config, path string) error { - reqs, err := LoadRequirements(c) - if err != nil { - // if not just missing requirements file, return error - if nerr, ok := err.(ErrNoRequirementsFile); !ok { - return nerr - } - - // no requirements to process - return nil - } - - var chartDependencies []*chart.Chart - // If any dependency is not a part of requirements.yaml - // then this should be added to chartDependencies. - // However, if the dependency is already specified in requirements.yaml - // we should not add it, as it would be anyways processed from requirements.yaml - - for _, existingDependency := range c.Dependencies { - var dependencyFound bool - for _, req := range reqs.Dependencies { - if existingDependency.Metadata.Name == req.Name && version.IsCompatibleRange(req.Version, existingDependency.Metadata.Version) { - dependencyFound = true - break - } - } - if !dependencyFound { - chartDependencies = append(chartDependencies, existingDependency) - } - } - - for _, req := range reqs.Dependencies { - if chartDependency := getAliasDependency(c.Dependencies, req); chartDependency != nil { - chartDependencies = append(chartDependencies, chartDependency) - } - if req.Alias != "" { - if !aliasRegexp.MatchString(req.Alias) { - return fmt.Errorf("illegal alias name in %q", req.Name) - } - req.Name = req.Alias - } - } - c.Dependencies = chartDependencies - - // set all to true - for _, lr := range reqs.Dependencies { - lr.Enabled = true - } - cvals, err := CoalesceValues(c, v) - if err != nil { - return err - } - // convert our values back into config - yvals, err := cvals.YAML() - if err != nil { - return err - } - cc := chart.Config{Raw: yvals} - // flag dependencies as enabled/disabled - ProcessRequirementsTags(reqs, cvals) - ProcessRequirementsConditions(reqs, cvals, path) - // make a map of charts to remove - rm := map[string]bool{} - for _, r := range reqs.Dependencies { - if !r.Enabled { - // remove disabled chart - rm[r.Name] = true - } - } - // don't keep disabled charts in new slice - cd := []*chart.Chart{} - copy(cd, c.Dependencies[:0]) - for _, n := range c.Dependencies { - if _, ok := rm[n.Metadata.Name]; !ok { - cd = append(cd, n) - } - - } - // recursively call self to process sub dependencies - for _, t := range cd { - subpath := path + t.Metadata.Name + "." 
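// (Illustration: for a hypothetical parent chart with a subchart named
// "mysql", subpath becomes "mysql.", so a condition such as "enabled" at
// the next recursion level is resolved as "mysql.enabled" against the
// coalesced values.)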
- err := doProcessRequirementsEnabled(t, &cc, subpath) - // if its not just missing requirements file, return error - if nerr, ok := err.(ErrNoRequirementsFile); !ok && err != nil { - return nerr - } - } - c.Dependencies = cd - - return nil -} - -// pathToMap creates a nested map given a YAML path in dot notation. -func pathToMap(path string, data map[string]interface{}) map[string]interface{} { - if path == "." { - return data - } - ap := strings.Split(path, ".") - if len(ap) == 0 { - return nil - } - n := []map[string]interface{}{} - // created nested map for each key, adding to slice - for _, v := range ap { - nm := make(map[string]interface{}) - nm[v] = make(map[string]interface{}) - n = append(n, nm) - } - // find the last key (map) and set our data - for i, d := range n { - for k := range d { - z := i + 1 - if z == len(n) { - n[i][k] = data - break - } - n[i][k] = n[z] - } - } - - return n[0] -} - -// getParents returns a slice of parent charts in reverse order. -func getParents(c *chart.Chart, out []*chart.Chart) []*chart.Chart { - if len(out) == 0 { - out = []*chart.Chart{c} - } - for _, ch := range c.Dependencies { - if len(ch.Dependencies) > 0 { - out = append(out, ch) - out = getParents(ch, out) - } - } - - return out -} - -// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field. -func processImportValues(c *chart.Chart) error { - reqs, err := LoadRequirements(c) - if err != nil { - return err - } - // combine chart values and empty config to get Values - cvals, err := CoalesceValues(c, &chart.Config{}) - if err != nil { - return err - } - b := make(map[string]interface{}, 0) - // import values from each dependency if specified in import-values - for _, r := range reqs.Dependencies { - // only process raw requirement that is found in chart's dependencies (enabled) - found := false - name := r.Name - for _, v := range c.Dependencies { - if v.Metadata.Name == r.Name { - found = true - } - if v.Metadata.Name == r.Alias { - found = true - name = r.Alias - } - } - if !found { - continue - } - if len(r.ImportValues) > 0 { - var outiv []interface{} - for _, riv := range r.ImportValues { - nm := make(map[string]string, 0) - switch iv := riv.(type) { - case map[string]interface{}: - nm["child"] = iv["child"].(string) - nm["parent"] = iv["parent"].(string) - case string: - nm["child"] = "exports." + iv - nm["parent"] = "." - } - - outiv = append(outiv, nm) - s := name + "." + nm["child"] - // get child table - vv, err := cvals.Table(s) - if err != nil { - log.Printf("Warning: ImportValues missing table: %v", err) - continue - } - // create value map from child to be merged into parent - vm := pathToMap(nm["parent"], vv.AsMap()) - b = coalesceTables(b, vm, c.Metadata.Name) - - } - // set our formatted import values - r.ImportValues = outiv - } - } - b = coalesceTables(b, cvals, c.Metadata.Name) - y, err := yaml.Marshal(b) - if err != nil { - return err - } - - // set the new values - c.Values = &chart.Config{Raw: string(y)} - - return nil -} - -// ProcessRequirementsImportValues imports specified chart values from child to parent. 
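//
// For illustration, the two import-values forms handled by
// processImportValues above (key and table names hypothetical):
//
//	import-values:
//	  - child: default.data   # map form: copy the child table "default.data"
//	    parent: myimports     #           into the parent path "myimports"
//	  - data                  # string form: shorthand for child
//	                          # "exports.data" imported at parent "."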
-func ProcessRequirementsImportValues(c *chart.Chart) error { - pc := getParents(c, nil) - for i := len(pc) - 1; i >= 0; i-- { - processImportValues(pc[i]) - } - - return nil -} diff --git a/vendor/k8s.io/helm/pkg/chartutil/save.go b/vendor/k8s.io/helm/pkg/chartutil/save.go deleted file mode 100644 index 0482b1eb9..000000000 --- a/vendor/k8s.io/helm/pkg/chartutil/save.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "archive/tar" - "compress/gzip" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/ghodss/yaml" - - "k8s.io/helm/pkg/proto/hapi/chart" -) - -var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=") - -// SaveDir saves a chart as files in a directory. -func SaveDir(c *chart.Chart, dest string) error { - // Create the chart directory - outdir := filepath.Join(dest, c.Metadata.Name) - if err := os.Mkdir(outdir, 0755); err != nil { - return err - } - - // Save the chart file. - if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil { - return err - } - - // Save values.yaml - if c.Values != nil && len(c.Values.Raw) > 0 { - vf := filepath.Join(outdir, ValuesfileName) - if err := ioutil.WriteFile(vf, []byte(c.Values.Raw), 0644); err != nil { - return err - } - } - - for _, d := range []string{TemplatesDir, ChartsDir, TemplatesTestsDir} { - if err := os.MkdirAll(filepath.Join(outdir, d), 0755); err != nil { - return err - } - } - - // Save templates - for _, f := range c.Templates { - n := filepath.Join(outdir, f.Name) - - d := filepath.Dir(n) - if err := os.MkdirAll(d, 0755); err != nil { - return err - } - - if err := ioutil.WriteFile(n, f.Data, 0644); err != nil { - return err - } - } - - // Save files - for _, f := range c.Files { - n := filepath.Join(outdir, f.TypeUrl) - - d := filepath.Dir(n) - if err := os.MkdirAll(d, 0755); err != nil { - return err - } - - if err := ioutil.WriteFile(n, f.Value, 0644); err != nil { - return err - } - } - - // Save dependencies - base := filepath.Join(outdir, ChartsDir) - for _, dep := range c.Dependencies { - // Here, we write each dependency as a tar file. - if _, err := Save(dep, base); err != nil { - return err - } - } - return nil -} - -// Save creates an archived chart to the given directory. -// -// This takes an existing chart and a destination directory. -// -// If the directory is /foo, and the chart is named bar, with version 1.0.0, this -// will generate /foo/bar-1.0.0.tgz. -// -// This returns the absolute path to the chart archive file. 
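//
// From a caller's point of view (destination path hypothetical):
//
//	path, err := chartutil.Save(ch, "/tmp/charts")
//	// for a chart named "bar" at version 1.0.0:
//	// path == "/tmp/charts/bar-1.0.0.tgz", containing bar/Chart.yaml etc.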
-func Save(c *chart.Chart, outDir string) (string, error) { - // Create archive - if fi, err := os.Stat(outDir); err != nil { - return "", err - } else if !fi.IsDir() { - return "", fmt.Errorf("location %s is not a directory", outDir) - } - - if c.Metadata == nil { - return "", errors.New("no Chart.yaml data") - } - - cfile := c.Metadata - if cfile.Name == "" { - return "", errors.New("no chart name specified (Chart.yaml)") - } else if cfile.Version == "" { - return "", errors.New("no chart version specified (Chart.yaml)") - } - - filename := fmt.Sprintf("%s-%s.tgz", cfile.Name, cfile.Version) - filename = filepath.Join(outDir, filename) - if stat, err := os.Stat(filepath.Dir(filename)); os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(filename), 0755); !os.IsExist(err) { - return "", err - } - } else if !stat.IsDir() { - return "", fmt.Errorf("is not a directory: %s", filepath.Dir(filename)) - } - - f, err := os.Create(filename) - if err != nil { - return "", err - } - - // Wrap in gzip writer - zipper := gzip.NewWriter(f) - zipper.Header.Extra = headerBytes - zipper.Header.Comment = "Helm" - - // Wrap in tar writer - twriter := tar.NewWriter(zipper) - rollback := false - defer func() { - twriter.Close() - zipper.Close() - f.Close() - if rollback { - os.Remove(filename) - } - }() - - if err := writeTarContents(twriter, c, ""); err != nil { - rollback = true - } - return filename, err -} - -func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error { - base := filepath.Join(prefix, c.Metadata.Name) - - // Save Chart.yaml - cdata, err := yaml.Marshal(c.Metadata) - if err != nil { - return err - } - if err := writeToTar(out, base+"/Chart.yaml", cdata); err != nil { - return err - } - - // Save values.yaml - if c.Values != nil && len(c.Values.Raw) > 0 { - if err := writeToTar(out, base+"/values.yaml", []byte(c.Values.Raw)); err != nil { - return err - } - } - - // Save templates - for _, f := range c.Templates { - n := filepath.Join(base, f.Name) - if err := writeToTar(out, n, f.Data); err != nil { - return err - } - } - - // Save files - for _, f := range c.Files { - n := filepath.Join(base, f.TypeUrl) - if err := writeToTar(out, n, f.Value); err != nil { - return err - } - } - - // Save dependencies - for _, dep := range c.Dependencies { - if err := writeTarContents(out, dep, base+"/charts"); err != nil { - return err - } - } - return nil -} - -// writeToTar writes a single file to a tar archive. -func writeToTar(out *tar.Writer, name string, body []byte) error { - // TODO: Do we need to create dummy parent directory names if none exist? - h := &tar.Header{ - Name: filepath.ToSlash(name), - Mode: 0755, - Size: int64(len(body)), - ModTime: time.Now(), - } - if err := out.WriteHeader(h); err != nil { - return err - } - if _, err := out.Write(body); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/helm/pkg/chartutil/values.go b/vendor/k8s.io/helm/pkg/chartutil/values.go deleted file mode 100644 index 9986e642c..000000000 --- a/vendor/k8s.io/helm/pkg/chartutil/values.go +++ /dev/null @@ -1,421 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "strings" - - "github.com/ghodss/yaml" - "github.com/golang/protobuf/ptypes/timestamp" - "k8s.io/helm/pkg/proto/hapi/chart" -) - -// ErrNoTable indicates that a chart does not have a matching table. -type ErrNoTable error - -// ErrNoValue indicates that Values does not contain a key with a value -type ErrNoValue error - -// GlobalKey is the name of the Values key that is used for storing global vars. -const GlobalKey = "global" - -// Values represents a collection of chart values. -type Values map[string]interface{} - -// YAML encodes the Values into a YAML string. -func (v Values) YAML() (string, error) { - b, err := yaml.Marshal(v) - return string(b), err -} - -// Table gets a table (YAML subsection) from a Values object. -// -// The table is returned as a Values. -// -// Compound table names may be specified with dots: -// -// foo.bar -// -// The above will be evaluated as "The table bar inside the table -// foo". -// -// An ErrNoTable is returned if the table does not exist. -func (v Values) Table(name string) (Values, error) { - names := strings.Split(name, ".") - table := v - var err error - - for _, n := range names { - table, err = tableLookup(table, n) - if err != nil { - return table, err - } - } - return table, err -} - -// AsMap is a utility function for converting Values to a map[string]interface{}. -// -// It protects against nil map panics. -func (v Values) AsMap() map[string]interface{} { - if v == nil || len(v) == 0 { - return map[string]interface{}{} - } - return v -} - -// Encode writes serialized Values information to the given io.Writer. -func (v Values) Encode(w io.Writer) error { - //return yaml.NewEncoder(w).Encode(v) - out, err := yaml.Marshal(v) - if err != nil { - return err - } - _, err = w.Write(out) - return err -} - -// MergeInto takes the properties in src and merges them into Values. Maps -// are merged while values and arrays are replaced. -func (v Values) MergeInto(src Values) { - for key, srcVal := range src { - destVal, found := v[key] - - if found && istable(srcVal) && istable(destVal) { - destMap := destVal.(map[string]interface{}) - srcMap := srcVal.(map[string]interface{}) - Values(destMap).MergeInto(Values(srcMap)) - } else { - v[key] = srcVal - } - } -} - -func tableLookup(v Values, simple string) (Values, error) { - v2, ok := v[simple] - if !ok { - return v, ErrNoTable(fmt.Errorf("no table named %q (%v)", simple, v)) - } - if vv, ok := v2.(map[string]interface{}); ok { - return vv, nil - } - - // This catches a case where a value is of type Values, but doesn't (for some - // reason) match the map[string]interface{}. This has been observed in the - // wild, and might be a result of a nil map of type Values. - if vv, ok := v2.(Values); ok { - return vv, nil - } - - var e ErrNoTable = fmt.Errorf("no table named %q", simple) - return map[string]interface{}{}, e -} - -// ReadValues will parse YAML byte data into a Values. 
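//
// For illustration, tying ReadValues, Table and PathValue together with
// the "chapter.one.title" example used elsewhere in this file:
//
//	v, err := ReadValues([]byte("chapter:\n  one:\n    title: Loomings"))
//	t, err := v.Table("chapter.one")            // Values{"title": "Loomings"}
//	s, err := v.PathValue("chapter.one.title")  // "Loomings"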
-func ReadValues(data []byte) (vals Values, err error) { - err = yaml.Unmarshal(data, &vals) - if len(vals) == 0 { - vals = Values{} - } - return -} - -// ReadValuesFile will parse a YAML file into a map of values. -func ReadValuesFile(filename string) (Values, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return map[string]interface{}{}, err - } - return ReadValues(data) -} - -// CoalesceValues coalesces all of the values in a chart (and its subcharts). -// -// Values are coalesced together using the following rules: -// -// - Values in a higher level chart always override values in a lower-level -// dependency chart -// - Scalar values and arrays are replaced, maps are merged -// - A chart has access to all of the variables for it, as well as all of -// the values destined for its dependencies. -func CoalesceValues(chrt *chart.Chart, vals *chart.Config) (Values, error) { - cvals := Values{} - // Parse values if not nil. We merge these at the top level because - // the passed-in values are in the same namespace as the parent chart. - if vals != nil { - evals, err := ReadValues([]byte(vals.Raw)) - if err != nil { - return cvals, err - } - return coalesce(chrt, evals) - } - - return coalesceDeps(chrt, cvals) -} - -// coalesce coalesces the dest values and the chart values, giving priority to the dest values. -// -// This is a helper function for CoalesceValues. -func coalesce(ch *chart.Chart, dest map[string]interface{}) (map[string]interface{}, error) { - var err error - dest, err = coalesceValues(ch, dest) - if err != nil { - return dest, err - } - return coalesceDeps(ch, dest) -} - -// coalesceDeps coalesces the dependencies of the given chart. -func coalesceDeps(chrt *chart.Chart, dest map[string]interface{}) (map[string]interface{}, error) { - for _, subchart := range chrt.Dependencies { - if c, ok := dest[subchart.Metadata.Name]; !ok { - // If dest doesn't already have the key, create it. - dest[subchart.Metadata.Name] = map[string]interface{}{} - } else if !istable(c) { - return dest, fmt.Errorf("type mismatch on %s: %t", subchart.Metadata.Name, c) - } - if dv, ok := dest[subchart.Metadata.Name]; ok { - dvmap := dv.(map[string]interface{}) - - // Get globals out of dest and merge them into dvmap. - dvmap = coalesceGlobals(dvmap, dest, chrt.Metadata.Name) - - var err error - // Now coalesce the rest of the values. - dest[subchart.Metadata.Name], err = coalesce(subchart, dvmap) - if err != nil { - return dest, err - } - } - } - return dest, nil -} - -// coalesceGlobals copies the globals out of src and merges them into dest. -// -// For convenience, returns dest. -func coalesceGlobals(dest, src map[string]interface{}, chartName string) map[string]interface{} { - var dg, sg map[string]interface{} - - if destglob, ok := dest[GlobalKey]; !ok { - dg = map[string]interface{}{} - } else if dg, ok = destglob.(map[string]interface{}); !ok { - log.Printf("Warning: Skipping globals for chart '%s' because destination '%s' is not a table.", chartName, GlobalKey) - return dg - } - - if srcglob, ok := src[GlobalKey]; !ok { - sg = map[string]interface{}{} - } else if sg, ok = srcglob.(map[string]interface{}); !ok { - log.Printf("Warning: skipping globals for chart '%s' because source '%s' is not a table.", chartName, GlobalKey) - return dg - } - - rv := make(map[string]interface{}) - for k, v := range dest { - rv[k] = v - } - - // EXPERIMENTAL: In the past, we have disallowed globals to test tables. This - // reverses that decision. 
It may somehow be possible to introduce a loop - // here, but I haven't found a way. So for the time being, let's allow - // tables in globals. - - // Basically, we reverse order of coalesce here to merge - // top-down. - rv[GlobalKey] = coalesceTables(sg, dg, chartName) - return rv -} - -// coalesceValues builds up a values map for a particular chart. -// -// Values in v will override the values in the chart. -func coalesceValues(c *chart.Chart, v map[string]interface{}) (map[string]interface{}, error) { - // If there are no values in the chart, we just return the given values - if c.Values == nil || c.Values.Raw == "" { - return v, nil - } - - nv, err := ReadValues([]byte(c.Values.Raw)) - if err != nil { - // On error, we return just the overridden values. - // FIXME: We should log this error. It indicates that the YAML data - // did not parse. - return v, fmt.Errorf("Error: Reading chart '%s' default values (%s): %s", c.Metadata.Name, c.Values.Raw, err) - } - - return coalesceTables(v, nv.AsMap(), c.Metadata.Name), nil -} - -// coalesceTables merges a source map into a destination map. -// -// dest is considered authoritative. -func coalesceTables(dst, src map[string]interface{}, chartName string) map[string]interface{} { - // Because dest has higher precedence than src, dest values override src - // values. - - rv := make(map[string]interface{}) - for key, val := range src { - dv, ok := dst[key] - if !ok { // if not in dst, then copy from src - rv[key] = val - continue - } - if dv == nil { // if set to nil in dst, then ignore - // When the YAML value is null, we skip the value's key. - // This allows Helm's various sources of values (value files or --set) to - // remove incompatible keys from any previous chart, file, or set values. - continue - } - - srcTable, srcIsTable := val.(map[string]interface{}) - dstTable, dstIsTable := dv.(map[string]interface{}) - switch { - case srcIsTable && dstIsTable: // both tables, we coalesce - rv[key] = coalesceTables(dstTable, srcTable, chartName) - case srcIsTable && !dstIsTable: - log.Printf("Warning: Merging destination map for chart '%s'. Overwriting table item '%s', with non table value: %v", chartName, key, dv) - rv[key] = dv - case !srcIsTable && dstIsTable: - log.Printf("Warning: Merging destination map for chart '%s'. The destination item '%s' is a table and ignoring the source '%s' as it has a non-table value of: %v", chartName, key, key, val) - rv[key] = dv - default: // neither are tables, simply take the dst value - rv[key] = dv - } - } - - // do we have anything in dst that wasn't processed already that we need to copy across? - for key, val := range dst { - if val == nil { - continue - } - _, ok := rv[key] - if !ok { - rv[key] = val - } - } - - return rv -} - -// ReleaseOptions represents the additional release options needed -// for the composition of the final values struct -type ReleaseOptions struct { - Name string - Time *timestamp.Timestamp - Namespace string - IsUpgrade bool - IsInstall bool - Revision int -} - -// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files -// -// WARNING: This function is deprecated for Helm > 2.1.99 Use ToRenderValuesCaps() instead. It will -// remain in the codebase to stay SemVer compliant. -// -// In Helm 3.0, this will be changed to accept Capabilities as a fourth parameter. 
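//
// Sketch of the resulting top-level render context (release name
// hypothetical):
//
//	opts := ReleaseOptions{Name: "my-release", Namespace: "default", IsInstall: true}
//	top, err := ToRenderValues(chrt, chrtVals, opts)
//	// top["Release"].(map[string]interface{})["Name"] == "my-release"
//	// top also carries "Chart", "Files", "Capabilities" and "Values"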
-func ToRenderValues(chrt *chart.Chart, chrtVals *chart.Config, options ReleaseOptions) (Values, error) { - caps := &Capabilities{APIVersions: DefaultVersionSet} - return ToRenderValuesCaps(chrt, chrtVals, options, caps) -} - -// ToRenderValuesCaps composes the struct from the data coming from the Releases, Charts and Values files -// -// This takes both ReleaseOptions and Capabilities to merge into the render values. -func ToRenderValuesCaps(chrt *chart.Chart, chrtVals *chart.Config, options ReleaseOptions, caps *Capabilities) (Values, error) { - - top := map[string]interface{}{ - "Release": map[string]interface{}{ - "Name": options.Name, - "Time": options.Time, - "Namespace": options.Namespace, - "IsUpgrade": options.IsUpgrade, - "IsInstall": options.IsInstall, - "Revision": options.Revision, - "Service": "Tiller", - }, - "Chart": chrt.Metadata, - "Files": NewFiles(chrt.Files), - "Capabilities": caps, - } - - vals, err := CoalesceValues(chrt, chrtVals) - if err != nil { - return top, err - } - - top["Values"] = vals - return top, nil -} - -// istable is a special-purpose function to see if the present thing matches the definition of a YAML table. -func istable(v interface{}) bool { - _, ok := v.(map[string]interface{}) - return ok -} - -// PathValue takes a path that traverses a YAML structure and returns the value at the end of that path. -// The path starts at the root of the YAML structure and is comprised of YAML keys separated by periods. -// Given the following YAML data the value at path "chapter.one.title" is "Loomings". -// -// chapter: -// one: -// title: "Loomings" -func (v Values) PathValue(ypath string) (interface{}, error) { - if len(ypath) == 0 { - return nil, errors.New("YAML path string cannot be zero length") - } - yps := strings.Split(ypath, ".") - if len(yps) == 1 { - // if exists must be root key not table - vals := v.AsMap() - k := yps[0] - if _, ok := vals[k]; ok && !istable(vals[k]) { - // key found - return vals[yps[0]], nil - } - // key not found - return nil, ErrNoValue(fmt.Errorf("%v is not a value", k)) - } - // join all elements of YAML path except last to get string table path - ypsLen := len(yps) - table := yps[:ypsLen-1] - st := strings.Join(table, ".") - // get the last element as a string key - key := yps[ypsLen-1:] - sk := string(key[0]) - // get our table for table path - t, err := v.Table(st) - if err != nil { - //no table - return nil, ErrNoValue(fmt.Errorf("%v is not a value", sk)) - } - // check table for key and ensure value is not a table - if k, ok := t[sk]; ok && !istable(k) { - // key found - return k, nil - } - - // key not found - return nil, ErrNoValue(fmt.Errorf("key not found: %s", sk)) -} diff --git a/vendor/k8s.io/helm/pkg/engine/engine.go b/vendor/k8s.io/helm/pkg/engine/engine.go deleted file mode 100644 index 8c7d36112..000000000 --- a/vendor/k8s.io/helm/pkg/engine/engine.go +++ /dev/null @@ -1,388 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package engine - -import ( - "bytes" - "fmt" - "log" - "path" - "sort" - "strings" - "text/template" - - "github.com/Masterminds/sprig" - "github.com/pkg/errors" - - "k8s.io/helm/pkg/chartutil" - "k8s.io/helm/pkg/proto/hapi/chart" -) - -const recursionMaxNums = 1000 - -// Engine is an implementation of 'cmd/tiller/environment'.Engine that uses Go templates. -type Engine struct { - // FuncMap contains the template functions that will be passed to each - // render call. This may only be modified before the first call to Render. - FuncMap template.FuncMap - // If strict is enabled, template rendering will fail if a template references - // a value that was not passed in. - Strict bool - // In LintMode, some 'required' template values may be missing, so don't fail - LintMode bool -} - -// New creates a new Go template Engine instance. -// -// The FuncMap is initialized here. You may modify the FuncMap _prior to_ the -// first invocation of Render. -// -// The FuncMap sets all of the Sprig functions except for those that provide -// access to the underlying OS (env, expandenv). -func New() *Engine { - f := FuncMap() - return &Engine{ - FuncMap: f, - } -} - -// FuncMap returns a mapping of all of the functions that Engine has. -// -// Because some functions are late-bound (e.g. contain context-sensitive -// data), the functions may not all perform identically outside of an -// Engine as they will inside of an Engine. -// -// Known late-bound functions: -// -// - "include": This is late-bound in Engine.Render(). The version -// included in the FuncMap is a placeholder. -// - "required": This is late-bound in Engine.Render(). The version -// included in the FuncMap is a placeholder. -// - "tpl": This is late-bound in Engine.Render(). The version -// included in the FuncMap is a placeholder. -func FuncMap() template.FuncMap { - f := sprig.TxtFuncMap() - delete(f, "env") - delete(f, "expandenv") - - // Add some extra functionality - extra := template.FuncMap{ - "toToml": chartutil.ToToml, - "toYaml": chartutil.ToYaml, - "fromYaml": chartutil.FromYaml, - "toJson": chartutil.ToJson, - "fromJson": chartutil.FromJson, - - // This is a placeholder for the "include" function, which is - // late-bound to a template. By declaring it here, we preserve the - // integrity of the linter. - "include": func(string, interface{}) string { return "not implemented" }, - "required": func(string, interface{}) interface{} { return "not implemented" }, - "tpl": func(string, interface{}) interface{} { return "not implemented" }, - } - - for k, v := range extra { - f[k] = v - } - - return f -} - -// Render takes a chart, optional values, and value overrides, and attempts to render the Go templates. -// -// Render can be called repeatedly on the same engine. -// -// This will look in the chart's 'templates' data (e.g. the 'templates/' directory) -// and attempt to render the templates there using the values passed in. -// -// Values are scoped to their templates. A dependency template will not have -// access to the values set for its parent. If chart "foo" includes chart "bar", -// "bar" will not have access to the values for "foo". -// -// Values should be prepared with something like `chartutils.ReadValues`. -// -// Values are passed through the templates according to scope. If the top layer -// chart includes the chart foo, which includes the chart bar, the values map -// will be examined for a table called "foo". If "foo" is found in vals, -// that section of the values will be passed into the "foo" chart. 
And if that -// section contains a value named "bar", that value will be passed on to the -// bar chart during render time. -func (e *Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { - // Render the charts - tmap := allTemplates(chrt, values) - return e.render(tmap) -} - -// renderable is an object that can be rendered. -type renderable struct { - // tpl is the current template. - tpl string - // vals are the values to be supplied to the template. - vals chartutil.Values - // basePath namespace prefix to the templates of the current chart - basePath string -} - -// alterFuncMap takes the Engine's FuncMap and adds context-specific functions. -// -// The resulting FuncMap is only valid for the passed-in template. -func (e *Engine) alterFuncMap(t *template.Template, referenceTpls map[string]renderable) template.FuncMap { - // Clone the func map because we are adding context-specific functions. - var funcMap template.FuncMap = map[string]interface{}{} - for k, v := range e.FuncMap { - funcMap[k] = v - } - - includedNames := make(map[string]int) - - // Add the 'include' function here so we can close over t. - funcMap["include"] = func(name string, data interface{}) (string, error) { - buf := bytes.NewBuffer(nil) - if v, ok := includedNames[name]; ok { - if v > recursionMaxNums { - return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name) - } - includedNames[name]++ - } else { - includedNames[name] = 1 - } - if err := t.ExecuteTemplate(buf, name, data); err != nil { - return "", err - } - includedNames[name]-- - return buf.String(), nil - } - - // Add the 'required' function here - funcMap["required"] = func(warn string, val interface{}) (interface{}, error) { - if val == nil { - if e.LintMode { - // Don't fail on missing required values when linting - log.Printf("[INFO] Missing required value: %s", warn) - return "", nil - } - // Convert nil to "" in case required is piped into other functions - return "", fmt.Errorf(warn) - } else if _, ok := val.(string); ok { - if val == "" { - if e.LintMode { - // Don't fail on missing required values when linting - log.Printf("[INFO] Missing required value: %s", warn) - return val, nil - } - return val, fmt.Errorf(warn) - } - } - return val, nil - } - - // Add the 'tpl' function here - funcMap["tpl"] = func(tpl string, vals chartutil.Values) (string, error) { - basePath, err := vals.PathValue("Template.BasePath") - if err != nil { - return "", fmt.Errorf("Cannot retrieve Template.Basepath from values inside tpl function: %s (%s)", tpl, err.Error()) - } - - r := renderable{ - tpl: tpl, - vals: vals, - basePath: basePath.(string), - } - - templates := map[string]renderable{} - templateName, err := vals.PathValue("Template.Name") - if err != nil { - return "", fmt.Errorf("Cannot retrieve Template.Name from values inside tpl function: %s (%s)", tpl, err.Error()) - } - - templates[templateName.(string)] = r - - result, err := e.renderWithReferences(templates, referenceTpls) - if err != nil { - return "", fmt.Errorf("Error during tpl function execution for %q: %s", tpl, err.Error()) - } - return result[templateName.(string)], nil - } - - return funcMap -} - -// render takes a map of templates/values and renders them. 
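//
// For illustration, the late-bound helpers installed by alterFuncMap above
// are what make template snippets like these work at render time (template
// and value names hypothetical):
//
//	{{ include "mychart.labels" . }}
//	{{ required "A valid .Values.who entry is required" .Values.who }}
//	{{ tpl .Values.extraManifest . }}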
-func (e *Engine) render(tpls map[string]renderable) (rendered map[string]string, err error) {
-	return e.renderWithReferences(tpls, tpls)
-}
-
-// renderWithReferences takes a map of templates/values to render, and a map of
-// templates which can be referenced within them.
-func (e *Engine) renderWithReferences(tpls map[string]renderable, referenceTpls map[string]renderable) (rendered map[string]string, err error) {
-	// Basically, what we do here is start with an empty parent template and then
-	// build up a list of templates -- one for each file. Once all of the templates
-	// have been parsed, we loop through again and execute every template.
-	//
-	// The idea with this process is to make it possible for more complex templates
-	// to share common blocks, but to make the entire thing feel like a file-based
-	// template engine.
-	defer func() {
-		if r := recover(); r != nil {
-			err = fmt.Errorf("rendering template failed: %v", r)
-		}
-	}()
-	t := template.New("gotpl")
-	if e.Strict {
-		t.Option("missingkey=error")
-	} else {
-		// Not that zero will attempt to add default values for types it knows,
-		// but will still emit <no value> for others. We mitigate that later.
-		t.Option("missingkey=zero")
-	}
-
-	funcMap := e.alterFuncMap(t, referenceTpls)
-
-	// We want to parse the templates in a predictable order. The order favors
-	// higher-level (in file system) templates over deeply nested templates.
-	keys := sortTemplates(tpls)
-
-	files := []string{}
-
-	for _, fname := range keys {
-		r := tpls[fname]
-		t = t.New(fname).Funcs(funcMap)
-		if _, err := t.Parse(r.tpl); err != nil {
-			return map[string]string{}, fmt.Errorf("parse error in %q: %s", fname, err)
-		}
-		files = append(files, fname)
-	}
-
-	// Adding the reference templates to the template context
-	// so they can be referenced in the tpl function
-	for fname, r := range referenceTpls {
-		if t.Lookup(fname) == nil {
-			t = t.New(fname).Funcs(funcMap)
-			if _, err := t.Parse(r.tpl); err != nil {
-				return map[string]string{}, fmt.Errorf("parse error in %q: %s", fname, err)
-			}
-		}
-	}
-
-	rendered = make(map[string]string, len(files))
-	var buf bytes.Buffer
-	for _, file := range files {
-		// Don't render partials. We don't care about the direct output of partials.
-		// They are only included from other templates.
-		if strings.HasPrefix(path.Base(file), "_") {
-			continue
-		}
-		// At render time, add information about the template that is being rendered.
-		vals := tpls[file].vals
-		vals["Template"] = map[string]interface{}{"Name": file, "BasePath": tpls[file].basePath}
-		if err := t.ExecuteTemplate(&buf, file, vals); err != nil {
-			return map[string]string{}, fmt.Errorf("render error in %q: %s", file, err)
-		}
-
-		// Work around the issue where Go will emit "<no value>" even if Options(missing=zero)
-		// is set. Since missing=error will never get here, we do not need to handle
-		// the Strict case.
-		rendered[file] = strings.Replace(buf.String(), "<no value>", "", -1)
-		buf.Reset()
-	}
-
-	return rendered, nil
-}
-
-func sortTemplates(tpls map[string]renderable) []string {
-	keys := make([]string, len(tpls))
-	i := 0
-	for key := range tpls {
-		keys[i] = key
-		i++
-	}
-	sort.Sort(sort.Reverse(byPathLen(keys)))
-	return keys
-}
-
-type byPathLen []string
-
-func (p byPathLen) Len() int      { return len(p) }
-func (p byPathLen) Swap(i, j int) { p[j], p[i] = p[i], p[j] }
-func (p byPathLen) Less(i, j int) bool {
-	a, b := p[i], p[j]
-	ca, cb := strings.Count(a, "/"), strings.Count(b, "/")
-	if ca == cb {
-		return strings.Compare(a, b) == -1
-	}
-	return ca < cb
-}
-
-// allTemplates returns all templates for a chart and its dependencies.
-//
-// As it goes, it also prepares the values in a scope-sensitive manner.
-func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable {
-	templates := map[string]renderable{}
-	recAllTpls(c, templates, vals, true, "")
-	return templates
-}
-
-// recAllTpls recurses through the templates in a chart.
-//
-// As it recurses, it also sets the values to be appropriate for the template
-// scope.
-func recAllTpls(c *chart.Chart, templates map[string]renderable, parentVals chartutil.Values, top bool, parentID string) {
-	// This should never evaluate to a nil map. That will cause problems when
-	// values are appended later.
-	cvals := chartutil.Values{}
-	if top {
-		// If this is the top of the rendering tree, assume that parentVals
-		// is already resolved to the authoritative values.
-		cvals = parentVals
-	} else if c.Metadata != nil && c.Metadata.Name != "" {
-		// If there is a {{.Values.ThisChart}} in the parent metadata,
-		// copy that into the {{.Values}} for this template.
-		newVals := chartutil.Values{}
-		if vs, err := parentVals.Table("Values"); err == nil {
-			if tmp, err := vs.Table(c.Metadata.Name); err == nil {
-				newVals = tmp
-			}
-		}
-
-		cvals = map[string]interface{}{
-			"Values":       newVals,
-			"Release":      parentVals["Release"],
-			"Chart":        c.Metadata,
-			"Files":        chartutil.NewFiles(c.Files),
-			"Capabilities": parentVals["Capabilities"],
-		}
-	}
-
-	newParentID := c.Metadata.Name
-	if parentID != "" {
-		// We artificially reconstruct the chart path to child templates. This
-		// creates a namespaced filename that can be used to track down the source
-		// of a particular template declaration.
-		newParentID = path.Join(parentID, "charts", newParentID)
-	}
-
-	for _, child := range c.Dependencies {
-		recAllTpls(child, templates, cvals, false, newParentID)
-	}
-	for _, t := range c.Templates {
-		templates[path.Join(newParentID, t.Name)] = renderable{
-			tpl:      string(t.Data),
-			vals:     cvals,
-			basePath: path.Join(newParentID, "templates"),
-		}
-	}
-}
diff --git a/vendor/k8s.io/helm/pkg/helm/client.go b/vendor/k8s.io/helm/pkg/helm/client.go
deleted file mode 100644
index eb4f1167d..000000000
--- a/vendor/k8s.io/helm/pkg/helm/client.go
+++ /dev/null
@@ -1,565 +0,0 @@
-/*
-Copyright The Helm Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package helm // import "k8s.io/helm/pkg/helm" - -import ( - "fmt" - "io" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" - - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "k8s.io/helm/pkg/chartutil" - "k8s.io/helm/pkg/proto/hapi/chart" - rls "k8s.io/helm/pkg/proto/hapi/services" -) - -// maxMsgSize use 20MB as the default message size limit. -// grpc library default is 4MB -const maxMsgSize = 1024 * 1024 * 20 - -// Client manages client side of the Helm-Tiller protocol. -type Client struct { - opts options -} - -// NewClient creates a new client. -func NewClient(opts ...Option) *Client { - var c Client - // set some sane defaults - c.Option(ConnectTimeout(5)) - return c.Option(opts...) -} - -// Option configures the Helm client with the provided options. -func (h *Client) Option(opts ...Option) *Client { - for _, opt := range opts { - opt(&h.opts) - } - return h -} - -// ListReleases lists the current releases. -func (h *Client) ListReleases(opts ...ReleaseListOption) (*rls.ListReleasesResponse, error) { - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - req := &reqOpts.listReq - ctx := NewContext() - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - return h.list(ctx, req) -} - -// InstallRelease loads a chart from chstr, installs it, and returns the release response. -func (h *Client) InstallRelease(chstr, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - // load the chart to install - chart, err := chartutil.Load(chstr) - if err != nil { - return nil, err - } - - return h.InstallReleaseFromChart(chart, ns, opts...) -} - -// InstallReleaseWithContext loads a chart from chstr, installs it, and returns the release response while accepting a context. -func (h *Client) InstallReleaseWithContext(ctx context.Context, chstr, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - // load the chart to install - chart, err := chartutil.Load(chstr) - if err != nil { - return nil, err - } - - return h.installReleaseFromChartWithContext(ctx, chart, ns, opts...) -} - -// InstallReleaseFromChart installs a new chart and returns the release response. -func (h *Client) InstallReleaseFromChart(chart *chart.Chart, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - return h.installReleaseFromChartWithContext(NewContext(), chart, ns, opts...) -} - -// InstallReleaseFromChartWithContext installs a new chart and returns the release response while accepting a context. -func (h *Client) InstallReleaseFromChartWithContext(ctx context.Context, chart *chart.Chart, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - return h.installReleaseFromChartWithContext(ctx, chart, ns, opts...) -} - -// InstallReleaseFromChartWithContext installs a new chart and returns the release response while accepting a context. 
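//
// Sketch of typical construction and use of this client (the Tiller
// address is hypothetical; Host is one of the Options defined alongside
// ConnectTimeout):
//
//	c := helm.NewClient(helm.Host("127.0.0.1:44134"), helm.ConnectTimeout(30))
//	resp, err := c.ListReleases()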
-func (h *Client) installReleaseFromChartWithContext(ctx context.Context, chart *chart.Chart, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - // apply the install options - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - req := &reqOpts.instReq - req.Chart = chart - req.Namespace = ns - req.DryRun = reqOpts.dryRun - req.DisableHooks = reqOpts.disableHooks - req.DisableCrdHook = reqOpts.disableCRDHook - req.ReuseName = reqOpts.reuseName - ctx = FromContext(ctx) - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - err := chartutil.ProcessRequirementsEnabled(req.Chart, req.Values) - if err != nil { - return nil, err - } - err = chartutil.ProcessRequirementsImportValues(req.Chart) - if err != nil { - return nil, err - } - - return h.install(ctx, req) -} - -// DeleteRelease uninstalls a named release and returns the response. -func (h *Client) DeleteRelease(rlsName string, opts ...DeleteOption) (*rls.UninstallReleaseResponse, error) { - // apply the uninstall options - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - - if reqOpts.dryRun { - // In the dry run case, just see if the release exists - r, err := h.ReleaseContent(rlsName) - if err != nil { - return &rls.UninstallReleaseResponse{}, err - } - return &rls.UninstallReleaseResponse{Release: r.Release}, nil - } - - req := &reqOpts.uninstallReq - req.Name = rlsName - req.DisableHooks = reqOpts.disableHooks - ctx := NewContext() - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - return h.delete(ctx, req) -} - -// UpdateRelease loads a chart from chstr and updates a release to a new/different chart. -func (h *Client) UpdateRelease(rlsName string, chstr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - // load the chart to update - chart, err := chartutil.Load(chstr) - if err != nil { - return nil, err - } - - return h.UpdateReleaseFromChart(rlsName, chart, opts...) -} - -// UpdateReleaseWithContext loads a chart from chstr and updates a release to a new/different chart while accepting a context. -func (h *Client) UpdateReleaseWithContext(ctx context.Context, rlsName string, chstr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - // load the chart to update - chart, err := chartutil.Load(chstr) - if err != nil { - return nil, err - } - - return h.updateReleaseFromChartWithContext(ctx, rlsName, chart, opts...) -} - -// UpdateReleaseFromChart updates a release to a new/different chart. -func (h *Client) UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - return h.updateReleaseFromChartWithContext(NewContext(), rlsName, chart, opts...) -} - -// UpdateReleaseFromChartWithContext updates a release to a new/different chart while accepting a context. -func (h *Client) UpdateReleaseFromChartWithContext(ctx context.Context, rlsName string, chart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - return h.updateReleaseFromChartWithContext(ctx, rlsName, chart, opts...) -} - -// updateReleaseFromChartWithContext updates a release to a new/different chart and accepts a context. 
-func (h *Client) updateReleaseFromChartWithContext(ctx context.Context, rlsName string, chart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - // apply the update options - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - req := &reqOpts.updateReq - req.Chart = chart - req.DryRun = reqOpts.dryRun - req.Name = rlsName - req.DisableHooks = reqOpts.disableHooks - req.Recreate = reqOpts.recreate - req.Force = reqOpts.force - req.ResetValues = reqOpts.resetValues - req.ReuseValues = reqOpts.reuseValues - ctx = FromContext(ctx) - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - err := chartutil.ProcessRequirementsEnabled(req.Chart, req.Values) - if err != nil { - return nil, err - } - err = chartutil.ProcessRequirementsImportValues(req.Chart) - if err != nil { - return nil, err - } - - return h.update(ctx, req) -} - -// GetVersion returns the server version. -func (h *Client) GetVersion(opts ...VersionOption) (*rls.GetVersionResponse, error) { - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - req := &rls.GetVersionRequest{} - ctx := NewContext() - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - return h.version(ctx, req) -} - -// RollbackRelease rolls back a release to the previous version. -func (h *Client) RollbackRelease(rlsName string, opts ...RollbackOption) (*rls.RollbackReleaseResponse, error) { - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - req := &reqOpts.rollbackReq - req.Recreate = reqOpts.recreate - req.Force = reqOpts.force - req.DisableHooks = reqOpts.disableHooks - req.DryRun = reqOpts.dryRun - req.Name = rlsName - ctx := NewContext() - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - return h.rollback(ctx, req) -} - -// ReleaseStatus returns the given release's status. -func (h *Client) ReleaseStatus(rlsName string, opts ...StatusOption) (*rls.GetReleaseStatusResponse, error) { - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - req := &reqOpts.statusReq - req.Name = rlsName - ctx := NewContext() - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - return h.status(ctx, req) -} - -// ReleaseContent returns the configuration for a given release. -func (h *Client) ReleaseContent(rlsName string, opts ...ContentOption) (*rls.GetReleaseContentResponse, error) { - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - req := &reqOpts.contentReq - req.Name = rlsName - ctx := NewContext() - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - return h.content(ctx, req) -} - -// ReleaseHistory returns a release's revision history. -func (h *Client) ReleaseHistory(rlsName string, opts ...HistoryOption) (*rls.GetHistoryResponse, error) { - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - - req := &reqOpts.histReq - req.Name = rlsName - ctx := NewContext() - - if reqOpts.before != nil { - if err := reqOpts.before(ctx, req); err != nil { - return nil, err - } - } - return h.history(ctx, req) -} - -// RunReleaseTest executes a pre-defined test on a release. 
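//
// Sketch of consuming the message and error channels returned below,
// assuming the connection succeeds (release name hypothetical):
//
//	msgc, errc := c.RunReleaseTest("my-release")
//	for msg := range msgc {
//		fmt.Println(msg.Msg)
//	}
//	if err := <-errc; err != nil {
//		fmt.Println("release test failed:", err)
//	}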
-func (h *Client) RunReleaseTest(rlsName string, opts ...ReleaseTestOption) (<-chan *rls.TestReleaseResponse, <-chan error) { - reqOpts := h.opts - for _, opt := range opts { - opt(&reqOpts) - } - - req := &reqOpts.testReq - req.Name = rlsName - ctx := NewContext() - - return h.test(ctx, req) -} - -// PingTiller pings the Tiller pod and ensures that it is up and running -func (h *Client) PingTiller() error { - ctx := NewContext() - return h.ping(ctx) -} - -// connect returns a gRPC connection to Tiller or error. The gRPC dial options -// are constructed here. -func (h *Client) connect(ctx context.Context) (conn *grpc.ClientConn, err error) { - opts := []grpc.DialOption{ - grpc.WithBlock(), - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - // Send keepalive every 30 seconds to prevent the connection from - // getting closed by upstreams - Time: time.Duration(30) * time.Second, - }), - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)), - } - switch { - case h.opts.useTLS: - opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(h.opts.tlsConfig))) - default: - opts = append(opts, grpc.WithInsecure()) - } - ctx, cancel := context.WithTimeout(ctx, h.opts.connectTimeout) - defer cancel() - if conn, err = grpc.DialContext(ctx, h.opts.host, opts...); err != nil { - return nil, err - } - return conn, nil -} - -// list executes tiller.ListReleases RPC. -func (h *Client) list(ctx context.Context, req *rls.ListReleasesRequest) (*rls.ListReleasesResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - s, err := rlc.ListReleases(ctx, req) - if err != nil { - return nil, err - } - var resp *rls.ListReleasesResponse - for { - r, err := s.Recv() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - if resp == nil { - resp = r - continue - } - resp.Releases = append(resp.Releases, r.GetReleases()...) - } - return resp, nil -} - -// install executes tiller.InstallRelease RPC. -func (h *Client) install(ctx context.Context, req *rls.InstallReleaseRequest) (*rls.InstallReleaseResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - return rlc.InstallRelease(ctx, req) -} - -// delete executes tiller.UninstallRelease RPC. -func (h *Client) delete(ctx context.Context, req *rls.UninstallReleaseRequest) (*rls.UninstallReleaseResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - return rlc.UninstallRelease(ctx, req) -} - -// update executes tiller.UpdateRelease RPC. -func (h *Client) update(ctx context.Context, req *rls.UpdateReleaseRequest) (*rls.UpdateReleaseResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - return rlc.UpdateRelease(ctx, req) -} - -// rollback executes tiller.RollbackRelease RPC. -func (h *Client) rollback(ctx context.Context, req *rls.RollbackReleaseRequest) (*rls.RollbackReleaseResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - return rlc.RollbackRelease(ctx, req) -} - -// status executes tiller.GetReleaseStatus RPC. 
-func (h *Client) status(ctx context.Context, req *rls.GetReleaseStatusRequest) (*rls.GetReleaseStatusResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - return rlc.GetReleaseStatus(ctx, req) -} - -// content executes tiller.GetReleaseContent RPC. -func (h *Client) content(ctx context.Context, req *rls.GetReleaseContentRequest) (*rls.GetReleaseContentResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - return rlc.GetReleaseContent(ctx, req) -} - -// version executes tiller.GetVersion RPC. -func (h *Client) version(ctx context.Context, req *rls.GetVersionRequest) (*rls.GetVersionResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - return rlc.GetVersion(ctx, req) -} - -// history executes tiller.GetHistory RPC. -func (h *Client) history(ctx context.Context, req *rls.GetHistoryRequest) (*rls.GetHistoryResponse, error) { - c, err := h.connect(ctx) - if err != nil { - return nil, err - } - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - return rlc.GetHistory(ctx, req) -} - -// test executes tiller.TestRelease RPC. -func (h *Client) test(ctx context.Context, req *rls.TestReleaseRequest) (<-chan *rls.TestReleaseResponse, <-chan error) { - errc := make(chan error, 1) - c, err := h.connect(ctx) - if err != nil { - errc <- err - return nil, errc - } - - ch := make(chan *rls.TestReleaseResponse, 1) - go func() { - defer close(errc) - defer close(ch) - defer c.Close() - - rlc := rls.NewReleaseServiceClient(c) - s, err := rlc.RunReleaseTest(ctx, req) - if err != nil { - errc <- err - return - } - - for { - msg, err := s.Recv() - if err == io.EOF { - return - } - if err != nil { - errc <- err - return - } - ch <- msg - } - }() - - return ch, errc -} - -// ping executes tiller.Ping RPC. -func (h *Client) ping(ctx context.Context) error { - c, err := h.connect(ctx) - if err != nil { - return err - } - defer c.Close() - - healthClient := healthpb.NewHealthClient(c) - resp, err := healthClient.Check(ctx, &healthpb.HealthCheckRequest{Service: "Tiller"}) - if err != nil { - return err - } - switch resp.GetStatus() { - case healthpb.HealthCheckResponse_SERVING: - return nil - case healthpb.HealthCheckResponse_NOT_SERVING: - return fmt.Errorf("tiller is not serving requests at this time, Please try again later") - default: - return fmt.Errorf("tiller healthcheck returned an unknown status") - } -} diff --git a/vendor/k8s.io/helm/pkg/helm/fake.go b/vendor/k8s.io/helm/pkg/helm/fake.go deleted file mode 100644 index 3b012c00c..000000000 --- a/vendor/k8s.io/helm/pkg/helm/fake.go +++ /dev/null @@ -1,437 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helm // import "k8s.io/helm/pkg/helm" - -import ( - "bytes" - "errors" - "fmt" - "math/rand" - "strings" - "sync" - - "github.com/golang/protobuf/ptypes/timestamp" - "golang.org/x/net/context" - "k8s.io/helm/pkg/chartutil" - "k8s.io/helm/pkg/manifest" - "k8s.io/helm/pkg/proto/hapi/chart" - "k8s.io/helm/pkg/proto/hapi/release" - rls "k8s.io/helm/pkg/proto/hapi/services" - "k8s.io/helm/pkg/proto/hapi/version" - "k8s.io/helm/pkg/renderutil" - storageerrors "k8s.io/helm/pkg/storage/errors" -) - -// FakeClient implements Interface -type FakeClient struct { - Rels []*release.Release - Responses map[string]release.TestRun_Status - Opts options - RenderManifests bool -} - -// Option returns the fake release client -func (c *FakeClient) Option(opts ...Option) Interface { - for _, opt := range opts { - opt(&c.Opts) - } - return c -} - -var _ Interface = &FakeClient{} -var _ Interface = (*FakeClient)(nil) - -// ListReleases lists the current releases -func (c *FakeClient) ListReleases(opts ...ReleaseListOption) (*rls.ListReleasesResponse, error) { - reqOpts := c.Opts - for _, opt := range opts { - opt(&reqOpts) - } - req := &reqOpts.listReq - rels := c.Rels - count := int64(len(c.Rels)) - var next string - limit := req.GetLimit() - // TODO: Handle all other options. - if limit != 0 && limit < count { - rels = rels[:limit] - count = limit - next = c.Rels[limit].GetName() - } - - resp := &rls.ListReleasesResponse{ - Count: count, - Releases: rels, - } - if next != "" { - resp.Next = next - } - return resp, nil -} - -// InstallRelease creates a new release and returns a InstallReleaseResponse containing that release -func (c *FakeClient) InstallRelease(chStr, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - chart := &chart.Chart{} - return c.InstallReleaseFromChart(chart, ns, opts...) -} - -// InstallReleaseWithContext creates a new release and returns a InstallReleaseResponse containing that release and accepts a context -func (c *FakeClient) InstallReleaseWithContext(ctx context.Context, chStr, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - return c.InstallRelease(chStr, ns, opts...) -} - -// InstallReleaseFromChartWithContext adds a new MockRelease to the fake client and returns a InstallReleaseResponse containing that release and accepts a context -func (c *FakeClient) InstallReleaseFromChartWithContext(ctx context.Context, chart *chart.Chart, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - return c.InstallReleaseFromChart(chart, ns, opts...) -} - -// InstallReleaseFromChart adds a new MockRelease to the fake client and returns a InstallReleaseResponse containing that release -func (c *FakeClient) InstallReleaseFromChart(chart *chart.Chart, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { - for _, opt := range opts { - opt(&c.Opts) - } - - releaseName := c.Opts.instReq.Name - releaseDescription := c.Opts.instReq.Description - - // Check to see if the release already exists. 
- rel, err := c.ReleaseStatus(releaseName, nil) - if err == nil && rel != nil { - return nil, errors.New("cannot re-use a name that is still in use") - } - - mockOpts := &MockReleaseOptions{ - Name: releaseName, - Chart: chart, - Config: c.Opts.instReq.Values, - Namespace: ns, - Description: releaseDescription, - } - - release := ReleaseMock(mockOpts) - - if c.RenderManifests { - if err := RenderReleaseMock(release, false); err != nil { - return nil, err - } - } - - if !c.Opts.dryRun { - c.Rels = append(c.Rels, release) - } - - return &rls.InstallReleaseResponse{ - Release: release, - }, nil -} - -// DeleteRelease deletes a release from the FakeClient -func (c *FakeClient) DeleteRelease(rlsName string, opts ...DeleteOption) (*rls.UninstallReleaseResponse, error) { - for i, rel := range c.Rels { - if rel.Name == rlsName { - c.Rels = append(c.Rels[:i], c.Rels[i+1:]...) - return &rls.UninstallReleaseResponse{ - Release: rel, - }, nil - } - } - - return nil, storageerrors.ErrReleaseNotFound(rlsName) -} - -// GetVersion returns a fake version -func (c *FakeClient) GetVersion(opts ...VersionOption) (*rls.GetVersionResponse, error) { - return &rls.GetVersionResponse{ - Version: &version.Version{ - SemVer: "1.2.3-fakeclient+testonly", - }, - }, nil -} - -// UpdateRelease returns an UpdateReleaseResponse containing the updated release, if it exists -func (c *FakeClient) UpdateRelease(rlsName string, chStr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - return c.UpdateReleaseFromChart(rlsName, &chart.Chart{}, opts...) -} - -// UpdateReleaseWithContext returns an UpdateReleaseResponse containing the updated release, if it exists and accepts a context -func (c *FakeClient) UpdateReleaseWithContext(ctx context.Context, rlsName string, chStr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - return c.UpdateRelease(rlsName, chStr, opts...) -} - -// UpdateReleaseFromChartWithContext returns an UpdateReleaseResponse containing the updated release, if it exists and accepts a context -func (c *FakeClient) UpdateReleaseFromChartWithContext(ctx context.Context, rlsName string, newChart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - return c.UpdateReleaseFromChart(rlsName, newChart, opts...) -} - -// UpdateReleaseFromChart returns an UpdateReleaseResponse containing the updated release, if it exists -func (c *FakeClient) UpdateReleaseFromChart(rlsName string, newChart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { - for _, opt := range opts { - opt(&c.Opts) - } - // Check to see if the release already exists. - rel, err := c.ReleaseContent(rlsName, nil) - if err != nil { - return nil, err - } - - mockOpts := &MockReleaseOptions{ - Name: rel.Release.Name, - Version: rel.Release.Version + 1, - Chart: newChart, - Config: c.Opts.updateReq.Values, - Namespace: rel.Release.Namespace, - Description: c.Opts.updateReq.Description, - } - - newRelease := ReleaseMock(mockOpts) - - if c.Opts.updateReq.ResetValues { - newRelease.Config = &chart.Config{Raw: "{}"} - } else if c.Opts.updateReq.ReuseValues { - // TODO: This should merge old and new values but does not. 
-	}
-
-	if c.RenderManifests {
-		if err := RenderReleaseMock(newRelease, true); err != nil {
-			return nil, err
-		}
-	}
-
-	if !c.Opts.dryRun {
-		*rel.Release = *newRelease
-	}
-
-	return &rls.UpdateReleaseResponse{Release: newRelease}, nil
-}
-
-// RollbackRelease returns nil, nil
-func (c *FakeClient) RollbackRelease(rlsName string, opts ...RollbackOption) (*rls.RollbackReleaseResponse, error) {
-	return nil, nil
-}
-
-// ReleaseStatus returns a release status response with info from the matching release name.
-func (c *FakeClient) ReleaseStatus(rlsName string, opts ...StatusOption) (*rls.GetReleaseStatusResponse, error) {
-	for _, rel := range c.Rels {
-		if rel.Name == rlsName {
-			return &rls.GetReleaseStatusResponse{
-				Name:      rel.Name,
-				Info:      rel.Info,
-				Namespace: rel.Namespace,
-			}, nil
-		}
-	}
-	return nil, storageerrors.ErrReleaseNotFound(rlsName)
-}
-
-// ReleaseContent returns the configuration for the matching release name in the fake release client.
-func (c *FakeClient) ReleaseContent(rlsName string, opts ...ContentOption) (resp *rls.GetReleaseContentResponse, err error) {
-	for _, rel := range c.Rels {
-		if rel.Name == rlsName {
-			return &rls.GetReleaseContentResponse{
-				Release: rel,
-			}, nil
-		}
-	}
-	return resp, storageerrors.ErrReleaseNotFound(rlsName)
-}
-
-// ReleaseHistory returns a release's revision history.
-func (c *FakeClient) ReleaseHistory(rlsName string, opts ...HistoryOption) (*rls.GetHistoryResponse, error) {
-	reqOpts := c.Opts
-	for _, opt := range opts {
-		opt(&reqOpts)
-	}
-	maxLen := int(reqOpts.histReq.Max)
-
-	var resp rls.GetHistoryResponse
-	for _, rel := range c.Rels {
-		if maxLen > 0 && len(resp.Releases) >= maxLen {
-			return &resp, nil
-		}
-		if rel.Name == rlsName {
-			resp.Releases = append(resp.Releases, rel)
-		}
-	}
-	return &resp, nil
-}
-
-// RunReleaseTest executes pre-defined tests on a release
-func (c *FakeClient) RunReleaseTest(rlsName string, opts ...ReleaseTestOption) (<-chan *rls.TestReleaseResponse, <-chan error) {
-
-	results := make(chan *rls.TestReleaseResponse)
-	errc := make(chan error, 1)
-
-	go func() {
-		var wg sync.WaitGroup
-		for m, s := range c.Responses {
-			wg.Add(1)
-
-			go func(msg string, status release.TestRun_Status) {
-				defer wg.Done()
-				results <- &rls.TestReleaseResponse{Msg: msg, Status: status}
-			}(m, s)
-		}
-
-		wg.Wait()
-		close(results)
-		close(errc)
-	}()
-
-	return results, errc
-}
-
-// PingTiller pings the Tiller pod and ensures that it is up and running
-func (c *FakeClient) PingTiller() error {
-	return nil
-}
-
-// MockHookTemplate is the hook template used for all mock release objects.
-var MockHookTemplate = `apiVersion: v1
-kind: Job
-metadata:
-  annotations:
-    "helm.sh/hook": pre-install
-`
-
-// MockManifest is the manifest used for all mock release objects.
-var MockManifest = `apiVersion: v1
-kind: Secret
-metadata:
-  name: fixture
-`
-
-// MockReleaseOptions allows for user-configurable options on mock release objects.
-type MockReleaseOptions struct {
-	Name        string
-	Version     int32
-	Chart       *chart.Chart
-	Config      *chart.Config
-	StatusCode  release.Status_Code
-	Namespace   string
-	Description string
-}
-
-// ReleaseMock creates a mock release object based on options set by
-// MockReleaseOptions. This function should typically not be used outside of
-// testing.
-func ReleaseMock(opts *MockReleaseOptions) *release.Release { - date := timestamp.Timestamp{Seconds: 242085845, Nanos: 0} - - name := opts.Name - if name == "" { - name = fmt.Sprintf("testrelease-%d", rand.Intn(100)) - } - - var version int32 = 1 - if opts.Version != 0 { - version = opts.Version - } - - namespace := opts.Namespace - if namespace == "" { - namespace = "default" - } - - description := opts.Description - if description == "" { - description = "Release mock" - } - - ch := opts.Chart - if opts.Chart == nil { - ch = &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "foo", - Version: "0.1.0-beta.1", - }, - Templates: []*chart.Template{ - {Name: "templates/foo.tpl", Data: []byte(MockManifest)}, - }, - } - } - - config := opts.Config - if config == nil { - config = &chart.Config{Raw: `name: "value"`} - } - - scode := release.Status_DEPLOYED - if opts.StatusCode > 0 { - scode = opts.StatusCode - } - - return &release.Release{ - Name: name, - Info: &release.Info{ - FirstDeployed: &date, - LastDeployed: &date, - Status: &release.Status{Code: scode}, - Description: description, - }, - Chart: ch, - Config: config, - Version: version, - Namespace: namespace, - Hooks: []*release.Hook{ - { - Name: "pre-install-hook", - Kind: "Job", - Path: "pre-install-hook.yaml", - Manifest: MockHookTemplate, - LastRun: &date, - Events: []release.Hook_Event{release.Hook_PRE_INSTALL}, - }, - }, - Manifest: MockManifest, - } -} - -// RenderReleaseMock will take a release (usually produced by helm.ReleaseMock) -// and will render the Manifest inside using the local mechanism (no tiller). -// (Compare to renderResources in pkg/tiller) -func RenderReleaseMock(r *release.Release, asUpgrade bool) error { - if r == nil || r.Chart == nil || r.Chart.Metadata == nil { - return errors.New("a release with a chart with metadata must be provided to render the manifests") - } - - renderOpts := renderutil.Options{ - ReleaseOptions: chartutil.ReleaseOptions{ - Name: r.Name, - Namespace: r.Namespace, - Time: r.Info.LastDeployed, - Revision: int(r.Version), - IsUpgrade: asUpgrade, - IsInstall: !asUpgrade, - }, - } - rendered, err := renderutil.Render(r.Chart, r.Config, renderOpts) - if err != nil { - return err - } - - b := bytes.NewBuffer(nil) - for _, m := range manifest.SplitManifests(rendered) { - // Remove empty manifests - if len(strings.TrimSpace(m.Content)) == 0 { - continue - } - b.WriteString("\n---\n# Source: " + m.Name + "\n") - b.WriteString(m.Content) - } - r.Manifest = b.String() - return nil -} diff --git a/vendor/k8s.io/helm/pkg/helm/interface.go b/vendor/k8s.io/helm/pkg/helm/interface.go deleted file mode 100644 index 59b1103db..000000000 --- a/vendor/k8s.io/helm/pkg/helm/interface.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package helm
-
-import (
-	"golang.org/x/net/context"
-	"k8s.io/helm/pkg/proto/hapi/chart"
-	rls "k8s.io/helm/pkg/proto/hapi/services"
-)
-
-// Interface is the helm client interface, used for mocking in tests
-type Interface interface {
-	ListReleases(opts ...ReleaseListOption) (*rls.ListReleasesResponse, error)
-	InstallRelease(chStr, namespace string, opts ...InstallOption) (*rls.InstallReleaseResponse, error)
-	InstallReleaseWithContext(ctx context.Context, chStr, namespace string, opts ...InstallOption) (*rls.InstallReleaseResponse, error)
-	InstallReleaseFromChart(chart *chart.Chart, namespace string, opts ...InstallOption) (*rls.InstallReleaseResponse, error)
-	InstallReleaseFromChartWithContext(ctx context.Context, chart *chart.Chart, namespace string, opts ...InstallOption) (*rls.InstallReleaseResponse, error)
-	DeleteRelease(rlsName string, opts ...DeleteOption) (*rls.UninstallReleaseResponse, error)
-	ReleaseStatus(rlsName string, opts ...StatusOption) (*rls.GetReleaseStatusResponse, error)
-	UpdateRelease(rlsName, chStr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error)
-	UpdateReleaseWithContext(ctx context.Context, rlsName, chStr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error)
-	UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error)
-	UpdateReleaseFromChartWithContext(ctx context.Context, rlsName string, chart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error)
-	RollbackRelease(rlsName string, opts ...RollbackOption) (*rls.RollbackReleaseResponse, error)
-	ReleaseContent(rlsName string, opts ...ContentOption) (*rls.GetReleaseContentResponse, error)
-	ReleaseHistory(rlsName string, opts ...HistoryOption) (*rls.GetHistoryResponse, error)
-	GetVersion(opts ...VersionOption) (*rls.GetVersionResponse, error)
-	RunReleaseTest(rlsName string, opts ...ReleaseTestOption) (<-chan *rls.TestReleaseResponse, <-chan error)
-	PingTiller() error
-}
diff --git a/vendor/k8s.io/helm/pkg/helm/option.go b/vendor/k8s.io/helm/pkg/helm/option.go
deleted file mode 100644
index 1eb0f133c..000000000
--- a/vendor/k8s.io/helm/pkg/helm/option.go
+++ /dev/null
@@ -1,535 +0,0 @@
-/*
-Copyright The Helm Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package helm
-
-import (
-	"crypto/tls"
-	"time"
-
-	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
-
-	cpb "k8s.io/helm/pkg/proto/hapi/chart"
-	"k8s.io/helm/pkg/proto/hapi/release"
-	rls "k8s.io/helm/pkg/proto/hapi/services"
-	"k8s.io/helm/pkg/version"
-)
-
-// Option allows specifying various settings configurable by
-// the helm client user for overriding the defaults used when
-// issuing RPCs to the Tiller release server.
-type Option func(*options)
-
-// options specify optional settings used by the helm client.
-type options struct {
-	// host is the address of the Tiller release server
-	host string
-	// if set dry-run helm client calls
-	dryRun bool
-	// if set enable TLS on helm client calls
-	useTLS bool
-	// if set, re-use an existing name
-	reuseName bool
-	// if set, performs pod restart during upgrade/rollback
-	recreate bool
-	// if set, force resource update through delete/recreate if needed
-	force bool
-	// if set, skip running hooks
-	disableHooks bool
-	// if set, skip CRD hook only
-	disableCRDHook bool
-	// name of release
-	releaseName string
-	// tls.Config to use for rpc if tls enabled
-	tlsConfig *tls.Config
-	// release list options are applied directly to the list releases request
-	listReq rls.ListReleasesRequest
-	// release install options are applied directly to the install release request
-	instReq rls.InstallReleaseRequest
-	// release update options are applied directly to the update release request
-	updateReq rls.UpdateReleaseRequest
-	// release uninstall options are applied directly to the uninstall release request
-	uninstallReq rls.UninstallReleaseRequest
-	// release get status options are applied directly to the get release status request
-	statusReq rls.GetReleaseStatusRequest
-	// release get content options are applied directly to the get release content request
-	contentReq rls.GetReleaseContentRequest
-	// release rollback options are applied directly to the rollback release request
-	rollbackReq rls.RollbackReleaseRequest
-	// before intercepts client calls before sending
-	before func(context.Context, proto.Message) error
-	// release history options are applied directly to the get release history request
-	histReq rls.GetHistoryRequest
-	// resetValues instructs Tiller to reset values to their defaults.
-	resetValues bool
-	// reuseValues instructs Tiller to reuse the values from the last release.
-	reuseValues bool
-	// release test options are applied directly to the test release history request
-	testReq rls.TestReleaseRequest
-	// connectTimeout specifies the time duration Helm will wait to establish a connection to tiller
-	connectTimeout time.Duration
-}
-
-// Host specifies the host address of the Tiller release server (default ":44134").
-func Host(host string) Option {
-	return func(opts *options) {
-		opts.host = host
-	}
-}
-
-// WithTLS specifies the tls configuration if the helm client is enabled to use TLS.
-func WithTLS(cfg *tls.Config) Option {
-	return func(opts *options) {
-		opts.useTLS = true
-		opts.tlsConfig = cfg
-	}
-}
-
-// BeforeCall returns an option that allows intercepting a helm client rpc
-// before it is sent to tiller. The intercepting function should return
-// an error to indicate that the call should not proceed, or nil otherwise.
-func BeforeCall(fn func(context.Context, proto.Message) error) Option {
-	return func(opts *options) {
-		opts.before = fn
-	}
-}
-
-// ReleaseListOption allows specifying various settings
-// configurable by the helm client user for overriding
-// the defaults used when running the `helm list` command.
-type ReleaseListOption func(*options)
-
-// ReleaseListOffset specifies the offset into a list of releases.
-func ReleaseListOffset(offset string) ReleaseListOption {
-	return func(opts *options) {
-		opts.listReq.Offset = offset
-	}
-}
-
-// ReleaseListFilter specifies a filter to apply to a list of releases.
-func ReleaseListFilter(filter string) ReleaseListOption {
-	return func(opts *options) {
-		opts.listReq.Filter = filter
-	}
-}
-
-// ReleaseListLimit sets an upper bound on the number of releases returned.
-func ReleaseListLimit(limit int) ReleaseListOption {
-	return func(opts *options) {
-		opts.listReq.Limit = int64(limit)
-	}
-}
-
-// ReleaseListOrder specifies how to order a list of releases.
-func ReleaseListOrder(order int32) ReleaseListOption {
-	return func(opts *options) {
-		opts.listReq.SortOrder = rls.ListSort_SortOrder(order)
-	}
-}
-
-// ReleaseListSort specifies how to sort a release list.
-func ReleaseListSort(sort int32) ReleaseListOption {
-	return func(opts *options) {
-		opts.listReq.SortBy = rls.ListSort_SortBy(sort)
-	}
-}
-
-// ReleaseListStatuses specifies which status codes should be returned.
-func ReleaseListStatuses(statuses []release.Status_Code) ReleaseListOption {
-	return func(opts *options) {
-		if len(statuses) == 0 {
-			statuses = []release.Status_Code{release.Status_DEPLOYED}
-		}
-		opts.listReq.StatusCodes = statuses
-	}
-}
-
-// ReleaseListNamespace specifies the namespace to list releases from
-func ReleaseListNamespace(namespace string) ReleaseListOption {
-	return func(opts *options) {
-		opts.listReq.Namespace = namespace
-	}
-}
-
-// InstallOption allows specifying various settings
-// configurable by the helm client user for overriding
-// the defaults used when running the `helm install` command.
-type InstallOption func(*options)
-
-// ValueOverrides specifies a list of values to include when installing.
-func ValueOverrides(raw []byte) InstallOption {
-	return func(opts *options) {
-		opts.instReq.Values = &cpb.Config{Raw: string(raw)}
-	}
-}
-
-// ReleaseName specifies the name of the release when installing.
-func ReleaseName(name string) InstallOption {
-	return func(opts *options) {
-		opts.instReq.Name = name
-	}
-}
-
-// ConnectTimeout specifies the duration (in seconds) Helm will wait to establish a connection to tiller
-func ConnectTimeout(timeout int64) Option {
-	return func(opts *options) {
-		opts.connectTimeout = time.Duration(timeout) * time.Second
-	}
-}
-
-// InstallTimeout specifies the number of seconds before Kubernetes calls time out
-func InstallTimeout(timeout int64) InstallOption {
-	return func(opts *options) {
-		opts.instReq.Timeout = timeout
-	}
-}
-
-// UpgradeTimeout specifies the number of seconds before Kubernetes calls time out
-func UpgradeTimeout(timeout int64) UpdateOption {
-	return func(opts *options) {
-		opts.updateReq.Timeout = timeout
-	}
-}
-
-// DeleteTimeout specifies the number of seconds before Kubernetes calls time out
-func DeleteTimeout(timeout int64) DeleteOption {
-	return func(opts *options) {
-		opts.uninstallReq.Timeout = timeout
-	}
-}
-
-// ReleaseTestTimeout specifies the number of seconds before Kubernetes calls time out
-func ReleaseTestTimeout(timeout int64) ReleaseTestOption {
-	return func(opts *options) {
-		opts.testReq.Timeout = timeout
-	}
-}
-
-// ReleaseTestCleanup is a boolean value representing whether to clean up test pods
-func ReleaseTestCleanup(cleanup bool) ReleaseTestOption {
-	return func(opts *options) {
-		opts.testReq.Cleanup = cleanup
-	}
-}
-
-// ReleaseTestParallel is a boolean value representing whether to run test pods in parallel
-func ReleaseTestParallel(parallel bool) ReleaseTestOption {
-	return func(opts *options) {
-		opts.testReq.Parallel = parallel
-	}
-}
-
-// ReleaseTestMaxParallel specifies the maximum number of test pods to run in parallel
-func ReleaseTestMaxParallel(max uint32) ReleaseTestOption {
-	return func(opts *options) {
-		opts.testReq.MaxParallel = max
-	}
-}
-
-// ReleaseTestLogs is a boolean value representing whether to dump the logs from test pods
-func ReleaseTestLogs(logs bool) ReleaseTestOption {
-	return func(opts *options) {
-		opts.testReq.Logs = logs
-	}
-}
-
-// RollbackTimeout specifies the number of seconds before Kubernetes calls time out
-func RollbackTimeout(timeout int64) RollbackOption {
-	return func(opts *options) {
-		opts.rollbackReq.Timeout = timeout
-	}
-}
-
-// InstallWait specifies whether or not to wait for all resources to be ready
-func InstallWait(wait bool) InstallOption {
-	return func(opts *options) {
-		opts.instReq.Wait = wait
-	}
-}
-
-// UpgradeWait specifies whether or not to wait for all resources to be ready
-func UpgradeWait(wait bool) UpdateOption {
-	return func(opts *options) {
-		opts.updateReq.Wait = wait
-	}
-}
-
-// RollbackWait specifies whether or not to wait for all resources to be ready
-func RollbackWait(wait bool) RollbackOption {
-	return func(opts *options) {
-		opts.rollbackReq.Wait = wait
-	}
-}
-
-// UpdateValueOverrides specifies a list of values to include when upgrading
-func UpdateValueOverrides(raw []byte) UpdateOption {
-	return func(opts *options) {
-		opts.updateReq.Values = &cpb.Config{Raw: string(raw)}
-	}
-}
-
-// InstallDescription specifies the description for the release
-func InstallDescription(description string) InstallOption {
-	return func(opts *options) {
-		opts.instReq.Description = description
-	}
-}
-
-// UpgradeDescription specifies the description for the update
-func UpgradeDescription(description string) UpdateOption {
-	return func(opts *options) {
-		opts.updateReq.Description = description
-	}
-}
-
-// RollbackDescription specifies the description for the release
-func RollbackDescription(description string) RollbackOption {
-	return func(opts *options) {
-		opts.rollbackReq.Description = description
-	}
-}
-
-// DeleteDescription specifies the description for the release
-func DeleteDescription(description string) DeleteOption {
-	return func(opts *options) {
-		opts.uninstallReq.Description = description
-	}
-}
-
-// UpgradeCleanupOnFail allows deletion of new resources created in this upgrade when the upgrade fails
-func UpgradeCleanupOnFail(cleanupOnFail bool) UpdateOption {
-	return func(opts *options) {
-		opts.updateReq.CleanupOnFail = cleanupOnFail
-	}
-}
-
-// RollbackCleanupOnFail allows deletion of new resources created in this rollback when the rollback fails
-func RollbackCleanupOnFail(cleanupOnFail bool) RollbackOption {
-	return func(opts *options) {
-		opts.rollbackReq.CleanupOnFail = cleanupOnFail
-	}
-}
-
-// DeleteDisableHooks will disable hooks for a deletion operation.
-func DeleteDisableHooks(disable bool) DeleteOption {
-	return func(opts *options) {
-		opts.disableHooks = disable
-	}
-}
-
-// DeleteDryRun will (if true) execute a deletion as a dry run.
-func DeleteDryRun(dry bool) DeleteOption {
-	return func(opts *options) {
-		opts.dryRun = dry
-	}
-}
-
-// DeletePurge removes the release from the store and makes its name free for later use.
-func DeletePurge(purge bool) DeleteOption {
-	return func(opts *options) {
-		opts.uninstallReq.Purge = purge
-	}
-}
-
-// InstallDryRun will (if true) execute an installation as a dry run.
-func InstallDryRun(dry bool) InstallOption {
-	return func(opts *options) {
-		opts.dryRun = dry
-	}
-}
-
-// InstallDisableHooks disables hooks during installation.
-func InstallDisableHooks(disable bool) InstallOption {
-	return func(opts *options) {
-		opts.disableHooks = disable
-	}
-}
-
-// InstallDisableCRDHook disables CRD hook during installation.
-func InstallDisableCRDHook(disable bool) InstallOption { - return func(opts *options) { - opts.disableCRDHook = disable - } -} - -// InstallReuseName will (if true) instruct Tiller to re-use an existing name. -func InstallReuseName(reuse bool) InstallOption { - return func(opts *options) { - opts.reuseName = reuse - } -} - -// InstallSubNotes will (if true) instruct Tiller to render SubChart Notes -func InstallSubNotes(enable bool) InstallOption { - return func(opts *options) { - opts.instReq.SubNotes = enable - } -} - -// UpgradeSubNotes will (if true) instruct Tiller to render SubChart Notes -func UpgradeSubNotes(enable bool) UpdateOption { - return func(opts *options) { - opts.updateReq.SubNotes = enable - } -} - -// RollbackDisableHooks will disable hooks for a rollback operation -func RollbackDisableHooks(disable bool) RollbackOption { - return func(opts *options) { - opts.disableHooks = disable - } -} - -// RollbackDryRun will (if true) execute a rollback as a dry run. -func RollbackDryRun(dry bool) RollbackOption { - return func(opts *options) { - opts.dryRun = dry - } -} - -// RollbackRecreate will (if true) recreate pods after rollback. -func RollbackRecreate(recreate bool) RollbackOption { - return func(opts *options) { - opts.recreate = recreate - } -} - -// RollbackForce will (if true) force resource update through delete/recreate if needed -func RollbackForce(force bool) RollbackOption { - return func(opts *options) { - opts.force = force - } -} - -// RollbackVersion sets the version of the release to deploy. -func RollbackVersion(ver int32) RollbackOption { - return func(opts *options) { - opts.rollbackReq.Version = ver - } -} - -// UpgradeDisableHooks will disable hooks for an upgrade operation. -func UpgradeDisableHooks(disable bool) UpdateOption { - return func(opts *options) { - opts.disableHooks = disable - } -} - -// UpgradeDryRun will (if true) execute an upgrade as a dry run. -func UpgradeDryRun(dry bool) UpdateOption { - return func(opts *options) { - opts.dryRun = dry - } -} - -// ResetValues will (if true) trigger resetting the values to their original state. -func ResetValues(reset bool) UpdateOption { - return func(opts *options) { - opts.resetValues = reset - } -} - -// ReuseValues will cause Tiller to reuse the values from the last release. -// This is ignored if ResetValues is true. -func ReuseValues(reuse bool) UpdateOption { - return func(opts *options) { - opts.reuseValues = reuse - } -} - -// UpgradeRecreate will (if true) recreate pods after upgrade. -func UpgradeRecreate(recreate bool) UpdateOption { - return func(opts *options) { - opts.recreate = recreate - } -} - -// UpgradeForce will (if true) force resource update through delete/recreate if needed -func UpgradeForce(force bool) UpdateOption { - return func(opts *options) { - opts.force = force - } -} - -// ContentOption allows setting optional attributes when -// performing a GetReleaseContent tiller rpc. -type ContentOption func(*options) - -// ContentReleaseVersion will instruct Tiller to retrieve the content -// of a particular version of a release. -func ContentReleaseVersion(version int32) ContentOption { - return func(opts *options) { - opts.contentReq.Version = version - } -} - -// StatusOption allows setting optional attributes when -// performing a GetReleaseStatus tiller rpc. -type StatusOption func(*options) - -// StatusReleaseVersion will instruct Tiller to retrieve the status -// of a particular version of a release. 
-func StatusReleaseVersion(version int32) StatusOption {
-	return func(opts *options) {
-		opts.statusReq.Version = version
-	}
-}
-
-// DeleteOption allows setting optional attributes when
-// performing an UninstallRelease tiller rpc.
-type DeleteOption func(*options)
-
-// VersionOption -- TODO
-type VersionOption func(*options)
-
-// UpdateOption allows specifying various settings
-// configurable by the helm client user for overriding
-// the defaults used when running the `helm upgrade` command.
-type UpdateOption func(*options)
-
-// RollbackOption allows specifying various settings configurable
-// by the helm client user for overriding the defaults used when
-// running the `helm rollback` command.
-type RollbackOption func(*options)
-
-// HistoryOption allows configuring optional request data for
-// issuing a GetHistory rpc.
-type HistoryOption func(*options)
-
-// WithMaxHistory sets the max number of releases to return
-// in a release history query.
-func WithMaxHistory(max int32) HistoryOption {
-	return func(opts *options) {
-		opts.histReq.Max = max
-	}
-}
-
-// NewContext creates a versioned context.
-func NewContext() context.Context {
-	return FromContext(context.TODO())
-}
-
-// FromContext returns a versioned context from a parent context
-func FromContext(ctx context.Context) context.Context {
-	md := metadata.Pairs("x-helm-api-client", version.GetVersion())
-	return metadata.NewOutgoingContext(ctx, md)
-}
-
-// ReleaseTestOption allows configuring optional request data for
-// issuing a TestRelease rpc.
-type ReleaseTestOption func(*options)
diff --git a/vendor/k8s.io/helm/pkg/ignore/doc.go b/vendor/k8s.io/helm/pkg/ignore/doc.go
deleted file mode 100644
index 85cc91060..000000000
--- a/vendor/k8s.io/helm/pkg/ignore/doc.go
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
-Copyright The Helm Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*Package ignore provides tools for writing ignore files (a la .gitignore).
-
-This provides both an ignore parser and a file-aware processor.
-
-The format of ignore files closely follows, but does not exactly match, the
-format for .gitignore files (https://git-scm.com/docs/gitignore).
-
-The formatting rules are as follows:
-
-	- Parsing is line-by-line
-	- Empty lines are ignored
-	- Lines that begin with # (comments) will be ignored
-	- Leading and trailing spaces are always ignored
-	- Inline comments are NOT supported ('foo* # Any foo' does not contain a comment)
-	- There is no support for multi-line patterns
-	- Shell glob patterns are supported. See Go's "path/filepath".Match
-	- If a pattern begins with a leading !, the match will be negated.
-	- If a pattern begins with a leading /, only paths relatively rooted will match.
-	- If the pattern ends with a trailing /, only directories will match
-	- If a pattern contains no slashes, file basenames are tested (not paths)
-	- The pattern sequence "**", while legal in a glob, will cause an error here
-	  (to indicate incompatibility with .gitignore).
- -Example: - - # Match any file named foo.txt - foo.txt - - # Match any text file - *.txt - - # Match only directories named mydir - mydir/ - - # Match only text files in the top-level directory - /*.txt - - # Match only the file foo.txt in the top-level directory - /foo.txt - - # Match any file named ab.txt, ac.txt, or ad.txt - a[b-d].txt - -Notable differences from .gitignore: - - The '**' syntax is not supported. - - The globbing library is Go's 'filepath.Match', not fnmatch(3) - - Trailing spaces are always ignored (there is no supported escape sequence) - - The evaluation of escape sequences has not been tested for compatibility - - There is no support for '\!' as a special leading sequence. -*/ -package ignore // import "k8s.io/helm/pkg/ignore" diff --git a/vendor/k8s.io/helm/pkg/manifest/splitter.go b/vendor/k8s.io/helm/pkg/manifest/splitter.go deleted file mode 100644 index 7081e7aa7..000000000 --- a/vendor/k8s.io/helm/pkg/manifest/splitter.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package manifest - -import ( - "regexp" - "strings" - - "k8s.io/helm/pkg/releaseutil" -) - -var ( - kindRegex = regexp.MustCompile("kind:(.*)\n") -) - -// SplitManifests takes a map of rendered templates and splits them into the -// detected manifests. -func SplitManifests(templates map[string]string) []Manifest { - var listManifests []Manifest - // extract kind and name - for k, v := range templates { - match := kindRegex.FindStringSubmatch(v) - h := "Unknown" - if len(match) == 2 { - h = strings.TrimSpace(match[1]) - } - m := Manifest{Name: k, Content: v, Head: &releaseutil.SimpleHead{Kind: h}} - listManifests = append(listManifests, m) - } - - return listManifests -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/chart/chart.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/chart/chart.pb.go deleted file mode 100644 index eddc2ccae..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/chart/chart.pb.go +++ /dev/null @@ -1,124 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/chart/chart.proto - -package chart - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Chart is a helm package that contains metadata, a default config, zero or more -// optionally parameterizable templates, and zero or more charts (dependencies). -type Chart struct { - // Contents of the Chartfile. 
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // Templates for this chart. - Templates []*Template `protobuf:"bytes,2,rep,name=templates,proto3" json:"templates,omitempty"` - // Charts that this chart depends on. - Dependencies []*Chart `protobuf:"bytes,3,rep,name=dependencies,proto3" json:"dependencies,omitempty"` - // Default config for this template. - Values *Config `protobuf:"bytes,4,opt,name=values,proto3" json:"values,omitempty"` - // Miscellaneous files in a chart archive, - // e.g. README, LICENSE, etc. - Files []*any.Any `protobuf:"bytes,5,rep,name=files,proto3" json:"files,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Chart) Reset() { *m = Chart{} } -func (m *Chart) String() string { return proto.CompactTextString(m) } -func (*Chart) ProtoMessage() {} -func (*Chart) Descriptor() ([]byte, []int) { - return fileDescriptor_chart_3948302f7486cf3d, []int{0} -} -func (m *Chart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Chart.Unmarshal(m, b) -} -func (m *Chart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Chart.Marshal(b, m, deterministic) -} -func (dst *Chart) XXX_Merge(src proto.Message) { - xxx_messageInfo_Chart.Merge(dst, src) -} -func (m *Chart) XXX_Size() int { - return xxx_messageInfo_Chart.Size(m) -} -func (m *Chart) XXX_DiscardUnknown() { - xxx_messageInfo_Chart.DiscardUnknown(m) -} - -var xxx_messageInfo_Chart proto.InternalMessageInfo - -func (m *Chart) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *Chart) GetTemplates() []*Template { - if m != nil { - return m.Templates - } - return nil -} - -func (m *Chart) GetDependencies() []*Chart { - if m != nil { - return m.Dependencies - } - return nil -} - -func (m *Chart) GetValues() *Config { - if m != nil { - return m.Values - } - return nil -} - -func (m *Chart) GetFiles() []*any.Any { - if m != nil { - return m.Files - } - return nil -} - -func init() { - proto.RegisterType((*Chart)(nil), "hapi.chart.Chart") -} - -func init() { proto.RegisterFile("hapi/chart/chart.proto", fileDescriptor_chart_3948302f7486cf3d) } - -var fileDescriptor_chart_3948302f7486cf3d = []byte{ - // 242 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4e, 0xc3, 0x30, - 0x10, 0x86, 0x15, 0x4a, 0x0a, 0x1c, 0x2c, 0x58, 0x08, 0x4c, 0xa7, 0x8a, 0x09, 0x75, 0x70, 0x50, - 0x11, 0x0f, 0x00, 0xcc, 0x2c, 0x16, 0x13, 0xdb, 0xb5, 0xb9, 0xa4, 0x91, 0x52, 0x3b, 0xaa, 0x5d, - 0xa4, 0xbe, 0x3b, 0x03, 0xea, 0xd9, 0xa6, 0x09, 0xea, 0x12, 0x29, 0xf7, 0x7d, 0xff, 0xe5, 0xbf, - 0xc0, 0xed, 0x0a, 0xbb, 0xa6, 0x58, 0xae, 0x70, 0xe3, 0xc3, 0x53, 0x75, 0x1b, 0xeb, 0xad, 0x80, - 0xfd, 0x5c, 0xf1, 0x64, 0x72, 0xd7, 0x77, 0xac, 0xa9, 0x9a, 0x3a, 0x48, 0x93, 0xfb, 0x1e, 0x58, - 0x93, 0xc7, 0x12, 0x3d, 0x1e, 0x41, 0x9e, 0xd6, 0x5d, 0x8b, 0x9e, 0x12, 0xaa, 0xad, 0xad, 0x5b, - 0x2a, 0xf8, 0x6d, 0xb1, 0xad, 0x0a, 0x34, 0xbb, 0x80, 0x1e, 0x7e, 0x32, 0xc8, 0xdf, 0xf7, 0x19, - 0xf1, 0x04, 0xe7, 0x69, 0xa3, 0xcc, 0xa6, 0xd9, 0xe3, 0xe5, 0xfc, 0x46, 0x1d, 0x2a, 0xa9, 0x8f, - 0xc8, 0xf4, 0x9f, 0x25, 0xe6, 0x70, 0x91, 0x3e, 0xe4, 0xe4, 0xc9, 0x74, 0xf4, 0x3f, 0xf2, 0x19, - 0xa1, 0x3e, 0x68, 0xe2, 0x05, 0xae, 0x4a, 0xea, 0xc8, 0x94, 0x64, 0x96, 0x0d, 0x39, 0x39, 0xe2, - 0xd8, 0x75, 0x3f, 0xc6, 0x75, 0xf4, 0x40, 0x13, 0x33, 0x18, 0x7f, 0x63, 0xbb, 0x25, 0x27, 0x4f, - 0xb9, 0x9a, 0x18, 
0x04, 0xf8, 0x0f, 0xe9, 0x68, 0x88, 0x19, 0xe4, 0x55, 0xd3, 0x92, 0x93, 0x79, - 0xac, 0x14, 0xae, 0x57, 0xe9, 0x7a, 0xf5, 0x6a, 0x76, 0x3a, 0x28, 0x6f, 0x67, 0x5f, 0x39, 0xef, - 0x58, 0x8c, 0x99, 0x3e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x70, 0x34, 0x75, 0x9e, 0x01, - 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/chart/config.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/chart/config.pb.go deleted file mode 100644 index 9dd71ef5e..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/chart/config.pb.go +++ /dev/null @@ -1,129 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/chart/config.proto - -package chart - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Config supplies values to the parametrizable templates of a chart. -type Config struct { - Raw string `protobuf:"bytes,1,opt,name=raw,proto3" json:"raw,omitempty"` - Values map[string]*Value `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Config) Reset() { *m = Config{} } -func (m *Config) String() string { return proto.CompactTextString(m) } -func (*Config) ProtoMessage() {} -func (*Config) Descriptor() ([]byte, []int) { - return fileDescriptor_config_7b4c7a75f66363c1, []int{0} -} -func (m *Config) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Config.Unmarshal(m, b) -} -func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Config.Marshal(b, m, deterministic) -} -func (dst *Config) XXX_Merge(src proto.Message) { - xxx_messageInfo_Config.Merge(dst, src) -} -func (m *Config) XXX_Size() int { - return xxx_messageInfo_Config.Size(m) -} -func (m *Config) XXX_DiscardUnknown() { - xxx_messageInfo_Config.DiscardUnknown(m) -} - -var xxx_messageInfo_Config proto.InternalMessageInfo - -func (m *Config) GetRaw() string { - if m != nil { - return m.Raw - } - return "" -} - -func (m *Config) GetValues() map[string]*Value { - if m != nil { - return m.Values - } - return nil -} - -// Value describes a configuration value as a string. 
-type Value struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { - return fileDescriptor_config_7b4c7a75f66363c1, []int{1} -} -func (m *Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Value.Unmarshal(m, b) -} -func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Value.Marshal(b, m, deterministic) -} -func (dst *Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Value.Merge(dst, src) -} -func (m *Value) XXX_Size() int { - return xxx_messageInfo_Value.Size(m) -} -func (m *Value) XXX_DiscardUnknown() { - xxx_messageInfo_Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Value proto.InternalMessageInfo - -func (m *Value) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func init() { - proto.RegisterType((*Config)(nil), "hapi.chart.Config") - proto.RegisterMapType((map[string]*Value)(nil), "hapi.chart.Config.ValuesEntry") - proto.RegisterType((*Value)(nil), "hapi.chart.Value") -} - -func init() { proto.RegisterFile("hapi/chart/config.proto", fileDescriptor_config_7b4c7a75f66363c1) } - -var fileDescriptor_config_7b4c7a75f66363c1 = []byte{ - // 182 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0x48, 0x2c, 0xc8, - 0xd4, 0x4f, 0xce, 0x48, 0x2c, 0x2a, 0xd1, 0x4f, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0x2b, 0x28, - 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x02, 0x49, 0xe8, 0x81, 0x25, 0x94, 0x16, 0x30, 0x72, 0xb1, 0x39, - 0x83, 0x25, 0x85, 0x04, 0xb8, 0x98, 0x8b, 0x12, 0xcb, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, - 0x40, 0x4c, 0x21, 0x33, 0x2e, 0xb6, 0xb2, 0xc4, 0x9c, 0xd2, 0xd4, 0x62, 0x09, 0x26, 0x05, 0x66, - 0x0d, 0x6e, 0x23, 0x39, 0x3d, 0x84, 0x4e, 0x3d, 0x88, 0x2e, 0xbd, 0x30, 0xb0, 0x02, 0xd7, 0xbc, - 0x92, 0xa2, 0xca, 0x20, 0xa8, 0x6a, 0x29, 0x1f, 0x2e, 0x6e, 0x24, 0x61, 0x90, 0xc1, 0xd9, 0xa9, - 0x95, 0x30, 0x83, 0xb3, 0x53, 0x2b, 0x85, 0xd4, 0xb9, 0x58, 0xc1, 0x4a, 0x25, 0x98, 0x14, 0x18, - 0x35, 0xb8, 0x8d, 0x04, 0x91, 0xcd, 0x05, 0xeb, 0x0c, 0x82, 0xc8, 0x5b, 0x31, 0x59, 0x30, 0x2a, - 0xc9, 0x72, 0xb1, 0x82, 0xc5, 0x84, 0x44, 0x60, 0xba, 0x20, 0x26, 0x41, 0x38, 0x4e, 0xec, 0x51, - 0xac, 0x60, 0x8d, 0x49, 0x6c, 0x60, 0xdf, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe1, 0x12, - 0x60, 0xda, 0xf8, 0x00, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go deleted file mode 100644 index ffa3ef568..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go +++ /dev/null @@ -1,329 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/chart/metadata.proto - -package chart - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Metadata_Engine int32 - -const ( - Metadata_UNKNOWN Metadata_Engine = 0 - Metadata_GOTPL Metadata_Engine = 1 -) - -var Metadata_Engine_name = map[int32]string{ - 0: "UNKNOWN", - 1: "GOTPL", -} -var Metadata_Engine_value = map[string]int32{ - "UNKNOWN": 0, - "GOTPL": 1, -} - -func (x Metadata_Engine) String() string { - return proto.EnumName(Metadata_Engine_name, int32(x)) -} -func (Metadata_Engine) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_metadata_bafee76586953fd5, []int{1, 0} -} - -// Maintainer describes a Chart maintainer. -type Maintainer struct { - // Name is a user name or organization name - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Email is an optional email address to contact the named maintainer - Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"` - // Url is an optional URL to an address for the named maintainer - Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Maintainer) Reset() { *m = Maintainer{} } -func (m *Maintainer) String() string { return proto.CompactTextString(m) } -func (*Maintainer) ProtoMessage() {} -func (*Maintainer) Descriptor() ([]byte, []int) { - return fileDescriptor_metadata_bafee76586953fd5, []int{0} -} -func (m *Maintainer) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Maintainer.Unmarshal(m, b) -} -func (m *Maintainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Maintainer.Marshal(b, m, deterministic) -} -func (dst *Maintainer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Maintainer.Merge(dst, src) -} -func (m *Maintainer) XXX_Size() int { - return xxx_messageInfo_Maintainer.Size(m) -} -func (m *Maintainer) XXX_DiscardUnknown() { - xxx_messageInfo_Maintainer.DiscardUnknown(m) -} - -var xxx_messageInfo_Maintainer proto.InternalMessageInfo - -func (m *Maintainer) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Maintainer) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *Maintainer) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -// Metadata for a Chart file. This models the structure of a Chart.yaml file. 
-// -// Spec: https://k8s.io/helm/blob/master/docs/design/chart_format.md#the-chart-file -type Metadata struct { - // The name of the chart - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL to a relevant project page, git repo, or contact person - Home string `protobuf:"bytes,2,opt,name=home,proto3" json:"home,omitempty"` - // Source is the URL to the source code of this chart - Sources []string `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` - // A SemVer 2 conformant version string of the chart - Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - // A one-sentence description of the chart - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - // A list of string keywords - Keywords []string `protobuf:"bytes,6,rep,name=keywords,proto3" json:"keywords,omitempty"` - // A list of name and URL/email address combinations for the maintainer(s) - Maintainers []*Maintainer `protobuf:"bytes,7,rep,name=maintainers,proto3" json:"maintainers,omitempty"` - // The name of the template engine to use. Defaults to 'gotpl'. - Engine string `protobuf:"bytes,8,opt,name=engine,proto3" json:"engine,omitempty"` - // The URL to an icon file. - Icon string `protobuf:"bytes,9,opt,name=icon,proto3" json:"icon,omitempty"` - // The API Version of this chart. - ApiVersion string `protobuf:"bytes,10,opt,name=apiVersion,proto3" json:"apiVersion,omitempty"` - // The condition to check to enable chart - Condition string `protobuf:"bytes,11,opt,name=condition,proto3" json:"condition,omitempty"` - // The tags to check to enable chart - Tags string `protobuf:"bytes,12,opt,name=tags,proto3" json:"tags,omitempty"` - // The version of the application enclosed inside of this chart. - AppVersion string `protobuf:"bytes,13,opt,name=appVersion,proto3" json:"appVersion,omitempty"` - // Whether or not this chart is deprecated - Deprecated bool `protobuf:"varint,14,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - // TillerVersion is a SemVer constraints on what version of Tiller is required. - // See SemVer ranges here: https://github.com/Masterminds/semver#basic-comparisons - TillerVersion string `protobuf:"bytes,15,opt,name=tillerVersion,proto3" json:"tillerVersion,omitempty"` - // Annotations are additional mappings uninterpreted by Tiller, - // made available for inspection by other applications. - Annotations map[string]string `protobuf:"bytes,16,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // KubeVersion is a SemVer constraint specifying the version of Kubernetes required. 
- KubeVersion string `protobuf:"bytes,17,opt,name=kubeVersion,proto3" json:"kubeVersion,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_metadata_bafee76586953fd5, []int{1} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (dst *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(dst, src) -} -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Metadata) GetHome() string { - if m != nil { - return m.Home - } - return "" -} - -func (m *Metadata) GetSources() []string { - if m != nil { - return m.Sources - } - return nil -} - -func (m *Metadata) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Metadata) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Metadata) GetKeywords() []string { - if m != nil { - return m.Keywords - } - return nil -} - -func (m *Metadata) GetMaintainers() []*Maintainer { - if m != nil { - return m.Maintainers - } - return nil -} - -func (m *Metadata) GetEngine() string { - if m != nil { - return m.Engine - } - return "" -} - -func (m *Metadata) GetIcon() string { - if m != nil { - return m.Icon - } - return "" -} - -func (m *Metadata) GetApiVersion() string { - if m != nil { - return m.ApiVersion - } - return "" -} - -func (m *Metadata) GetCondition() string { - if m != nil { - return m.Condition - } - return "" -} - -func (m *Metadata) GetTags() string { - if m != nil { - return m.Tags - } - return "" -} - -func (m *Metadata) GetAppVersion() string { - if m != nil { - return m.AppVersion - } - return "" -} - -func (m *Metadata) GetDeprecated() bool { - if m != nil { - return m.Deprecated - } - return false -} - -func (m *Metadata) GetTillerVersion() string { - if m != nil { - return m.TillerVersion - } - return "" -} - -func (m *Metadata) GetAnnotations() map[string]string { - if m != nil { - return m.Annotations - } - return nil -} - -func (m *Metadata) GetKubeVersion() string { - if m != nil { - return m.KubeVersion - } - return "" -} - -func init() { - proto.RegisterType((*Maintainer)(nil), "hapi.chart.Maintainer") - proto.RegisterType((*Metadata)(nil), "hapi.chart.Metadata") - proto.RegisterMapType((map[string]string)(nil), "hapi.chart.Metadata.AnnotationsEntry") - proto.RegisterEnum("hapi.chart.Metadata_Engine", Metadata_Engine_name, Metadata_Engine_value) -} - -func init() { proto.RegisterFile("hapi/chart/metadata.proto", fileDescriptor_metadata_bafee76586953fd5) } - -var fileDescriptor_metadata_bafee76586953fd5 = []byte{ - // 435 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x5d, 0x6b, 0xd4, 0x40, - 0x14, 0x35, 0xcd, 0x66, 0x77, 0x73, 0x63, 0x35, 0x0e, 0x52, 0xc6, 0x22, 0x12, 0x16, 0x85, 0x7d, - 
0xda, 0x82, 0xbe, 0x14, 0x1f, 0x04, 0x85, 0x52, 0x41, 0xbb, 0x95, 0xe0, 0x07, 0xf8, 0x36, 0x4d, - 0x2e, 0xdd, 0x61, 0x93, 0x99, 0x30, 0x99, 0xad, 0xec, 0xaf, 0xf0, 0x2f, 0xcb, 0xdc, 0x64, 0x9a, - 0xac, 0xf4, 0xed, 0x9e, 0x73, 0x66, 0xce, 0xcc, 0xbd, 0xf7, 0xc0, 0x8b, 0x8d, 0x68, 0xe4, 0x59, - 0xb1, 0x11, 0xc6, 0x9e, 0xd5, 0x68, 0x45, 0x29, 0xac, 0x58, 0x35, 0x46, 0x5b, 0xcd, 0xc0, 0x49, - 0x2b, 0x92, 0x16, 0x9f, 0x01, 0xae, 0x84, 0x54, 0x56, 0x48, 0x85, 0x86, 0x31, 0x98, 0x28, 0x51, - 0x23, 0x0f, 0xb2, 0x60, 0x19, 0xe7, 0x54, 0xb3, 0xe7, 0x10, 0x61, 0x2d, 0x64, 0xc5, 0x8f, 0x88, - 0xec, 0x00, 0x4b, 0x21, 0xdc, 0x99, 0x8a, 0x87, 0xc4, 0xb9, 0x72, 0xf1, 0x37, 0x82, 0xf9, 0x55, - 0xff, 0xd0, 0x83, 0x46, 0x0c, 0x26, 0x1b, 0x5d, 0x63, 0xef, 0x43, 0x35, 0xe3, 0x30, 0x6b, 0xf5, - 0xce, 0x14, 0xd8, 0xf2, 0x30, 0x0b, 0x97, 0x71, 0xee, 0xa1, 0x53, 0xee, 0xd0, 0xb4, 0x52, 0x2b, - 0x3e, 0xa1, 0x0b, 0x1e, 0xb2, 0x0c, 0x92, 0x12, 0xdb, 0xc2, 0xc8, 0xc6, 0x3a, 0x35, 0x22, 0x75, - 0x4c, 0xb1, 0x53, 0x98, 0x6f, 0x71, 0xff, 0x47, 0x9b, 0xb2, 0xe5, 0x53, 0xb2, 0xbd, 0xc7, 0xec, - 0x1c, 0x92, 0xfa, 0xbe, 0xe1, 0x96, 0xcf, 0xb2, 0x70, 0x99, 0xbc, 0x3d, 0x59, 0x0d, 0x23, 0x59, - 0x0d, 0xf3, 0xc8, 0xc7, 0x47, 0xd9, 0x09, 0x4c, 0x51, 0xdd, 0x4a, 0x85, 0x7c, 0x4e, 0x4f, 0xf6, - 0xc8, 0xf5, 0x25, 0x0b, 0xad, 0x78, 0xdc, 0xf5, 0xe5, 0x6a, 0xf6, 0x0a, 0x40, 0x34, 0xf2, 0x67, - 0xdf, 0x00, 0x90, 0x32, 0x62, 0xd8, 0x4b, 0x88, 0x0b, 0xad, 0x4a, 0x49, 0x1d, 0x24, 0x24, 0x0f, - 0x84, 0x73, 0xb4, 0xe2, 0xb6, 0xe5, 0x8f, 0x3b, 0x47, 0x57, 0x77, 0x8e, 0x8d, 0x77, 0x3c, 0xf6, - 0x8e, 0x9e, 0x71, 0x7a, 0x89, 0x8d, 0xc1, 0x42, 0x58, 0x2c, 0xf9, 0x93, 0x2c, 0x58, 0xce, 0xf3, - 0x11, 0xc3, 0x5e, 0xc3, 0xb1, 0x95, 0x55, 0x85, 0xc6, 0x5b, 0x3c, 0x25, 0x8b, 0x43, 0x92, 0x5d, - 0x42, 0x22, 0x94, 0xd2, 0x56, 0xb8, 0x7f, 0xb4, 0x3c, 0xa5, 0xe9, 0xbc, 0x39, 0x98, 0x8e, 0xcf, - 0xd2, 0xc7, 0xe1, 0xdc, 0x85, 0xb2, 0x66, 0x9f, 0x8f, 0x6f, 0xba, 0x25, 0x6d, 0x77, 0x37, 0xe8, - 0x1f, 0x7b, 0xd6, 0x2d, 0x69, 0x44, 0x9d, 0x7e, 0x80, 0xf4, 0x7f, 0x0b, 0x97, 0xaa, 0x2d, 0xee, - 0xfb, 0xd4, 0xb8, 0xd2, 0xa5, 0xef, 0x4e, 0x54, 0x3b, 0x9f, 0x9a, 0x0e, 0xbc, 0x3f, 0x3a, 0x0f, - 0x16, 0x19, 0x4c, 0x2f, 0xba, 0x05, 0x24, 0x30, 0xfb, 0xb1, 0xfe, 0xb2, 0xbe, 0xfe, 0xb5, 0x4e, - 0x1f, 0xb1, 0x18, 0xa2, 0xcb, 0xeb, 0xef, 0xdf, 0xbe, 0xa6, 0xc1, 0xa7, 0xd9, 0xef, 0x88, 0xfe, - 0x7c, 0x33, 0xa5, 0xdc, 0xbf, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x36, 0xf9, 0x0d, 0xa6, 0x14, - 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/chart/template.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/chart/template.pb.go deleted file mode 100644 index 318f82f09..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/chart/template.pb.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/chart/template.proto - -package chart - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Template represents a template as a name/value pair. -// -// By convention, name is a relative path within the scope of the chart's -// base directory. 
-type Template struct { - // Name is the path-like name of the template. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Data is the template as byte data. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Template) Reset() { *m = Template{} } -func (m *Template) String() string { return proto.CompactTextString(m) } -func (*Template) ProtoMessage() {} -func (*Template) Descriptor() ([]byte, []int) { - return fileDescriptor_template_926c98477d6df5e9, []int{0} -} -func (m *Template) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Template.Unmarshal(m, b) -} -func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Template.Marshal(b, m, deterministic) -} -func (dst *Template) XXX_Merge(src proto.Message) { - xxx_messageInfo_Template.Merge(dst, src) -} -func (m *Template) XXX_Size() int { - return xxx_messageInfo_Template.Size(m) -} -func (m *Template) XXX_DiscardUnknown() { - xxx_messageInfo_Template.DiscardUnknown(m) -} - -var xxx_messageInfo_Template proto.InternalMessageInfo - -func (m *Template) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Template) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterType((*Template)(nil), "hapi.chart.Template") -} - -func init() { proto.RegisterFile("hapi/chart/template.proto", fileDescriptor_template_926c98477d6df5e9) } - -var fileDescriptor_template_926c98477d6df5e9 = []byte{ - // 107 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0x48, 0x2c, 0xc8, - 0xd4, 0x4f, 0xce, 0x48, 0x2c, 0x2a, 0xd1, 0x2f, 0x49, 0xcd, 0x2d, 0xc8, 0x49, 0x2c, 0x49, 0xd5, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x02, 0x49, 0xe9, 0x81, 0xa5, 0x94, 0x8c, 0xb8, 0x38, - 0x42, 0xa0, 0xb2, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, - 0x9c, 0x41, 0x60, 0x36, 0x48, 0x2c, 0x25, 0xb1, 0x24, 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, - 0x08, 0xcc, 0x76, 0x62, 0x8f, 0x62, 0x05, 0x6b, 0x4e, 0x62, 0x03, 0x9b, 0x67, 0x0c, 0x08, 0x00, - 0x00, 0xff, 0xff, 0x53, 0xee, 0x0e, 0x67, 0x6c, 0x00, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/hook.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/hook.pb.go deleted file mode 100644 index 4ce40b6eb..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/release/hook.pb.go +++ /dev/null @@ -1,252 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/release/hook.proto - -package release - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Hook_Event int32 - -const ( - Hook_UNKNOWN Hook_Event = 0 - Hook_PRE_INSTALL Hook_Event = 1 - Hook_POST_INSTALL Hook_Event = 2 - Hook_PRE_DELETE Hook_Event = 3 - Hook_POST_DELETE Hook_Event = 4 - Hook_PRE_UPGRADE Hook_Event = 5 - Hook_POST_UPGRADE Hook_Event = 6 - Hook_PRE_ROLLBACK Hook_Event = 7 - Hook_POST_ROLLBACK Hook_Event = 8 - Hook_RELEASE_TEST_SUCCESS Hook_Event = 9 - Hook_RELEASE_TEST_FAILURE Hook_Event = 10 - Hook_CRD_INSTALL Hook_Event = 11 -) - -var Hook_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "PRE_INSTALL", - 2: "POST_INSTALL", - 3: "PRE_DELETE", - 4: "POST_DELETE", - 5: "PRE_UPGRADE", - 6: "POST_UPGRADE", - 7: "PRE_ROLLBACK", - 8: "POST_ROLLBACK", - 9: "RELEASE_TEST_SUCCESS", - 10: "RELEASE_TEST_FAILURE", - 11: "CRD_INSTALL", -} -var Hook_Event_value = map[string]int32{ - "UNKNOWN": 0, - "PRE_INSTALL": 1, - "POST_INSTALL": 2, - "PRE_DELETE": 3, - "POST_DELETE": 4, - "PRE_UPGRADE": 5, - "POST_UPGRADE": 6, - "PRE_ROLLBACK": 7, - "POST_ROLLBACK": 8, - "RELEASE_TEST_SUCCESS": 9, - "RELEASE_TEST_FAILURE": 10, - "CRD_INSTALL": 11, -} - -func (x Hook_Event) String() string { - return proto.EnumName(Hook_Event_name, int32(x)) -} -func (Hook_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_hook_2f46a75c3171b448, []int{0, 0} -} - -type Hook_DeletePolicy int32 - -const ( - Hook_SUCCEEDED Hook_DeletePolicy = 0 - Hook_FAILED Hook_DeletePolicy = 1 - Hook_BEFORE_HOOK_CREATION Hook_DeletePolicy = 2 -) - -var Hook_DeletePolicy_name = map[int32]string{ - 0: "SUCCEEDED", - 1: "FAILED", - 2: "BEFORE_HOOK_CREATION", -} -var Hook_DeletePolicy_value = map[string]int32{ - "SUCCEEDED": 0, - "FAILED": 1, - "BEFORE_HOOK_CREATION": 2, -} - -func (x Hook_DeletePolicy) String() string { - return proto.EnumName(Hook_DeletePolicy_name, int32(x)) -} -func (Hook_DeletePolicy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_hook_2f46a75c3171b448, []int{0, 1} -} - -// Hook defines a hook object. -type Hook struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Kind is the Kubernetes kind. - Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` - // Path is the chart-relative path to the template. - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` - // Manifest is the manifest contents. - Manifest string `protobuf:"bytes,4,opt,name=manifest,proto3" json:"manifest,omitempty"` - // Events are the events that this hook fires on. - Events []Hook_Event `protobuf:"varint,5,rep,packed,name=events,proto3,enum=hapi.release.Hook_Event" json:"events,omitempty"` - // LastRun indicates the date/time this was last run. 
- LastRun *timestamp.Timestamp `protobuf:"bytes,6,opt,name=last_run,json=lastRun,proto3" json:"last_run,omitempty"` - // Weight indicates the sort order for execution among similar Hook type - Weight int32 `protobuf:"varint,7,opt,name=weight,proto3" json:"weight,omitempty"` - // DeletePolicies are the policies that indicate when to delete the hook - DeletePolicies []Hook_DeletePolicy `protobuf:"varint,8,rep,packed,name=delete_policies,json=deletePolicies,proto3,enum=hapi.release.Hook_DeletePolicy" json:"delete_policies,omitempty"` - // DeleteTimeout indicates how long to wait for a resource to be deleted before timing out - DeleteTimeout int64 `protobuf:"varint,9,opt,name=delete_timeout,json=deleteTimeout,proto3" json:"delete_timeout,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Hook) Reset() { *m = Hook{} } -func (m *Hook) String() string { return proto.CompactTextString(m) } -func (*Hook) ProtoMessage() {} -func (*Hook) Descriptor() ([]byte, []int) { - return fileDescriptor_hook_2f46a75c3171b448, []int{0} -} -func (m *Hook) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Hook.Unmarshal(m, b) -} -func (m *Hook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Hook.Marshal(b, m, deterministic) -} -func (dst *Hook) XXX_Merge(src proto.Message) { - xxx_messageInfo_Hook.Merge(dst, src) -} -func (m *Hook) XXX_Size() int { - return xxx_messageInfo_Hook.Size(m) -} -func (m *Hook) XXX_DiscardUnknown() { - xxx_messageInfo_Hook.DiscardUnknown(m) -} - -var xxx_messageInfo_Hook proto.InternalMessageInfo - -func (m *Hook) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Hook) GetKind() string { - if m != nil { - return m.Kind - } - return "" -} - -func (m *Hook) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func (m *Hook) GetManifest() string { - if m != nil { - return m.Manifest - } - return "" -} - -func (m *Hook) GetEvents() []Hook_Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *Hook) GetLastRun() *timestamp.Timestamp { - if m != nil { - return m.LastRun - } - return nil -} - -func (m *Hook) GetWeight() int32 { - if m != nil { - return m.Weight - } - return 0 -} - -func (m *Hook) GetDeletePolicies() []Hook_DeletePolicy { - if m != nil { - return m.DeletePolicies - } - return nil -} - -func (m *Hook) GetDeleteTimeout() int64 { - if m != nil { - return m.DeleteTimeout - } - return 0 -} - -func init() { - proto.RegisterType((*Hook)(nil), "hapi.release.Hook") - proto.RegisterEnum("hapi.release.Hook_Event", Hook_Event_name, Hook_Event_value) - proto.RegisterEnum("hapi.release.Hook_DeletePolicy", Hook_DeletePolicy_name, Hook_DeletePolicy_value) -} - -func init() { proto.RegisterFile("hapi/release/hook.proto", fileDescriptor_hook_2f46a75c3171b448) } - -var fileDescriptor_hook_2f46a75c3171b448 = []byte{ - // 473 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xdb, 0x8e, 0xda, 0x3c, - 0x10, 0x80, 0x37, 0x1c, 0x02, 0x0c, 0x87, 0xf5, 0x6f, 0xfd, 0x6a, 0x2d, 0x6e, 0x16, 0x21, 0x55, - 0xe2, 0x2a, 0x54, 0x5b, 0xf5, 0x01, 0x42, 0xe2, 0x2d, 0x88, 0x88, 0x20, 0x27, 0xa8, 0x52, 0x6f, - 0xa2, 0x6c, 0xf1, 0x42, 0x44, 0x88, 0x23, 0x62, 0x5a, 0xf5, 0x81, 0xfb, 0x18, 0x95, 0x2a, 0x3b, - 0x21, 0x5d, 0xa9, 0xbd, 0x9b, 0xf9, 0xe6, 0xf3, 0x78, 0xc6, 0x86, 0xb7, 0xc7, 0x38, 0x4f, 0xe6, - 0x17, 0x9e, 0xf2, 0xb8, 0xe0, 0xf3, 0xa3, 0x10, 0x27, 
0x2b, 0xbf, 0x08, 0x29, 0xf0, 0x40, 0x15, - 0xac, 0xaa, 0x30, 0x7e, 0x38, 0x08, 0x71, 0x48, 0xf9, 0x5c, 0xd7, 0x9e, 0xaf, 0x2f, 0x73, 0x99, - 0x9c, 0x79, 0x21, 0xe3, 0x73, 0x5e, 0xea, 0xd3, 0x5f, 0x2d, 0x68, 0x2d, 0x85, 0x38, 0x61, 0x0c, - 0xad, 0x2c, 0x3e, 0x73, 0x62, 0x4c, 0x8c, 0x59, 0x8f, 0xe9, 0x58, 0xb1, 0x53, 0x92, 0xed, 0x49, - 0xa3, 0x64, 0x2a, 0x56, 0x2c, 0x8f, 0xe5, 0x91, 0x34, 0x4b, 0xa6, 0x62, 0x3c, 0x86, 0xee, 0x39, - 0xce, 0x92, 0x17, 0x5e, 0x48, 0xd2, 0xd2, 0xbc, 0xce, 0xf1, 0x7b, 0x30, 0xf9, 0x37, 0x9e, 0xc9, - 0x82, 0xb4, 0x27, 0xcd, 0xd9, 0xe8, 0x91, 0x58, 0xaf, 0x07, 0xb4, 0xd4, 0xdd, 0x16, 0x55, 0x02, - 0xab, 0x3c, 0xfc, 0x11, 0xba, 0x69, 0x5c, 0xc8, 0xe8, 0x72, 0xcd, 0x88, 0x39, 0x31, 0x66, 0xfd, - 0xc7, 0xb1, 0x55, 0xae, 0x61, 0xdd, 0xd6, 0xb0, 0xc2, 0xdb, 0x1a, 0xac, 0xa3, 0x5c, 0x76, 0xcd, - 0xf0, 0x1b, 0x30, 0xbf, 0xf3, 0xe4, 0x70, 0x94, 0xa4, 0x33, 0x31, 0x66, 0x6d, 0x56, 0x65, 0x78, - 0x09, 0xf7, 0x7b, 0x9e, 0x72, 0xc9, 0xa3, 0x5c, 0xa4, 0xc9, 0xd7, 0x84, 0x17, 0xa4, 0xab, 0x27, - 0x79, 0xf8, 0xc7, 0x24, 0xae, 0x36, 0xb7, 0x4a, 0xfc, 0xc1, 0x46, 0xfb, 0x3f, 0x59, 0xc2, 0x0b, - 0xfc, 0x0e, 0x2a, 0x12, 0xa9, 0x57, 0x14, 0x57, 0x49, 0x7a, 0x13, 0x63, 0xd6, 0x64, 0xc3, 0x92, - 0x86, 0x25, 0x9c, 0xfe, 0x34, 0xa0, 0xad, 0x37, 0xc2, 0x7d, 0xe8, 0xec, 0x36, 0xeb, 0x8d, 0xff, - 0x79, 0x83, 0xee, 0xf0, 0x3d, 0xf4, 0xb7, 0x8c, 0x46, 0xab, 0x4d, 0x10, 0xda, 0x9e, 0x87, 0x0c, - 0x8c, 0x60, 0xb0, 0xf5, 0x83, 0xb0, 0x26, 0x0d, 0x3c, 0x02, 0x50, 0x8a, 0x4b, 0x3d, 0x1a, 0x52, - 0xd4, 0xd4, 0x47, 0x94, 0x51, 0x81, 0xd6, 0xad, 0xc7, 0x6e, 0xfb, 0x89, 0xd9, 0x2e, 0x45, 0xed, - 0xba, 0xc7, 0x8d, 0x98, 0x9a, 0x30, 0x1a, 0x31, 0xdf, 0xf3, 0x16, 0xb6, 0xb3, 0x46, 0x1d, 0xfc, - 0x1f, 0x0c, 0xb5, 0x53, 0xa3, 0x2e, 0x26, 0xf0, 0x3f, 0xa3, 0x1e, 0xb5, 0x03, 0x1a, 0x85, 0x34, - 0x08, 0xa3, 0x60, 0xe7, 0x38, 0x34, 0x08, 0x50, 0xef, 0xaf, 0xca, 0x93, 0xbd, 0xf2, 0x76, 0x8c, - 0x22, 0x50, 0x77, 0x3b, 0xcc, 0xad, 0xa7, 0xed, 0x4f, 0x1d, 0x18, 0xbc, 0x7e, 0x2e, 0x3c, 0x84, - 0x9e, 0xee, 0x43, 0x5d, 0xea, 0xa2, 0x3b, 0x0c, 0x60, 0xaa, 0xc3, 0xd4, 0x45, 0x86, 0xea, 0xba, - 0xa0, 0x4f, 0x3e, 0xa3, 0xd1, 0xd2, 0xf7, 0xd7, 0x91, 0xc3, 0xa8, 0x1d, 0xae, 0xfc, 0x0d, 0x6a, - 0x2c, 0x7a, 0x5f, 0x3a, 0xd5, 0x07, 0x3c, 0x9b, 0xfa, 0x77, 0x3f, 0xfc, 0x0e, 0x00, 0x00, 0xff, - 0xff, 0xce, 0x84, 0xd9, 0x98, 0xdb, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/info.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/info.pb.go deleted file mode 100644 index f7de56931..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/release/info.pb.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/release/info.proto - -package release - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Info describes release information. 
-type Info struct { - Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - FirstDeployed *timestamp.Timestamp `protobuf:"bytes,2,opt,name=first_deployed,json=firstDeployed,proto3" json:"first_deployed,omitempty"` - LastDeployed *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_deployed,json=lastDeployed,proto3" json:"last_deployed,omitempty"` - // Deleted tracks when this object was deleted. - Deleted *timestamp.Timestamp `protobuf:"bytes,4,opt,name=deleted,proto3" json:"deleted,omitempty"` - // Description is human-friendly "log entry" about this release. - Description string `protobuf:"bytes,5,opt,name=Description,proto3" json:"Description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_info_a0e9dd6d22b13366, []int{0} -} -func (m *Info) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Info.Unmarshal(m, b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) -} -func (dst *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(dst, src) -} -func (m *Info) XXX_Size() int { - return xxx_messageInfo_Info.Size(m) -} -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) -} - -var xxx_messageInfo_Info proto.InternalMessageInfo - -func (m *Info) GetStatus() *Status { - if m != nil { - return m.Status - } - return nil -} - -func (m *Info) GetFirstDeployed() *timestamp.Timestamp { - if m != nil { - return m.FirstDeployed - } - return nil -} - -func (m *Info) GetLastDeployed() *timestamp.Timestamp { - if m != nil { - return m.LastDeployed - } - return nil -} - -func (m *Info) GetDeleted() *timestamp.Timestamp { - if m != nil { - return m.Deleted - } - return nil -} - -func (m *Info) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func init() { - proto.RegisterType((*Info)(nil), "hapi.release.Info") -} - -func init() { proto.RegisterFile("hapi/release/info.proto", fileDescriptor_info_a0e9dd6d22b13366) } - -var fileDescriptor_info_a0e9dd6d22b13366 = []byte{ - // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x31, 0x4f, 0xc3, 0x30, - 0x10, 0x85, 0x95, 0x52, 0x5a, 0xd5, 0x6d, 0x19, 0x2c, 0x24, 0x42, 0x16, 0x22, 0xa6, 0x0e, 0xc8, - 0x91, 0x80, 0x1d, 0x81, 0xba, 0xb0, 0x06, 0x26, 0x16, 0xe4, 0xe2, 0x73, 0xb1, 0xe4, 0xe6, 0x2c, - 0xfb, 0x3a, 0xf0, 0x2f, 0xf8, 0xc9, 0xa8, 0xb6, 0x83, 0xd2, 0xa9, 0xab, 0xbf, 0xf7, 0x3e, 0xbf, - 0x63, 0x57, 0xdf, 0xd2, 0x99, 0xc6, 0x83, 0x05, 0x19, 0xa0, 0x31, 0x9d, 0x46, 0xe1, 0x3c, 0x12, - 0xf2, 0xc5, 0x01, 0x88, 0x0c, 0xaa, 0x9b, 0x2d, 0xe2, 0xd6, 0x42, 0x13, 0xd9, 0x66, 0xaf, 0x1b, - 0x32, 0x3b, 0x08, 0x24, 0x77, 0x2e, 0xc5, 0xab, 0xeb, 0x23, 0x4f, 0x20, 0x49, 0xfb, 0x90, 0xd0, - 0xed, 0xef, 0x88, 0x8d, 0x5f, 0x3b, 0x8d, 0xfc, 0x8e, 0x4d, 0x12, 0x28, 0x8b, 0xba, 0x58, 0xcd, - 0xef, 0x2f, 0xc5, 0xf0, 0x0f, 0xf1, 0x16, 0x59, 0x9b, 0x33, 0xfc, 0x99, 0x5d, 0x68, 0xe3, 0x03, - 0x7d, 0x2a, 0x70, 0x16, 0x7f, 0x40, 0x95, 0xa3, 0xd8, 0xaa, 0x44, 0xda, 0x22, 0xfa, 0x2d, 0xe2, - 0xbd, 0xdf, 0xd2, 0x2e, 0x63, 0x63, 0x9d, 0x0b, 0xfc, 0x89, 0x2d, 0xad, 0x1c, 0x1a, 0xce, 0x4e, - 0x1a, 0x16, 0x87, 0xc2, 0xbf, 0xe0, 0x91, 0x4d, 
0x15, 0x58, 0x20, 0x50, 0xe5, 0xf8, 0x64, 0xb5, - 0x8f, 0xf2, 0x9a, 0xcd, 0xd7, 0x10, 0xbe, 0xbc, 0x71, 0x64, 0xb0, 0x2b, 0xcf, 0xeb, 0x62, 0x35, - 0x6b, 0x87, 0x4f, 0x2f, 0xb3, 0x8f, 0x69, 0xbe, 0x7a, 0x33, 0x89, 0xa6, 0x87, 0xbf, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x1a, 0x52, 0x8f, 0x9c, 0x89, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/release.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/release.pb.go deleted file mode 100644 index f87decd72..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/release/release.pb.go +++ /dev/null @@ -1,151 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/release/release.proto - -package release - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import chart "k8s.io/helm/pkg/proto/hapi/chart" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Release describes a deployment of a chart, together with the chart -// and the variables used to deploy that chart. -type Release struct { - // Name is the name of the release - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Info provides information about a release - Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - // Chart is the chart that was released. - Chart *chart.Chart `protobuf:"bytes,3,opt,name=chart,proto3" json:"chart,omitempty"` - // Config is the set of extra Values added to the chart. - // These values override the default values inside of the chart. - Config *chart.Config `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - // Manifest is the string representation of the rendered template. - Manifest string `protobuf:"bytes,5,opt,name=manifest,proto3" json:"manifest,omitempty"` - // Hooks are all of the hooks declared for this release. - Hooks []*Hook `protobuf:"bytes,6,rep,name=hooks,proto3" json:"hooks,omitempty"` - // Version is an int32 which represents the version of the release. - Version int32 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` - // Namespace is the kubernetes namespace of the release. 
- Namespace string `protobuf:"bytes,8,opt,name=namespace,proto3" json:"namespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Release) Reset() { *m = Release{} } -func (m *Release) String() string { return proto.CompactTextString(m) } -func (*Release) ProtoMessage() {} -func (*Release) Descriptor() ([]byte, []int) { - return fileDescriptor_release_fa600adfb1fffc82, []int{0} -} -func (m *Release) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Release.Unmarshal(m, b) -} -func (m *Release) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Release.Marshal(b, m, deterministic) -} -func (dst *Release) XXX_Merge(src proto.Message) { - xxx_messageInfo_Release.Merge(dst, src) -} -func (m *Release) XXX_Size() int { - return xxx_messageInfo_Release.Size(m) -} -func (m *Release) XXX_DiscardUnknown() { - xxx_messageInfo_Release.DiscardUnknown(m) -} - -var xxx_messageInfo_Release proto.InternalMessageInfo - -func (m *Release) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Release) GetInfo() *Info { - if m != nil { - return m.Info - } - return nil -} - -func (m *Release) GetChart() *chart.Chart { - if m != nil { - return m.Chart - } - return nil -} - -func (m *Release) GetConfig() *chart.Config { - if m != nil { - return m.Config - } - return nil -} - -func (m *Release) GetManifest() string { - if m != nil { - return m.Manifest - } - return "" -} - -func (m *Release) GetHooks() []*Hook { - if m != nil { - return m.Hooks - } - return nil -} - -func (m *Release) GetVersion() int32 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *Release) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func init() { - proto.RegisterType((*Release)(nil), "hapi.release.Release") -} - -func init() { proto.RegisterFile("hapi/release/release.proto", fileDescriptor_release_fa600adfb1fffc82) } - -var fileDescriptor_release_fa600adfb1fffc82 = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0xbf, 0x4e, 0xc3, 0x40, - 0x0c, 0xc6, 0x95, 0x36, 0x7f, 0x1a, 0xc3, 0x82, 0x07, 0xb0, 0x22, 0x86, 0x88, 0x01, 0x22, 0x86, - 0x54, 0x82, 0x37, 0x80, 0x05, 0xd6, 0x1b, 0xd9, 0x8e, 0xe8, 0x42, 0x4e, 0xa5, 0xe7, 0x28, 0x17, - 0xf1, 0x2c, 0x3c, 0x2e, 0xba, 0x3f, 0x85, 0x94, 0x2e, 0x4e, 0xec, 0xdf, 0xa7, 0xcf, 0xdf, 0x19, - 0xaa, 0x41, 0x8e, 0x7a, 0x3b, 0xa9, 0x4f, 0x25, 0xad, 0x3a, 0x7c, 0xdb, 0x71, 0xe2, 0x99, 0xf1, - 0xdc, 0xb1, 0x36, 0xce, 0xaa, 0xab, 0x23, 0xe5, 0xc0, 0xbc, 0x0b, 0xb2, 0x7f, 0x40, 0x9b, 0x9e, - 0x8f, 0x40, 0x37, 0xc8, 0x69, 0xde, 0x76, 0x6c, 0x7a, 0xfd, 0x11, 0xc1, 0xe5, 0x12, 0xb8, 0x1a, - 0xe6, 0x37, 0xdf, 0x2b, 0x28, 0x44, 0xf0, 0x41, 0x84, 0xd4, 0xc8, 0xbd, 0xa2, 0xa4, 0x4e, 0x9a, - 0x52, 0xf8, 0x7f, 0xbc, 0x85, 0xd4, 0xd9, 0xd3, 0xaa, 0x4e, 0x9a, 0xb3, 0x07, 0x6c, 0x97, 0xf9, - 0xda, 0x57, 0xd3, 0xb3, 0xf0, 0x1c, 0xef, 0x20, 0xf3, 0xb6, 0xb4, 0xf6, 0xc2, 0x8b, 0x20, 0x0c, - 0x9b, 0x9e, 0x5d, 0x15, 0x81, 0xe3, 0x3d, 0xe4, 0x21, 0x18, 0xa5, 0x4b, 0xcb, 0xa8, 0xf4, 0x44, - 0x44, 0x05, 0x56, 0xb0, 0xd9, 0x4b, 0xa3, 0x7b, 0x65, 0x67, 0xca, 0x7c, 0xa8, 0xdf, 0x1e, 0x1b, - 0xc8, 0xdc, 0x41, 0x2c, 0xe5, 0xf5, 0xfa, 0x34, 0xd9, 0x0b, 0xf3, 0x4e, 0x04, 0x01, 0x12, 0x14, - 0x5f, 0x6a, 0xb2, 0x9a, 0x0d, 0x15, 0x75, 0xd2, 0x64, 0xe2, 0xd0, 0xe2, 0x35, 0x94, 0xee, 0x91, - 0x76, 0x94, 0x9d, 0xa2, 0x8d, 0x5f, 0xf0, 0x37, 0x78, 0x2a, 
0xdf, 0x8a, 0x68, 0xf7, 0x9e, 0xfb, - 0x63, 0x3d, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x8f, 0xec, 0x97, 0xbb, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/status.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/status.pb.go deleted file mode 100644 index 39f75bab6..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/release/status.pb.go +++ /dev/null @@ -1,171 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/release/status.proto - -package release - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Status_Code int32 - -const ( - // Status_UNKNOWN indicates that a release is in an uncertain state. - Status_UNKNOWN Status_Code = 0 - // Status_DEPLOYED indicates that the release has been pushed to Kubernetes. - Status_DEPLOYED Status_Code = 1 - // Status_DELETED indicates that a release has been deleted from Kubernetes. - Status_DELETED Status_Code = 2 - // Status_SUPERSEDED indicates that this release object is outdated and a newer one exists. - Status_SUPERSEDED Status_Code = 3 - // Status_FAILED indicates that the release was not successfully deployed. - Status_FAILED Status_Code = 4 - // Status_DELETING indicates that a delete operation is underway. - Status_DELETING Status_Code = 5 - // Status_PENDING_INSTALL indicates that an install operation is underway. - Status_PENDING_INSTALL Status_Code = 6 - // Status_PENDING_UPGRADE indicates that an upgrade operation is underway. - Status_PENDING_UPGRADE Status_Code = 7 - // Status_PENDING_ROLLBACK indicates that a rollback operation is underway. - Status_PENDING_ROLLBACK Status_Code = 8 -) - -var Status_Code_name = map[int32]string{ - 0: "UNKNOWN", - 1: "DEPLOYED", - 2: "DELETED", - 3: "SUPERSEDED", - 4: "FAILED", - 5: "DELETING", - 6: "PENDING_INSTALL", - 7: "PENDING_UPGRADE", - 8: "PENDING_ROLLBACK", -} -var Status_Code_value = map[string]int32{ - "UNKNOWN": 0, - "DEPLOYED": 1, - "DELETED": 2, - "SUPERSEDED": 3, - "FAILED": 4, - "DELETING": 5, - "PENDING_INSTALL": 6, - "PENDING_UPGRADE": 7, - "PENDING_ROLLBACK": 8, -} - -func (x Status_Code) String() string { - return proto.EnumName(Status_Code_name, int32(x)) -} -func (Status_Code) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_status_59977299d7c48665, []int{0, 0} -} - -// Status defines the status of a release. -type Status struct { - Code Status_Code `protobuf:"varint,1,opt,name=code,proto3,enum=hapi.release.Status_Code" json:"code,omitempty"` - // Cluster resources as kubectl would print them. 
- Resources string `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` - // Contains the rendered templates/NOTES.txt if available - Notes string `protobuf:"bytes,4,opt,name=notes,proto3" json:"notes,omitempty"` - // LastTestSuiteRun provides results on the last test run on a release - LastTestSuiteRun *TestSuite `protobuf:"bytes,5,opt,name=last_test_suite_run,json=lastTestSuiteRun,proto3" json:"last_test_suite_run,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_status_59977299d7c48665, []int{0} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (dst *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(dst, src) -} -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -func (m *Status) GetCode() Status_Code { - if m != nil { - return m.Code - } - return Status_UNKNOWN -} - -func (m *Status) GetResources() string { - if m != nil { - return m.Resources - } - return "" -} - -func (m *Status) GetNotes() string { - if m != nil { - return m.Notes - } - return "" -} - -func (m *Status) GetLastTestSuiteRun() *TestSuite { - if m != nil { - return m.LastTestSuiteRun - } - return nil -} - -func init() { - proto.RegisterType((*Status)(nil), "hapi.release.Status") - proto.RegisterEnum("hapi.release.Status_Code", Status_Code_name, Status_Code_value) -} - -func init() { proto.RegisterFile("hapi/release/status.proto", fileDescriptor_status_59977299d7c48665) } - -var fileDescriptor_status_59977299d7c48665 = []byte{ - // 333 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xd1, 0x6e, 0xa2, 0x40, - 0x14, 0x86, 0x17, 0x45, 0xd4, 0xa3, 0x71, 0x27, 0xa3, 0xc9, 0xa2, 0xd9, 0x4d, 0x8c, 0x57, 0xde, - 0x2c, 0x24, 0xf6, 0x09, 0xd0, 0x19, 0x0d, 0x71, 0x82, 0x04, 0x30, 0x4d, 0x7b, 0x43, 0x50, 0xa7, - 0xd6, 0xc4, 0x30, 0x86, 0x19, 0x2e, 0xfa, 0x26, 0x7d, 0xaa, 0x3e, 0x53, 0x03, 0xd8, 0xa8, 0x97, - 0xff, 0xff, 0x7d, 0x87, 0x73, 0x18, 0x18, 0xbe, 0x27, 0x97, 0x93, 0x9d, 0xf1, 0x33, 0x4f, 0x24, - 0xb7, 0xa5, 0x4a, 0x54, 0x2e, 0xad, 0x4b, 0x26, 0x94, 0xc0, 0xdd, 0x02, 0x59, 0x57, 0x34, 0xfa, - 0xf7, 0x20, 0x2a, 0x2e, 0x55, 0x2c, 0xf3, 0x93, 0xe2, 0x95, 0x3c, 0x1a, 0x1e, 0x85, 0x38, 0x9e, - 0xb9, 0x5d, 0xa6, 0x5d, 0xfe, 0x66, 0x27, 0xe9, 0x47, 0x85, 0x26, 0x5f, 0x35, 0x30, 0xc2, 0xf2, - 0xc3, 0xf8, 0x3f, 0xe8, 0x7b, 0x71, 0xe0, 0xa6, 0x36, 0xd6, 0xa6, 0xbd, 0xd9, 0xd0, 0xba, 0xdf, - 0x60, 0x55, 0x8e, 0xb5, 0x10, 0x07, 0x1e, 0x94, 0x1a, 0xfe, 0x0b, 0xed, 0x8c, 0x4b, 0x91, 0x67, - 0x7b, 0x2e, 0xcd, 0xfa, 0x58, 0x9b, 0xb6, 0x83, 0x5b, 0x81, 0x07, 0xd0, 0x48, 0x85, 0xe2, 0xd2, - 0xd4, 0x4b, 0x52, 0x05, 0xbc, 0x84, 0xfe, 0x39, 0x91, 0x2a, 0xbe, 0x5d, 0x18, 0x67, 0x79, 0x6a, - 0x36, 0xc6, 0xda, 0xb4, 0x33, 0xfb, 0xf3, 0xb8, 0x31, 0xe2, 0x52, 0x85, 0x85, 0x12, 0xa0, 0x62, - 0xe6, 0x16, 0xf3, 0x74, 0xf2, 0xa9, 0x81, 0x5e, 0x9c, 0x82, 0x3b, 0xd0, 0xdc, 0x7a, 0x6b, 0x6f, 
- 0xf3, 0xec, 0xa1, 0x5f, 0xb8, 0x0b, 0x2d, 0x42, 0x7d, 0xb6, 0x79, 0xa1, 0x04, 0x69, 0x05, 0x22, - 0x94, 0xd1, 0x88, 0x12, 0x54, 0xc3, 0x3d, 0x80, 0x70, 0xeb, 0xd3, 0x20, 0xa4, 0x84, 0x12, 0x54, - 0xc7, 0x00, 0xc6, 0xd2, 0x71, 0x19, 0x25, 0x48, 0xaf, 0xc6, 0x18, 0x8d, 0x5c, 0x6f, 0x85, 0x1a, - 0xb8, 0x0f, 0xbf, 0x7d, 0xea, 0x11, 0xd7, 0x5b, 0xc5, 0xae, 0x17, 0x46, 0x0e, 0x63, 0xc8, 0xb8, - 0x2f, 0xb7, 0xfe, 0x2a, 0x70, 0x08, 0x45, 0x4d, 0x3c, 0x00, 0xf4, 0x53, 0x06, 0x1b, 0xc6, 0xe6, - 0xce, 0x62, 0x8d, 0x5a, 0xf3, 0xf6, 0x6b, 0xf3, 0xfa, 0x07, 0x3b, 0xa3, 0x7c, 0xe2, 0xa7, 0xef, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x09, 0x48, 0x18, 0xba, 0xc7, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/test_run.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/test_run.pb.go deleted file mode 100644 index 93460f798..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/release/test_run.pb.go +++ /dev/null @@ -1,150 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/release/test_run.proto - -package release - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type TestRun_Status int32 - -const ( - TestRun_UNKNOWN TestRun_Status = 0 - TestRun_SUCCESS TestRun_Status = 1 - TestRun_FAILURE TestRun_Status = 2 - TestRun_RUNNING TestRun_Status = 3 -) - -var TestRun_Status_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SUCCESS", - 2: "FAILURE", - 3: "RUNNING", -} -var TestRun_Status_value = map[string]int32{ - "UNKNOWN": 0, - "SUCCESS": 1, - "FAILURE": 2, - "RUNNING": 3, -} - -func (x TestRun_Status) String() string { - return proto.EnumName(TestRun_Status_name, int32(x)) -} -func (TestRun_Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_test_run_3a2eb78f132e5146, []int{0, 0} -} - -type TestRun struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Status TestRun_Status `protobuf:"varint,2,opt,name=status,proto3,enum=hapi.release.TestRun_Status" json:"status,omitempty"` - Info string `protobuf:"bytes,3,opt,name=info,proto3" json:"info,omitempty"` - StartedAt *timestamp.Timestamp `protobuf:"bytes,4,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - CompletedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TestRun) Reset() { *m = TestRun{} } -func (m *TestRun) String() string { return proto.CompactTextString(m) } -func (*TestRun) ProtoMessage() {} -func (*TestRun) Descriptor() ([]byte, []int) { - return fileDescriptor_test_run_3a2eb78f132e5146, []int{0} -} -func (m *TestRun) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestRun.Unmarshal(m, b) -} -func (m *TestRun) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestRun.Marshal(b, m, deterministic) -} -func (dst *TestRun) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_TestRun.Merge(dst, src) -} -func (m *TestRun) XXX_Size() int { - return xxx_messageInfo_TestRun.Size(m) -} -func (m *TestRun) XXX_DiscardUnknown() { - xxx_messageInfo_TestRun.DiscardUnknown(m) -} - -var xxx_messageInfo_TestRun proto.InternalMessageInfo - -func (m *TestRun) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *TestRun) GetStatus() TestRun_Status { - if m != nil { - return m.Status - } - return TestRun_UNKNOWN -} - -func (m *TestRun) GetInfo() string { - if m != nil { - return m.Info - } - return "" -} - -func (m *TestRun) GetStartedAt() *timestamp.Timestamp { - if m != nil { - return m.StartedAt - } - return nil -} - -func (m *TestRun) GetCompletedAt() *timestamp.Timestamp { - if m != nil { - return m.CompletedAt - } - return nil -} - -func init() { - proto.RegisterType((*TestRun)(nil), "hapi.release.TestRun") - proto.RegisterEnum("hapi.release.TestRun_Status", TestRun_Status_name, TestRun_Status_value) -} - -func init() { - proto.RegisterFile("hapi/release/test_run.proto", fileDescriptor_test_run_3a2eb78f132e5146) -} - -var fileDescriptor_test_run_3a2eb78f132e5146 = []byte{ - // 274 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0xc1, 0x4b, 0xfb, 0x30, - 0x1c, 0xc5, 0x7f, 0xe9, 0xf6, 0x6b, 0x69, 0x3a, 0xa4, 0xe4, 0x54, 0xa6, 0x60, 0xd9, 0xa9, 0xa7, - 0x14, 0xa6, 0x17, 0x41, 0x0f, 0x75, 0x4c, 0x19, 0x4a, 0x84, 0x74, 0x45, 0xf0, 0x32, 0x32, 0xcd, - 0x66, 0xa1, 0x6d, 0x4a, 0xf3, 0xed, 0xdf, 0xe3, 0xbf, 0x2a, 0x69, 0x33, 0xf1, 0xe6, 0xed, 0xfb, - 0x78, 0x9f, 0xf7, 0xf2, 0x82, 0xcf, 0x3f, 0x45, 0x5b, 0xa6, 0x9d, 0xac, 0xa4, 0xd0, 0x32, 0x05, - 0xa9, 0x61, 0xd7, 0xf5, 0x0d, 0x6d, 0x3b, 0x05, 0x8a, 0xcc, 0x8c, 0x49, 0xad, 0x39, 0xbf, 0x3c, - 0x2a, 0x75, 0xac, 0x64, 0x3a, 0x78, 0xfb, 0xfe, 0x90, 0x42, 0x59, 0x4b, 0x0d, 0xa2, 0x6e, 0x47, - 0x7c, 0xf1, 0xe5, 0x60, 0x6f, 0x2b, 0x35, 0xf0, 0xbe, 0x21, 0x04, 0x4f, 0x1b, 0x51, 0xcb, 0x08, - 0xc5, 0x28, 0xf1, 0xf9, 0x70, 0x93, 0x6b, 0xec, 0x6a, 0x10, 0xd0, 0xeb, 0xc8, 0x89, 0x51, 0x72, - 0xb6, 0xbc, 0xa0, 0xbf, 0xfb, 0xa9, 0x8d, 0xd2, 0x7c, 0x60, 0xb8, 0x65, 0x4d, 0x53, 0xd9, 0x1c, - 0x54, 0x34, 0x19, 0x9b, 0xcc, 0x4d, 0x6e, 0x30, 0xd6, 0x20, 0x3a, 0x90, 0x1f, 0x3b, 0x01, 0xd1, - 0x34, 0x46, 0x49, 0xb0, 0x9c, 0xd3, 0x71, 0x1f, 0x3d, 0xed, 0xa3, 0xdb, 0xd3, 0x3e, 0xee, 0x5b, - 0x3a, 0x03, 0x72, 0x87, 0x67, 0xef, 0xaa, 0x6e, 0x2b, 0x69, 0xc3, 0xff, 0xff, 0x0c, 0x07, 0x3f, - 0x7c, 0x06, 0x8b, 0x5b, 0xec, 0x8e, 0xfb, 0x48, 0x80, 0xbd, 0x82, 0x3d, 0xb1, 0x97, 0x57, 0x16, - 0xfe, 0x33, 0x22, 0x2f, 0x56, 0xab, 0x75, 0x9e, 0x87, 0xc8, 0x88, 0x87, 0x6c, 0xf3, 0x5c, 0xf0, - 0x75, 0xe8, 0x18, 0xc1, 0x0b, 0xc6, 0x36, 0xec, 0x31, 0x9c, 0xdc, 0xfb, 0x6f, 0x9e, 0xfd, 0xed, - 0xde, 0x1d, 0x5e, 0xba, 0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x31, 0x86, 0x46, 0xdb, 0x81, 0x01, - 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/test_suite.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/test_suite.pb.go deleted file mode 100644 index ba1e5e447..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/release/test_suite.pb.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/release/test_suite.proto - -package release - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// TestSuite comprises of the last run of the pre-defined test suite of a release version -type TestSuite struct { - // StartedAt indicates the date/time this test suite was kicked off - StartedAt *timestamp.Timestamp `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - // CompletedAt indicates the date/time this test suite was completed - CompletedAt *timestamp.Timestamp `protobuf:"bytes,2,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` - // Results are the results of each segment of the test - Results []*TestRun `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TestSuite) Reset() { *m = TestSuite{} } -func (m *TestSuite) String() string { return proto.CompactTextString(m) } -func (*TestSuite) ProtoMessage() {} -func (*TestSuite) Descriptor() ([]byte, []int) { - return fileDescriptor_test_suite_97a98e0ba80794de, []int{0} -} -func (m *TestSuite) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestSuite.Unmarshal(m, b) -} -func (m *TestSuite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestSuite.Marshal(b, m, deterministic) -} -func (dst *TestSuite) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestSuite.Merge(dst, src) -} -func (m *TestSuite) XXX_Size() int { - return xxx_messageInfo_TestSuite.Size(m) -} -func (m *TestSuite) XXX_DiscardUnknown() { - xxx_messageInfo_TestSuite.DiscardUnknown(m) -} - -var xxx_messageInfo_TestSuite proto.InternalMessageInfo - -func (m *TestSuite) GetStartedAt() *timestamp.Timestamp { - if m != nil { - return m.StartedAt - } - return nil -} - -func (m *TestSuite) GetCompletedAt() *timestamp.Timestamp { - if m != nil { - return m.CompletedAt - } - return nil -} - -func (m *TestSuite) GetResults() []*TestRun { - if m != nil { - return m.Results - } - return nil -} - -func init() { - proto.RegisterType((*TestSuite)(nil), "hapi.release.TestSuite") -} - -func init() { - proto.RegisterFile("hapi/release/test_suite.proto", fileDescriptor_test_suite_97a98e0ba80794de) -} - -var fileDescriptor_test_suite_97a98e0ba80794de = []byte{ - // 207 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0xc1, 0x4a, 0x86, 0x40, - 0x14, 0x85, 0x31, 0x21, 0x71, 0x74, 0x35, 0x10, 0x88, 0x11, 0x49, 0x2b, 0x57, 0x33, 0x60, 0xab, - 0x16, 0x2d, 0xec, 0x11, 0xcc, 0x55, 0x1b, 0x19, 0xeb, 0x66, 0xc2, 0xe8, 0x0c, 0x73, 0xef, 0xbc, - 0x5a, 0xcf, 0x17, 0xea, 0x18, 0x41, 0x8b, 0x7f, 0xfd, 0x7d, 0xe7, 0x9c, 0x7b, 0xd9, 0xdd, 0x97, - 0xb2, 0xb3, 0x74, 0xa0, 0x41, 0x21, 0x48, 0x02, 0xa4, 0x01, 0xfd, 0x4c, 0x20, 0xac, 0x33, 0x64, - 0x78, 0xbe, 0x61, 0x11, 0x70, 0x79, 0x3f, 0x19, 0x33, 0x69, 0x90, 0x3b, 0x1b, 0xfd, 0xa7, 0xa4, - 0x79, 0x01, 0x24, 0xb5, 0xd8, 0x43, 0x2f, 0x6f, 0xff, 0xb7, 0x39, 0xbf, 0x1e, 0xf0, 0xe1, 0x3b, - 0x62, 0x69, 0x0f, 0x48, 0xaf, 0x5b, 0x3f, 0x7f, 0x62, 0x0c, 0x49, 0x39, 0x82, 0x8f, 0x41, 0x51, - 0x11, 0x55, 0x51, 0x9d, 0x35, 0xa5, 0x38, 0x06, 0xc4, 0x39, 0x20, 0xfa, 
0x73, 0xa0, 0x4b, 0x83, - 0xdd, 0x12, 0x7f, 0x66, 0xf9, 0xbb, 0x59, 0xac, 0x86, 0x10, 0xbe, 0xba, 0x18, 0xce, 0x7e, 0xfd, - 0x96, 0xb8, 0x64, 0x89, 0x03, 0xf4, 0x9a, 0xb0, 0x88, 0xab, 0xb8, 0xce, 0x9a, 0x1b, 0xf1, 0xf7, - 0x4b, 0xb1, 0xdd, 0xd8, 0xf9, 0xb5, 0x3b, 0xad, 0x97, 0xf4, 0x2d, 0x09, 0x6c, 0xbc, 0xde, 0xcb, - 0x1f, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x59, 0x65, 0x4f, 0x37, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/services/tiller.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/services/tiller.pb.go deleted file mode 100644 index 220accc2a..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/services/tiller.pb.go +++ /dev/null @@ -1,2000 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/services/tiller.proto - -package services - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import chart "k8s.io/helm/pkg/proto/hapi/chart" -import release "k8s.io/helm/pkg/proto/hapi/release" -import version "k8s.io/helm/pkg/proto/hapi/version" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// SortBy defines sort operations. -type ListSort_SortBy int32 - -const ( - ListSort_UNKNOWN ListSort_SortBy = 0 - ListSort_NAME ListSort_SortBy = 1 - ListSort_LAST_RELEASED ListSort_SortBy = 2 - ListSort_CHART_NAME ListSort_SortBy = 3 -) - -var ListSort_SortBy_name = map[int32]string{ - 0: "UNKNOWN", - 1: "NAME", - 2: "LAST_RELEASED", - 3: "CHART_NAME", -} -var ListSort_SortBy_value = map[string]int32{ - "UNKNOWN": 0, - "NAME": 1, - "LAST_RELEASED": 2, - "CHART_NAME": 3, -} - -func (x ListSort_SortBy) String() string { - return proto.EnumName(ListSort_SortBy_name, int32(x)) -} -func (ListSort_SortBy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{1, 0} -} - -// SortOrder defines sort orders to augment sorting operations. -type ListSort_SortOrder int32 - -const ( - ListSort_ASC ListSort_SortOrder = 0 - ListSort_DESC ListSort_SortOrder = 1 -) - -var ListSort_SortOrder_name = map[int32]string{ - 0: "ASC", - 1: "DESC", -} -var ListSort_SortOrder_value = map[string]int32{ - "ASC": 0, - "DESC": 1, -} - -func (x ListSort_SortOrder) String() string { - return proto.EnumName(ListSort_SortOrder_name, int32(x)) -} -func (ListSort_SortOrder) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{1, 1} -} - -// ListReleasesRequest requests a list of releases. -// -// Releases can be retrieved in chunks by setting limit and offset. -// -// Releases can be sorted according to a few pre-determined sort strategies. -type ListReleasesRequest struct { - // Limit is the maximum number of releases to be returned. - Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` - // Offset is the last release name that was seen. The next listing - // operation will start with the name after this one. - // Example: If list one returns albert, bernie, carl, and sets 'next: dennis'. - // dennis is the offset. 
Supplying 'dennis' for the next request should - // cause the next batch to return a set of results starting with 'dennis'. - Offset string `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"` - // SortBy is the sort field that the ListReleases server should sort data before returning. - SortBy ListSort_SortBy `protobuf:"varint,3,opt,name=sort_by,json=sortBy,proto3,enum=hapi.services.tiller.ListSort_SortBy" json:"sort_by,omitempty"` - // Filter is a regular expression used to filter which releases should be listed. - // - // Anything that matches the regexp will be included in the results. - Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` - // SortOrder is the ordering directive used for sorting. - SortOrder ListSort_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=hapi.services.tiller.ListSort_SortOrder" json:"sort_order,omitempty"` - StatusCodes []release.Status_Code `protobuf:"varint,6,rep,packed,name=status_codes,json=statusCodes,proto3,enum=hapi.release.Status_Code" json:"status_codes,omitempty"` - // Namespace is the filter to select releases only from a specific namespace. - Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListReleasesRequest) Reset() { *m = ListReleasesRequest{} } -func (m *ListReleasesRequest) String() string { return proto.CompactTextString(m) } -func (*ListReleasesRequest) ProtoMessage() {} -func (*ListReleasesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{0} -} -func (m *ListReleasesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListReleasesRequest.Unmarshal(m, b) -} -func (m *ListReleasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListReleasesRequest.Marshal(b, m, deterministic) -} -func (dst *ListReleasesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListReleasesRequest.Merge(dst, src) -} -func (m *ListReleasesRequest) XXX_Size() int { - return xxx_messageInfo_ListReleasesRequest.Size(m) -} -func (m *ListReleasesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListReleasesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListReleasesRequest proto.InternalMessageInfo - -func (m *ListReleasesRequest) GetLimit() int64 { - if m != nil { - return m.Limit - } - return 0 -} - -func (m *ListReleasesRequest) GetOffset() string { - if m != nil { - return m.Offset - } - return "" -} - -func (m *ListReleasesRequest) GetSortBy() ListSort_SortBy { - if m != nil { - return m.SortBy - } - return ListSort_UNKNOWN -} - -func (m *ListReleasesRequest) GetFilter() string { - if m != nil { - return m.Filter - } - return "" -} - -func (m *ListReleasesRequest) GetSortOrder() ListSort_SortOrder { - if m != nil { - return m.SortOrder - } - return ListSort_ASC -} - -func (m *ListReleasesRequest) GetStatusCodes() []release.Status_Code { - if m != nil { - return m.StatusCodes - } - return nil -} - -func (m *ListReleasesRequest) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -// ListSort defines sorting fields on a release list. 
-type ListSort struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListSort) Reset() { *m = ListSort{} } -func (m *ListSort) String() string { return proto.CompactTextString(m) } -func (*ListSort) ProtoMessage() {} -func (*ListSort) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{1} -} -func (m *ListSort) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSort.Unmarshal(m, b) -} -func (m *ListSort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSort.Marshal(b, m, deterministic) -} -func (dst *ListSort) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSort.Merge(dst, src) -} -func (m *ListSort) XXX_Size() int { - return xxx_messageInfo_ListSort.Size(m) -} -func (m *ListSort) XXX_DiscardUnknown() { - xxx_messageInfo_ListSort.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSort proto.InternalMessageInfo - -// ListReleasesResponse is a list of releases. -type ListReleasesResponse struct { - // Count is the expected total number of releases to be returned. - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // Next is the name of the next release. If this is other than an empty - // string, it means there are more results. - Next string `protobuf:"bytes,2,opt,name=next,proto3" json:"next,omitempty"` - // Total is the total number of queryable releases. - Total int64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"` - // Releases is the list of found release objects. - Releases []*release.Release `protobuf:"bytes,4,rep,name=releases,proto3" json:"releases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListReleasesResponse) Reset() { *m = ListReleasesResponse{} } -func (m *ListReleasesResponse) String() string { return proto.CompactTextString(m) } -func (*ListReleasesResponse) ProtoMessage() {} -func (*ListReleasesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{2} -} -func (m *ListReleasesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListReleasesResponse.Unmarshal(m, b) -} -func (m *ListReleasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListReleasesResponse.Marshal(b, m, deterministic) -} -func (dst *ListReleasesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListReleasesResponse.Merge(dst, src) -} -func (m *ListReleasesResponse) XXX_Size() int { - return xxx_messageInfo_ListReleasesResponse.Size(m) -} -func (m *ListReleasesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListReleasesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListReleasesResponse proto.InternalMessageInfo - -func (m *ListReleasesResponse) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *ListReleasesResponse) GetNext() string { - if m != nil { - return m.Next - } - return "" -} - -func (m *ListReleasesResponse) GetTotal() int64 { - if m != nil { - return m.Total - } - return 0 -} - -func (m *ListReleasesResponse) GetReleases() []*release.Release { - if m != nil { - return m.Releases - } - return nil -} - -// GetReleaseStatusRequest is a request to get the status of a release. 
-type GetReleaseStatusRequest struct { - // Name is the name of the release - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Version is the version of the release - Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetReleaseStatusRequest) Reset() { *m = GetReleaseStatusRequest{} } -func (m *GetReleaseStatusRequest) String() string { return proto.CompactTextString(m) } -func (*GetReleaseStatusRequest) ProtoMessage() {} -func (*GetReleaseStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{3} -} -func (m *GetReleaseStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetReleaseStatusRequest.Unmarshal(m, b) -} -func (m *GetReleaseStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetReleaseStatusRequest.Marshal(b, m, deterministic) -} -func (dst *GetReleaseStatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetReleaseStatusRequest.Merge(dst, src) -} -func (m *GetReleaseStatusRequest) XXX_Size() int { - return xxx_messageInfo_GetReleaseStatusRequest.Size(m) -} -func (m *GetReleaseStatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetReleaseStatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetReleaseStatusRequest proto.InternalMessageInfo - -func (m *GetReleaseStatusRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *GetReleaseStatusRequest) GetVersion() int32 { - if m != nil { - return m.Version - } - return 0 -} - -// GetReleaseStatusResponse is the response indicating the status of the named release. -type GetReleaseStatusResponse struct { - // Name is the name of the release. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Info contains information about the release. 
- Info *release.Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - // Namespace the release was released into - Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetReleaseStatusResponse) Reset() { *m = GetReleaseStatusResponse{} } -func (m *GetReleaseStatusResponse) String() string { return proto.CompactTextString(m) } -func (*GetReleaseStatusResponse) ProtoMessage() {} -func (*GetReleaseStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{4} -} -func (m *GetReleaseStatusResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetReleaseStatusResponse.Unmarshal(m, b) -} -func (m *GetReleaseStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetReleaseStatusResponse.Marshal(b, m, deterministic) -} -func (dst *GetReleaseStatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetReleaseStatusResponse.Merge(dst, src) -} -func (m *GetReleaseStatusResponse) XXX_Size() int { - return xxx_messageInfo_GetReleaseStatusResponse.Size(m) -} -func (m *GetReleaseStatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetReleaseStatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetReleaseStatusResponse proto.InternalMessageInfo - -func (m *GetReleaseStatusResponse) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *GetReleaseStatusResponse) GetInfo() *release.Info { - if m != nil { - return m.Info - } - return nil -} - -func (m *GetReleaseStatusResponse) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -// GetReleaseContentRequest is a request to get the contents of a release. 
-type GetReleaseContentRequest struct { - // The name of the release - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Version is the version of the release - Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetReleaseContentRequest) Reset() { *m = GetReleaseContentRequest{} } -func (m *GetReleaseContentRequest) String() string { return proto.CompactTextString(m) } -func (*GetReleaseContentRequest) ProtoMessage() {} -func (*GetReleaseContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{5} -} -func (m *GetReleaseContentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetReleaseContentRequest.Unmarshal(m, b) -} -func (m *GetReleaseContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetReleaseContentRequest.Marshal(b, m, deterministic) -} -func (dst *GetReleaseContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetReleaseContentRequest.Merge(dst, src) -} -func (m *GetReleaseContentRequest) XXX_Size() int { - return xxx_messageInfo_GetReleaseContentRequest.Size(m) -} -func (m *GetReleaseContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetReleaseContentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetReleaseContentRequest proto.InternalMessageInfo - -func (m *GetReleaseContentRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *GetReleaseContentRequest) GetVersion() int32 { - if m != nil { - return m.Version - } - return 0 -} - -// GetReleaseContentResponse is a response containing the contents of a release. -type GetReleaseContentResponse struct { - // The release content - Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetReleaseContentResponse) Reset() { *m = GetReleaseContentResponse{} } -func (m *GetReleaseContentResponse) String() string { return proto.CompactTextString(m) } -func (*GetReleaseContentResponse) ProtoMessage() {} -func (*GetReleaseContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{6} -} -func (m *GetReleaseContentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetReleaseContentResponse.Unmarshal(m, b) -} -func (m *GetReleaseContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetReleaseContentResponse.Marshal(b, m, deterministic) -} -func (dst *GetReleaseContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetReleaseContentResponse.Merge(dst, src) -} -func (m *GetReleaseContentResponse) XXX_Size() int { - return xxx_messageInfo_GetReleaseContentResponse.Size(m) -} -func (m *GetReleaseContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetReleaseContentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetReleaseContentResponse proto.InternalMessageInfo - -func (m *GetReleaseContentResponse) GetRelease() *release.Release { - if m != nil { - return m.Release - } - return nil -} - -// UpdateReleaseRequest updates a release. 
-type UpdateReleaseRequest struct { - // The name of the release - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Chart is the protobuf representation of a chart. - Chart *chart.Chart `protobuf:"bytes,2,opt,name=chart,proto3" json:"chart,omitempty"` - // Values is a string containing (unparsed) YAML values. - Values *chart.Config `protobuf:"bytes,3,opt,name=values,proto3" json:"values,omitempty"` - // dry_run, if true, will run through the release logic, but neither create - DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` - // DisableHooks causes the server to skip running any hooks for the upgrade. - DisableHooks bool `protobuf:"varint,5,opt,name=disable_hooks,json=disableHooks,proto3" json:"disable_hooks,omitempty"` - // Performs pods restart for resources if applicable - Recreate bool `protobuf:"varint,6,opt,name=recreate,proto3" json:"recreate,omitempty"` - // timeout specifies the max amount of time any kubernetes client command can run. - Timeout int64 `protobuf:"varint,7,opt,name=timeout,proto3" json:"timeout,omitempty"` - // ResetValues will cause Tiller to ignore stored values, resetting to default values. - ResetValues bool `protobuf:"varint,8,opt,name=reset_values,json=resetValues,proto3" json:"reset_values,omitempty"` - // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state - // before marking the release as successful. It will wait for as long as timeout - Wait bool `protobuf:"varint,9,opt,name=wait,proto3" json:"wait,omitempty"` - // ReuseValues will cause Tiller to reuse the values from the last release. - // This is ignored if reset_values is set. - ReuseValues bool `protobuf:"varint,10,opt,name=reuse_values,json=reuseValues,proto3" json:"reuse_values,omitempty"` - // Force resource update through delete/recreate if needed. 
- Force bool `protobuf:"varint,11,opt,name=force,proto3" json:"force,omitempty"` - // Description, if set, will set the description for the updated release - Description string `protobuf:"bytes,12,opt,name=description,proto3" json:"description,omitempty"` - // Render subchart notes if enabled - SubNotes bool `protobuf:"varint,13,opt,name=subNotes,proto3" json:"subNotes,omitempty"` - // Allow deletion of new resources created in this update when update failed - CleanupOnFail bool `protobuf:"varint,14,opt,name=cleanup_on_fail,json=cleanupOnFail,proto3" json:"cleanup_on_fail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdateReleaseRequest) Reset() { *m = UpdateReleaseRequest{} } -func (m *UpdateReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateReleaseRequest) ProtoMessage() {} -func (*UpdateReleaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{7} -} -func (m *UpdateReleaseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdateReleaseRequest.Unmarshal(m, b) -} -func (m *UpdateReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdateReleaseRequest.Marshal(b, m, deterministic) -} -func (dst *UpdateReleaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateReleaseRequest.Merge(dst, src) -} -func (m *UpdateReleaseRequest) XXX_Size() int { - return xxx_messageInfo_UpdateReleaseRequest.Size(m) -} -func (m *UpdateReleaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateReleaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateReleaseRequest proto.InternalMessageInfo - -func (m *UpdateReleaseRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *UpdateReleaseRequest) GetChart() *chart.Chart { - if m != nil { - return m.Chart - } - return nil -} - -func (m *UpdateReleaseRequest) GetValues() *chart.Config { - if m != nil { - return m.Values - } - return nil -} - -func (m *UpdateReleaseRequest) GetDryRun() bool { - if m != nil { - return m.DryRun - } - return false -} - -func (m *UpdateReleaseRequest) GetDisableHooks() bool { - if m != nil { - return m.DisableHooks - } - return false -} - -func (m *UpdateReleaseRequest) GetRecreate() bool { - if m != nil { - return m.Recreate - } - return false -} - -func (m *UpdateReleaseRequest) GetTimeout() int64 { - if m != nil { - return m.Timeout - } - return 0 -} - -func (m *UpdateReleaseRequest) GetResetValues() bool { - if m != nil { - return m.ResetValues - } - return false -} - -func (m *UpdateReleaseRequest) GetWait() bool { - if m != nil { - return m.Wait - } - return false -} - -func (m *UpdateReleaseRequest) GetReuseValues() bool { - if m != nil { - return m.ReuseValues - } - return false -} - -func (m *UpdateReleaseRequest) GetForce() bool { - if m != nil { - return m.Force - } - return false -} - -func (m *UpdateReleaseRequest) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *UpdateReleaseRequest) GetSubNotes() bool { - if m != nil { - return m.SubNotes - } - return false -} - -func (m *UpdateReleaseRequest) GetCleanupOnFail() bool { - if m != nil { - return m.CleanupOnFail - } - return false -} - -// UpdateReleaseResponse is the response to an update request. 
-type UpdateReleaseResponse struct { - Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdateReleaseResponse) Reset() { *m = UpdateReleaseResponse{} } -func (m *UpdateReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateReleaseResponse) ProtoMessage() {} -func (*UpdateReleaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{8} -} -func (m *UpdateReleaseResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdateReleaseResponse.Unmarshal(m, b) -} -func (m *UpdateReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdateReleaseResponse.Marshal(b, m, deterministic) -} -func (dst *UpdateReleaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateReleaseResponse.Merge(dst, src) -} -func (m *UpdateReleaseResponse) XXX_Size() int { - return xxx_messageInfo_UpdateReleaseResponse.Size(m) -} -func (m *UpdateReleaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateReleaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateReleaseResponse proto.InternalMessageInfo - -func (m *UpdateReleaseResponse) GetRelease() *release.Release { - if m != nil { - return m.Release - } - return nil -} - -type RollbackReleaseRequest struct { - // The name of the release - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // dry_run, if true, will run through the release logic but no create - DryRun bool `protobuf:"varint,2,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` - // DisableHooks causes the server to skip running any hooks for the rollback - DisableHooks bool `protobuf:"varint,3,opt,name=disable_hooks,json=disableHooks,proto3" json:"disable_hooks,omitempty"` - // Version is the version of the release to deploy. - Version int32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - // Performs pods restart for resources if applicable - Recreate bool `protobuf:"varint,5,opt,name=recreate,proto3" json:"recreate,omitempty"` - // timeout specifies the max amount of time any kubernetes client command can run. - Timeout int64 `protobuf:"varint,6,opt,name=timeout,proto3" json:"timeout,omitempty"` - // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state - // before marking the release as successful. It will wait for as long as timeout - Wait bool `protobuf:"varint,7,opt,name=wait,proto3" json:"wait,omitempty"` - // Force resource update through delete/recreate if needed. 
- Force bool `protobuf:"varint,8,opt,name=force,proto3" json:"force,omitempty"` - // Description, if set, will set the description for the rollback - Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"` - // Allow deletion of new resources created in this rollback when rollback failed - CleanupOnFail bool `protobuf:"varint,10,opt,name=cleanup_on_fail,json=cleanupOnFail,proto3" json:"cleanup_on_fail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RollbackReleaseRequest) Reset() { *m = RollbackReleaseRequest{} } -func (m *RollbackReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*RollbackReleaseRequest) ProtoMessage() {} -func (*RollbackReleaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{9} -} -func (m *RollbackReleaseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RollbackReleaseRequest.Unmarshal(m, b) -} -func (m *RollbackReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RollbackReleaseRequest.Marshal(b, m, deterministic) -} -func (dst *RollbackReleaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollbackReleaseRequest.Merge(dst, src) -} -func (m *RollbackReleaseRequest) XXX_Size() int { - return xxx_messageInfo_RollbackReleaseRequest.Size(m) -} -func (m *RollbackReleaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RollbackReleaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RollbackReleaseRequest proto.InternalMessageInfo - -func (m *RollbackReleaseRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *RollbackReleaseRequest) GetDryRun() bool { - if m != nil { - return m.DryRun - } - return false -} - -func (m *RollbackReleaseRequest) GetDisableHooks() bool { - if m != nil { - return m.DisableHooks - } - return false -} - -func (m *RollbackReleaseRequest) GetVersion() int32 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *RollbackReleaseRequest) GetRecreate() bool { - if m != nil { - return m.Recreate - } - return false -} - -func (m *RollbackReleaseRequest) GetTimeout() int64 { - if m != nil { - return m.Timeout - } - return 0 -} - -func (m *RollbackReleaseRequest) GetWait() bool { - if m != nil { - return m.Wait - } - return false -} - -func (m *RollbackReleaseRequest) GetForce() bool { - if m != nil { - return m.Force - } - return false -} - -func (m *RollbackReleaseRequest) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *RollbackReleaseRequest) GetCleanupOnFail() bool { - if m != nil { - return m.CleanupOnFail - } - return false -} - -// RollbackReleaseResponse is the response to an update request. 
-type RollbackReleaseResponse struct { - Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RollbackReleaseResponse) Reset() { *m = RollbackReleaseResponse{} } -func (m *RollbackReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*RollbackReleaseResponse) ProtoMessage() {} -func (*RollbackReleaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{10} -} -func (m *RollbackReleaseResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RollbackReleaseResponse.Unmarshal(m, b) -} -func (m *RollbackReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RollbackReleaseResponse.Marshal(b, m, deterministic) -} -func (dst *RollbackReleaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollbackReleaseResponse.Merge(dst, src) -} -func (m *RollbackReleaseResponse) XXX_Size() int { - return xxx_messageInfo_RollbackReleaseResponse.Size(m) -} -func (m *RollbackReleaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RollbackReleaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RollbackReleaseResponse proto.InternalMessageInfo - -func (m *RollbackReleaseResponse) GetRelease() *release.Release { - if m != nil { - return m.Release - } - return nil -} - -// InstallReleaseRequest is the request for an installation of a chart. -type InstallReleaseRequest struct { - // Chart is the protobuf representation of a chart. - Chart *chart.Chart `protobuf:"bytes,1,opt,name=chart,proto3" json:"chart,omitempty"` - // Values is a string containing (unparsed) YAML values. - Values *chart.Config `protobuf:"bytes,2,opt,name=values,proto3" json:"values,omitempty"` - // DryRun, if true, will run through the release logic, but neither create - // a release object nor deploy to Kubernetes. The release object returned - // in the response will be fake. - DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` - // Name is the candidate release name. This must be unique to the - // namespace, otherwise the server will return an error. If it is not - // supplied, the server will autogenerate one. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // DisableHooks causes the server to skip running any hooks for the install. - DisableHooks bool `protobuf:"varint,5,opt,name=disable_hooks,json=disableHooks,proto3" json:"disable_hooks,omitempty"` - // Namespace is the kubernetes namespace of the release. - Namespace string `protobuf:"bytes,6,opt,name=namespace,proto3" json:"namespace,omitempty"` - // Reuse_name requests that Tiller re-uses a name, instead of erroring out. - ReuseName bool `protobuf:"varint,7,opt,name=reuse_name,json=reuseName,proto3" json:"reuse_name,omitempty"` - // timeout specifies the max amount of time any kubernetes client command can run. - Timeout int64 `protobuf:"varint,8,opt,name=timeout,proto3" json:"timeout,omitempty"` - // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state - // before marking the release as successful. 
It will wait for as long as timeout - Wait bool `protobuf:"varint,9,opt,name=wait,proto3" json:"wait,omitempty"` - DisableCrdHook bool `protobuf:"varint,10,opt,name=disable_crd_hook,json=disableCrdHook,proto3" json:"disable_crd_hook,omitempty"` - // Description, if set, will set the description for the installed release - Description string `protobuf:"bytes,11,opt,name=description,proto3" json:"description,omitempty"` - SubNotes bool `protobuf:"varint,12,opt,name=subNotes,proto3" json:"subNotes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InstallReleaseRequest) Reset() { *m = InstallReleaseRequest{} } -func (m *InstallReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*InstallReleaseRequest) ProtoMessage() {} -func (*InstallReleaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{11} -} -func (m *InstallReleaseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InstallReleaseRequest.Unmarshal(m, b) -} -func (m *InstallReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InstallReleaseRequest.Marshal(b, m, deterministic) -} -func (dst *InstallReleaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstallReleaseRequest.Merge(dst, src) -} -func (m *InstallReleaseRequest) XXX_Size() int { - return xxx_messageInfo_InstallReleaseRequest.Size(m) -} -func (m *InstallReleaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InstallReleaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_InstallReleaseRequest proto.InternalMessageInfo - -func (m *InstallReleaseRequest) GetChart() *chart.Chart { - if m != nil { - return m.Chart - } - return nil -} - -func (m *InstallReleaseRequest) GetValues() *chart.Config { - if m != nil { - return m.Values - } - return nil -} - -func (m *InstallReleaseRequest) GetDryRun() bool { - if m != nil { - return m.DryRun - } - return false -} - -func (m *InstallReleaseRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *InstallReleaseRequest) GetDisableHooks() bool { - if m != nil { - return m.DisableHooks - } - return false -} - -func (m *InstallReleaseRequest) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *InstallReleaseRequest) GetReuseName() bool { - if m != nil { - return m.ReuseName - } - return false -} - -func (m *InstallReleaseRequest) GetTimeout() int64 { - if m != nil { - return m.Timeout - } - return 0 -} - -func (m *InstallReleaseRequest) GetWait() bool { - if m != nil { - return m.Wait - } - return false -} - -func (m *InstallReleaseRequest) GetDisableCrdHook() bool { - if m != nil { - return m.DisableCrdHook - } - return false -} - -func (m *InstallReleaseRequest) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *InstallReleaseRequest) GetSubNotes() bool { - if m != nil { - return m.SubNotes - } - return false -} - -// InstallReleaseResponse is the response from a release installation. 
-type InstallReleaseResponse struct { - Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InstallReleaseResponse) Reset() { *m = InstallReleaseResponse{} } -func (m *InstallReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*InstallReleaseResponse) ProtoMessage() {} -func (*InstallReleaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{12} -} -func (m *InstallReleaseResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InstallReleaseResponse.Unmarshal(m, b) -} -func (m *InstallReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InstallReleaseResponse.Marshal(b, m, deterministic) -} -func (dst *InstallReleaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_InstallReleaseResponse.Merge(dst, src) -} -func (m *InstallReleaseResponse) XXX_Size() int { - return xxx_messageInfo_InstallReleaseResponse.Size(m) -} -func (m *InstallReleaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_InstallReleaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_InstallReleaseResponse proto.InternalMessageInfo - -func (m *InstallReleaseResponse) GetRelease() *release.Release { - if m != nil { - return m.Release - } - return nil -} - -// UninstallReleaseRequest represents a request to uninstall a named release. -type UninstallReleaseRequest struct { - // Name is the name of the release to delete. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // DisableHooks causes the server to skip running any hooks for the uninstall. - DisableHooks bool `protobuf:"varint,2,opt,name=disable_hooks,json=disableHooks,proto3" json:"disable_hooks,omitempty"` - // Purge removes the release from the store and make its name free for later use. - Purge bool `protobuf:"varint,3,opt,name=purge,proto3" json:"purge,omitempty"` - // timeout specifies the max amount of time any kubernetes client command can run. 
- Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` - // Description, if set, will set the description for the uninstalled release - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninstallReleaseRequest) Reset() { *m = UninstallReleaseRequest{} } -func (m *UninstallReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*UninstallReleaseRequest) ProtoMessage() {} -func (*UninstallReleaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{13} -} -func (m *UninstallReleaseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninstallReleaseRequest.Unmarshal(m, b) -} -func (m *UninstallReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninstallReleaseRequest.Marshal(b, m, deterministic) -} -func (dst *UninstallReleaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninstallReleaseRequest.Merge(dst, src) -} -func (m *UninstallReleaseRequest) XXX_Size() int { - return xxx_messageInfo_UninstallReleaseRequest.Size(m) -} -func (m *UninstallReleaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UninstallReleaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UninstallReleaseRequest proto.InternalMessageInfo - -func (m *UninstallReleaseRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *UninstallReleaseRequest) GetDisableHooks() bool { - if m != nil { - return m.DisableHooks - } - return false -} - -func (m *UninstallReleaseRequest) GetPurge() bool { - if m != nil { - return m.Purge - } - return false -} - -func (m *UninstallReleaseRequest) GetTimeout() int64 { - if m != nil { - return m.Timeout - } - return 0 -} - -func (m *UninstallReleaseRequest) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -// UninstallReleaseResponse represents a successful response to an uninstall request. -type UninstallReleaseResponse struct { - // Release is the release that was marked deleted. 
- Release *release.Release `protobuf:"bytes,1,opt,name=release,proto3" json:"release,omitempty"` - // Info is an uninstall message - Info string `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninstallReleaseResponse) Reset() { *m = UninstallReleaseResponse{} } -func (m *UninstallReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*UninstallReleaseResponse) ProtoMessage() {} -func (*UninstallReleaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{14} -} -func (m *UninstallReleaseResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninstallReleaseResponse.Unmarshal(m, b) -} -func (m *UninstallReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninstallReleaseResponse.Marshal(b, m, deterministic) -} -func (dst *UninstallReleaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninstallReleaseResponse.Merge(dst, src) -} -func (m *UninstallReleaseResponse) XXX_Size() int { - return xxx_messageInfo_UninstallReleaseResponse.Size(m) -} -func (m *UninstallReleaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UninstallReleaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UninstallReleaseResponse proto.InternalMessageInfo - -func (m *UninstallReleaseResponse) GetRelease() *release.Release { - if m != nil { - return m.Release - } - return nil -} - -func (m *UninstallReleaseResponse) GetInfo() string { - if m != nil { - return m.Info - } - return "" -} - -// GetVersionRequest requests for version information. -type GetVersionRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } -func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionRequest) ProtoMessage() {} -func (*GetVersionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{15} -} -func (m *GetVersionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionRequest.Unmarshal(m, b) -} -func (m *GetVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionRequest.Marshal(b, m, deterministic) -} -func (dst *GetVersionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionRequest.Merge(dst, src) -} -func (m *GetVersionRequest) XXX_Size() int { - return xxx_messageInfo_GetVersionRequest.Size(m) -} -func (m *GetVersionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionRequest proto.InternalMessageInfo - -type GetVersionResponse struct { - Version *version.Version `protobuf:"bytes,1,opt,name=Version,proto3" json:"Version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionResponse) Reset() { *m = GetVersionResponse{} } -func (m *GetVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetVersionResponse) ProtoMessage() {} -func (*GetVersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{16} -} -func (m *GetVersionResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_GetVersionResponse.Unmarshal(m, b) -} -func (m *GetVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionResponse.Marshal(b, m, deterministic) -} -func (dst *GetVersionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionResponse.Merge(dst, src) -} -func (m *GetVersionResponse) XXX_Size() int { - return xxx_messageInfo_GetVersionResponse.Size(m) -} -func (m *GetVersionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionResponse proto.InternalMessageInfo - -func (m *GetVersionResponse) GetVersion() *version.Version { - if m != nil { - return m.Version - } - return nil -} - -// GetHistoryRequest requests a release's history. -type GetHistoryRequest struct { - // The name of the release. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The maximum number of releases to include. - Max int32 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHistoryRequest) Reset() { *m = GetHistoryRequest{} } -func (m *GetHistoryRequest) String() string { return proto.CompactTextString(m) } -func (*GetHistoryRequest) ProtoMessage() {} -func (*GetHistoryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{17} -} -func (m *GetHistoryRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHistoryRequest.Unmarshal(m, b) -} -func (m *GetHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHistoryRequest.Marshal(b, m, deterministic) -} -func (dst *GetHistoryRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHistoryRequest.Merge(dst, src) -} -func (m *GetHistoryRequest) XXX_Size() int { - return xxx_messageInfo_GetHistoryRequest.Size(m) -} -func (m *GetHistoryRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetHistoryRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHistoryRequest proto.InternalMessageInfo - -func (m *GetHistoryRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *GetHistoryRequest) GetMax() int32 { - if m != nil { - return m.Max - } - return 0 -} - -// GetHistoryResponse is received in response to a GetHistory rpc. 
-type GetHistoryResponse struct { - Releases []*release.Release `protobuf:"bytes,1,rep,name=releases,proto3" json:"releases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHistoryResponse) Reset() { *m = GetHistoryResponse{} } -func (m *GetHistoryResponse) String() string { return proto.CompactTextString(m) } -func (*GetHistoryResponse) ProtoMessage() {} -func (*GetHistoryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{18} -} -func (m *GetHistoryResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHistoryResponse.Unmarshal(m, b) -} -func (m *GetHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHistoryResponse.Marshal(b, m, deterministic) -} -func (dst *GetHistoryResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHistoryResponse.Merge(dst, src) -} -func (m *GetHistoryResponse) XXX_Size() int { - return xxx_messageInfo_GetHistoryResponse.Size(m) -} -func (m *GetHistoryResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetHistoryResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHistoryResponse proto.InternalMessageInfo - -func (m *GetHistoryResponse) GetReleases() []*release.Release { - if m != nil { - return m.Releases - } - return nil -} - -// TestReleaseRequest is a request to get the status of a release. -type TestReleaseRequest struct { - // Name is the name of the release - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // timeout specifies the max amount of time any kubernetes client command can run. - Timeout int64 `protobuf:"varint,2,opt,name=timeout,proto3" json:"timeout,omitempty"` - // cleanup specifies whether or not to attempt pod deletion after test completes - Cleanup bool `protobuf:"varint,3,opt,name=cleanup,proto3" json:"cleanup,omitempty"` - // parallel specifies whether or not to run test pods in parallel - Parallel bool `protobuf:"varint,4,opt,name=parallel,proto3" json:"parallel,omitempty"` - // maximum number of test pods to run in parallel - MaxParallel uint32 `protobuf:"varint,5,opt,name=max_parallel,json=maxParallel,proto3" json:"max_parallel,omitempty"` - // logs specifies whether or not to dump the logs from the test pods - Logs bool `protobuf:"varint,6,opt,name=logs,proto3" json:"logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TestReleaseRequest) Reset() { *m = TestReleaseRequest{} } -func (m *TestReleaseRequest) String() string { return proto.CompactTextString(m) } -func (*TestReleaseRequest) ProtoMessage() {} -func (*TestReleaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{19} -} -func (m *TestReleaseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestReleaseRequest.Unmarshal(m, b) -} -func (m *TestReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestReleaseRequest.Marshal(b, m, deterministic) -} -func (dst *TestReleaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestReleaseRequest.Merge(dst, src) -} -func (m *TestReleaseRequest) XXX_Size() int { - return xxx_messageInfo_TestReleaseRequest.Size(m) -} -func (m *TestReleaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TestReleaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_TestReleaseRequest proto.InternalMessageInfo - 
-func (m *TestReleaseRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *TestReleaseRequest) GetTimeout() int64 { - if m != nil { - return m.Timeout - } - return 0 -} - -func (m *TestReleaseRequest) GetCleanup() bool { - if m != nil { - return m.Cleanup - } - return false -} - -func (m *TestReleaseRequest) GetParallel() bool { - if m != nil { - return m.Parallel - } - return false -} - -func (m *TestReleaseRequest) GetMaxParallel() uint32 { - if m != nil { - return m.MaxParallel - } - return 0 -} - -func (m *TestReleaseRequest) GetLogs() bool { - if m != nil { - return m.Logs - } - return false -} - -// TestReleaseResponse represents a message from executing a test -type TestReleaseResponse struct { - Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` - Status release.TestRun_Status `protobuf:"varint,2,opt,name=status,proto3,enum=hapi.release.TestRun_Status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TestReleaseResponse) Reset() { *m = TestReleaseResponse{} } -func (m *TestReleaseResponse) String() string { return proto.CompactTextString(m) } -func (*TestReleaseResponse) ProtoMessage() {} -func (*TestReleaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_tiller_0094511522f6b040, []int{20} -} -func (m *TestReleaseResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestReleaseResponse.Unmarshal(m, b) -} -func (m *TestReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestReleaseResponse.Marshal(b, m, deterministic) -} -func (dst *TestReleaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestReleaseResponse.Merge(dst, src) -} -func (m *TestReleaseResponse) XXX_Size() int { - return xxx_messageInfo_TestReleaseResponse.Size(m) -} -func (m *TestReleaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TestReleaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TestReleaseResponse proto.InternalMessageInfo - -func (m *TestReleaseResponse) GetMsg() string { - if m != nil { - return m.Msg - } - return "" -} - -func (m *TestReleaseResponse) GetStatus() release.TestRun_Status { - if m != nil { - return m.Status - } - return release.TestRun_UNKNOWN -} - -func init() { - proto.RegisterType((*ListReleasesRequest)(nil), "hapi.services.tiller.ListReleasesRequest") - proto.RegisterType((*ListSort)(nil), "hapi.services.tiller.ListSort") - proto.RegisterType((*ListReleasesResponse)(nil), "hapi.services.tiller.ListReleasesResponse") - proto.RegisterType((*GetReleaseStatusRequest)(nil), "hapi.services.tiller.GetReleaseStatusRequest") - proto.RegisterType((*GetReleaseStatusResponse)(nil), "hapi.services.tiller.GetReleaseStatusResponse") - proto.RegisterType((*GetReleaseContentRequest)(nil), "hapi.services.tiller.GetReleaseContentRequest") - proto.RegisterType((*GetReleaseContentResponse)(nil), "hapi.services.tiller.GetReleaseContentResponse") - proto.RegisterType((*UpdateReleaseRequest)(nil), "hapi.services.tiller.UpdateReleaseRequest") - proto.RegisterType((*UpdateReleaseResponse)(nil), "hapi.services.tiller.UpdateReleaseResponse") - proto.RegisterType((*RollbackReleaseRequest)(nil), "hapi.services.tiller.RollbackReleaseRequest") - proto.RegisterType((*RollbackReleaseResponse)(nil), "hapi.services.tiller.RollbackReleaseResponse") - proto.RegisterType((*InstallReleaseRequest)(nil), "hapi.services.tiller.InstallReleaseRequest") - 
proto.RegisterType((*InstallReleaseResponse)(nil), "hapi.services.tiller.InstallReleaseResponse") - proto.RegisterType((*UninstallReleaseRequest)(nil), "hapi.services.tiller.UninstallReleaseRequest") - proto.RegisterType((*UninstallReleaseResponse)(nil), "hapi.services.tiller.UninstallReleaseResponse") - proto.RegisterType((*GetVersionRequest)(nil), "hapi.services.tiller.GetVersionRequest") - proto.RegisterType((*GetVersionResponse)(nil), "hapi.services.tiller.GetVersionResponse") - proto.RegisterType((*GetHistoryRequest)(nil), "hapi.services.tiller.GetHistoryRequest") - proto.RegisterType((*GetHistoryResponse)(nil), "hapi.services.tiller.GetHistoryResponse") - proto.RegisterType((*TestReleaseRequest)(nil), "hapi.services.tiller.TestReleaseRequest") - proto.RegisterType((*TestReleaseResponse)(nil), "hapi.services.tiller.TestReleaseResponse") - proto.RegisterEnum("hapi.services.tiller.ListSort_SortBy", ListSort_SortBy_name, ListSort_SortBy_value) - proto.RegisterEnum("hapi.services.tiller.ListSort_SortOrder", ListSort_SortOrder_name, ListSort_SortOrder_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ReleaseServiceClient is the client API for ReleaseService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ReleaseServiceClient interface { - // ListReleases retrieves release history. - // TODO: Allow filtering the set of releases by - // release status. By default, ListAllReleases returns the releases who - // current status is "Active". - ListReleases(ctx context.Context, in *ListReleasesRequest, opts ...grpc.CallOption) (ReleaseService_ListReleasesClient, error) - // GetReleasesStatus retrieves status information for the specified release. - GetReleaseStatus(ctx context.Context, in *GetReleaseStatusRequest, opts ...grpc.CallOption) (*GetReleaseStatusResponse, error) - // GetReleaseContent retrieves the release content (chart + value) for the specified release. - GetReleaseContent(ctx context.Context, in *GetReleaseContentRequest, opts ...grpc.CallOption) (*GetReleaseContentResponse, error) - // UpdateRelease updates release content. - UpdateRelease(ctx context.Context, in *UpdateReleaseRequest, opts ...grpc.CallOption) (*UpdateReleaseResponse, error) - // InstallRelease requests installation of a chart as a new release. - InstallRelease(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*InstallReleaseResponse, error) - // UninstallRelease requests deletion of a named release. - UninstallRelease(ctx context.Context, in *UninstallReleaseRequest, opts ...grpc.CallOption) (*UninstallReleaseResponse, error) - // GetVersion returns the current version of the server. - GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) - // RollbackRelease rolls back a release to a previous version. - RollbackRelease(ctx context.Context, in *RollbackReleaseRequest, opts ...grpc.CallOption) (*RollbackReleaseResponse, error) - // ReleaseHistory retrieves a release's history. 
- GetHistory(ctx context.Context, in *GetHistoryRequest, opts ...grpc.CallOption) (*GetHistoryResponse, error) - // RunReleaseTest executes the tests defined of a named release - RunReleaseTest(ctx context.Context, in *TestReleaseRequest, opts ...grpc.CallOption) (ReleaseService_RunReleaseTestClient, error) -} - -type releaseServiceClient struct { - cc *grpc.ClientConn -} - -func NewReleaseServiceClient(cc *grpc.ClientConn) ReleaseServiceClient { - return &releaseServiceClient{cc} -} - -func (c *releaseServiceClient) ListReleases(ctx context.Context, in *ListReleasesRequest, opts ...grpc.CallOption) (ReleaseService_ListReleasesClient, error) { - stream, err := c.cc.NewStream(ctx, &_ReleaseService_serviceDesc.Streams[0], "/hapi.services.tiller.ReleaseService/ListReleases", opts...) - if err != nil { - return nil, err - } - x := &releaseServiceListReleasesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type ReleaseService_ListReleasesClient interface { - Recv() (*ListReleasesResponse, error) - grpc.ClientStream -} - -type releaseServiceListReleasesClient struct { - grpc.ClientStream -} - -func (x *releaseServiceListReleasesClient) Recv() (*ListReleasesResponse, error) { - m := new(ListReleasesResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *releaseServiceClient) GetReleaseStatus(ctx context.Context, in *GetReleaseStatusRequest, opts ...grpc.CallOption) (*GetReleaseStatusResponse, error) { - out := new(GetReleaseStatusResponse) - err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetReleaseStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *releaseServiceClient) GetReleaseContent(ctx context.Context, in *GetReleaseContentRequest, opts ...grpc.CallOption) (*GetReleaseContentResponse, error) { - out := new(GetReleaseContentResponse) - err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetReleaseContent", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *releaseServiceClient) UpdateRelease(ctx context.Context, in *UpdateReleaseRequest, opts ...grpc.CallOption) (*UpdateReleaseResponse, error) { - out := new(UpdateReleaseResponse) - err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/UpdateRelease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *releaseServiceClient) InstallRelease(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*InstallReleaseResponse, error) { - out := new(InstallReleaseResponse) - err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/InstallRelease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *releaseServiceClient) UninstallRelease(ctx context.Context, in *UninstallReleaseRequest, opts ...grpc.CallOption) (*UninstallReleaseResponse, error) { - out := new(UninstallReleaseResponse) - err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/UninstallRelease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *releaseServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) { - out := new(GetVersionResponse) - err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetVersion", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *releaseServiceClient) RollbackRelease(ctx context.Context, in *RollbackReleaseRequest, opts ...grpc.CallOption) (*RollbackReleaseResponse, error) { - out := new(RollbackReleaseResponse) - err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/RollbackRelease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *releaseServiceClient) GetHistory(ctx context.Context, in *GetHistoryRequest, opts ...grpc.CallOption) (*GetHistoryResponse, error) { - out := new(GetHistoryResponse) - err := c.cc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetHistory", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *releaseServiceClient) RunReleaseTest(ctx context.Context, in *TestReleaseRequest, opts ...grpc.CallOption) (ReleaseService_RunReleaseTestClient, error) { - stream, err := c.cc.NewStream(ctx, &_ReleaseService_serviceDesc.Streams[1], "/hapi.services.tiller.ReleaseService/RunReleaseTest", opts...) - if err != nil { - return nil, err - } - x := &releaseServiceRunReleaseTestClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type ReleaseService_RunReleaseTestClient interface { - Recv() (*TestReleaseResponse, error) - grpc.ClientStream -} - -type releaseServiceRunReleaseTestClient struct { - grpc.ClientStream -} - -func (x *releaseServiceRunReleaseTestClient) Recv() (*TestReleaseResponse, error) { - m := new(TestReleaseResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ReleaseServiceServer is the server API for ReleaseService service. -type ReleaseServiceServer interface { - // ListReleases retrieves release history. - // TODO: Allow filtering the set of releases by - // release status. By default, ListAllReleases returns the releases who - // current status is "Active". - ListReleases(*ListReleasesRequest, ReleaseService_ListReleasesServer) error - // GetReleasesStatus retrieves status information for the specified release. - GetReleaseStatus(context.Context, *GetReleaseStatusRequest) (*GetReleaseStatusResponse, error) - // GetReleaseContent retrieves the release content (chart + value) for the specified release. - GetReleaseContent(context.Context, *GetReleaseContentRequest) (*GetReleaseContentResponse, error) - // UpdateRelease updates release content. - UpdateRelease(context.Context, *UpdateReleaseRequest) (*UpdateReleaseResponse, error) - // InstallRelease requests installation of a chart as a new release. - InstallRelease(context.Context, *InstallReleaseRequest) (*InstallReleaseResponse, error) - // UninstallRelease requests deletion of a named release. - UninstallRelease(context.Context, *UninstallReleaseRequest) (*UninstallReleaseResponse, error) - // GetVersion returns the current version of the server. - GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) - // RollbackRelease rolls back a release to a previous version. - RollbackRelease(context.Context, *RollbackReleaseRequest) (*RollbackReleaseResponse, error) - // ReleaseHistory retrieves a release's history. 
- GetHistory(context.Context, *GetHistoryRequest) (*GetHistoryResponse, error) - // RunReleaseTest executes the tests defined of a named release - RunReleaseTest(*TestReleaseRequest, ReleaseService_RunReleaseTestServer) error -} - -func RegisterReleaseServiceServer(s *grpc.Server, srv ReleaseServiceServer) { - s.RegisterService(&_ReleaseService_serviceDesc, srv) -} - -func _ReleaseService_ListReleases_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListReleasesRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ReleaseServiceServer).ListReleases(m, &releaseServiceListReleasesServer{stream}) -} - -type ReleaseService_ListReleasesServer interface { - Send(*ListReleasesResponse) error - grpc.ServerStream -} - -type releaseServiceListReleasesServer struct { - grpc.ServerStream -} - -func (x *releaseServiceListReleasesServer) Send(m *ListReleasesResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _ReleaseService_GetReleaseStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetReleaseStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReleaseServiceServer).GetReleaseStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hapi.services.tiller.ReleaseService/GetReleaseStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReleaseServiceServer).GetReleaseStatus(ctx, req.(*GetReleaseStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ReleaseService_GetReleaseContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetReleaseContentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReleaseServiceServer).GetReleaseContent(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hapi.services.tiller.ReleaseService/GetReleaseContent", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReleaseServiceServer).GetReleaseContent(ctx, req.(*GetReleaseContentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ReleaseService_UpdateRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateReleaseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReleaseServiceServer).UpdateRelease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hapi.services.tiller.ReleaseService/UpdateRelease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReleaseServiceServer).UpdateRelease(ctx, req.(*UpdateReleaseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ReleaseService_InstallRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InstallReleaseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReleaseServiceServer).InstallRelease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hapi.services.tiller.ReleaseService/InstallRelease", 
- } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReleaseServiceServer).InstallRelease(ctx, req.(*InstallReleaseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ReleaseService_UninstallRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UninstallReleaseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReleaseServiceServer).UninstallRelease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hapi.services.tiller.ReleaseService/UninstallRelease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReleaseServiceServer).UninstallRelease(ctx, req.(*UninstallReleaseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ReleaseService_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReleaseServiceServer).GetVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hapi.services.tiller.ReleaseService/GetVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReleaseServiceServer).GetVersion(ctx, req.(*GetVersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ReleaseService_RollbackRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RollbackReleaseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReleaseServiceServer).RollbackRelease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hapi.services.tiller.ReleaseService/RollbackRelease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReleaseServiceServer).RollbackRelease(ctx, req.(*RollbackReleaseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ReleaseService_GetHistory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetHistoryRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReleaseServiceServer).GetHistory(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/hapi.services.tiller.ReleaseService/GetHistory", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReleaseServiceServer).GetHistory(ctx, req.(*GetHistoryRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ReleaseService_RunReleaseTest_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(TestReleaseRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ReleaseServiceServer).RunReleaseTest(m, &releaseServiceRunReleaseTestServer{stream}) -} - -type ReleaseService_RunReleaseTestServer interface { - Send(*TestReleaseResponse) error - grpc.ServerStream -} - -type releaseServiceRunReleaseTestServer struct { - grpc.ServerStream -} - -func (x *releaseServiceRunReleaseTestServer) Send(m *TestReleaseResponse) error { - return 
x.ServerStream.SendMsg(m) -} - -var _ReleaseService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "hapi.services.tiller.ReleaseService", - HandlerType: (*ReleaseServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetReleaseStatus", - Handler: _ReleaseService_GetReleaseStatus_Handler, - }, - { - MethodName: "GetReleaseContent", - Handler: _ReleaseService_GetReleaseContent_Handler, - }, - { - MethodName: "UpdateRelease", - Handler: _ReleaseService_UpdateRelease_Handler, - }, - { - MethodName: "InstallRelease", - Handler: _ReleaseService_InstallRelease_Handler, - }, - { - MethodName: "UninstallRelease", - Handler: _ReleaseService_UninstallRelease_Handler, - }, - { - MethodName: "GetVersion", - Handler: _ReleaseService_GetVersion_Handler, - }, - { - MethodName: "RollbackRelease", - Handler: _ReleaseService_RollbackRelease_Handler, - }, - { - MethodName: "GetHistory", - Handler: _ReleaseService_GetHistory_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ListReleases", - Handler: _ReleaseService_ListReleases_Handler, - ServerStreams: true, - }, - { - StreamName: "RunReleaseTest", - Handler: _ReleaseService_RunReleaseTest_Handler, - ServerStreams: true, - }, - }, - Metadata: "hapi/services/tiller.proto", -} - -func init() { proto.RegisterFile("hapi/services/tiller.proto", fileDescriptor_tiller_0094511522f6b040) } - -var fileDescriptor_tiller_0094511522f6b040 = []byte{ - // 1369 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0xdd, 0x72, 0x1b, 0xc5, - 0x12, 0xb6, 0xb4, 0xfa, 0x6d, 0xd9, 0x8a, 0x32, 0x71, 0xec, 0x8d, 0x4e, 0xce, 0x29, 0x9f, 0xa5, - 0x48, 0x94, 0x40, 0x64, 0x30, 0xdc, 0x50, 0x45, 0x51, 0xe5, 0x28, 0xc6, 0x0e, 0x18, 0x87, 0x5a, - 0x27, 0xa1, 0x8a, 0x2a, 0x4a, 0x35, 0x96, 0x46, 0xce, 0x92, 0xd5, 0x8e, 0xd8, 0x99, 0x35, 0xf6, - 0x23, 0x70, 0xc9, 0x3b, 0x70, 0x0d, 0xaf, 0xc0, 0x2d, 0xcf, 0xc0, 0xcb, 0x50, 0xf3, 0xb7, 0xd6, - 0xac, 0x76, 0x15, 0xe1, 0x1b, 0xed, 0xcc, 0x74, 0x4f, 0x77, 0xcf, 0xf7, 0x4d, 0xf7, 0xb4, 0x0d, - 0xdd, 0x37, 0x78, 0x16, 0xec, 0x32, 0x12, 0x5f, 0x04, 0x23, 0xc2, 0x76, 0x79, 0x10, 0x86, 0x24, - 0xee, 0xcf, 0x62, 0xca, 0x29, 0xda, 0x14, 0xb2, 0xbe, 0x91, 0xf5, 0x95, 0xac, 0xbb, 0x25, 0x77, - 0x8c, 0xde, 0xe0, 0x98, 0xab, 0x5f, 0xa5, 0xdd, 0xdd, 0x9e, 0x5f, 0xa7, 0xd1, 0x24, 0x38, 0xd7, - 0x02, 0xe5, 0x22, 0x26, 0x21, 0xc1, 0x8c, 0x98, 0xaf, 0xb5, 0xc9, 0xc8, 0x82, 0x68, 0x42, 0xb5, - 0xe0, 0x3f, 0x96, 0x80, 0x13, 0xc6, 0x87, 0x71, 0x12, 0x69, 0xe1, 0x3d, 0x4b, 0xc8, 0x38, 0xe6, - 0x09, 0xb3, 0x9c, 0x5d, 0x90, 0x98, 0x05, 0x34, 0x32, 0x5f, 0x25, 0xf3, 0xfe, 0x2c, 0xc3, 0x9d, - 0xe3, 0x80, 0x71, 0x5f, 0x6d, 0x64, 0x3e, 0xf9, 0x29, 0x21, 0x8c, 0xa3, 0x4d, 0xa8, 0x86, 0xc1, - 0x34, 0xe0, 0x6e, 0x69, 0xa7, 0xd4, 0x73, 0x7c, 0x35, 0x41, 0x5b, 0x50, 0xa3, 0x93, 0x09, 0x23, - 0xdc, 0x2d, 0xef, 0x94, 0x7a, 0x4d, 0x5f, 0xcf, 0xd0, 0x17, 0x50, 0x67, 0x34, 0xe6, 0xc3, 0xb3, - 0x2b, 0xd7, 0xd9, 0x29, 0xf5, 0xda, 0x7b, 0xef, 0xf7, 0xf3, 0x70, 0xea, 0x0b, 0x4f, 0xa7, 0x34, - 0xe6, 0x7d, 0xf1, 0xf3, 0xf4, 0xca, 0xaf, 0x31, 0xf9, 0x15, 0x76, 0x27, 0x41, 0xc8, 0x49, 0xec, - 0x56, 0x94, 0x5d, 0x35, 0x43, 0x87, 0x00, 0xd2, 0x2e, 0x8d, 0xc7, 0x24, 0x76, 0xab, 0xd2, 0x74, - 0x6f, 0x05, 0xd3, 0x2f, 0x84, 0xbe, 0xdf, 0x64, 0x66, 0x88, 0x3e, 0x87, 0x75, 0x05, 0xc9, 0x70, - 0x44, 0xc7, 0x84, 0xb9, 0xb5, 0x1d, 0xa7, 0xd7, 0xde, 0xbb, 0xa7, 0x4c, 0x19, 0xf8, 0x4f, 0x15, - 0x68, 0x03, 0x3a, 0x26, 0x7e, 0x4b, 0xa9, 0x8b, 0x31, 0x43, 0xf7, 0xa1, 0x19, 0xe1, 0x29, 0x61, - 0x33, 0x3c, 0x22, 0x6e, 
0x5d, 0x46, 0x78, 0xbd, 0xe0, 0x45, 0xd0, 0x30, 0xce, 0xbd, 0xa7, 0x50, - 0x53, 0x47, 0x43, 0x2d, 0xa8, 0xbf, 0x3a, 0xf9, 0xfa, 0xe4, 0xc5, 0x77, 0x27, 0x9d, 0x35, 0xd4, - 0x80, 0xca, 0xc9, 0xfe, 0x37, 0x07, 0x9d, 0x12, 0xba, 0x0d, 0x1b, 0xc7, 0xfb, 0xa7, 0x2f, 0x87, - 0xfe, 0xc1, 0xf1, 0xc1, 0xfe, 0xe9, 0xc1, 0xb3, 0x4e, 0x19, 0xb5, 0x01, 0x06, 0x47, 0xfb, 0xfe, - 0xcb, 0xa1, 0x54, 0x71, 0xbc, 0xff, 0x41, 0x33, 0x3d, 0x03, 0xaa, 0x83, 0xb3, 0x7f, 0x3a, 0x50, - 0x26, 0x9e, 0x1d, 0x9c, 0x0e, 0x3a, 0x25, 0xef, 0x97, 0x12, 0x6c, 0xda, 0x94, 0xb1, 0x19, 0x8d, - 0x18, 0x11, 0x9c, 0x8d, 0x68, 0x12, 0xa5, 0x9c, 0xc9, 0x09, 0x42, 0x50, 0x89, 0xc8, 0xa5, 0x61, - 0x4c, 0x8e, 0x85, 0x26, 0xa7, 0x1c, 0x87, 0x92, 0x2d, 0xc7, 0x57, 0x13, 0xf4, 0x31, 0x34, 0x34, - 0x14, 0xcc, 0xad, 0xec, 0x38, 0xbd, 0xd6, 0xde, 0x5d, 0x1b, 0x20, 0xed, 0xd1, 0x4f, 0xd5, 0xbc, - 0x43, 0xd8, 0x3e, 0x24, 0x26, 0x12, 0x85, 0x9f, 0xb9, 0x41, 0xc2, 0x2f, 0x9e, 0x12, 0x19, 0x8c, - 0xf0, 0x8b, 0xa7, 0x04, 0xb9, 0x50, 0xd7, 0xd7, 0x4f, 0x86, 0x53, 0xf5, 0xcd, 0xd4, 0xe3, 0xe0, - 0x2e, 0x1a, 0xd2, 0xe7, 0xca, 0xb3, 0xf4, 0x00, 0x2a, 0x22, 0x33, 0xa4, 0x99, 0xd6, 0x1e, 0xb2, - 0xe3, 0x7c, 0x1e, 0x4d, 0xa8, 0x2f, 0xe5, 0x36, 0x75, 0x4e, 0x96, 0xba, 0xa3, 0x79, 0xaf, 0x03, - 0x1a, 0x71, 0x12, 0xf1, 0x9b, 0xc5, 0x7f, 0x0c, 0xf7, 0x72, 0x2c, 0xe9, 0x03, 0xec, 0x42, 0x5d, - 0x87, 0x26, 0xad, 0x15, 0xe2, 0x6a, 0xb4, 0xbc, 0xbf, 0x1c, 0xd8, 0x7c, 0x35, 0x1b, 0x63, 0x4e, - 0x8c, 0x68, 0x49, 0x50, 0x0f, 0xa1, 0x2a, 0x2b, 0x8c, 0xc6, 0xe2, 0xb6, 0xb2, 0xad, 0xca, 0xd0, - 0x40, 0xfc, 0xfa, 0x4a, 0x8e, 0x1e, 0x43, 0xed, 0x02, 0x87, 0x09, 0x61, 0x12, 0x88, 0x14, 0x35, - 0xad, 0x29, 0xcb, 0x93, 0xaf, 0x35, 0xd0, 0x36, 0xd4, 0xc7, 0xf1, 0x95, 0xa8, 0x2f, 0x32, 0x25, - 0x1b, 0x7e, 0x6d, 0x1c, 0x5f, 0xf9, 0x49, 0x84, 0xde, 0x83, 0x8d, 0x71, 0xc0, 0xf0, 0x59, 0x48, - 0x86, 0x6f, 0x28, 0x7d, 0xcb, 0x64, 0x56, 0x36, 0xfc, 0x75, 0xbd, 0x78, 0x24, 0xd6, 0x50, 0x57, - 0xdc, 0xa4, 0x51, 0x4c, 0x30, 0x27, 0x6e, 0x4d, 0xca, 0xd3, 0xb9, 0xc0, 0x90, 0x07, 0x53, 0x42, - 0x13, 0x2e, 0x53, 0xc9, 0xf1, 0xcd, 0x14, 0xfd, 0x1f, 0xd6, 0x63, 0xc2, 0x08, 0x1f, 0xea, 0x28, - 0x1b, 0x72, 0x67, 0x4b, 0xae, 0xbd, 0x56, 0x61, 0x21, 0xa8, 0xfc, 0x8c, 0x03, 0xee, 0x36, 0xa5, - 0x48, 0x8e, 0xd5, 0xb6, 0x84, 0x11, 0xb3, 0x0d, 0xcc, 0xb6, 0x84, 0x11, 0xbd, 0x6d, 0x13, 0xaa, - 0x13, 0x1a, 0x8f, 0x88, 0xdb, 0x92, 0x32, 0x35, 0x41, 0x3b, 0xd0, 0x1a, 0x13, 0x36, 0x8a, 0x83, - 0x19, 0x17, 0x8c, 0xae, 0x4b, 0x4c, 0xe7, 0x97, 0xc4, 0x39, 0x58, 0x72, 0x76, 0x42, 0x39, 0x61, - 0xee, 0x86, 0x3a, 0x87, 0x99, 0xa3, 0x07, 0x70, 0x6b, 0x14, 0x12, 0x1c, 0x25, 0xb3, 0x21, 0x8d, - 0x86, 0x13, 0x1c, 0x84, 0x6e, 0x5b, 0xaa, 0x6c, 0xe8, 0xe5, 0x17, 0xd1, 0x97, 0x38, 0x08, 0xbd, - 0x23, 0xb8, 0x9b, 0xa1, 0xf2, 0xa6, 0xb7, 0xe2, 0xf7, 0x32, 0x6c, 0xf9, 0x34, 0x0c, 0xcf, 0xf0, - 0xe8, 0xed, 0x0a, 0xf7, 0x62, 0x8e, 0xc2, 0xf2, 0x72, 0x0a, 0x9d, 0x1c, 0x0a, 0xe7, 0xae, 0x7a, - 0xc5, 0xba, 0xea, 0x16, 0xb9, 0xd5, 0x62, 0x72, 0x6b, 0x36, 0xb9, 0x86, 0xb9, 0xfa, 0x1c, 0x73, - 0x29, 0x2d, 0x8d, 0x25, 0xb4, 0x34, 0x17, 0x69, 0xc9, 0x81, 0x1e, 0xf2, 0xa0, 0xff, 0x0a, 0xb6, - 0x17, 0xf0, 0xba, 0x29, 0xf8, 0xbf, 0x3a, 0x70, 0xf7, 0x79, 0xc4, 0x38, 0x0e, 0xc3, 0x0c, 0xf6, - 0x69, 0xfe, 0x95, 0x56, 0xce, 0xbf, 0xf2, 0xbf, 0xc9, 0x3f, 0xc7, 0x22, 0xcf, 0x30, 0x5d, 0x99, - 0x63, 0x7a, 0xa5, 0x9c, 0xb4, 0x2a, 0x61, 0x2d, 0x53, 0x09, 0xd1, 0x7f, 0x01, 0x54, 0x12, 0x49, - 0xe3, 0x8a, 0xa4, 0xa6, 0x5c, 0x39, 0xd1, 0x85, 0xcf, 0xf0, 0xda, 0xc8, 0xe7, 0x75, 0x3e, 0x23, - 0x7b, 0xd0, 0x31, 0xf1, 0x8c, 0xe2, 0xb1, 0x8c, 
0x49, 0x13, 0xd4, 0xd6, 0xeb, 0x83, 0x78, 0x2c, - 0xa2, 0xca, 0x72, 0xdd, 0x5a, 0x9e, 0x82, 0xeb, 0x76, 0x0a, 0x7a, 0xcf, 0x61, 0x2b, 0x4b, 0xc9, - 0x4d, 0xe9, 0xfd, 0xad, 0x04, 0xdb, 0xaf, 0xa2, 0x20, 0x97, 0xe0, 0xbc, 0xe4, 0x5a, 0x80, 0xbc, - 0x9c, 0x03, 0xf9, 0x26, 0x54, 0x67, 0x49, 0x7c, 0x4e, 0x34, 0x85, 0x6a, 0x32, 0x8f, 0x65, 0xc5, - 0xc6, 0x32, 0x83, 0x46, 0x75, 0x01, 0x0d, 0x6f, 0x08, 0xee, 0x62, 0x94, 0x37, 0x3c, 0xb3, 0x38, - 0x57, 0xfa, 0x86, 0x36, 0xd5, 0x7b, 0xe9, 0xdd, 0x81, 0xdb, 0x87, 0x84, 0xbf, 0x56, 0xa9, 0xae, - 0x01, 0xf0, 0x0e, 0x00, 0xcd, 0x2f, 0x5e, 0xfb, 0xd3, 0x4b, 0xb6, 0x3f, 0xd3, 0x60, 0x1a, 0x7d, - 0xa3, 0xe5, 0x7d, 0x26, 0x6d, 0x1f, 0x05, 0x8c, 0xd3, 0xf8, 0x6a, 0x19, 0xb8, 0x1d, 0x70, 0xa6, - 0xf8, 0x52, 0x3f, 0xb1, 0x62, 0xe8, 0x1d, 0xca, 0x08, 0xd2, 0xad, 0x3a, 0x82, 0xf9, 0x86, 0xa5, - 0xb4, 0x5a, 0xc3, 0xf2, 0x47, 0x09, 0xd0, 0x4b, 0x92, 0x36, 0x4f, 0xef, 0x78, 0xec, 0x0d, 0x4f, - 0x65, 0x9b, 0x27, 0x17, 0xea, 0xba, 0xd0, 0x68, 0x66, 0xcd, 0x54, 0xdc, 0xd6, 0x19, 0x8e, 0x71, - 0x18, 0x92, 0x50, 0xbf, 0x9b, 0xe9, 0x5c, 0xbc, 0x53, 0x53, 0x7c, 0x39, 0x4c, 0xe5, 0x82, 0xde, - 0x0d, 0xbf, 0x35, 0xc5, 0x97, 0xdf, 0x1a, 0x15, 0x04, 0x95, 0x90, 0x9e, 0x33, 0xfd, 0x66, 0xca, - 0xb1, 0xf7, 0x03, 0xdc, 0xb1, 0x02, 0xd6, 0x67, 0x17, 0x18, 0xb1, 0x73, 0x1d, 0xb0, 0x18, 0xa2, - 0x4f, 0xa1, 0xa6, 0x9a, 0x56, 0x19, 0x6e, 0x7b, 0xef, 0xbe, 0x8d, 0x85, 0x34, 0x92, 0x44, 0xba, - 0xcb, 0xf5, 0xb5, 0xee, 0xde, 0xdf, 0x0d, 0x68, 0x9b, 0xb6, 0x4b, 0xb5, 0xd4, 0x28, 0x80, 0xf5, - 0xf9, 0xfe, 0x12, 0x3d, 0x2a, 0xee, 0xb8, 0x33, 0x7f, 0x36, 0x74, 0x1f, 0xaf, 0xa2, 0xaa, 0x4e, - 0xe0, 0xad, 0x7d, 0x54, 0x42, 0x0c, 0x3a, 0xd9, 0xb6, 0x0f, 0x3d, 0xc9, 0xb7, 0x51, 0xd0, 0x67, - 0x76, 0xfb, 0xab, 0xaa, 0x1b, 0xb7, 0xe8, 0x42, 0xde, 0x43, 0xbb, 0x57, 0x43, 0xef, 0x34, 0x63, - 0xb7, 0x87, 0xdd, 0xdd, 0x95, 0xf5, 0x53, 0xbf, 0x3f, 0xc2, 0x86, 0xd5, 0x09, 0xa0, 0x02, 0xb4, - 0xf2, 0x3a, 0xbf, 0xee, 0x07, 0x2b, 0xe9, 0xa6, 0xbe, 0xa6, 0xd0, 0xb6, 0x4b, 0x23, 0x2a, 0x30, - 0x90, 0xfb, 0xa6, 0x75, 0x3f, 0x5c, 0x4d, 0x39, 0x75, 0xc7, 0xa0, 0x93, 0xad, 0x4b, 0x45, 0x3c, - 0x16, 0x54, 0xd9, 0x22, 0x1e, 0x8b, 0xca, 0x9d, 0xb7, 0x86, 0x30, 0xc0, 0x75, 0x59, 0x42, 0x0f, - 0x0b, 0x09, 0xb1, 0xab, 0x59, 0xb7, 0xf7, 0x6e, 0xc5, 0xd4, 0xc5, 0x0c, 0x6e, 0x65, 0x3a, 0x08, - 0x54, 0x00, 0x4d, 0x7e, 0x63, 0xd6, 0x7d, 0xb2, 0xa2, 0x76, 0xe6, 0x50, 0xba, 0xd2, 0x2d, 0x39, - 0x94, 0x5d, 0x46, 0x97, 0x1c, 0x2a, 0x53, 0x34, 0xbd, 0x35, 0x14, 0x40, 0xdb, 0x4f, 0x22, 0xed, - 0x5a, 0x94, 0x05, 0x54, 0xb0, 0x7b, 0xb1, 0x50, 0x76, 0x1f, 0xad, 0xa0, 0x79, 0x9d, 0xdf, 0x4f, - 0xe1, 0xfb, 0x86, 0x51, 0x3d, 0xab, 0xc9, 0xff, 0x38, 0x7c, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x8a, 0x23, 0x76, 0x0d, 0x5f, 0x11, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/version/version.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/version/version.pb.go deleted file mode 100644 index e51def8e2..000000000 --- a/vendor/k8s.io/helm/pkg/proto/hapi/version/version.pb.go +++ /dev/null @@ -1,94 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: hapi/version/version.proto - -package version - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Version struct { - // Sem ver string for the version - SemVer string `protobuf:"bytes,1,opt,name=sem_ver,json=semVer,proto3" json:"sem_ver,omitempty"` - GitCommit string `protobuf:"bytes,2,opt,name=git_commit,json=gitCommit,proto3" json:"git_commit,omitempty"` - GitTreeState string `protobuf:"bytes,3,opt,name=git_tree_state,json=gitTreeState,proto3" json:"git_tree_state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_version_10859f2d56ed17fa, []int{0} -} -func (m *Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (dst *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(dst, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetSemVer() string { - if m != nil { - return m.SemVer - } - return "" -} - -func (m *Version) GetGitCommit() string { - if m != nil { - return m.GitCommit - } - return "" -} - -func (m *Version) GetGitTreeState() string { - if m != nil { - return m.GitTreeState - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "hapi.version.Version") -} - -func init() { proto.RegisterFile("hapi/version/version.proto", fileDescriptor_version_10859f2d56ed17fa) } - -var fileDescriptor_version_10859f2d56ed17fa = []byte{ - // 151 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xca, 0x48, 0x2c, 0xc8, - 0xd4, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x83, 0xd1, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, - 0x42, 0x3c, 0x20, 0x39, 0x3d, 0xa8, 0x98, 0x52, 0x3a, 0x17, 0x7b, 0x18, 0x84, 0x29, 0x24, 0xce, - 0xc5, 0x5e, 0x9c, 0x9a, 0x1b, 0x5f, 0x96, 0x5a, 0x24, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0xc4, - 0x56, 0x9c, 0x9a, 0x1b, 0x96, 0x5a, 0x24, 0x24, 0xcb, 0xc5, 0x95, 0x9e, 0x59, 0x12, 0x9f, 0x9c, - 0x9f, 0x9b, 0x9b, 0x59, 0x22, 0xc1, 0x04, 0x96, 0xe3, 0x4c, 0xcf, 0x2c, 0x71, 0x06, 0x0b, 0x08, - 0xa9, 0x70, 0xf1, 0x81, 0xa4, 0x4b, 0x8a, 0x52, 0x53, 0xe3, 0x8b, 0x4b, 0x12, 0x4b, 0x52, 0x25, - 0x98, 0xc1, 0x4a, 0x78, 0xd2, 0x33, 0x4b, 0x42, 0x8a, 0x52, 0x53, 0x83, 0x41, 0x62, 0x4e, 0x9c, - 0x51, 0xec, 0x50, 0x3b, 0x93, 0xd8, 0xc0, 0x0e, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x20, - 0xcc, 0x0e, 0x1b, 0xa6, 0x00, 0x00, 0x00, -} diff --git a/vendor/k8s.io/helm/pkg/releaseutil/sorter.go b/vendor/k8s.io/helm/pkg/releaseutil/sorter.go deleted file mode 100644 index 977f49398..000000000 --- a/vendor/k8s.io/helm/pkg/releaseutil/sorter.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package releaseutil // import "k8s.io/helm/pkg/releaseutil" - -import ( - "sort" - - rspb "k8s.io/helm/pkg/proto/hapi/release" -) - -type sorter struct { - list []*rspb.Release - less func(int, int) bool -} - -func (s *sorter) Len() int { return len(s.list) } -func (s *sorter) Less(i, j int) bool { return s.less(i, j) } -func (s *sorter) Swap(i, j int) { s.list[i], s.list[j] = s.list[j], s.list[i] } - -// Reverse reverses the list of releases sorted by the sort func. -func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) { - sortFn(list) - for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { - list[i], list[j] = list[j], list[i] - } -} - -// SortByName returns the list of releases sorted -// in lexicographical order. -func SortByName(list []*rspb.Release) { - s := &sorter{list: list} - s.less = func(i, j int) bool { - ni := s.list[i].Name - nj := s.list[j].Name - return ni < nj - } - sort.Sort(s) -} - -// SortByDate returns the list of releases sorted by a -// release's last deployed time (in seconds). -func SortByDate(list []*rspb.Release) { - s := &sorter{list: list} - - s.less = func(i, j int) bool { - ti := s.list[i].Info.LastDeployed.Seconds - tj := s.list[j].Info.LastDeployed.Seconds - return ti < tj - } - sort.Sort(s) -} - -// SortByRevision returns the list of releases sorted by a -// release's revision number (release.Version). -func SortByRevision(list []*rspb.Release) { - s := &sorter{list: list} - s.less = func(i, j int) bool { - vi := s.list[i].Version - vj := s.list[j].Version - return vi < vj - } - sort.Sort(s) -} - -// SortByChartName sorts the list of releases by a -// release's chart name in lexicographical order. -func SortByChartName(list []*rspb.Release) { - s := &sorter{list: list} - s.less = func(i, j int) bool { - chi := s.list[i].Chart - chj := s.list[j].Chart - - ni := "" - if chi != nil && chi.Metadata != nil { - ni = chi.Metadata.Name - } - - nj := "" - if chj != nil && chj.Metadata != nil { - nj = chj.Metadata.Name - } - - return ni < nj - } - sort.Sort(s) -} diff --git a/vendor/k8s.io/helm/pkg/renderutil/deps.go b/vendor/k8s.io/helm/pkg/renderutil/deps.go deleted file mode 100644 index 72e4d12a1..000000000 --- a/vendor/k8s.io/helm/pkg/renderutil/deps.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package renderutil - -import ( - "fmt" - "strings" - - "k8s.io/helm/pkg/chartutil" - "k8s.io/helm/pkg/proto/hapi/chart" -) - -// CheckDependencies will do a simple dependency check on the chart for local -// rendering -func CheckDependencies(ch *chart.Chart, reqs *chartutil.Requirements) error { - missing := []string{} - - deps := ch.GetDependencies() - for _, r := range reqs.Dependencies { - found := false - for _, d := range deps { - if d.Metadata.Name == r.Name { - found = true - break - } - } - if !found { - missing = append(missing, r.Name) - } - } - - if len(missing) > 0 { - return fmt.Errorf("found in requirements.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", ")) - } - return nil -} diff --git a/vendor/k8s.io/helm/pkg/renderutil/render.go b/vendor/k8s.io/helm/pkg/renderutil/render.go deleted file mode 100644 index 5be40992b..000000000 --- a/vendor/k8s.io/helm/pkg/renderutil/render.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package renderutil - -import ( - "fmt" - - "github.com/Masterminds/semver" - - "k8s.io/helm/pkg/chartutil" - "k8s.io/helm/pkg/engine" - "k8s.io/helm/pkg/proto/hapi/chart" - tversion "k8s.io/helm/pkg/version" -) - -// Options are options for this simple local render -type Options struct { - ReleaseOptions chartutil.ReleaseOptions - KubeVersion string - APIVersions []string -} - -// Render chart templates locally and display the output. -// This does not require the Tiller. Any values that would normally be -// looked up or retrieved in-cluster will be faked locally. Additionally, none -// of the server-side testing of chart validity (e.g. whether an API is supported) -// is done. -// -// Note: a `nil` config passed here means "ignore the chart's default values"; -// if you want the normal behavior of merging the defaults with the new config, -// you should pass `&chart.Config{Raw: "{}"}, -func Render(c *chart.Chart, config *chart.Config, opts Options) (map[string]string, error) { - if req, err := chartutil.LoadRequirements(c); err == nil { - if err := CheckDependencies(c, req); err != nil { - return nil, err - } - } else if err != chartutil.ErrRequirementsNotFound { - return nil, fmt.Errorf("cannot load requirements: %v", err) - } - - err := chartutil.ProcessRequirementsEnabled(c, config) - if err != nil { - return nil, err - } - err = chartutil.ProcessRequirementsImportValues(c) - if err != nil { - return nil, err - } - - // Set up engine. 
- renderer := engine.New() - - caps := &chartutil.Capabilities{ - APIVersions: chartutil.DefaultVersionSet, - KubeVersion: chartutil.DefaultKubeVersion, - TillerVersion: tversion.GetVersionProto(), - } - - if opts.KubeVersion != "" { - kv, verErr := semver.NewVersion(opts.KubeVersion) - if verErr != nil { - return nil, fmt.Errorf("could not parse a kubernetes version: %v", verErr) - } - caps.KubeVersion.Major = fmt.Sprint(kv.Major()) - caps.KubeVersion.Minor = fmt.Sprint(kv.Minor()) - caps.KubeVersion.GitVersion = fmt.Sprintf("v%d.%d.0", kv.Major(), kv.Minor()) - } - - if len(opts.APIVersions) > 0 { - caps.APIVersions = chartutil.NewVersionSet(append(opts.APIVersions, "v1")...) - } - - vals, err := chartutil.ToRenderValuesCaps(c, config, opts.ReleaseOptions, caps) - if err != nil { - return nil, err - } - - return renderer.Render(c, vals) -} diff --git a/vendor/k8s.io/helm/pkg/storage/errors/errors.go b/vendor/k8s.io/helm/pkg/storage/errors/errors.go deleted file mode 100644 index 38662d1ca..000000000 --- a/vendor/k8s.io/helm/pkg/storage/errors/errors.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors // import "k8s.io/helm/pkg/storage/errors" -import "fmt" - -var ( - // ErrReleaseNotFound indicates that a release is not found. - ErrReleaseNotFound = func(release string) error { return fmt.Errorf("release: %q not found", release) } - // ErrReleaseExists indicates that a release already exists. - ErrReleaseExists = func(release string) error { return fmt.Errorf("release: %q already exists", release) } - // ErrInvalidKey indicates that a release key could not be parsed. - ErrInvalidKey = func(release string) error { return fmt.Errorf("release: %q invalid key", release) } -) diff --git a/vendor/k8s.io/helm/pkg/version/compatible.go b/vendor/k8s.io/helm/pkg/version/compatible.go deleted file mode 100644 index d0516a9d0..000000000 --- a/vendor/k8s.io/helm/pkg/version/compatible.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version // import "k8s.io/helm/pkg/version" - -import ( - "fmt" - "strings" - - "github.com/Masterminds/semver" -) - -// IsCompatible tests if a client and server version are compatible. 
-func IsCompatible(client, server string) bool { - if isUnreleased(client) || isUnreleased(server) { - return true - } - cv, err := semver.NewVersion(client) - if err != nil { - return false - } - sv, err := semver.NewVersion(server) - if err != nil { - return false - } - - constraint := fmt.Sprintf("^%d.%d.x", cv.Major(), cv.Minor()) - if cv.Prerelease() != "" || sv.Prerelease() != "" { - constraint = cv.String() - } - - return IsCompatibleRange(constraint, server) -} - -// IsCompatibleRange compares a version to a constraint. -// It returns true if the version matches the constraint, and false in all other cases. -func IsCompatibleRange(constraint, ver string) bool { - sv, err := semver.NewVersion(ver) - if err != nil { - return false - } - - c, err := semver.NewConstraint(constraint) - if err != nil { - return false - } - return c.Check(sv) -} - -func isUnreleased(v string) bool { - return strings.HasSuffix(v, "unreleased") -} diff --git a/vendor/k8s.io/helm/pkg/version/version.go b/vendor/k8s.io/helm/pkg/version/version.go deleted file mode 100644 index 74accb402..000000000 --- a/vendor/k8s.io/helm/pkg/version/version.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version // import "k8s.io/helm/pkg/version" - -import "k8s.io/helm/pkg/proto/hapi/version" - -var ( - // Version is the current version of the Helm. - // Update this whenever making a new release. - // The version is of the format Major.Minor.Patch[-Prerelease][+BuildMetadata] - // - // Increment major number for new feature additions and behavioral changes. - // Increment minor number for bug fixes and performance enhancements. - // Increment patch number for critical fixes to existing releases. - Version = "v2.17" - - // BuildMetadata is extra build time data - BuildMetadata = "unreleased" - // GitCommit is the git sha1 - GitCommit = "" - // GitTreeState is the state of the git tree - GitTreeState = "" -) - -// GetVersion returns the semver string of the version -func GetVersion() string { - if BuildMetadata == "" { - return Version - } - return Version + "+" + BuildMetadata -} - -// GetVersionProto returns protobuf representing the version -func GetVersionProto() *version.Version { - return &version.Version{ - SemVer: GetVersion(), - GitCommit: GitCommit, - GitTreeState: GitTreeState, - } -} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go new file mode 100644 index 000000000..b1aa8c008 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go @@ -0,0 +1,79 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" +) + +type errors struct { + errors []error +} + +func (e *errors) Errors() []error { + return e.errors +} + +func (e *errors) AppendErrors(err ...error) { + e.errors = append(e.errors, err...) +} + +type ValidationError struct { + Path string + Err error +} + +func (e ValidationError) Error() string { + return fmt.Sprintf("ValidationError(%s): %v", e.Path, e.Err) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} + +type MissingRequiredFieldError struct { + Path string + Field string +} + +func (e MissingRequiredFieldError) Error() string { + return fmt.Sprintf("missing required field %q in %s", e.Field, e.Path) +} + +type UnknownFieldError struct { + Path string + Field string +} + +func (e UnknownFieldError) Error() string { + return fmt.Sprintf("unknown field %q in %s", e.Field, e.Path) +} + +type InvalidObjectTypeError struct { + Path string + Type string +} + +func (e InvalidObjectTypeError) Error() string { + return fmt.Sprintf("unknown object type %q in %s", e.Type, e.Path) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go new file mode 100644 index 000000000..e66342a7f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -0,0 +1,299 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "reflect" + "sort" + + "k8s.io/kube-openapi/pkg/util/proto" +) + +type validationItem interface { + proto.SchemaVisitor + + Errors() []error + Path() *proto.Path +} + +type baseItem struct { + errors errors + path proto.Path +} + +// Errors returns the list of errors found for this item. +func (item *baseItem) Errors() []error { + return item.errors.Errors() +} + +// AddValidationError wraps the given error into a ValidationError and +// attaches it to this item. +func (item *baseItem) AddValidationError(err error) { + item.errors.AppendErrors(ValidationError{Path: item.path.String(), Err: err}) +} + +// AddError adds a regular (non-validation related) error to the list. +func (item *baseItem) AddError(err error) { + item.errors.AppendErrors(err) +} + +// CopyErrors adds a list of errors to this item. This is useful to copy +// errors from subitems. +func (item *baseItem) CopyErrors(errs []error) { + item.errors.AppendErrors(errs...) +} + +// Path returns the path of this item, helps print useful errors. 
+func (item *baseItem) Path() *proto.Path { + return &item.path +} + +// mapItem represents a map entry in the yaml. +type mapItem struct { + baseItem + + Map map[string]interface{} +} + +func (item *mapItem) sortedKeys() []string { + sortedKeys := []string{} + for key := range item.Map { + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + return sortedKeys +} + +var _ validationItem = &mapItem{} + +func (item *mapItem) VisitPrimitive(schema *proto.Primitive) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "map"}) +} + +func (item *mapItem) VisitArray(schema *proto.Array) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) +} + +func (item *mapItem) VisitMap(schema *proto.Map) { + for _, key := range item.sortedKeys() { + subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key]) + if err != nil { + item.AddError(err) + continue + } + schema.SubType.Accept(subItem) + item.CopyErrors(subItem.Errors()) + } +} + +func (item *mapItem) VisitKind(schema *proto.Kind) { + // Verify each sub-field. + for _, key := range item.sortedKeys() { + if item.Map[key] == nil { + continue + } + subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key]) + if err != nil { + item.AddError(err) + continue + } + if _, ok := schema.Fields[key]; !ok { + item.AddValidationError(UnknownFieldError{Path: schema.GetPath().String(), Field: key}) + continue + } + schema.Fields[key].Accept(subItem) + item.CopyErrors(subItem.Errors()) + } + + // Verify that all required fields are present. + for _, required := range schema.RequiredFields { + if v, ok := item.Map[required]; !ok || v == nil { + item.AddValidationError(MissingRequiredFieldError{Path: schema.GetPath().String(), Field: required}) + } + } +} + +func (item *mapItem) VisitArbitrary(schema *proto.Arbitrary) { +} + +func (item *mapItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// arrayItem represents a yaml array. +type arrayItem struct { + baseItem + + Array []interface{} +} + +var _ validationItem = &arrayItem{} + +func (item *arrayItem) VisitPrimitive(schema *proto.Primitive) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "array"}) +} + +func (item *arrayItem) VisitArray(schema *proto.Array) { + for i, v := range item.Array { + path := item.Path().ArrayPath(i) + if v == nil { + item.AddValidationError(InvalidObjectTypeError{Type: "nil", Path: path.String()}) + continue + } + subItem, err := itemFactory(path, v) + if err != nil { + item.AddError(err) + continue + } + schema.SubType.Accept(subItem) + item.CopyErrors(subItem.Errors()) + } +} + +func (item *arrayItem) VisitMap(schema *proto.Map) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) +} + +func (item *arrayItem) VisitKind(schema *proto.Kind) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) +} + +func (item *arrayItem) VisitArbitrary(schema *proto.Arbitrary) { +} + +func (item *arrayItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// primitiveItem represents a yaml value. 
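+//
+// Editor's illustrative sketch (not part of the upstream file): a plain Go
+// value decoded from YAML becomes a primitiveItem via itemFactory below, and
+// the coercion rules in VisitPrimitive let an integer satisfy a "number"
+// schema:
+//
+//	item, _ := itemFactory(proto.NewPath("spec.replicas"), 3)
+//	numberSchema.Accept(item) // numberSchema: a *proto.Primitive with Type "number" (assumed)
+//	_ = item.Errors()         // empty; integers are accepted where numbers are expected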
+type primitiveItem struct { + baseItem + + Value interface{} + Kind string +} + +var _ validationItem = &primitiveItem{} + +func (item *primitiveItem) VisitPrimitive(schema *proto.Primitive) { + // Some types of primitives can match more than one (a number + // can be a string, but not the other way around). Return from + // the switch if we have a valid possible type conversion + // NOTE(apelisse): This logic is blindly copied from the + // existing swagger logic, and I'm not sure I agree with it. + switch schema.Type { + case proto.Boolean: + switch item.Kind { + case proto.Boolean: + return + } + case proto.Integer: + switch item.Kind { + case proto.Integer, proto.Number: + return + } + case proto.Number: + switch item.Kind { + case proto.Integer, proto.Number: + return + } + case proto.String: + return + } + // TODO(wrong): this misses "null" + + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: item.Kind}) +} + +func (item *primitiveItem) VisitArray(schema *proto.Array) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitMap(schema *proto.Map) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitKind(schema *proto.Kind) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitArbitrary(schema *proto.Arbitrary) { +} + +func (item *primitiveItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// itemFactory creates the relevant item type/visitor based on the current yaml type. +func itemFactory(path proto.Path, v interface{}) (validationItem, error) { + // We need to special case for no-type fields in yaml (e.g. empty item in list) + if v == nil { + return nil, InvalidObjectTypeError{Type: "nil", Path: path.String()} + } + kind := reflect.TypeOf(v).Kind() + switch kind { + case reflect.Bool: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Boolean, + }, nil + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64, + reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Integer, + }, nil + case reflect.Float32, + reflect.Float64: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Number, + }, nil + case reflect.String: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.String, + }, nil + case reflect.Array, + reflect.Slice: + return &arrayItem{ + baseItem: baseItem{path: path}, + Array: v.([]interface{}), + }, nil + case reflect.Map: + return &mapItem{ + baseItem: baseItem{path: path}, + Map: v.(map[string]interface{}), + }, nil + } + return nil, InvalidObjectTypeError{Type: kind.String(), Path: path.String()} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go new file mode 100644 index 000000000..35310f637 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/kube-openapi/pkg/util/proto" +) + +func ValidateModel(obj interface{}, schema proto.Schema, name string) []error { + rootValidation, err := itemFactory(proto.NewPath(name), obj) + if err != nil { + return []error{err} + } + schema.Accept(rootValidation) + return rootValidation.Errors() +} diff --git a/vendor/k8s.io/kubectl/LICENSE b/vendor/k8s.io/kubectl/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/k8s.io/kubectl/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/env_file.go b/vendor/k8s.io/kubectl/pkg/cmd/util/env_file.go new file mode 100644 index 000000000..ed1255839 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/env_file.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package util
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"k8s.io/apimachinery/pkg/util/validation"
+)
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+// processEnvFileLine returns a blank key if the line is empty or a comment.
+// The value will be retrieved from the environment if necessary.
+func processEnvFileLine(line []byte, filePath string,
+	currentLine int) (key, value string, err error) {
+
+	if !utf8.Valid(line) {
+		return ``, ``, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v",
+			filePath, currentLine+1, line)
+	}
+
+	// We trim UTF8 BOM from the first line of the file but no others
+	if currentLine == 0 {
+		line = bytes.TrimPrefix(line, utf8bom)
+	}
+
+	// trim the line from all leading whitespace first
+	line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+
+	// If the line is empty or a comment, we return a blank key/value pair.
+	if len(line) == 0 || line[0] == '#' {
+		return ``, ``, nil
+	}
+
+	data := strings.SplitN(string(line), "=", 2)
+	key = data[0]
+	if errs := validation.IsEnvVarName(key); len(errs) != 0 {
+		return ``, ``, fmt.Errorf("%q is not a valid key name: %s", key, strings.Join(errs, ";"))
+	}
+
+	if len(data) == 2 {
+		value = data[1]
+	} else {
+		// No value (no `=` in the line) is a signal to obtain the value
+		// from the environment.
+		value = os.Getenv(key)
+	}
+	return
+}
+
+// AddFromEnvFile processes an env file and allows a generic addTo to handle the
+// collection of key/value pairs, or returns an error.
+func AddFromEnvFile(filePath string, addTo func(key, value string) error) error {
+	f, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	currentLine := 0
+	for scanner.Scan() {
+		// Process the current line, retrieving a key/value pair if
+		// possible.
+		scannedBytes := scanner.Bytes()
+		key, value, err := processEnvFileLine(scannedBytes, filePath, currentLine)
+		if err != nil {
+			return err
+		}
+		currentLine++
+
+		if len(key) == 0 {
+			// no key means line was empty or a comment
+			continue
+		}
+
+		if err = addTo(key, value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go b/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go
new file mode 100644
index 000000000..e6414f3e0
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	openapiclient "k8s.io/client-go/openapi"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/kubectl/pkg/util/openapi"
+	"k8s.io/kubectl/pkg/validation"
+)
+
+// Factory provides abstractions that allow the Kubectl command to be extended across multiple types
+// of resources and different API sets.
+// The rings are here for a reason.
In order for composers to be able to provide alternative factory implementations +// they need to provide low level pieces of *certain* functions so that when the factory calls back into itself +// it uses the custom version of the function. Rather than try to enumerate everything that someone would want to override +// we split the factory into rings, where each ring can depend on methods in an earlier ring, but cannot depend +// upon peer methods in its own ring. +// TODO: make the functions interfaces +// TODO: pass the various interfaces on the factory directly into the command constructors (so the +// commands are decoupled from the factory). +type Factory interface { + genericclioptions.RESTClientGetter + + // DynamicClient returns a dynamic client ready for use + DynamicClient() (dynamic.Interface, error) + + // KubernetesClientSet gives you back an external clientset + KubernetesClientSet() (*kubernetes.Clientset, error) + + // Returns a RESTClient for accessing Kubernetes resources or an error. + RESTClient() (*restclient.RESTClient, error) + + // NewBuilder returns an object that assists in loading objects from both disk and the server + // and which implements the common patterns for CLI interactions with generic resources. + NewBuilder() *resource.Builder + + // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended + // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. + ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) + // Returns a RESTClient for working with Unstructured objects. + UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) + + // Returns a schema that can validate objects stored on disk. + Validator(validationDirective string) (validation.Schema, error) + + // Used for retrieving openapi v2 resources. + openapi.OpenAPIResourcesGetter + + // OpenAPIV3Schema returns a client for fetching parsed schemas for + // any group version + OpenAPIV3Client() (openapiclient.Client, error) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go b/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go new file mode 100644 index 000000000..6a1646b84 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go @@ -0,0 +1,218 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// this file contains factories with no other dependencies + +package util + +import ( + "errors" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + openapiclient "k8s.io/client-go/openapi" + "k8s.io/client-go/openapi/cached" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/util/openapi" + "k8s.io/kubectl/pkg/validation" +) + +type factoryImpl struct { + clientGetter genericclioptions.RESTClientGetter + + // Caches OpenAPI document and parsed resources + openAPIParser *openapi.CachedOpenAPIParser + oapi *openapi.CachedOpenAPIGetter + parser sync.Once + getter sync.Once +} + +func NewFactory(clientGetter genericclioptions.RESTClientGetter) Factory { + if clientGetter == nil { + panic("attempt to instantiate client_access_factory with nil clientGetter") + } + f := &factoryImpl{ + clientGetter: clientGetter, + } + + return f +} + +func (f *factoryImpl) ToRESTConfig() (*restclient.Config, error) { + return f.clientGetter.ToRESTConfig() +} + +func (f *factoryImpl) ToRESTMapper() (meta.RESTMapper, error) { + return f.clientGetter.ToRESTMapper() +} + +func (f *factoryImpl) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + return f.clientGetter.ToDiscoveryClient() +} + +func (f *factoryImpl) ToRawKubeConfigLoader() clientcmd.ClientConfig { + return f.clientGetter.ToRawKubeConfigLoader() +} + +func (f *factoryImpl) KubernetesClientSet() (*kubernetes.Clientset, error) { + clientConfig, err := f.ToRESTConfig() + if err != nil { + return nil, err + } + return kubernetes.NewForConfig(clientConfig) +} + +func (f *factoryImpl) DynamicClient() (dynamic.Interface, error) { + clientConfig, err := f.ToRESTConfig() + if err != nil { + return nil, err + } + return dynamic.NewForConfig(clientConfig) +} + +// NewBuilder returns a new resource builder for structured api objects. 
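+//
+// Illustrative use (editor's sketch, not upstream code; the manifest name is
+// hypothetical):
+//
+//	result := f.NewBuilder().
+//		Unstructured().
+//		FilenameParam(false, &resource.FilenameOptions{Filenames: []string{"deployment.yaml"}}).
+//		Flatten().
+//		Do()
+//	infos, err := result.Infos()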
+func (f *factoryImpl) NewBuilder() *resource.Builder { + return resource.NewBuilder(f.clientGetter) +} + +func (f *factoryImpl) RESTClient() (*restclient.RESTClient, error) { + clientConfig, err := f.ToRESTConfig() + if err != nil { + return nil, err + } + setKubernetesDefaults(clientConfig) + return restclient.RESTClientFor(clientConfig) +} + +func (f *factoryImpl) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { + cfg, err := f.clientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + if err := setKubernetesDefaults(cfg); err != nil { + return nil, err + } + gvk := mapping.GroupVersionKind + switch gvk.Group { + case corev1.GroupName: + cfg.APIPath = "/api" + default: + cfg.APIPath = "/apis" + } + gv := gvk.GroupVersion() + cfg.GroupVersion = &gv + return restclient.RESTClientFor(cfg) +} + +func (f *factoryImpl) UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { + cfg, err := f.clientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + if err := restclient.SetKubernetesDefaults(cfg); err != nil { + return nil, err + } + cfg.APIPath = "/apis" + if mapping.GroupVersionKind.Group == corev1.GroupName { + cfg.APIPath = "/api" + } + gv := mapping.GroupVersionKind.GroupVersion() + cfg.ContentConfig = resource.UnstructuredPlusDefaultContentConfig() + cfg.GroupVersion = &gv + return restclient.RESTClientFor(cfg) +} + +func (f *factoryImpl) Validator(validationDirective string) (validation.Schema, error) { + // client-side schema validation is only performed + // when the validationDirective is strict. + // If the directive is warn, we rely on the ParamVerifyingSchema + // to ignore the client-side validation and provide a warning + // to the user that attempting warn validation when SS validation + // is unsupported is inert. + if validationDirective == metav1.FieldValidationIgnore { + return validation.NullSchema{}, nil + } + + schema := validation.ConjunctiveSchema{ + validation.NewSchemaValidation(f), + validation.NoDoubleKeySchema{}, + } + + dynamicClient, err := f.DynamicClient() + if err != nil { + return nil, err + } + // Create the FieldValidationVerifier for use in the ParamVerifyingSchema. + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return nil, err + } + // Memory-cache the OpenAPI V3 responses. The disk cache behavior is determined by + // the discovery client. + oapiV3Client := cached.NewClient(discoveryClient.OpenAPIV3()) + queryParam := resource.QueryParamFieldValidation + primary := resource.NewQueryParamVerifierV3(dynamicClient, oapiV3Client, queryParam) + secondary := resource.NewQueryParamVerifier(dynamicClient, f.openAPIGetter(), queryParam) + fallback := resource.NewFallbackQueryParamVerifier(primary, secondary) + return validation.NewParamVerifyingSchema(schema, fallback, string(validationDirective)), nil +} + +// OpenAPISchema returns metadata and structural information about +// Kubernetes object definitions. 
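+//
+// Editor's illustration (not upstream code; assumes the usual
+// k8s.io/apimachinery/pkg/runtime/schema import): the parsed document is
+// cached, so repeated lookups are cheap:
+//
+//	resources, err := f.OpenAPISchema()
+//	if err == nil {
+//		_ = resources.LookupResource(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
+//	}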
+func (f *factoryImpl) OpenAPISchema() (openapi.Resources, error) {
+	openAPIGetter := f.openAPIGetter()
+	if openAPIGetter == nil {
+		return nil, errors.New("no openapi getter")
+	}
+
+	// Lazily initialize the OpenAPIParser once
+	f.parser.Do(func() {
+		// Create the caching OpenAPIParser
+		f.openAPIParser = openapi.NewOpenAPIParser(f.openAPIGetter())
+	})
+
+	// Delegate to the OpenAPIParser
+	return f.openAPIParser.Parse()
+}
+
+func (f *factoryImpl) openAPIGetter() discovery.OpenAPISchemaInterface {
+	discovery, err := f.clientGetter.ToDiscoveryClient()
+	if err != nil {
+		return nil
+	}
+	f.getter.Do(func() {
+		f.oapi = openapi.NewOpenAPIGetter(discovery)
+	})
+
+	return f.oapi
+}
+
+func (f *factoryImpl) OpenAPIV3Client() (openapiclient.Client, error) {
+	discovery, err := f.clientGetter.ToDiscoveryClient()
+	if err != nil {
+		return nil, err
+	}
+
+	return cached.NewClient(discovery.OpenAPIV3()), nil
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
new file mode 100644
index 000000000..6d38fade3
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
@@ -0,0 +1,915 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/scale"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog/v2"
+	utilexec "k8s.io/utils/exec"
+)
+
+const (
+	ApplyAnnotationsFlag = "save-config"
+	DefaultErrorExitCode = 1
+	DefaultChunkSize     = 500
+)
+
+type debugError interface {
+	DebugError() (msg string, args []interface{})
+}
+
+// AddSourceToErr adds the handling verb and source string to the error message.
+// verb is a string like "creating", "deleting" etc.
+// source is the filename or URL to the template file (*.json or *.yaml), or stdin to use to handle the resource.
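+//
+// Illustrative result (editor's sketch; the filename is hypothetical):
+//
+//	err := AddSourceToErr("creating", "deployment.yaml", fmt.Errorf("connection refused"))
+//	// err.Error() == `error when creating "deployment.yaml": connection refused`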
+func AddSourceToErr(verb string, source string, err error) error {
+	if source != "" {
+		if statusError, ok := err.(apierrors.APIStatus); ok {
+			status := statusError.Status()
+			status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message)
+			return &apierrors.StatusError{ErrStatus: status}
+		}
+		return fmt.Errorf("error when %s %q: %v", verb, source, err)
+	}
+	return err
+}
+
+var fatalErrHandler = fatal
+
+// BehaviorOnFatal allows you to override the default behavior when a fatal
+// error occurs, which is to call os.Exit(code). You can pass 'panic' as a function
+// here if you prefer panic() over os.Exit(1).
+func BehaviorOnFatal(f func(string, int)) {
+	fatalErrHandler = f
+}
+
+// DefaultBehaviorOnFatal allows you to undo any previous override. Useful in
+// tests.
+func DefaultBehaviorOnFatal() {
+	fatalErrHandler = fatal
+}
+
+// fatal prints the message (if provided) and then exits. If V(99) or greater,
+// klog.Fatal is invoked for extended information. This is intended for maintainer
+// debugging and out of a reasonable range for users.
+func fatal(msg string, code int) {
+	// nolint:logcheck // Not using the result of klog.V(99) inside the if
+	// branch is okay, we just use it to determine how to terminate.
+	if klog.V(99).Enabled() {
+		klog.FatalDepth(2, msg)
+	}
+	if len(msg) > 0 {
+		// add newline if needed
+		if !strings.HasSuffix(msg, "\n") {
+			msg += "\n"
+		}
+		fmt.Fprint(os.Stderr, msg)
+	}
+	os.Exit(code)
+}
+
+// ErrExit may be passed to CheckErr to instruct it to output nothing but exit with
+// status code 1.
+var ErrExit = fmt.Errorf("exit")
+
+// CheckErr prints a user-friendly error to STDERR and exits with a non-zero
+// exit code. Unrecognized errors will be printed with an "error: " prefix.
+//
+// This method is generic to the command in use and may be used by non-Kubectl
+// commands.
+func CheckErr(err error) {
+	checkErr(err, fatalErrHandler)
+}
+
+// CheckDiffErr prints a user-friendly error to STDERR and exits with a
+// non-zero and non-one exit code. Unrecognized errors will be printed
+// with an "error: " prefix.
+//
+// This method is meant specifically for `kubectl diff` and may be used
+// by other commands.
+func CheckDiffErr(err error) {
+	checkErr(err, func(msg string, code int) {
+		fatalErrHandler(msg, code+1)
+	})
+}
+
+// isInvalidReasonStatusError returns true if this is an API Status error with reason=Invalid.
+// This is distinct from generic 422 errors, for which we want to fall back to generic error handling.
+func isInvalidReasonStatusError(err error) bool {
+	if !apierrors.IsInvalid(err) {
+		return false
+	}
+	statusError, isStatusError := err.(*apierrors.StatusError)
+	if !isStatusError {
+		return false
+	}
+	status := statusError.Status()
+	return status.Reason == metav1.StatusReasonInvalid
+}
+
+// checkErr formats a given error as a string and calls the passed handleErr
+// func with that string and a kubectl exit code.
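+//
+// For example (editor's sketch): an aggregate wrapping exactly one error is
+// unwrapped first, so
+//
+//	checkErr(utilerrors.NewAggregate([]error{ErrExit}), handle)
+//
+// invokes handle("", DefaultErrorExitCode) just as a bare ErrExit would.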
+func checkErr(err error, handleErr func(string, int)) { + // unwrap aggregates of 1 + if agg, ok := err.(utilerrors.Aggregate); ok && len(agg.Errors()) == 1 { + err = agg.Errors()[0] + } + + if err == nil { + return + } + + switch { + case err == ErrExit: + handleErr("", DefaultErrorExitCode) + case isInvalidReasonStatusError(err): + status := err.(*apierrors.StatusError).Status() + details := status.Details + s := "The request is invalid" + if details == nil { + // if we have no other details, include the message from the server if present + if len(status.Message) > 0 { + s += ": " + status.Message + } + handleErr(s, DefaultErrorExitCode) + return + } + if len(details.Kind) != 0 || len(details.Name) != 0 { + s = fmt.Sprintf("The %s %q is invalid", details.Kind, details.Name) + } else if len(status.Message) > 0 && len(details.Causes) == 0 { + // only append the message if we have no kind/name details and no causes, + // since default invalid error constructors duplicate that information in the message + s += ": " + status.Message + } + + if len(details.Causes) > 0 { + errs := statusCausesToAggrError(details.Causes) + handleErr(MultilineError(s+": ", errs), DefaultErrorExitCode) + } else { + handleErr(s, DefaultErrorExitCode) + } + case clientcmd.IsConfigurationInvalid(err): + handleErr(MultilineError("Error in configuration: ", err), DefaultErrorExitCode) + default: + switch err := err.(type) { + case *meta.NoResourceMatchError: + switch { + case len(err.PartialResource.Group) > 0 && len(err.PartialResource.Version) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q and version %q", err.PartialResource.Resource, err.PartialResource.Group, err.PartialResource.Version), DefaultErrorExitCode) + case len(err.PartialResource.Group) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q", err.PartialResource.Resource, err.PartialResource.Group), DefaultErrorExitCode) + case len(err.PartialResource.Version) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in version %q", err.PartialResource.Resource, err.PartialResource.Version), DefaultErrorExitCode) + default: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q", err.PartialResource.Resource), DefaultErrorExitCode) + } + case utilerrors.Aggregate: + handleErr(MultipleErrors(``, err.Errors()), DefaultErrorExitCode) + case utilexec.ExitError: + handleErr(err.Error(), err.ExitStatus()) + default: // for any other error type + msg, ok := StandardErrorMessage(err) + if !ok { + msg = err.Error() + if !strings.HasPrefix(msg, "error: ") { + msg = fmt.Sprintf("error: %s", msg) + } + } + handleErr(msg, DefaultErrorExitCode) + } + } +} + +func statusCausesToAggrError(scs []metav1.StatusCause) utilerrors.Aggregate { + errs := make([]error, 0, len(scs)) + errorMsgs := sets.NewString() + for _, sc := range scs { + // check for duplicate error messages and skip them + msg := fmt.Sprintf("%s: %s", sc.Field, sc.Message) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, errors.New(msg)) + } + return utilerrors.NewAggregate(errs) +} + +// StandardErrorMessage translates common errors into a human readable message, or returns +// false if the error is not one of the recognized types. It may also log extended +// information to klog. +// +// This method is generic to the command in use and may be used by non-Kubectl +// commands. 
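+//
+// Example translation (editor's sketch, host is hypothetical): a *url.Error
+// whose cause contains "connection refused" for https://10.0.0.1:6443 becomes
+//
+//	"The connection to the server 10.0.0.1:6443 was refused - did you specify the right host or port?"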
+func StandardErrorMessage(err error) (string, bool) {
+ if debugErr, ok := err.(debugError); ok {
+ klog.V(4).Infof(debugErr.DebugError())
+ }
+ status, isStatus := err.(apierrors.APIStatus)
+ switch {
+ case isStatus:
+ switch s := status.Status(); {
+ case s.Reason == metav1.StatusReasonUnauthorized:
+ return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message), true
+ case len(s.Reason) > 0:
+ return fmt.Sprintf("Error from server (%s): %s", s.Reason, err.Error()), true
+ default:
+ return fmt.Sprintf("Error from server: %s", err.Error()), true
+ }
+ case apierrors.IsUnexpectedObjectError(err):
+ return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true
+ }
+ switch t := err.(type) {
+ case *url.Error:
+ klog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err)
+ switch {
+ case strings.Contains(t.Err.Error(), "connection refused"):
+ host := t.URL
+ if server, err := url.Parse(t.URL); err == nil {
+ host = server.Host
+ }
+ return fmt.Sprintf("The connection to the server %s was refused - did you specify the right host or port?", host), true
+ }
+ return fmt.Sprintf("Unable to connect to the server: %v", t.Err), true
+ }
+ return "", false
+}
+
+// MultilineError returns a string representing an error that splits sub errors into their own
+// lines. The returned string will end with a newline.
+func MultilineError(prefix string, err error) string {
+ if agg, ok := err.(utilerrors.Aggregate); ok {
+ errs := utilerrors.Flatten(agg).Errors()
+ buf := &bytes.Buffer{}
+ switch len(errs) {
+ case 0:
+ return fmt.Sprintf("%s%v\n", prefix, err)
+ case 1:
+ return fmt.Sprintf("%s%v\n", prefix, messageForError(errs[0]))
+ default:
+ fmt.Fprintln(buf, prefix)
+ for _, err := range errs {
+ fmt.Fprintf(buf, "* %v\n", messageForError(err))
+ }
+ return buf.String()
+ }
+ }
+ return fmt.Sprintf("%s%s\n", prefix, err)
+}
+
+// PrintErrorWithCauses prints an error's kind, name, and each of the error's causes on its own line.
+// The returned string will end with a newline.
+// Returns true if a case exists to handle the error type, or false otherwise.
+func PrintErrorWithCauses(err error, errOut io.Writer) bool {
+ switch t := err.(type) {
+ case *apierrors.StatusError:
+ errorDetails := t.Status().Details
+ if errorDetails != nil {
+ fmt.Fprintf(errOut, "error: %s %q is invalid\n\n", errorDetails.Kind, errorDetails.Name)
+ for _, cause := range errorDetails.Causes {
+ fmt.Fprintf(errOut, "* %s: %s\n", cause.Field, cause.Message)
+ }
+ return true
+ }
+ }
+
+ fmt.Fprintf(errOut, "error: %v\n", err)
+ return false
+}
+
+// MultipleErrors returns a newline delimited string containing
+// the prefix and referenced errors in standard form.
+func MultipleErrors(prefix string, errs []error) string {
+ buf := &bytes.Buffer{}
+ for _, err := range errs {
+ fmt.Fprintf(buf, "%s%v\n", prefix, messageForError(err))
+ }
+ return buf.String()
+}
+
+// messageForError returns the string representing the error.
+func messageForError(err error) string {
+ msg, ok := StandardErrorMessage(err)
+ if !ok {
+ msg = err.Error()
+ }
+ return msg
+}
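Because the fatal handler is package-level state, a test can observe what CheckErr would have printed instead of letting it exit the process. A minimal sketch (the test name and error are invented; `util` is assumed to alias this package, and `DefaultErrorExitCode` is the exit-code constant this file uses):

```go
package util_test

import (
	"errors"
	"strings"
	"testing"

	util "k8s.io/kubectl/pkg/cmd/util"
)

// Hypothetical test: capture the message and exit code instead of exiting.
func TestCheckErrPrefixesUnknownErrors(t *testing.T) {
	var gotMsg string
	var gotCode int
	util.BehaviorOnFatal(func(msg string, code int) {
		gotMsg, gotCode = msg, code
	})
	defer util.DefaultBehaviorOnFatal()

	util.CheckErr(errors.New("boom"))

	// Unrecognized errors are printed with an "error: " prefix.
	if gotCode != util.DefaultErrorExitCode || !strings.HasPrefix(gotMsg, "error: ") {
		t.Fatalf("unexpected fatal call: %q (code %d)", gotMsg, gotCode)
	}
}
```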
+// UsageErrorf returns an error that appends a hint to consult the command's
+// help output ("See '<command path> -h'") to the formatted message.
+func UsageErrorf(cmd *cobra.Command, format string, args ...interface{}) error {
+ msg := fmt.Sprintf(format, args...)
+ return fmt.Errorf("%s\nSee '%s -h' for help and examples", msg, cmd.CommandPath())
+}
+
+func IsFilenameSliceEmpty(filenames []string, directory string) bool {
+ return len(filenames) == 0 && directory == ""
+}
+
+func GetFlagString(cmd *cobra.Command, flag string) string {
+ s, err := cmd.Flags().GetString(flag)
+ if err != nil {
+ klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+ }
+ return s
+}
+
+// GetFlagStringSlice can be used to accept multiple arguments with flag repetition (e.g. -f arg1,arg2 -f arg3 ...)
+func GetFlagStringSlice(cmd *cobra.Command, flag string) []string {
+ s, err := cmd.Flags().GetStringSlice(flag)
+ if err != nil {
+ klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+ }
+ return s
+}
+
+// GetFlagStringArray can be used to accept multiple arguments with flag repetition (e.g. -f arg1 -f arg2 ...)
+func GetFlagStringArray(cmd *cobra.Command, flag string) []string {
+ s, err := cmd.Flags().GetStringArray(flag)
+ if err != nil {
+ klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+ }
+ return s
+}
+
+func GetFlagBool(cmd *cobra.Command, flag string) bool {
+ b, err := cmd.Flags().GetBool(flag)
+ if err != nil {
+ klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+ }
+ return b
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt(cmd *cobra.Command, flag string) int {
+ i, err := cmd.Flags().GetInt(flag)
+ if err != nil {
+ klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+ }
+ return i
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt32(cmd *cobra.Command, flag string) int32 {
+ i, err := cmd.Flags().GetInt32(flag)
+ if err != nil {
+ klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+ }
+ return i
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt64(cmd *cobra.Command, flag string) int64 {
+ i, err := cmd.Flags().GetInt64(flag)
+ if err != nil {
+ klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+ }
+ return i
+}
+
+func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration {
+ d, err := cmd.Flags().GetDuration(flag)
+ if err != nil {
+ klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+ }
+ return d
+}
+
+func GetPodRunningTimeoutFlag(cmd *cobra.Command) (time.Duration, error) {
+ timeout := GetFlagDuration(cmd, "pod-running-timeout")
+ if timeout <= 0 {
+ return timeout, fmt.Errorf("--pod-running-timeout must be higher than zero")
+ }
+ return timeout, nil
+}
+
+type FeatureGate string
+
+const (
+ ApplySet FeatureGate = "KUBECTL_APPLYSET"
+ CmdPluginAsSubcommand FeatureGate = "KUBECTL_ENABLE_CMD_SHADOW"
+ InteractiveDelete FeatureGate = "KUBECTL_INTERACTIVE_DELETE"
+ OpenAPIV3Patch FeatureGate = "KUBECTL_OPENAPIV3_PATCH"
+ RemoteCommandWebsockets FeatureGate = "KUBECTL_REMOTE_COMMAND_WEBSOCKETS"
+)
+
+// IsEnabled returns true iff the environment variable is set to true.
+// In all other cases it returns false.
+func (f FeatureGate) IsEnabled() bool {
+ return strings.ToLower(os.Getenv(string(f))) == "true"
+}
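A quick sketch of how these environment-variable gates behave, assuming a hypothetical `main` that imports this package as `util` (normally the variable would be exported by the user's shell rather than set in code):

```go
package main

import (
	"fmt"
	"os"

	util "k8s.io/kubectl/pkg/cmd/util"
)

func main() {
	// Simulate the user exporting KUBECTL_APPLYSET=true.
	os.Setenv(string(util.ApplySet), "true")
	fmt.Println(util.ApplySet.IsEnabled()) // true

	// An unset gate is neither enabled nor disabled, which is what lets
	// IsDisabled (defined next) model features that are on by default.
	fmt.Println(util.InteractiveDelete.IsEnabled())  // false
	fmt.Println(util.InteractiveDelete.IsDisabled()) // false
}
```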
+// IsDisabled returns true iff the environment variable is set to false.
+// In all other cases it returns true.
+// This function is used for features that are enabled by default, where a
+// way to disable the feature may still be needed.
+func (f FeatureGate) IsDisabled() bool {
+ return strings.ToLower(os.Getenv(string(f))) == "false"
+}
+
+func AddValidateFlags(cmd *cobra.Command) {
+ cmd.Flags().String(
+ "validate",
+ "strict",
+ `Must be one of: strict (or true), warn, ignore (or false).
+ "true" or "strict" will use a schema to validate the input and fail the request if invalid. It will perform server side validation if ServerSideFieldValidation is enabled on the api-server, but will fall back to less reliable client-side validation if not.
+ "warn" will warn about unknown or duplicate fields without blocking the request if server-side field validation is enabled on the API server, and behave as "ignore" otherwise.
+ "false" or "ignore" will not perform any schema validation, silently dropping any unknown or duplicate fields.`,
+ )
+
+ cmd.Flags().Lookup("validate").NoOptDefVal = "strict"
+}
+
+func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) {
+ AddJsonFilenameFlag(cmd.Flags(), &options.Filenames, "Filename, directory, or URL to files "+usage)
+ AddKustomizeFlag(cmd.Flags(), &options.Kustomize)
+ cmd.Flags().BoolVarP(&options.Recursive, "recursive", "R", options.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.")
+}
+
+func AddJsonFilenameFlag(flags *pflag.FlagSet, value *[]string, usage string) {
+ flags.StringSliceVarP(value, "filename", "f", *value, usage)
+ annotations := make([]string, 0, len(resource.FileExtensions))
+ for _, ext := range resource.FileExtensions {
+ annotations = append(annotations, strings.TrimLeft(ext, "."))
+ }
+ flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations)
+}
+
+// AddKustomizeFlag adds the kustomize flag to a command
+func AddKustomizeFlag(flags *pflag.FlagSet, value *string) {
+ flags.StringVarP(value, "kustomize", "k", *value, "Process the kustomization directory. This flag can't be used together with -f or -R.")
+}
+
+// AddDryRunFlag adds the dry-run flag to a command. Usually used by mutations.
+func AddDryRunFlag(cmd *cobra.Command) {
+ cmd.Flags().String(
+ "dry-run",
+ "none",
+ `Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource.`,
+ )
+ cmd.Flags().Lookup("dry-run").NoOptDefVal = "unchanged"
+}
+
+func AddFieldManagerFlagVar(cmd *cobra.Command, p *string, defaultFieldManager string) {
+ cmd.Flags().StringVar(p, "field-manager", defaultFieldManager, "Name of the manager used to track field ownership.")
+}
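The `NoOptDefVal` assignments above are what let a bare `--validate` or `--dry-run` parse without a value; the results are read back by `GetValidationDirective` and `GetDryRunStrategy`, defined further down in this file. A minimal, hypothetical wiring sketch for the validate side (the command name is invented):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	util "k8s.io/kubectl/pkg/cmd/util"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(c *cobra.Command, _ []string) error {
			directive, err := util.GetValidationDirective(c)
			if err != nil {
				return err
			}
			// --validate / --validate=strict / --validate=true -> "Strict"
			// --validate=warn                                  -> "Warn"
			// --validate=ignore / --validate=false             -> "Ignore"
			fmt.Println(directive)
			return nil
		},
	}
	util.AddValidateFlags(cmd)
	cmd.SetArgs([]string{"--validate=warn"})
	util.CheckErr(cmd.Execute())
}
```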
+func AddContainerVarFlags(cmd *cobra.Command, p *string, containerName string) {
+ cmd.Flags().StringVarP(p, "container", "c", containerName, "Container name. If omitted, the kubectl.kubernetes.io/default-container annotation will be used to select the container to attach to; otherwise the first container in the pod will be chosen.")
+}
+
+func AddServerSideApplyFlags(cmd *cobra.Command) {
+ cmd.Flags().Bool("server-side", false, "If true, apply runs in the server instead of the client.")
+ cmd.Flags().Bool("force-conflicts", false, "If true, server-side apply will force the changes against conflicts.")
+}
+
+func AddPodRunningTimeoutFlag(cmd *cobra.Command, defaultTimeout time.Duration) {
+ cmd.Flags().Duration("pod-running-timeout", defaultTimeout, "The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one pod is running")
+}
+
+func AddApplyAnnotationFlags(cmd *cobra.Command) {
+ cmd.Flags().Bool(ApplyAnnotationsFlag, false, "If true, the configuration of current object will be saved in its annotation. Otherwise, the annotation will be unchanged. This flag is useful when you want to perform kubectl apply on this object in the future.")
+}
+
+func AddApplyAnnotationVarFlags(cmd *cobra.Command, applyAnnotation *bool) {
+ cmd.Flags().BoolVar(applyAnnotation, ApplyAnnotationsFlag, *applyAnnotation, "If true, the configuration of current object will be saved in its annotation. Otherwise, the annotation will be unchanged. This flag is useful when you want to perform kubectl apply on this object in the future.")
+}
+
+func AddChunkSizeFlag(cmd *cobra.Command, value *int64) {
+ cmd.Flags().Int64Var(value, "chunk-size", *value,
+ "Return large lists in chunks rather than all at once. Pass 0 to disable. This flag is beta and may change in the future.")
+}
+
+func AddLabelSelectorFlagVar(cmd *cobra.Command, p *string) {
+ cmd.Flags().StringVarP(p, "selector", "l", *p, "Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.")
+}
+
+func AddPruningFlags(cmd *cobra.Command, prune *bool, pruneAllowlist *[]string, pruneWhitelist *[]string, all *bool, applySetRef *string) {
+ // Flags associated with the original allowlist-based alpha
+ cmd.Flags().StringArrayVar(pruneAllowlist, "prune-allowlist", *pruneAllowlist, "Overwrite the default allowlist with <group/version/kind> for --prune")
+ cmd.Flags().StringArrayVar(pruneWhitelist, "prune-whitelist", *pruneWhitelist, "Overwrite the default whitelist with <group/version/kind> for --prune") // TODO: Remove this in kubectl 1.28 or later
+ _ = cmd.Flags().MarkDeprecated("prune-whitelist", "Use --prune-allowlist instead.")
+ cmd.Flags().BoolVar(all, "all", *all, "Select all resources in the namespace of the specified resource types.")
+
+ // Flags associated with the new ApplySet-based alpha
+ if ApplySet.IsEnabled() {
+ cmd.Flags().StringVar(applySetRef, "applyset", *applySetRef, "[alpha] The name of the ApplySet that tracks which resources are being managed, for the purposes of determining what to prune. Live resources that are part of the ApplySet but have been removed from the provided configs will be deleted. Format: [RESOURCE][.GROUP]/NAME. A Secret will be used if no resource or group is specified.")
+ cmd.Flags().BoolVar(prune, "prune", *prune, "Automatically delete previously applied resource objects that do not appear in the provided configs. For alpha1, use with either -l or --all. For alpha2, use with --applyset.")
+ } else {
+ // different docs for the shared --prune flag if only alpha1 is enabled
+ cmd.Flags().BoolVar(prune, "prune", *prune, "Automatically delete resource objects that do not appear in the configs and are created by either apply or create --save-config. Should be used with either -l or --all.")
+ }
+}
+
+func AddSubresourceFlags(cmd *cobra.Command, subresource *string, usage string, allowedSubresources ...string) {
+ cmd.Flags().StringVar(subresource, "subresource", "", fmt.Sprintf("%s Must be one of %v. This flag is beta and may change in the future.", usage, allowedSubresources))
+ CheckErr(cmd.RegisterFlagCompletionFunc("subresource", func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+ return allowedSubresources, cobra.ShellCompDirectiveNoFileComp
+ }))
+}
+
+type ValidateOptions struct {
+ ValidationDirective string
+}
+
+// Merge converts the passed-in object to JSON, merges the fragment into it using an RFC7396 JSON Merge Patch,
+// and returns the resulting object.
+// TODO: merge assumes JSON serialization, and does not properly abstract API retrieval
+func Merge(codec runtime.Codec, dst runtime.Object, fragment string) (runtime.Object, error) {
+ // encode dst into versioned json and apply fragment directly to it
+ target, err := runtime.Encode(codec, dst)
+ if err != nil {
+ return nil, err
+ }
+ patched, err := jsonpatch.MergePatch(target, []byte(fragment))
+ if err != nil {
+ return nil, err
+ }
+ out, err := runtime.Decode(codec, patched)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// StrategicMerge converts the passed-in object to JSON, merges the fragment into it using a Strategic Merge Patch,
+// and returns the resulting object.
+func StrategicMerge(codec runtime.Codec, dst runtime.Object, fragment string, dataStruct runtime.Object) (runtime.Object, error) {
+ target, err := runtime.Encode(codec, dst)
+ if err != nil {
+ return nil, err
+ }
+ patched, err := strategicpatch.StrategicMergePatch(target, []byte(fragment), dataStruct)
+ if err != nil {
+ return nil, err
+ }
+ out, err := runtime.Decode(codec, patched)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// JSONPatch converts the passed-in object to JSON, performs an RFC6902 JSON Patch using operations specified in the
+// fragment, and returns the resulting object.
+func JSONPatch(codec runtime.Codec, dst runtime.Object, fragment string) (runtime.Object, error) {
+ target, err := runtime.Encode(codec, dst)
+ if err != nil {
+ return nil, err
+ }
+ patch, err := jsonpatch.DecodePatch([]byte(fragment))
+ if err != nil {
+ return nil, err
+ }
+ patched, err := patch.Apply(target)
+ if err != nil {
+ return nil, err
+ }
+ out, err := runtime.Decode(codec, patched)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
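A hedged sketch contrasting the three fragment styles these helpers accept, building the codec the same way Overrider.Apply does later in this diff; the pod, image names, and fragments are invented:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	util "k8s.io/kubectl/pkg/cmd/util"
	"k8s.io/kubectl/pkg/scheme"
)

func main() {
	pod := &corev1.Pod{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "app", Image: "demo:v1"}}},
	}
	codec := runtime.NewCodec(scheme.DefaultJSONEncoder(),
		scheme.Codecs.UniversalDecoder(scheme.Scheme.PrioritizedVersionsAllGroups()...))

	// RFC 7396 merge patch: maps are merged key by key, lists are replaced.
	merged, err := util.Merge(codec, pod, `{"metadata":{"labels":{"app":"demo"}}}`)
	util.CheckErr(err)

	// RFC 6902 patch: an explicit list of operations addressed by path.
	patched, err := util.JSONPatch(codec, pod, `[{"op":"add","path":"/metadata/labels","value":{"app":"demo"}}]`)
	util.CheckErr(err)

	// Strategic merge patch: list merging is driven by the Go struct's patch
	// tags, so the typed "data struct" must be supplied.
	strategic, err := util.StrategicMerge(codec, pod, `{"spec":{"containers":[{"name":"app","image":"demo:v2"}]}}`, &corev1.Pod{})
	util.CheckErr(err)

	fmt.Println(merged != nil, patched != nil, strategic != nil)
}
```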
+// DumpReaderToFile writes all data from the given io.Reader to the specified file
+// (usually for temporary use).
+func DumpReaderToFile(reader io.Reader, filename string) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ buffer := make([]byte, 1024)
+ for {
+ count, err := reader.Read(buffer)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ _, err = f.Write(buffer[:count])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func GetServerSideApplyFlag(cmd *cobra.Command) bool {
+ return GetFlagBool(cmd, "server-side")
+}
+
+func GetForceConflictsFlag(cmd *cobra.Command) bool {
+ return GetFlagBool(cmd, "force-conflicts")
+}
+
+func GetFieldManagerFlag(cmd *cobra.Command) string {
+ return GetFlagString(cmd, "field-manager")
+}
+
+func GetValidationDirective(cmd *cobra.Command) (string, error) {
+ var validateFlag = GetFlagString(cmd, "validate")
+ b, err := strconv.ParseBool(validateFlag)
+ if err != nil {
+ switch validateFlag {
+ case "strict":
+ return metav1.FieldValidationStrict, nil
+ case "warn":
+ return metav1.FieldValidationWarn, nil
+ case "ignore":
+ return metav1.FieldValidationIgnore, nil
+ default:
+ return metav1.FieldValidationStrict, fmt.Errorf(`invalid --validate option %q; must be one of: strict (or true), warn, ignore (or false)`, validateFlag)
+ }
+ }
+ // The flag was a boolean
+ if b {
+ return metav1.FieldValidationStrict, nil
+ }
+ return metav1.FieldValidationIgnore, nil
+}
+
+type DryRunStrategy int
+
+const (
+ // DryRunNone indicates the client will make all mutating calls
+ DryRunNone DryRunStrategy = iota
+
+ // DryRunClient, or client-side dry-run, indicates the client will prevent
+ // making mutating calls such as CREATE, PATCH, and DELETE
+ DryRunClient
+
+ // DryRunServer, or server-side dry-run, indicates the client will send
+ // mutating calls to the APIServer with the dry-run parameter to prevent
+ // persisting changes.
+ //
+ // Note that clients sending server-side dry-run calls should verify that
+ // the APIServer and the resource support server-side dry-run, and otherwise
+ // clients should fail early.
+ //
+ // If a client sends a server-side dry-run call to an APIServer that doesn't
+ // support server-side dry-run, then the APIServer will persist changes inadvertently.
+ DryRunServer
+)
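GetDryRunStrategy, defined next, maps the flag values onto this enum and warns on the deprecated forms. A small, hypothetical sketch of the mapping (the command is invented; flags are parsed by hand to keep it short):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	util "k8s.io/kubectl/pkg/cmd/util"
)

func main() {
	cmd := &cobra.Command{Use: "demo", Run: func(*cobra.Command, []string) {}}
	util.AddDryRunFlag(cmd)
	util.CheckErr(cmd.Flags().Parse([]string{"--dry-run=server"}))

	strategy, err := util.GetDryRunStrategy(cmd)
	util.CheckErr(err)
	fmt.Println(strategy == util.DryRunServer) // true

	// "client" -> DryRunClient, "none" -> DryRunNone; a bare --dry-run or a
	// boolean value still works but logs a deprecation warning.
}
```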
Must be "none", "server", or "client".`, dryRunFlag) + } + } + // The flag was a boolean + if b { + klog.Warningf(`--dry-run=%v is deprecated (boolean value) and can be replaced with --dry-run=%s.`, dryRunFlag, "client") + return DryRunClient, nil + } + klog.Warningf(`--dry-run=%v is deprecated (boolean value) and can be replaced with --dry-run=%s.`, dryRunFlag, "none") + return DryRunNone, nil +} + +// PrintFlagsWithDryRunStrategy sets a success message at print time for the dry run strategy +// +// TODO(juanvallejo): This can be cleaned up even further by creating +// a PrintFlags struct that binds the --dry-run flag, and whose +// ToPrinter method returns a printer that understands how to print +// this success message. +func PrintFlagsWithDryRunStrategy(printFlags *genericclioptions.PrintFlags, dryRunStrategy DryRunStrategy) *genericclioptions.PrintFlags { + switch dryRunStrategy { + case DryRunClient: + printFlags.Complete("%s (dry run)") + case DryRunServer: + printFlags.Complete("%s (server dry run)") + } + return printFlags +} + +// GetResourcesAndPairs retrieves resources and "KEY=VALUE or KEY-" pair args from given args +func GetResourcesAndPairs(args []string, pairType string) (resources []string, pairArgs []string, err error) { + foundPair := false + for _, s := range args { + nonResource := (strings.Contains(s, "=") && s[0] != '=') || (strings.HasSuffix(s, "-") && s != "-") + switch { + case !foundPair && nonResource: + foundPair = true + fallthrough + case foundPair && nonResource: + pairArgs = append(pairArgs, s) + case !foundPair && !nonResource: + resources = append(resources, s) + case foundPair && !nonResource: + err = fmt.Errorf("all resources must be specified before %s changes: %s", pairType, s) + return + } + } + return +} + +// ParsePairs retrieves new and remove pairs (if supportRemove is true) from "KEY=VALUE or KEY-" pair args +func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPairs map[string]string, removePairs []string, err error) { + newPairs = map[string]string{} + if supportRemove { + removePairs = []string{} + } + var invalidBuf bytes.Buffer + var invalidBufNonEmpty bool + for _, pairArg := range pairArgs { + if strings.Contains(pairArg, "=") && pairArg[0] != '=' { + parts := strings.SplitN(pairArg, "=", 2) + if len(parts) != 2 { + if invalidBufNonEmpty { + invalidBuf.WriteString(", ") + } + invalidBuf.WriteString(pairArg) + invalidBufNonEmpty = true + } else { + newPairs[parts[0]] = parts[1] + } + } else if supportRemove && strings.HasSuffix(pairArg, "-") && pairArg != "-" { + removePairs = append(removePairs, pairArg[:len(pairArg)-1]) + } else { + if invalidBufNonEmpty { + invalidBuf.WriteString(", ") + } + invalidBuf.WriteString(pairArg) + invalidBufNonEmpty = true + } + } + if invalidBufNonEmpty { + err = fmt.Errorf("invalid %s format: %s", pairType, invalidBuf.String()) + return + } + + return +} + +// IsSiblingCommandExists receives a pointer to a cobra command and a target string. +// Returns true if the target string is found in the list of sibling commands. +func IsSiblingCommandExists(cmd *cobra.Command, targetCmdName string) bool { + for _, c := range cmd.Parent().Commands() { + if c.Name() == targetCmdName { + return true + } + } + + return false +} + +// DefaultSubCommandRun prints a command's help string to the specified output if no +// arguments (sub-commands) are provided, or a usage error otherwise. 
+func DefaultSubCommandRun(out io.Writer) func(c *cobra.Command, args []string) { + return func(c *cobra.Command, args []string) { + c.SetOut(out) + c.SetErr(out) + RequireNoArguments(c, args) + c.Help() + CheckErr(ErrExit) + } +} + +// RequireNoArguments exits with a usage error if extra arguments are provided. +func RequireNoArguments(c *cobra.Command, args []string) { + if len(args) > 0 { + CheckErr(UsageErrorf(c, "unknown command %q", strings.Join(args, " "))) + } +} + +// StripComments will transform a YAML file into JSON, thus dropping any comments +// in it. Note that if the given file has a syntax error, the transformation will +// fail and we will manually drop all comments from the file. +func StripComments(file []byte) []byte { + stripped := file + stripped, err := yaml.ToJSON(stripped) + if err != nil { + stripped = ManualStrip(file) + } + return stripped +} + +// ManualStrip is used for dropping comments from a YAML file +func ManualStrip(file []byte) []byte { + stripped := []byte{} + lines := bytes.Split(file, []byte("\n")) + for i, line := range lines { + trimline := bytes.TrimSpace(line) + + if bytes.HasPrefix(trimline, []byte("#")) && !bytes.HasPrefix(trimline, []byte("#!")) { + continue + } + stripped = append(stripped, line...) + if i < len(lines)-1 { + stripped = append(stripped, '\n') + } + } + return stripped +} + +// ScaleClientFunc provides a ScalesGetter +type ScaleClientFunc func(genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) + +// ScaleClientFn gives a way to easily override the function for unit testing if needed. +var ScaleClientFn ScaleClientFunc = scaleClient + +// scaleClient gives you back scale getter +func scaleClient(restClientGetter genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) { + discoveryClient, err := restClientGetter.ToDiscoveryClient() + if err != nil { + return nil, err + } + + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + + setKubernetesDefaults(clientConfig) + restClient, err := rest.RESTClientFor(clientConfig) + if err != nil { + return nil, err + } + resolver := scale.NewDiscoveryScaleKindResolver(discoveryClient) + mapper, err := restClientGetter.ToRESTMapper() + if err != nil { + return nil, err + } + + return scale.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil +} + +func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) { + fmt.Fprintf(cmdErr, "WARNING: New generator %q specified, "+ + "but it isn't available. "+ + "Falling back to %q.\n", + newGeneratorName, + oldGeneratorName, + ) +} + +// Difference removes any elements of subArray from fullArray and returns the result +func Difference(fullArray []string, subArray []string) []string { + exclude := make(map[string]bool, len(subArray)) + for _, elem := range subArray { + exclude[elem] = true + } + var result []string + for _, elem := range fullArray { + if _, found := exclude[elem]; !found { + result = append(result, elem) + } + } + return result +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go b/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go new file mode 100644 index 000000000..74308bc5d --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "sync" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/scheme" + + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/component-base/version" +) + +const ( + flagMatchBinaryVersion = "match-server-version" +) + +// MatchVersionFlags is for setting the "match server version" function. +type MatchVersionFlags struct { + Delegate genericclioptions.RESTClientGetter + + RequireMatchedServerVersion bool + checkServerVersion sync.Once + matchesServerVersionErr error +} + +var _ genericclioptions.RESTClientGetter = &MatchVersionFlags{} + +func (f *MatchVersionFlags) checkMatchingServerVersion() error { + f.checkServerVersion.Do(func() { + if !f.RequireMatchedServerVersion { + return + } + discoveryClient, err := f.Delegate.ToDiscoveryClient() + if err != nil { + f.matchesServerVersionErr = err + return + } + f.matchesServerVersionErr = discovery.MatchesServerVersion(version.Get(), discoveryClient) + }) + + return f.matchesServerVersionErr +} + +// ToRESTConfig implements RESTClientGetter. +// Returns a REST client configuration based on a provided path +// to a .kubeconfig file, loading rules, and config flag overrides. +// Expects the AddFlags method to have been called. +func (f *MatchVersionFlags) ToRESTConfig() (*rest.Config, error) { + if err := f.checkMatchingServerVersion(); err != nil { + return nil, err + } + clientConfig, err := f.Delegate.ToRESTConfig() + if err != nil { + return nil, err + } + // TODO we should not have to do this. It smacks of something going wrong. + setKubernetesDefaults(clientConfig) + return clientConfig, nil +} + +func (f *MatchVersionFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig { + return f.Delegate.ToRawKubeConfigLoader() +} + +func (f *MatchVersionFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + if err := f.checkMatchingServerVersion(); err != nil { + return nil, err + } + return f.Delegate.ToDiscoveryClient() +} + +// ToRESTMapper returns a mapper. +func (f *MatchVersionFlags) ToRESTMapper() (meta.RESTMapper, error) { + if err := f.checkMatchingServerVersion(); err != nil { + return nil, err + } + return f.Delegate.ToRESTMapper() +} + +func (f *MatchVersionFlags) AddFlags(flags *pflag.FlagSet) { + flags.BoolVar(&f.RequireMatchedServerVersion, flagMatchBinaryVersion, f.RequireMatchedServerVersion, "Require server version to match client version") +} + +func NewMatchVersionFlags(delegate genericclioptions.RESTClientGetter) *MatchVersionFlags { + return &MatchVersionFlags{ + Delegate: delegate, + } +} + +// setKubernetesDefaults sets default values on the provided client config for accessing the +// Kubernetes API or returns an error if any of the defaults are impossible or invalid. +// TODO this isn't what we want. Each clientset should be setting defaults as it sees fit. +func setKubernetesDefaults(config *rest.Config) error { + // TODO remove this hack. This is allowing the GetOptions to be serialized. 
+ config.GroupVersion = &schema.GroupVersion{Group: "", Version: "v1"} + + if config.APIPath == "" { + config.APIPath = "/api" + } + if config.NegotiatedSerializer == nil { + // This codec factory ensures the resources are not converted. Therefore, resources + // will not be round-tripped through internal versions. Defaulting does not happen + // on the client. + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + } + return rest.SetKubernetesDefaults(config) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/override_options.go b/vendor/k8s.io/kubectl/pkg/cmd/util/override_options.go new file mode 100644 index 000000000..1e63bc789 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/override_options.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/i18n" +) + +type OverrideType string + +const ( + // OverrideTypeJSON will use an RFC6902 JSON Patch to alter the generated output + OverrideTypeJSON OverrideType = "json" + + // OverrideTypeMerge will use an RFC7396 JSON Merge Patch to alter the generated output + OverrideTypeMerge OverrideType = "merge" + + // OverrideTypeStrategic will use a Strategic Merge Patch to alter the generated output + OverrideTypeStrategic OverrideType = "strategic" +) + +const DefaultOverrideType = OverrideTypeMerge + +type OverrideOptions struct { + Overrides string + OverrideType OverrideType +} + +func (o *OverrideOptions) AddOverrideFlags(cmd *cobra.Command) { + cmd.Flags().StringVar(&o.Overrides, "overrides", "", i18n.T("An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. 
Requires that the object supply a valid apiVersion field.")) + cmd.Flags().StringVar((*string)(&o.OverrideType), "override-type", string(DefaultOverrideType), fmt.Sprintf("The method used to override the generated object: %s, %s, or %s.", OverrideTypeJSON, OverrideTypeMerge, OverrideTypeStrategic)) +} + +func (o *OverrideOptions) NewOverrider(dataStruct runtime.Object) *Overrider { + return &Overrider{ + Options: o, + DataStruct: dataStruct, + } +} + +type Overrider struct { + Options *OverrideOptions + DataStruct runtime.Object +} + +func (o *Overrider) Apply(obj runtime.Object) (runtime.Object, error) { + if len(o.Options.Overrides) == 0 { + return obj, nil + } + + codec := runtime.NewCodec(scheme.DefaultJSONEncoder(), scheme.Codecs.UniversalDecoder(scheme.Scheme.PrioritizedVersionsAllGroups()...)) + + var overrideType OverrideType + if len(o.Options.OverrideType) == 0 { + overrideType = DefaultOverrideType + } else { + overrideType = o.Options.OverrideType + } + + switch overrideType { + case OverrideTypeJSON: + return JSONPatch(codec, obj, o.Options.Overrides) + case OverrideTypeMerge: + return Merge(codec, obj, o.Options.Overrides) + case OverrideTypeStrategic: + return StrategicMerge(codec, obj, o.Options.Overrides, o.DataStruct) + default: + return nil, fmt.Errorf("invalid override type: %v", overrideType) + } +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go b/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go new file mode 100644 index 000000000..ebd228821 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go @@ -0,0 +1,29 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "k8s.io/kubectl/pkg/util/templates" +) + +// SuggestAPIResources returns a suggestion to use the "api-resources" command +// to retrieve a supported list of resources +func SuggestAPIResources(parent string) string { + return templates.LongDesc(fmt.Sprintf("Use \"%s api-resources\" for a complete list of supported resources.", parent)) +} diff --git a/vendor/k8s.io/kubectl/pkg/scheme/install.go b/vendor/k8s.io/kubectl/pkg/scheme/install.go new file mode 100644 index 000000000..52a7ce6a8 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/scheme/install.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package scheme
+
+import (
+ admissionv1 "k8s.io/api/admission/v1"
+ admissionv1beta1 "k8s.io/api/admission/v1beta1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ appsv1 "k8s.io/api/apps/v1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ authenticationv1 "k8s.io/api/authentication/v1"
+ authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
+ authorizationv1 "k8s.io/api/authorization/v1"
+ authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
+ batchv1 "k8s.io/api/batch/v1"
+ batchv1beta1 "k8s.io/api/batch/v1beta1"
+ certificatesv1 "k8s.io/api/certificates/v1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
+ networkingv1 "k8s.io/api/networking/v1"
+ policyv1 "k8s.io/api/policy/v1"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ storagev1 "k8s.io/api/storage/v1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/client-go/kubernetes/scheme"
+)
+
+// Register all groups in kubectl's registry, but not the componentconfig group, since it's not in k8s.io/api.
+// The code in this file mostly duplicates the install under k8s.io/kubernetes/pkg/api and k8s.io/kubernetes/pkg/apis,
+// but does NOT register the internal types.
+func init() { + // Register external types for Scheme + metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(metav1beta1.AddMetaToScheme(Scheme)) + utilruntime.Must(metav1.AddMetaToScheme(Scheme)) + utilruntime.Must(scheme.AddToScheme(Scheme)) + + utilruntime.Must(Scheme.SetVersionPriority(corev1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(admissionv1beta1.SchemeGroupVersion, admissionv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(admissionregistrationv1beta1.SchemeGroupVersion, admissionregistrationv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion, appsv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(authenticationv1.SchemeGroupVersion, authenticationv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(authorizationv1.SchemeGroupVersion, authorizationv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(autoscalingv1.SchemeGroupVersion, autoscalingv2.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(batchv1.SchemeGroupVersion, batchv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(certificatesv1.SchemeGroupVersion, certificatesv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(extensionsv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(imagepolicyv1alpha1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(networkingv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(policyv1beta1.SchemeGroupVersion, policyv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(rbacv1.SchemeGroupVersion, rbacv1beta1.SchemeGroupVersion, rbacv1alpha1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(schedulingv1alpha1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(storagev1.SchemeGroupVersion, storagev1beta1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/kubectl/pkg/scheme/scheme.go b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go new file mode 100644 index 000000000..d1d7847b8 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// All kubectl code should eventually switch to use this Registry and Scheme instead of the global ones. + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. 
+var ParameterCodec = runtime.NewParameterCodec(Scheme) + +// DefaultJSONEncoder returns a default encoder for our scheme +func DefaultJSONEncoder() runtime.Encoder { + return unstructured.NewJSONFallbackEncoder(Codecs.LegacyCodec(Scheme.PrioritizedVersionsAllGroups()...)) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go b/vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go new file mode 100644 index 000000000..d850b283d --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go @@ -0,0 +1,215 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package i18n + +import ( + "archive/zip" + "bytes" + "embed" + "errors" + "fmt" + "os" + "strings" + "sync" + + "github.com/chai2010/gettext-go" + + "k8s.io/klog/v2" +) + +//go:embed translations +var translations embed.FS + +var knownTranslations = map[string][]string{ + "kubectl": { + "default", + "en_US", + "fr_FR", + "zh_CN", + "ja_JP", + "zh_TW", + "it_IT", + "de_DE", + "ko_KR", + "pt_BR", + }, + // only used for unit tests. + "test": { + "default", + "en_US", + }, +} + +var ( + lazyLoadTranslationsOnce sync.Once + LoadTranslationsFunc = func() error { + return LoadTranslations("kubectl", nil) + } + translationsLoaded bool +) + +// SetLoadTranslationsFunc sets the function called to lazy load translations. +// It must be called in an init() func that occurs BEFORE any i18n.T() calls are made by any package. You can +// accomplish this by creating a separate package containing your init() func, and then importing that package BEFORE +// any other packages that call i18n.T(). +// +// Example Usage: +// +// package myi18n +// +// import "k8s.io/kubectl/pkg/util/i18n" +// +// func init() { +// if err := i18n.SetLoadTranslationsFunc(loadCustomTranslations); err != nil { +// panic(err) +// } +// } +// +// func loadCustomTranslations() error { +// // Load your custom translations here... +// } +// +// And then in your main or root command package, import your custom package like this: +// +// import ( +// // Other imports that don't need i18n... +// _ "example.com/myapp/myi18n" +// // Other imports that do need i18n... 
+// )
+func SetLoadTranslationsFunc(f func() error) error {
+ if translationsLoaded {
+ return errors.New("translations have already been loaded")
+ }
+ LoadTranslationsFunc = func() error {
+ if err := f(); err != nil {
+ return err
+ }
+ translationsLoaded = true
+ return nil
+ }
+ return nil
+}
+
+func loadSystemLanguage() string {
+ // Implements the following locale priority order: LC_ALL, LC_MESSAGES, LANG
+ // Similarly to: https://www.gnu.org/software/gettext/manual/html_node/Locale-Environment-Variables.html
+ langStr := os.Getenv("LC_ALL")
+ if langStr == "" {
+ langStr = os.Getenv("LC_MESSAGES")
+ }
+ if langStr == "" {
+ langStr = os.Getenv("LANG")
+ }
+
+ if langStr == "" {
+ klog.V(3).Infof("Couldn't find the LC_ALL, LC_MESSAGES or LANG environment variables, defaulting to en_US")
+ return "default"
+ }
+ pieces := strings.Split(langStr, ".")
+ if len(pieces) != 2 {
+ klog.V(3).Infof("Unexpected system language (%s), defaulting to en_US", langStr)
+ return "default"
+ }
+ return pieces[0]
+}
+
+func findLanguage(root string, getLanguageFn func() string) string {
+ langStr := getLanguageFn()
+
+ translations := knownTranslations[root]
+ for ix := range translations {
+ if translations[ix] == langStr {
+ return langStr
+ }
+ }
+ klog.V(3).Infof("Couldn't find translations for %s, using default", langStr)
+ return "default"
+}
+
+// LoadTranslations loads translation files. getLanguageFn should return a language
+// string (e.g. 'en-US'). If getLanguageFn is nil, then the loadSystemLanguage function
+// is used, which consults the LC_ALL, LC_MESSAGES, and LANG environment variables.
+func LoadTranslations(root string, getLanguageFn func() string) error {
+ if getLanguageFn == nil {
+ getLanguageFn = loadSystemLanguage
+ }
+
+ langStr := findLanguage(root, getLanguageFn)
+ translationFiles := []string{
+ fmt.Sprintf("%s/%s/LC_MESSAGES/k8s.po", root, langStr),
+ fmt.Sprintf("%s/%s/LC_MESSAGES/k8s.mo", root, langStr),
+ }
+
+ klog.V(3).Infof("Setting language to %s", langStr)
+ // TODO: list the directory and load all files.
+ buf := new(bytes.Buffer)
+ w := zip.NewWriter(buf)
+
+ for _, file := range translationFiles {
+ filename := "translations/" + file
+ f, err := w.Create(file)
+ if err != nil {
+ return err
+ }
+ data, err := translations.ReadFile(filename)
+ if err != nil {
+ return err
+ }
+ if _, err := f.Write(data); err != nil {
+ return err
+ }
+ }
+ // Make sure to check the error on Close.
+ if err := w.Close(); err != nil {
+ return err
+ }
+ gettext.BindLocale(gettext.New("k8s", root+".zip", buf.Bytes()))
+ gettext.SetDomain("k8s")
+ gettext.SetLanguage(langStr)
+ translationsLoaded = true
+ return nil
+}
+
+func lazyLoadTranslations() {
+ lazyLoadTranslationsOnce.Do(func() {
+ if translationsLoaded {
+ return
+ }
+ if err := LoadTranslationsFunc(); err != nil {
+ klog.Warningf("Failed to load translations: %v", err)
+ }
+ })
+}
+
+// T translates a string, possibly substituting arguments into it along
+// the way. If len(args) is > 0, args[0] is assumed to be the plural value
+// and plural translation is used.
+func T(defaultValue string, args ...int) string {
+ lazyLoadTranslations()
+ if len(args) == 0 {
+ return gettext.PGettext("", defaultValue)
+ }
+ return fmt.Sprintf(gettext.PNGettext("", defaultValue, defaultValue+".plural", args[0]),
+ args[0])
+}
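A small, hypothetical sketch of singular versus plural lookups through T and Errorf (the message strings are invented msgids; with no catalog entry loaded, gettext falls back to the English defaults):

```go
package main

import (
	"fmt"

	"k8s.io/kubectl/pkg/util/i18n"
)

func main() {
	// Singular lookup: the English string is the msgid.
	fmt.Println(i18n.T("Deleted all resources"))

	// Plural lookup: the single numeric argument selects between the msgid
	// and its ".plural" companion, and is substituted into the result.
	fmt.Println(i18n.T("%d resource deleted", 1))
	fmt.Println(i18n.T("%d resource deleted", 5))

	// Errorf runs the same translation and wraps the result in an error.
	fmt.Println(i18n.Errorf("%d resource could not be deleted", 2))
}
```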
+// Errorf produces an error with a translated error string.
+// Substitution is performed via the `T` function above, following
+// the same rules.
+func Errorf(defaultValue string, args ...int) error {
+ return errors.New(T(defaultValue, args...))
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/OWNERS b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/OWNERS
new file mode 100644
index 000000000..610dc59f8
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers: []
+approvers:
+ - sig-cli-maintainers
+emeritus_approvers:
+ - brendandburns
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/README.md b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/README.md
new file mode 100644
index 000000000..6318ffe62
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/README.md
@@ -0,0 +1,82 @@
+# Translations README
+
+This is a basic sketch of the workflow needed to add translations:
+
+# Adding/Updating Translations
+
+## New languages
+Create `staging/src/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/<language>/LC_MESSAGES/k8s.po`. There's
+no need to update `translations/test/...`, which is only used for unit tests.
+
+There is an example [PR here](https://github.com/kubernetes/kubernetes/pull/40645) which adds support for French.
+
+Once you've added a new language, you'll need to register it in
+`staging/src/k8s.io/kubectl/pkg/util/i18n/i18n.go` by adding it to the `knownTranslations` map.
+
+## Wrapping strings
+There is a simple script in `staging/src/k8s.io/kubectl/pkg/util/i18n/translations/extract.py` that performs
+simple regular-expression-based wrapping of strings. It can always
+use improvements to understand additional strings.
+
+## Extracting strings
+Once the strings are wrapped, you can extract strings from go files using
+the `go-xgettext` command which can be installed with:
+
+```console
+go get github.com/gosexy/gettext/go-xgettext
+```
+
+Once that's installed you can run `./hack/update-translations.sh`, which
+will extract and sort any new strings.
+
+## Adding new translations
+Edit the appropriate `k8s.po` file; `poedit` is a popular open-source tool
+for translations. You can load the `staging/src/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot` file
+to find messages that might be missing.
+
+Once you are done with your `k8s.po` file, generate the corresponding `k8s.mo`
+file. `poedit` does this automatically on save, but you can also run
+`./hack/update-translations.sh` to perform the `po` to `mo` translation.
+
+We use the English translation as the `msgid`.
+
+## Regenerating the bindata file
+
+> Note: Regeneration of bindata is no longer necessary for Kubernetes 1.22+ as
+> the translations are now embedded into the binary at compile time.
+> See: https://github.com/kubernetes/kubernetes/pull/99829
+
+With the `mo` files up to date, you can now convert the generated files
+into code using the `go-bindata` command which can be installed with:
+
+```console
+go get github.com/go-bindata/go-bindata/...
+```
+
+Run `./hack/generate-bindata.sh`; this will turn the translation files
+into generated code which will in turn be packaged into the Kubernetes
+binaries.
+
+## Extracting strings
+
+There is a script in `staging/src/k8s.io/kubectl/pkg/util/i18n/translations/extract.py` that knows how to do some
+simple extraction. It needs a lot of work.
+
+# Using translations
+
+To use translations, you simply need to add:
+```go
+import pkg/i18n
+...
+// Get a translated string
+translated := i18n.T("Your message in English here")
+
+// Get a translated plural string
+translated := i18n.T("You had %d items", items)
+
+// Translated error
+return i18n.Errorf("Something bad happened")
+
+// Translated plural error
+return i18n.Errorf("%d bad things happened")
+```
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py
new file mode 100644
index 000000000..33f975128
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Extract strings from command files and externalize into translation files.
+Expects to be run from the root directory of the repository.
+
+Usage:
+ extract.py pkg/kubectl/cmd/apply.go
+
+"""
+import fileinput
+import sys
+import re
+
+class MatchHandler(object):
+ """ Simple holder for a regular expression and a function
+ to run if that regular expression matches a line.
+ The function should expect (re.match, file, linenumber) as parameters
+ """
+ def __init__(self, regex, replace_fn):
+ self.regex = re.compile(regex)
+ self.replace_fn = replace_fn
+
+def short_replace(match, file, line_number):
+ """Replace a Short: ... cobra command description with an internationalization
+ """
+ sys.stdout.write('{}i18n.T({}),\n'.format(match.group(1), match.group(2)))
+
+SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
+
+def import_replace(match, file, line_number):
+ """Add an extra import for the i18n library.
+ Doesn't try to be smart and detect if it's already present, assumes a
+ gofmt round will fix things.
+ """
+ sys.stdout.write('{}\n"k8s.io/kubectl/pkg/util/i18n"\n'.format(match.group(1)))
+
+IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubectl/pkg/cmd/util")', import_replace)
+
+
+def string_flag_replace(match, file, line_number):
+ """Replace a cmd.Flags().String("...", "", "...") with an internationalization
+ """
+ sys.stdout.write('{}i18n.T("{})"))\n'.format(match.group(1), match.group(2)))
+
+STRING_FLAG_MATCH = MatchHandler(r'(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
+
+
+def long_string_replace(match, file, line_number):
+ return '{}i18n.T({}){}'.format(match.group(1), match.group(2), match.group(3))
+
+LONG_DESC_MATCH = MatchHandler(r'(LongDesc\()(`[^`]+`)([^\n]\n)', long_string_replace)
+
+EXAMPLE_MATCH = MatchHandler(r'(Examples\()(`[^`]+`)([^\n]\n)', long_string_replace)
+
+def replace(filename, matchers, multiline_matchers):
+ """Given a file and a set of matchers, run those matchers
+ across the file and replace it with the results.
+ """ + # Run all the matchers + line_number = 0 + for line in fileinput.input(filename, inplace=True): + line_number += 1 + matched = False + for matcher in matchers: + match = matcher.regex.match(line) + if match: + matcher.replace_fn(match, filename, line_number) + matched = True + break + if not matched: + sys.stdout.write(line) + sys.stdout.flush() + with open(filename, 'r') as datafile: + content = datafile.read() + for matcher in multiline_matchers: + match = matcher.regex.search(content) + while match: + rep = matcher.replace_fn(match, filename, 0) + # Escape back references in the replacement string + # (And escape for Python) + # (And escape for regex) + rep = re.sub('\\\\(\\d)', '\\\\\\\\\\1', rep) + content = matcher.regex.sub(rep, content, 1) + match = matcher.regex.search(content) + sys.stdout.write(content) + + # gofmt the file again + from subprocess import call + call(["goimports", "-w", filename]) + +replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH], [LONG_DESC_MATCH, EXAMPLE_MATCH]) diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/OWNERS b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/OWNERS new file mode 100644 index 000000000..56cbb7f7c --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-cli-maintainers +reviewers: + - sig-cli-reviewers diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..ee64eb7a8d1128957e8e49473ef694f5e5daf259 GIT binary patch literal 17420 zcmcJWdyE}dea9yd9@$VJJj45h6l{a<^&>zW2d7zk*NO42y?E_7I7HFxy|cSFd+*H6 z%v^7FD_S0ZP@$?tRi7#$TB=G_tEv*}=leTn z=FYv|UAsYDdG@z6bI$La^LszW|M0roe=FkWNv^kZ-Tu-j`eoq1yo_J`{P-17v<>_x z;Pb%lE2HScz_;8HMa#g)fV%%Ouyb1!{Uh*s;Q2eFsLk`}=c4G{fFr;U051Wx&S!z| z1U?6R8RK_=k8}Swua2S&`0m$4(QQ2c4d8X|fAh6bR08k3D~i6w^G^aFeUUj}M_?#)ru0$u^$ z2mB21IPhygP)Gj^d>`*A0UdA+DE|Hmza9YoQMmsBQ0I!?6-D;~-U|FQ@FC!%z;6PDcgAs6;4$EjfWHHr z2Y%unf1Vv6Ta4ZVadpnqz*E3$K+)lMfuM@M4ivuc0d?NnSrn0ojskVQdEl3CLk57K z<=%0aZvy`qcmen{ldl5*5~%g>f(RRozY6>};0}kD-26FA6}}$>b>6#K43yC&AgH42 zz@GxY0K5kL{lif-1pX)Rb>LY;=yLA25vuFJe+B-S=UtG_a=!=@H2&9tnlGXJMZgol z?*so9DExnc&`Q3(#-;|q9|AdgwDdt_23P=30$&2NCEurjzW{s&sQrHhD0=~?L@@e8;KzY~1^hhl9S?eb{V9-t z(SPs@l14{A?D0PZ+~ofEfZFF>2oFbz=7GBZ1W=9Sfu96E1-uIUK2Ya9@i4pxJ_-B@@Y_Jq>v@>|H1M|5 zUXJ=e$NlGllEbe6#h?EIir)v%c>P@l>KvZ~z7hD>K%M8iz}tXt=DN(KXB;Vdm`n80 zp7rw>*URiLGu>w69rxn-BA0k}iAz6{d;Mq~%zkv2ZursuwZ|DQj%h#d=J(6FWFrtM z`#BpREHIn<2=F}DOSw2!{Uf^SjQWvm=tsP~z=hZwKdu4q;d&+4$GAkBk8-_+3syuN zf8s|xcvE=tHsI^IAbI^G8ondkeI&rQ0ACmW77$E~5kFR{P;FUjcRTyNkyz;!RzlU(vC zlG|wJjys&Y*DbGSn{JSld9PEr)hu^$zwgqln-n%+W3-ZV%DyYJp*@}1bDCT0T~AU! 
zNT{c0x(gTQ=AXJSyEw07~4v zmTt8luNAJBt|xh~OuBAm%c$1L1_Son++{Y@sU{}t%$X?jE2K3*LG%?&eixD^WtHzRW0Ku@5*}-l(Tu>B&+eLU%E6N zB*if9Bn@KIG`c>U# z5971~d!&Z!h>1nn<)s8x6bmP4yNfAQiBmT!64%O{*F$!|jLv$Tt|giquwv`VUTXHI zZA{A(c7uti1(JnfiD{X)B&@BMlf(0@pZJkQFJ0sBeAJ&BB5m)u;ucUe8?CLo1C>$N z7Pnk$)qPkiJaoWqGAJLVhgnOc5~ppt@WknsLsKxG!#MXl6Nj>ur;&MaM(*uQ&C6u7 z!A~)->jqn6o1@{?HadRNnElOxyCI%tYmGjrYUbQ4^DzhGI%&y)x)WO4U&+>g@^a#t~+W_N$3ut`&bRE_I1d8L{R zrx}hzDm%kdAN&@KnLp$u6;JZ}cXY@?%=PbTxBbX3vD} z?~mz-=y0UWjs(iLY>oXfvuAHPTeW{VwFjQ&9xSj#ZY_{78M&2Xuox4^dP=b8*<$A+ zw8PTjt!iC0#Cz;U6KmE?(~E@~NP^=ztl_O<3!xm``msF|E3I|A%FJF$8_jx=Y;}`< zZ(!kNIN~!S3^k4*8%lAdN&}R|B2RCplNinP=FgRR+}XgyVbWz3ZEkyS+*L4q>X}iT zmc0^fk9$&NE#u=L{NZ|>cb)A@sC-ORBz+4pB$jfvQp<83x3Dtm5)jA}qksu``=D*D z#09&mC*`~#Mp69lLz_B^y@eHmTD|xs6t^UUA*VekA_blw!t`Mvjc)ZuSMzLOPx1sW z8qA%XB2K^0U``$0NFv*ZqSw|4qV96 zpg;2OSz-zx-2!g?{>?7C@v3glbr0sk}0k)=SZW&oLUWEZVsO`ejL+q@@ zVEeQ7fKl_!`!QUeAk%-*VMzk)%NM~!ZPRM%;or5Z3_1aQO865 zRO74+fEl*f&1KjhDa9J@KHs`w_((BwBR?ocE5Q{vhu%`O9L-^aDIzA5$&7WJZ z%<^08yT=5dY40(CNMnB`m`jRI-a`(Na|sp7t>DD6_ad6>6@)uH@TyS+qe^kZXWGrp zg`tCljH=t%HcYbMoFJSsP2;HJ$tD8zV$GdY7M`u-s+*|9qs51qtvKWYvfTq-66q_z ziRLj8EL2TZm{uuPl1K9l?Hk*tc|HGZ(!nx`U?DMOA>CA}dp65AvcENRzcsRLZsxix^LvQCe?t~Wux z;?&?I)2N1Im}onOwp8)9DqFPOB_EK4$VY6dg7E3F5X&dWxYX-$e`MOkSz!y}Tadbt z`}=yKq1mhCr)a}!JJ7Z0A}Z$Ru)i?=sxQ5cnH&k}cLCg$hs$i@nj^2!*GWuk&{RhD|o2>J?} zk6#NL;I$;gm%JOBqR0x4Aye};i5gqYC`hqJh&0su>LG|#@HWh3QzI*A=q@LY!^}j> zTWOYV4UCZFW>(c|Nt2_?jPi#Tku|5G)S1c@?Z%j)QpsV4_~MtR$6FxRPgco>6@Dsi ztEI?NYek1B;@F#oIKELh4$&&&9^M7YYMX;z+8c}pl|f?pVL~+>^DCB7{l1RrRxQbr zdcV4l*CCWe-hL>ancDuLioq7{Fb6CX;%pefyMP+hJsa^b;as_V+M!pp*Uh0WmEaR z0mo`N(r#6Wk|GMTMTlm}a?&a^23dA$NE=q8@m@)UiiTL65U(i+vBp<)1p?KQe-pR$ zSdYSDIHE&~Rc((fLZE9V1Hn5E60+?gOGZigc=en)Od?VU zovp=Ut=Dax9<3Fv%h^dczc_ntVP@%K>k@=k^QbikrB1rzM~~g#I{HBC=y7-S*vb1o z^!}sAj~<;_Pzm^Q9;XHL%<_|NxlCw3q;9#hPMKlbeV7`D9uQ9H{^9kkRDjp+CJ!+= zT^nILT9=b}@ZzSPB;tK)ZhmI*+~WNBkjL6bXU-@_PT3qqx|4AH+46|B_8wHFJ}2fq zb>;He)`{^r9fHiNHLoZEF+J%{tn|v6r9L(7e(NmG6qQehc3Ye}{vd@$y*YI-b@!Zd z#|}L>^LXnKpYDnfOIe~g_gMQx`~LPZ-uVZuW$Qqm@q0Mw{3fVHT3z{C(!-Q4s&Bdh z5m$F!u+C}XF5I+ry4%z)IM$N2WQA>|hQrpWLVOE#JQj& zG*Qz^Dz3T~I^*+)j1UXUp0Cg`NeHph#u^hXRzoKQXDr-mVYRw$*+Py==vYQIqv>-MHcXdat`J zVzj^lL4tu>c#(Zqcnbf@p27kC9{0NnOfn>H+Kl-94DL%U8>@>!>0K7Qq7l<6)~;ymmw=3S|!!$yR5EWTlh z^qUVbcp_olsCQ0qXk8d5ix%q(0L>C*>iT<-(T3>=!q|3KlT;n%#9c#Gt8pp4o6_l6 zbU@A8;A)kJ3-c&l+j*g1(x%3X5ya?)5TuEUe2ELFrHPwKO6=`V&4}faniKgv3bY2p z{M2|vY!I5W15ZzX5b0T1z3mp!^bH)kt>p7v^=qp&tn)`8+ocaU>#@u9xK(zvO?Qu|Jbq5!9!O2szwLUL8IhK2f zzKTYr?p!LD!Jj1Ll1#O6=b-LN(oLw@z<6j~?28G@Nin~W;2W#?mVonQEBo|ujpaDW zbxj#z!PU{t*0CH&j+FFrLc^C`749N=#Riet_Q-5>Sjn?Y?YJo}NIEAN{xkG+cSSwT z<{KKcFS4mSwOF%L+X`z2>757-OnPt9*+oU5vl(XJ+9f)mvpZ{zT@ncrU(AW#Q~2^- zUFlaUwZ;ly@rV$iYLfTk(F!7=p+t(IR1arDX8GNhf7dLP}tRJ~%w*0SxICtb3+Xc{T1WrDc< zc674^s|vxmQ@j*Ya3Q!+tN=6AB_?5a3+i684&Tt3l`zykphKPd*!V1G68o^^Ngn>R zwT#(OIwHKT!YqXcZFhm*BHvUIz^u#4sFCbxL0GUEulG0fDJJAet7*+h{bp-7jNAXi z;yxDRp#6kjs_Py!%_d?xRK;1msE7i824rq^n_0273JeS}yBS`of*P-^K}il?Mp%oT z=gMvBjq5ZuE3;1vHdy`C9kgK1B0xz%qa2}VOL=5Pa(1P$;!S1q>wp$g6>Mf4VAAqq z6{JpzIB@5EgtL}}=Cs1d!y&|L;M;jYof56WWJRL|^)8FYeS&uwjP-$p-SP}XKzX|{ zzV=5gHL8`*Q@6kfXdCVfY7)BDt!3Y>rcC0y)mM`qd!Dm#%HlP=ZdkLD5t*_BaTX$G zma1xA%Zq4U+UQ`{zTw~ouNohFP??~I?N`&J0bfjvD6q)d*ZC4htGdJImMXFjYO-&S z){;${xJ>bM3B*5(^w=M z^D1EyL0U8mos>j{Jd3FF2rQ|IeK3U~O>^vMeRFVYeK((jrgkKIr4W;dJd$9_kebtG z!^DDdO5rPs$YE%SH&=w?+C{xsouILm<dh^sAko6kS0Nt?4I{^hE1n|Z zK4`NT$~VaC87wlMfzj}_l3k}H3g;zM`)MC0D>_Z5JVZWT22JAziRx39uB!%^M7}r> z1kXz%%#=|YZ?yI=bd&h_BFsTtK#ZRp4{Xib_l*5MEa6PHqO8yOaLvIcO97dkXkv#| 
zRV|GcFzD>qOx|Hfo@#@y;sC|@>sh|7=9dg~%Bka=sFq#XpbClE7sa{aWkOfIwPPcrcc4v4uTVYlLG~gtd4(q1>qsJE4-ck#bAFR8#WkQ6g`;Jz7~2 zM74x<&P~OFZJ)|scYQyRxsFGO9c` z^wzWY!f3!J6i5gq7CW;^PlH_YF}$X_&~Qv*Jw0~gSunQaHoO}(?xt%XkXR+F!Z_LjZ-bYdh--PyjHZ|_fdxxa^ zsnDn$M08m#`qr7cMbDpV(TkB&GHxSXXwqw$W}BY>*iucI4ZZo*o>Dvsvl22*n3l<< zRlX568e6Zvbw1f@UGv^|L;qi)H*j?HL+Ol$xQ1^-A3Eb|-D^C$t)1|&5{(psnF9xf z1+WCBk7cNj%Kh4nZL|`Js)OX9FVpt+c4{owd~;|Ge+3iLnP8Wr z@mOd{AB)DnIC;MA`nPb`7F=Lq{9bEap?we<|HA4EKca%Kae=Q6iS=Gb9Je%r=vV1H zJoE2s)lJ+HejCz@O=u^lxu_{3-$_EJ= z=V|yijJB-}UU*}+R}`dzf<#+MRu(ao^lojCJBwm~!V;C*JvWFDR>>)4suWQ%v#uFq zD<(FBXmA!QSzGKac~rWC5=fa(xb#RPF`Ma6Ai*DTqv*qj5d?#5GN6J1|Zx*PYXc({nJ@;^TE zoV-c;t&mFp=@N;rwRCn|jvm{+)V^au1?#U1nij^QY($!NcJ+C|1QuEPaaElI?P7jz zm|(3cZwaO>)eS>0`)>~_tOKurAxsKh4; Z^p~ufpjZpSH|qecVAF{}^<8=%{V$3K83_OY literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po new file mode 100644 index 000000000..cd5d53c88 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po @@ -0,0 +1,2920 @@ +# German translation. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR steffenschmitz@hotmail.de, 2017. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: kubernetes\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2017-09-02 01:36+0200\n" +"Last-Translator: Steffen Schmitz \n" +"Language-Team: Steffen Schmitz \n" +"Language: de_DE\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"X-Generator: Poedit 1.8.7.1\n" +"X-Poedit-SourceCharset: UTF-8\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Zeige Metriken für alle Nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Zeige Metriken für den gegebenen Node\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Erhalte die Dokumentation einer Resource und ihrer Felder\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Erhalte die Dokumentation eines speziellen Felds einer Resource\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Gebe Optionen aus, die an alle Kommandos vererbt werden\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Gebe die Client- und Server-Versionen des aktuellen Kontexts aus\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Gebe die unterstützten API Versionen 
aus\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Zeige Metriken für alle Pods im Namespace default\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Zeige Metriken für alle Pods im gegebenen namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Zeige Metriken für den gebenen Pod und seine Container\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Zeige Metriken für Pods mit dem Label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tKonvertiere Konfigurationsdateien zwischen API Versionen. Sowohl YAML-\n" +"\t\talsauch JSON-Formate werden akzeptiert.\n" +"\n" +"\t\tDer Befehlt akzeptiert Dateinamen, Ordner oder URL als Parameter und " +"konvertiert es ins Format\n" +"\t\tder mit --output-version gegebenen Version. Wenn die Zielversion nicht \n" +"\t\tangegeben wird oder ungültig ist, wird die neueste Version verwendet.\n" +"\n" +"\t\tDie Standardausgabe wird auf stdout im YAML-Format ausgegeben. Man kann " +"die Option -o verwenden,\n" +"\t\tum das Ausgabeziel festzulegen." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tErstelle einen Namespace mit dem gegebenen Namen." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tErstelle eine Role mit einer einzelnen Rule." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tErstelle einen ServiceAccount mit dem gegebenen Namen." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMarkiere Knoten als schedulable." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tMarkiere Knoten als unschedulable." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." 
+msgstr "" +"\n" +"\t\tSetze die aktuelle Annotation Last-Applied-Configuration auf den Inhalt " +"der Datei.\n" +"\t\tDas bedeutet, dass Last-Applied-Configuration aktualisiert wird, als ob " +"'kubectl apply -f ' ausgeführt wurde,\n" +"\t\tohne andere Teile des Objekts zu aktualisieren." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Erstelle einen neuen Namespace my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Erstelle einen neuen ServiceAccount my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\tErstelle einen ExternalName-Service mit den gegebenen Namen.\n" +"\n" +"\tExternalName service referenziert eine externe DNS Adresse statt\n" +"\teines pods, was Anwendungsautoren erlaubt, einen Service zu " +"referenzieren,\n" +"\tder abseits der Platform, auf anderen Clustern oder lokal exisiert." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp hilft bei jedem Befehl in der Anwendung.\n" +"\tGib einfach kubectl help [path to command] für alle Details ein." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Erstelle einen neuen LoadBalancer-Service my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Schreibe den aktuellen Cluster-Zustand auf stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Schreibe aktuellen Cluster-Zustand in /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Schreibe alle Namespaces auf stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Schreibe eine Menge an Namespaces in /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Erstelle einen LoadBalancer-Service mit dem gegebenen Namen." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"Eine komma-separierte Menge von Quota-Scopes, die zu jedem Object passen " +"muss, dass von der Quota betroffen ist." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"Eine komma-separierte Menge von resource=quantity Paaren, die ein hartes " +"Limit definieren." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"Ein Label-Selektor, der für das Budget benutzt werden kann. Nur gleichheits-" +"basierte Auswahlkriterien werden unterstützt." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"Ein Label-Selektor, der für den Service benutzt werden kann. Nur gleichheits-" +"basierte Auswahlkriterien werden unterstützt. Wenn er leer ist (standard), " +"wird der Selektor vom ReplicationController oder ReplicaSet abgeleitet" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Zusätzliche, externe IP Adressen (die nicht von Kubernetes verwaltet " +"werden), die der Service akzeptieren soll. 
Wenn diese IP zu einem Knoten " +"gerouted wird, kann der Service über die IP angesprochen werden, zusätzlich " +"zu seiner generierten Service-IP." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Genehmige eine Certificate-Signing-Request" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Weise Deine eigene ClusterIP zu oder setze sie auf 'None' für einen " +"'headless'-Service (kein LoadBalancing)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Weise einem laufenden Container zu" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP, die dem Service zugewiesen werden soll. Freilassen, für " +"automatische Zuweisung oder auf 'None' setzen für einen headless-Service." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole, die das ClusterRoleBinding referenzieren soll" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole, die das RoleBinding referenzieren soll" + +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Konvertiere Config-Dateien zwischen verschiedenen API Versionen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Kopiere Dateien und Ordner aus/in Container(n)." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Erstelle ein TLS-Secret" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Erstelle einen Namespace mit dem gegebenen Namen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Erstelle ein Secret für die Benutzung mit einer Docker-Registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Erstelle ein Secret mit dem angegebenen Sub-Befehl" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Erstelle einen ServiceAccount mit dem gegebenen Namen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Lösche das angegebene Cluster aus der kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Lösche den angegebenen Kontext aus der kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Lehne eine Certificate-Signing-Request ab" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Beschreibe einen oder mehrere Kontexte" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Zeige Cluster, die in der kubeconfig definiert sind" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "" +"Zeige vereinte kubeconfig-Einstellungen oder die angegebene kubeconfig-Datei" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Zeige eine oder mehrere Resourcen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Leere Knoten, um eine Wartung vorzubereiten" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Bearbeite eine Resource auf dem Server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "E-Mail für Docker-Registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Führe einen Befehl im Container aus" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Leite einen oder mehrere lokale Ports an einen Pod weiter" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Hilfe für jeden Befehl" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Markiere Knoten als schedulable" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Markiere Knoten als unschedulable" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Markiere die gegebene Resource als pausiert" + +#: 
staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Verändere Certificate-Resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Verändere kubeconfig Dateien" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name oder Nummer des Ports in dem Container, zu dem der Service Daten leiten " +"soll. Optional." + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Zeige nur Logs nach einem bestimmten Datum (RFC3339) an. Zeigt standardmäßig " +"alle logs. Es kann entweder since-time oder since benutzt werden." + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Zeige Shell-Completion-Code für die angegebene Shell (bash oder zsh)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Passwort für die Authentifizierung bei der Docker-Registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Pfad des Public-Key-Zertifikats im PEM-Format." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Pfad zum Private-Key, der zum gegebenen Zertifikat passt." + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Vorbedingung für Resource-Version. Verlangt, dass die aktuelle Resource-" +"Version diesen Wert erfüllt, um zu skalieren." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Schreibt die Client- und Server-Versionsinformation" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Schreibt die Liste von Optionen, die alle Befehle erben" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Schreibt die Logs für einen Container in einem Pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Setze eine pausierte Resource fort" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Role, die dieses RoleBinding referenzieren soll" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Starte ein bestimmtes Image auf dem Cluster" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Starte einen Proxy zum Kubernetes-API-Server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Setze bestimmte Features auf Objekten" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "Setze den Selektor auf einer Resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Zeige Details zu einer bestimmten Resource oder Gruppe von Resourcen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Zeige den Status des Rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Synonym für --target-port" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "Das Image, dass auf dem Container gestartet werden soll." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"Die Image-Pull-Policy für den Container. Wenn leer, wird der Wert nicht vom " +"Client gesetzt, sondern standardmäßig vom Server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"Die minimale Anzahl oder Prozentzahl von verfügbaren Pods, die das Budget " +"voraussetzt." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "Der Name des neu erstellten Objekts." + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"Der Name des neu erstellten Objekts. Falls nicht angegeben, wird der Name " +"der Input-Resource verwendet." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. 
The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"Der Name des zu verwendenden API-Generators. Es gibt zwei Generatoren: " +"'service/v1' und 'service/v2'. Der einzige Unterschied ist, dass der " +"Serviceport in v1 'default' heißt, während er in v2 unbenannt bleibt. " +"Standard ist 'service/v2'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "" +"Das Netzwerkprotokoll, für den zu erstellenden Service. Standard ist 'TCP'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"Der Port auf den der Service hören soll. Wird von der angebotenen Resource " +"kopiert, falls nicht angegeben" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "Der Typ des zu erstellenden Secrets" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "Widerrufe ein vorheriges Rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Aktualisiere Resourcen requests/limits auf Objekten mit Pod-Templates" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Aktualisiere die Annotationen auf einer Resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "Aktualisiere die Labels auf einer Resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Aktualisiere die Taints auf einem oder mehreren Knoten" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Username für Authentifizierung bei der Docker-Registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "Zeige rollout-Verlauf" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl kontrolliert den Kubernetes-Cluster-Manager" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using " +#~ "the cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Erstellt ein ClusterRoleBinding für user1, user2 und group1 mit " +#~ "der ClusterRole cluster-admin\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Erstellt ein RoleBinding für user1, user2 und group1 mit der " +#~ "ClusterRole admin\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on 
folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys " +#~ "instead of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Erstellt eine neue ConfigMap mit dem Namen my-config basierend " +#~ "auf dem Ordner bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Erstellt eine neue ConfigMap mit dem Namen my-config mit den " +#~ "angegebenen Keys statt des Dateinamens auf der Festplatte.\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Erstellt eine neue ConfigMap mit dem Namen my-config mit " +#~ "key1=config1 und key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" + +#~ msgid "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Wenn keine .dockercfg Datei existiert, kann direkt ein dockercfg " +#~ "Secret erstellen mit:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Wende die Konfiguration in pod.json auf einen Pod an.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Wende die JSON-Daten von stdin auf einen Pod an.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Hinweis: --prune ist noch in Alpha\n" +#~ "\t\t# Wende die Konfiguration, mit dem Label app=nginx, im manifest.yaml " +#~ "an und lösche alle Resourcen, die nicht in der Datei sind oder nicht das " +#~ "Label app=nginx besitzen.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Wende die Konfiguration im manifest.yaml an und lösche alle " +#~ "ConfigMaps, die nicht in der Datei sind.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" + +#, c-format +#~ msgid "" +#~ 
"\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, no target CPU utilization specified so a default autoscaling " +#~ "policy will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of " +#~ "pods between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Auto-skaliere ein Deployment \"foo\", mit einer Anzahl an Pods " +#~ "zwischen 2 und 10, eine Ziel-CPU-Auslastung ist angegeben, sodass eine " +#~ "Standard-autoskalierungsregel verwendet wird:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto-skaliere einen Replication-Controller \"foo\", mit einer " +#~ "Anzahl an Pods zwischen 1 und 5, mit einer Ziel-CPU-Auslastung von 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Konvertiere 'pod.yaml' zur neuesten Version und schreibe auf " +#~ "stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Konvertiere den aktuellen Zustand der Resource, die in 'pod.yaml' " +#~ "angegeben ist, zur neuesten Version\n" +#~ "\t\t# und schreibe auf stdout im JSON-Format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Konvertiere alle Dateien im aktuellen Ordner zur neuesten Version " +#~ "und erstelle sie.\n" +#~ "\t\tkubectl convert -f . 
| kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt eine ClusterRole \"pod-reader\", die es Nutzern erlaubt " +#~ "\"get\", \"watch\" und \"list\" auf den Pods auszuführen\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Erstellt eine ClusterRole \"pod-reader\" mit dem angegebenen " +#~ "ResourceName\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt eine Role \"pod-reader\", die es dem Nutzer erlaubt \"get" +#~ "\", \"watch\" und \"list\" auf den Pods auszuführen\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Erstellt eine Role \"pod-reader\" mit dem angegebenen ResourceName\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt eine neue ResourceQuota my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Erstellt eine neue ResourceQuota best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt ein Pod-Disruption-Budget my-pdb, dass alle Pods mit dem " +#~ "Label app=rails auswählt\n" +#~ "\t\t# und sicherstellt, dass 
mindestens einer von ihnen zu jedem " +#~ "Zeitpunkt verfügbar ist.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Erstellt ein Pod-Disruption-Budget my-pdb, dass alle Pods mit dem " +#~ "Label app=nginx auswählt\n" +#~ "\t\t# und sicherstellt, dass mindestens die Hälfte der gewählten Pods zu " +#~ "jedem Zeitpunkt verfügbar ist.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt einen Pod mit den Daten in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Erstellt einen Pod basierend auf den JSON-Daten von stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Verändert die Daten in docker-registry.yaml in JSON mit dem v1 API " +#~ "Format und erstellt eine Resource mit den veränderten Daten.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt einen Service für einen replizierten nginx, der auf Port " +#~ "80 hört und verbindet sich mit den Containern auf Port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service für einen Replication-Controller, der über " +#~ "type und name in \"nginx-controller.yaml\" identifiziert wird, auf Port " +#~ "80 hört und verbindet sich mit den 
Containern auf Port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service, mit dem Namen \"frontend\", für einen Pod " +#~ "valid-pod, der auf port 444 hört\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Erstellt einen zweiten Service basierend auf dem vorherigen " +#~ "Service, der den Container Port 8443 auf Port 443 mit dem Namen \"nginx-" +#~ "https\" anbietet\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service für eine Replicated-Streaming-Application " +#~ "auf Port 4100, die UDP-Traffic verarbeitet und 'video-stream' heißt.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service für einen replizierten nginx mit einem " +#~ "Replica-Set, dass auf Port 80 hört und verbindet sich mit den Containern " +#~ "auf Port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service für ein nginx Deployment, dass auf Port 80 " +#~ "hört und verbindet sich mit den Containern auf Port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Löscht einen Pod mit type und name aus pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Löscht einen Pod mit dem type und name aus den JSON-Daten von " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Löscht Pods und Services mit den Namen \"baz\" und \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Löscht Pods und Services mit dem Label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Löscht einen Pod mit minimaler Verzögerung\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Erzwingt das Löschen eines Pods auf einem toten Node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Löscht alle Pods\n" +#~ "\t\tkubectl delete pods --all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# 
Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Beschreibt einen Knoten\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Beschreibt einen Pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Beschreibt einen Pod mit type und name aus \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Beschreibt alle Pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Beschreibt Pods mit dem Label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Beschreibt alle Pods, die vom ReplicationController 'frontend' " +#~ "verwaltet werden (rc-erstellte Pods\n" +#~ "\t\t# bekommen den Namen des rc als Prefix im Podnamen).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Leere den Knoten \"foo\", selbst wenn er Pods enthält, die nicht " +#~ "von einem ReplicationController, ReplicaSet, Job, DaemonSet oder " +#~ "StatefulSet verwaltet werden.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# Wie zuvor, aber es wird abgebrochen, wenn er Pods enthält, die " +#~ "nicht von einem ReplicationController, ReplicaSet, Job, DaemonSet oder " +#~ "StatefulSet verwaltet werden und mit einer Schonfrist von 15 Minuten.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Bearbeite den Service 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Benutze einen anderen Editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Bearbeite den Job 'myjob' in JSON mit dem v1 API Format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Bearbeite das Deployment 'mydeployment' in YAML und speichere die " +#~ "veränderte Konfiguration in ihrer Annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container 
from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom Aufruf von 'date' auf dem Pod 123456-7890, " +#~ "mit dem ersten Container als Standard\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom Aufruf von 'date' im Ruby-Container aus dem " +#~ "Pod 123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Wechsle in den Terminal-Modus und sende stdin zu 'bash' im Ruby-" +#~ "Container aus dem Pod 123456-7890\n" +#~ "\t\t# und sende stdout/stderr von 'bash' zurück zum Client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom laufenden Pod 123456-7890, mit dem ersten " +#~ "Container als Standard\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom Ruby-Container aus dem Pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Wechsle in den Terminal-Modus und sende stdin zu 'bash' im Ruby-" +#~ "Container aus dem Pod 123456-7890\n" +#~ "\t\t# und sende stdout/stderr von 'bash' zurück zum Client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom ersten Pod eines ReplicaSets nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac using homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source if from ." 
+#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Installiere bash completion auf einem Mac mit homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Lade den kubectl-Completion-Code für bash in der aktuellen Shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Schreibe den Bash-Completion-Code in eine Datei und source sie im ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Lade den kubectl-Completion-Code für zsh[1] in die aktuelle Shell\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Liste alle Pods im ps-Format auf.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# Liste alle Pods im ps-Format mit zusätzlichen Informationen (wie " +#~ "dem Knotennamen) auf.\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# Liste alle einzelnen ReplicationController mit dem angegebenen " +#~ "Namen im ps-Format auf.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# Liste einen einzelnen Pod im JSON-Format auf.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Liste einen Pod mit Typ und Namen aus \"pod.yaml\" im JSON-Format " +#~ "auf.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Gib nur den phase-Wert des angegebenen Pods zurück.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# Liste alle ReplicationController und Services im ps-Format auf.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# Liste eine oder mehrere Resourcen über ihren Typ und Namen auf.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Liste alle Resourcen mit verschiedenen Typen auf.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Hört lokal auf Port 5000 und 6000 und leitet Daten zum/vom Port " +#~ "5000 und 6000 weiter an den Pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Hört lokal auf Port 8888 und leitet an Port 5000 des Pods weiter\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Hört auf einen zufälligen lokalen Port und leitet an Port 5000 des " +#~ "Pods weiter\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Hört auf einen zufälligen lokalen Port und leitet an Port 5000 des " +#~ "Pods weiter\n" +#~ "\t\tkubectl port-forward mypod 0:5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Markiere Knoten \"foo\" als schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Markiere Knoten \"foo\" als unschedulable.\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aktualisiere einen Knoten teilweise mit einem Strategic-Merge-" +#~ "Patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Aktualisiere einen Knoten, mit type und name aus \"node.json\", mit " +#~ "einem Strategic-Merge-Patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Aktualisiere das Image eines Containers; spec.containers[*].name " +#~ "ist erforderlich, da es der Merge-Key ist\n" +#~ "\t\tkubectl patch pod valid-pod -p 
'{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Aktualisiere das Image eines Containers mit einem JSON-Patch mit " +#~ "Positional-Arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Gebe die Adresse des Masters und des Cluster-Services aus\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ersetze einen Pod mit den Daten aus pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Ersetze einen Pod mit den JSON-Daten von stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Setze die Pod-Image-Version (tag) eines einzelnen Containers zu v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Erzwinge das Ersetzen, Löschen und Neu-Erstellen der Resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs des Pods nginx mit nur einem Container " +#~ "zurück\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs für die Pods mit dem Label app=nginx zurück\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs des zuvor gelöschten Ruby-Containers des Pods " +#~ "web-1 zurück\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Starte das Streaming der Logs vom Ruby-Container im Pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Zeige die letzten 20 Zeilen der Ausgabe des Pods nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Zeige alle Logs der letzten Stunde des Pods nginx an\n" +#~ 
"\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs des ersten Containers des Jobs hello zurück\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs des Containers nginx-1 eines Deployments " +#~ "nginx zurück\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Starte einen Proxy zum Kubernetes-Apiserver auf Port 8011 und sende " +#~ "statische Inhalte von ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Starte einen Proxy zum Kubernetes-Apiserver auf einem zufälligen " +#~ "lokalen Port.\n" +#~ "\t\t# Der gewählte Port für den Server wird im stdout zurückgegeben.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Starte einen Proxy zum Kubernetes-Apiserver und ändere das API-" +#~ "Prefix zu k8s-api\n" +#~ "\t\t# Damit ist die Pods-API bspw. unter localhost:8001/k8s-api/v1/pods/ " +#~ "erreichbar\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Skaliere ein ReplicaSet 'foo' auf 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Skaliere eine Resource mit type und name aus \"foo.yaml\" auf 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# Wenn die aktuelle Größe des Deployments mysql 2 ist, skaliere mysql " +#~ "auf 3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Skaliere mehrere MultiplicationController.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Skaliere den Job cron auf 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" 
+#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Setze die Last-Applied-Configuration einer Resource auf den Inhalt " +#~ "einer Datei.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Führe Set-Last-Applied auf jeder Konfigurationsdatei in einem " +#~ "Ordner aus.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Setze die Last-Applied-Configuration einer Resource auf den Inhalt " +#~ "einer Datei; erstellt die Annotation, wenn sie noch nicht existiert.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Stoppe foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stoppe Pods und Services mit dem Label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Stoppe den in service.json definierten Service\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Stoppe alle Resourcen im Ordner path/to/resources\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Starte eine einzelne Instanz von nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Starte eine einzelne Instanz von hazelcast und öffne Port 5701 auf " +#~ "dem Container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Starte eine einzelne Instanz von hazelcast und setze die Umgebungs-" +#~ "variablen \"DNS_DOMAIN=cluster\" und \"POD_NAMESPACE=default\" im " +#~ "Container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Starte eine replizierte Instanz von nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Testlauf. Zeige die zugehörigen API Objekte ohne sie zu erstellen.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Starte eine einzelne Instanz von nginx, aber überlade die Spec des " +#~ "Deployments mit einer Teilmenge von JSON-Werten.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Starte einen busybox Pod und lass ihn im Vordergrund laufen; starte " +#~ "ihn nicht neu, wenn er existiert.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Starte einen nginx-Container mit dem Standardkommando, aber " +#~ "übergebe die Parameter (arg1 .. argN) an das Kommando.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Starte den nginx-Container mit einem anderen Kommando und " +#~ "Parametern.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Starte den perl-Container, um π auf 2000 Stellen zu berechnen und " +#~ "gib es aus.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Starte den cron-Job, um π auf 2000 Stellen zu berechnen und gib sie " +#~ "alle 5 Minuten aus.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aktualisiere Knoten 'foo' mit einem Taint mit dem Key 'dedicated', " +#~ "dem Value 'special-user' und dem Effect 'NoSchedule'.\n" +#~ "\t\t# Wenn ein Taint mit dem Key und Effect schon existiert, wird sein " +#~ "Value mit den gegebenen Werten ersetzt.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Entferne vom Knoten 'foo' den Taint mit dem Key 'dedicated' und dem " +#~ "Effect 'NoSchedule', wenn er existiert.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Entferne vom Knoten 'foo' alle Tains mit dem Key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aktualisiere den Pod 'foo' mit dem Label 'unhealthy' und dem Value " +#~ "'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Aktualisiere den Pod 'foo' mit dem Label 'status' und dem Value " +#~ "'unhealthy' und überschreibe alle bisherigen Values.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aktualisiere alle Pods im Namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aktualisiere den Pod mit type und name aus \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aktualisiere den Pod 'foo', wenn die Resource sich nicht von " +#~ "version 1 unterscheidet.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ 
"\n" +#~ "\t\t# Aktualisiere den Pod 'foo', indem das Label 'bar' gelöscht wird, " +#~ "wenn es existiert.\n" +#~ "\t\t# Benötigt kein --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aktualisiere die Pods in frontend-v1 mit den neuen Replication-" +#~ "Controller Daten in frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Aktualisiere die Pods in frontend-v1 mit den JSON-Daten von stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Aktualisiere die Pods von frontend-v1 auf frontend-v2, indem das " +#~ "Image geändert wird und\n" +#~ "\t\t# der Name des ReplicationControllers.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Aktualisiere die Pods in frontend, indem das Image geändert, aber " +#~ "der alte Name beibehalten wird.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Breche ein laufendes Rollout (von frontend-v1 zu frontend-v2) ab " +#~ "und mache es rückgängig.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Zeige die Annotation Last-Applied-Configuration mit type/name in " +#~ "YAML an.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# Zeige die Annotation Last-applied-configuration mit der Datei in " +#~ "JSON an\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tWende eine Konfiguration auf eine Resource mit Dateinamen oder stdin " +#~ "an.\n" +#~ "\t\tDie Resource wird erstellt, wenn sie noch nicht existiert.\n" +#~ "\t\tUm 'apply' zu benutzen, muss die Resource initital mit 'apply' oder " +#~ "'create --save-config' erstellt werden.\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: Die --prune Funktion ist noch nicht fertig. Benutze " +#~ "sie nicht, wenn der aktuelle Zustand nicht bekannt ist. Siehe https://" +#~ "issues.k8s.io/34274." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle eine ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein ClusterRoleBinding für eine bestimmte ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein RoleBinding für eine bestimmte ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein TLS-Secret vom gegebenen Public/Private-Schlüsselpaar.\n" +#~ "\n" +#~ "\t\tDas Public/Private-Schlüsselpaar muss vorab bestehen. Das Public-Key-" +#~ "Zertifikat muss im PEM-Format sein und zum gegebenen Private-Key passen." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle eine ConfigMap basierend auf einer Datei, einem Order oder " +#~ "einem gegebenen Wert.\n" +#~ "\n" +#~ "\t\tEine einzelne ConfigMap kann eins oder mehr Key/Value-Paare " +#~ "beinhalten.\n" +#~ "\n" +#~ "\t\tWenn man eine ConfigMap von einer Datei erstellt, wird der Key " +#~ "standardmäßig der Name der Datei, und der Wert wird\n" +#~ "\t\tstandardmäßig der Dateiinhalt. Wenn der Dateiname ein ungültiger Key " +#~ "ist, kann ein anderer Key angegeben werden.\n" +#~ "\n" +#~ "\t\tWenn man eine ConfigMap von einem Ordner erstellt, wird jede Datei, " +#~ "deren Name ein gültiger Key ist\n" +#~ "\t\tin die ConfigMap aufgenommen. Jegliche Einträge im Ordner, die keine " +#~ "regulären Dateien sind, werden ignoriert (z.B. SubDirectories, \n" +#~ "\t\tSymLinks, Devices, Pipes, usw)." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein Secret für die Benutzung mit Docker-Registries.\n" +#~ "\n" +#~ "\t\tDockercfg Secrets werden für die Authentifizierung bei Docker-" +#~ "Registries benutzt.\n" +#~ "\n" +#~ "\t\tWenn die Docker-command -line zum pushen von Images benutzt wird, " +#~ "kann man sich bei einer gegebenen Registry authentifizieren mit\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " Dies produziert eine ~/.dockercfg Datei, die für folgende 'docker " +#~ "push' und 'docker pull' Befehle genutzt wird,\n" +#~ "\t\tum sich an der Registry zu authentifizieren. Die E-Mail-Adresse ist " +#~ "optional.\n" +#~ "\n" +#~ "\t\tBei der Erstellung von Applikationen, kann eine Docker-Registry eine " +#~ "Authentifizierung verlangen. Damit\n" +#~ "\t\tdeine Knoten in deinem Namen Images herunterladen können, benötigen " +#~ "sie die Credentials. Man kann diese Information bereitstellen\n" +#~ "\t\tindem man ein dockercfg secret erstellt und zu seinem ServiceAccount " +#~ "hinzufügt." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein Pod-Disruption-Budget mit dem gegebenen name, selector " +#~ "und der gewünschten Mindestanzahl verfügbarer Pods" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle eine Resource mit Dateinamen oder stdin.\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle eine ResourceQuota mit dem gegebenen name, hard limits und " +#~ "optional scopes" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. 
If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein Secret basierend auf einer Datei, einem Ordner oder " +#~ "einem gegebenen Wert.\n" +#~ "\n" +#~ "\t\tEin einzelnes Secret kann ein oder mehrere Key/Value-Paare beinhalten.\n" +#~ "\n" +#~ "\t\tWenn man ein Secret von einer Datei erstellt, wird der Key " +#~ "standardmäßig der Name der Datei, und der Wert wird\n" +#~ "\t\tstandardmäßig der Dateiinhalt. Wenn der Dateiname ein ungültiger Key " +#~ "ist, kann ein anderer Key angegeben werden.\n" +#~ "\n" +#~ "\t\tWenn man ein Secret von einem Ordner erstellt, wird jede Datei, deren " +#~ "Name ein gültiger Key ist,\n" +#~ "\t\tin das Secret aufgenommen. Jegliche Einträge im Ordner, die keine " +#~ "regulären Dateien sind, werden ignoriert (z.B. SubDirectories, \n" +#~ "\t\tSymLinks, Devices, Pipes, usw)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle und starte ein bestimmtes Image, möglicherweise repliziert.\n" +#~ "\n" +#~ "\t\tErstellt ein Deployment oder Job, um den/die erstellten Container zu " +#~ "verwalten." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstellt einen AutoScaler, der die Anzahl der Pods, die im Kubernetes-" +#~ "Cluster laufen, automatisch wählt und anpasst.\n" +#~ "\n" +#~ "\t\tSucht ein Deployment, ReplicaSet oder ReplicationController anhand des " +#~ "Namens und erstellt einen AutoScaler, der die Resource als Referenz nimmt.\n" +#~ "\t\tEin AutoScaler kann die Anzahl der im System deployten Pods nach " +#~ "Bedarf erhöhen oder verringern." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. 
To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tLöscht die Resourcen mit Dateinamen, stdin, resources- und names- " +#~ "oder mit resources- und label-Selektor.\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert. Nur einer der Parameter " +#~ "darf verwendet werden: Dateiname,\n" +#~ "\t\tresources- und names-, oder resources- und label-Selektor.\n" +#~ "\n" +#~ "\t\tManche Resourcen, zum Beispiel Pods, unterstützen graziöses Löschen. " +#~ "Sie definieren einen Standardzeitraum\n" +#~ "\t\tbevor das Löschen erzwungen wird (grace-period), aber dieser Wert " +#~ "kann überschrieben werden mit\n" +#~ "\t\tder --grace-period Option, oder mit --now, das die grace-period auf 1 " +#~ "setzt. Da diese Resourcen\n" +#~ "\t\thäufig Einheiten im Cluster sind, kann das Löschen nicht immer direkt " +#~ "bestätigt werden. Wenn der Knoten\n" +#~ "\t\tauf dem der Pod läuft nicht verfügbar ist, oder den API Server nicht " +#~ "erreichen kann, kann das Löschen bedeutend länger dauern\n" +#~ "\t\tals die grace-period. Um das Löschen zu erzwingen, muss eine grace-" +#~ "period von 0 übergeben werden\n" +#~ "\t\tund die --force Option gesetzt sein.\n" +#~ "\n" +#~ "\t\tWICHTIG: Ein erzwungenes Löschen wartet nicht auf die Bestätigung, " +#~ "dass der Prozess\n" +#~ "\t\tbeendet wurde, was den Prozess am Leben erhalten kann, bis der Knoten " +#~ "die Löschung erkennt\n" +#~ "\t\tund das graziöse Löschen beendet. Wenn Prozesse Shared-Storage oder " +#~ "eine Remote-API verwenden und\n" +#~ "\t\tden Namen des Pods brauchen, um sich selbst zu identifizieren, kann " +#~ "das erzwungene Löschen dazu führen, dass\n" +#~ "\t\tmehrere Prozesse auf der gleichen Maschine die gleiche Identität " +#~ "verwenden, was zu\n" +#~ "\t\tDatenkorruption oder Inkonsistenz führen kann. 
Erzwinge nur ein " +#~ "Löschen, wenn sichergestellt ist, dass der Pod\n" +#~ "\t\tgelöscht ist, oder wenn die Anwendung mehrere gleichzeitig laufende " +#~ "Kopien verarbeiten kann.\n" +#~ "\t\tAußerdem kann es passieren, dass der Scheduler neue Pods auf dem " +#~ "Knoten platziert, bevor der Knoten\n" +#~ "\t\tdie Resourcen freigegeben hat, sodass diese Pods direkt entfernt " +#~ "werden.\n" +#~ "\n" +#~ "\t\tBerücksichtige außerdem, dass der Delete-Befehl KEINE Versionen " +#~ "prüft, sodass, wenn jemand\n" +#~ "\t\tein Update einer Resource gleichzeitig mit Deinem delete anstößt, " +#~ "dessen Update\n" +#~ "\t\tmit dem Rest der Resource entfernt wird." + +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tVeraltet: Fahre eine Resource mit Namen oder Dateinamen graziös " +#~ "herunter.\n" +#~ "\n" +#~ "\t\tDer Stop-Befehl ist veraltet und alle Funktionen werden mit dem " +#~ "Delete-Befehl abgedeckt.\n" +#~ "\t\tSiehe 'kubectl delete --help' für mehr Details.\n" +#~ "\n" +#~ "\t\tVersucht eine Resource, die graziöses Löschen unterstützt, " +#~ "herunterzufahren und zu löschen.\n" +#~ "\t\tWenn die Resource skaliert werden kann, wird sie vor dem Löschen auf " +#~ "0 skaliert." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\tZeigt die Resourcennutzung (CPU/Memory/Storage) von Knoten.\n" +#~ "\n" +#~ "\t\tDer top-node-Befehl erlaubt es, die Resourcennutzung von Knoten zu " +#~ "betrachten." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\tZeigt die Resourcennutzung (CPU/Memory/Storage) von Pods.\n" +#~ "\n" +#~ "\t\tDer 'top pod'-Befehl erlaubt es, die Resourcennutzung von Pods zu " +#~ "betrachten.\n" +#~ "\n" +#~ "\t\tAufgrund der Metrik-Pipeline-Verzögerung können sie für ein paar " +#~ "Minuten\n" +#~ "\t\tnach der Pod-Erstellung nicht verfügbar sein." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\tZeige Resourcennutzung (CPU/Memory/Storage).\n" +#~ "\n" +#~ "\t\tDer top-Befehl erlaubt es, die Resourcennutzung von Knoten oder Pods " +#~ "zu betrachten.\n" +#~ "\n" +#~ "\t\tDieser Befehl benötigt eine korrekt konfigurierte und funktionierende " +#~ "Heapster-Installation auf dem Server. 
" + +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tLeere Knoten, um eine Wartung vorzubereiten.\n" +#~ "\n" +#~ "\t\tDer gegebene Knoten wird als unschedulable markiert, um die Zuordnung " +#~ "von neuen Pods zu verhindern.\n" +#~ "\t\t'drain' entfernt den Pod, falls der API-Server die Entfernung " +#~ "unterstützt\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Wenn nicht, wird ein " +#~ "normales DELETE verwendet,\n" +#~ "\t\tum die Pods zu löschen.\n" +#~ "\t\t'drain' entfernt oder löscht alle Pods mit der Ausnahme von Mirror-" +#~ "Pods (welche vom API Server nicht gelöscht werden können)\n" +#~ "\t\tWenn DaemonSet-verwaltete Pods existieren, wird 'drain' nicht " +#~ "fortgesetzt\n" +#~ "\t\tohne die --ignore-daemonsets Option, und es wird in keinem Fall\n" +#~ "\t\tDaemonSet-verwaltete Pods löschen, weil diese Pods direkt ersetzt " +#~ "würden durch den\n" +#~ "\t\tDaemonSet-Controller, der unschedulable Markierungen ignoriert. Wenn " +#~ "es irgendwelche\n" +#~ "\t\tPods gibt, die weder Mirror-Pods sind, noch von einem " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet oder Job verwaltet werden, wird " +#~ "drain keine Pods löschen, außer die\n" +#~ "\t\t--force Option ist gesetzt. --force lässt das Löschen selbst zu, " +#~ "wenn die verwaltende Resource von einem\n" +#~ "\t\toder mehreren Pods fehlt.\n" +#~ "\n" +#~ "\t\t'drain' wartet auf eine graziöse Löschung. 
Man sollte auf der " +#~ "Maschine nichts tun, während\n" +#~ "\t\tder Befehl läuft.\n" +#~ "\n" +#~ "\t\tWenn der Knoten wieder bereit ist, die Arbeit aufzunehmen, benutze " +#~ "kubectl uncordon,\n" +#~ "\t\twas den Knoten als schedulable markiert.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\tBearbeite eine Resource mit dem Standardeditor.\n" +#~ "\n" +#~ "\t\tDer edit-Befehl erlaubt es, jede API Resource direkt zu bearbeiten, " +#~ "wenn sie mit den\n" +#~ "\t\tCommand-Line-Tools erreichbar ist. Er öffnet den Editor, der in der " +#~ "KUBE_EDITOR oder EDITOR\n" +#~ "\t\tUmgebungsvariable festgelegt ist, oder 'vi' auf Linux und 'notepad' " +#~ "auf Windows.\n" +#~ "\t\tEs ist möglich, mehrere Objekte zu bearbeiten, aber die Änderungen " +#~ "werden nacheinander angewendet. Der Befehl\n" +#~ "\t\takzeptiert Dateinamen und Command-Line-Parameter, aber die " +#~ "verwendeten Dateien müssen\n" +#~ "\t\tvorab gespeicherte Versionen von Resourcen sein.\n" +#~ "\n" +#~ "\t\tDie Bearbeitung verwendet die API Version, die genutzt wurde, um die " +#~ "Resource zu lesen.\n" +#~ "\t\tUm eine spezifische API Version zu verwenden, muss die vollständige " +#~ "Resource, Version und Group angegeben werden.\n" +#~ "\n" +#~ "\t\tDas Standardformat ist YAML. Um mit JSON zu arbeiten, setze \"-o json" +#~ "\".\n" +#~ "\n" +#~ "\t\tDie Option --windows-line-endings kann benutzt werden, um Windows-" +#~ "Zeilenumbrüche zu verwenden,\n" +#~ "\t\tansonsten wird der Standard des Betriebssystems verwendet.\n" +#~ "\n" +#~ "\t\tFalls beim Update ein Fehler auftritt, wird eine temporäre Datei auf " +#~ "der Festplatte angelegt,\n" +#~ "\t\tdie die nicht verarbeiteten Änderungen enthält. Der häufigste Fehler " +#~ "beim Bearbeiten einer Resource\n" +#~ "\t\tist ein anderer Editor, der die Resource auf dem Server ändert. 
Wenn " +#~ "das auftritt, muss man\n" +#~ "\t\tseine Änderungen auf die neue Version anwenden oder seine temporäre\n" +#~ "\t\tgespeicherte Kopie mit der neuesten Resourcenversion aktualisieren." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tGibt den Shell-Completion-Code für die angegebene Shell aus (bash " +#~ "oder zsh).\n" +#~ "\t\tDer Shell-Code muss für eine interaktive Vervollständigung von " +#~ "kubectl \n" +#~ "\t\tausgewertet werden. Das ist möglich, indem man\n" +#~ "\t\tdie .bash_profile Datei sourcet.\n" +#~ "\n" +#~ "\t\tHinweis: Dies setzt das Bash-Completion-Framework voraus, das auf dem " +#~ "Mac nicht standardmäßig installiert ist. \n" +#~ "\t\tEs kann mit homebrew installiert werden:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tSobald es installiert ist, muss bash_completion ausgewertet werden. " +#~ "Dies wird erreicht, indem man\n" +#~ "\t\tdie folgende Zeile zur .bash_profile-Datei hinzufügt\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tHinweis für zsh Nutzer: [1] zsh completions werden nur in Versionen " +#~ "von zsh >= 5.2 unterstützt" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tFühre ein Rolling-Update des gegebenen ReplicationControllers aus.\n" +#~ "\n" +#~ "\t\tErsetzt den gegebenen ReplicationController mit einem neuen " +#~ "Replication-Controller, indem die neue PodTampleta Pod für Pod\n" +#~ "\t\tangewendet wird. Die new-controller.json muss den gleichen Namespace " +#~ "wie\n" +#~ "\t\tder aktuelle ReplicationController besitzen und mindestens ein " +#~ "gemeinsames Label im ReplicaSelector überschreiben.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. 
This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tErsetze eine Resource mit Dateinamen oder stdin.\n" +#~ "\n" +#~ "\t\tJSON- and YAML-Formate werden akzeptiert. Wenn eine existierende " +#~ "Resource ersetzt wird,\n" +#~ "\t\tmuss die vollständige spSpecec mitgegeben werden. Diese kann hiermit " +#~ "ausgelesen werden\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tBitte konsultiere https://htmlpreview.github.io/?https://github.com/" +#~ "kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html um " +#~ "zu erfahren, ob ein Feld verändert werden darf." + +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." +#~ msgstr "" +#~ "\n" +#~ "\t\tSetze eine neue Größe für ein Deployment, ReplicaSet, Replication-" +#~ "Contoller oder Job.\n" +#~ "\n" +#~ "\t\tScale erlaubt es Nutzern eine oder mehr Voraussetzungen für die " +#~ "Aktion festzulegen.\n" +#~ "\n" +#~ "\t\tWenn --current-replicas oder --resource-version gegeben ist, wird es " +#~ "validiert, bevor\n" +#~ "\t\tscale versucht wird, und es wird garantiert, dass die Voraussetzungen " +#~ "erfüllt sind, wenn\n" +#~ "\t\tscale zum Server geschickt wird." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tProxy alle Teile der Kubernetes-API und sonst nichts:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tProxy nur bestimmte Teile der Kubernetes-API und einige statische " +#~ "Dateien:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tDer Befehl oben lässt dich 'curl localhost:8001/api/v1/pods' " +#~ "aufrufen.\n" +#~ "\n" +#~ "\t\tProxy alle Teile der Kubernetes-API auf einem anderen root:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tDer Befehl oben lässt dich 'curl localhost:8001/custom/api/v1/pods' " +#~ "aufrufen" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tAktualisiere Felder einer Resource mit einem Strategic-Merge-Patch\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert.\n" +#~ "\n" +#~ "\t\tBitte konsultiere https://htmlpreview.github.io/?https://github.com/" +#~ "kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html um " +#~ "zu erfahren, ob ein Feld mutable ist." + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # Erstelle ein neues TLS Secret tl-secret mit dem gegebenen " +#~ "Schlüsselpaar:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." 
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Erstelle ein neues Secret my-secret mit einem Key für jede Datei im " +#~ "Ordner bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Erstelle ein neues Secret my-secret mit den gegebenen Keys statt " +#~ "der Namen auf der Festplatte\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Erstelle ein neues Scret my-secret mit key1=supersecret und " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Erstelle einen neuen ExternalName-Service my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # Erstelle einen neuen ClusterIP-Service my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Erstelle einen neuen ClusterIP-Service my-cs (im headless-Modus)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # Erstelle ein neues Deployment my-dep, dass das busybox-Image " +#~ "nutzt.\n" +#~ " kubectl create deployment my-dep --image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # Erstelle einen neuen NodePort-Service my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Erstelle einen ClusterIP-Service mit dem gegebenen Namen." + +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Erstelle ein Deployment mit dem gegebenen Namen." + +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Erstelle einen NodePort-Service mit dem gegebenen Namen." + +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Zeigt Adressen des Master und von Services mit Label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " Für das weitere Debugging und die Diagnose von Clusterproblemen nutze " +#~ "'kubectl cluster-info dump'." + +#~ msgid "A schedule in the Cron format the job should be run with." 
+#~ msgstr "Ein Schedule im Cron Format, dass der Job nutzen soll." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "" +#~ "Wende eine Konfiguration auf eine Resource über den Dateinamen oder stdin " +#~ "an" + +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "Auto-skaliere ein Deployment, ReplicaSet oder ReplicationController" + +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Name des Containers dessen Image aktualisiert wird. Nur relevant, wenn --" +#~ "image angegeben ist, sonst ignoriert. Verpflichtend, wenn --image auf " +#~ "einem Multi-Container-Pod verwendet wird" + +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "Erstelle ein ClusterRoleBinding für eine bestimmte ClusterRole" + +#~ msgid "Create a LoadBalancer service." +#~ msgstr "Erstelle einen LoadBalancer-Service." + +#~ msgid "Create a NodePort service." +#~ msgstr "Erstelle einen NodePort-Service." + +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "Erstelle ein RoleBinding für eine bestimmte Role oder ClusterRole" + +#~ msgid "Create a clusterIP service." +#~ msgstr "Erstelle einen ClusterIP-Service" + +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "" +#~ "Erstelle eine ConfigMap von einer Datei, einem Ordner oder einem festen " +#~ "Wert" + +#~ msgid "Create a deployment with the specified name." +#~ msgstr "Erstelle ein Deployment mit dem gegebenen Namen." + +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "Erstelle ein Pod-Disruption-Budget mit dem gegebenen Namen." + +#~ msgid "Create a quota with the specified name." +#~ msgstr "Erstelle eine Quota mit dem gegebenen Namen." + +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "Erstelle eine Resource von einer Datei oder stdin" + +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "" +#~ "Erstelle ein Secret von einer lokalen Datei, einem Ordner oder einem " +#~ "festen Wert" + +#~ msgid "Create a service using specified subcommand." +#~ msgstr "Erstelle einen Servuce mit dem angegebenen Sub-Befehl" + +#~ msgid "Create an ExternalName service." +#~ msgstr "Erstelle einen ExternalName-Service." + +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" +#~ msgstr "" +#~ "Lösche Resourcen von einer Datei, stdin, resources- und names- oder mit " +#~ "resources- und label-Selektor" + +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "" +#~ "Veraltet: Graziöses herunterfahren einer Resource über den Namen oder " +#~ "Dateinamen" + +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "Zeige Resourcennutzung (CPU/Memory) von Knoten" + +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "Zeige Resourcennutzung (CPU/Memory) von Pods" + +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "Zeige Resourcennutzung (CPU/Memory)." 
+ +#~ msgid "Display cluster info" +#~ msgstr "Zeige Cluster-Info" + +#~ msgid "Displays the current-context" +#~ msgstr "Zeige den aktuellen Kontext" + +#~ msgid "Documentation of resources" +#~ msgstr "Dokumentation einer Resource" + +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "Zeige viele relevante Informationen für Debugging und Diagnose" + +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Explizite Vorgabe, wann Container-Images gepullt werden. Verpflichtend, " +#~ "wenn --image ist gleich dem aktuellen Image ist - sonst ignoriert." + +#~ msgid "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP, die dem Load-Balancer zugewiesen wird. Falls leer, wird eine " +#~ "temporäre IP erstellt und verwendet (Cloud-Provider spezifisch)" + +#~ msgid "Manage a deployment rollout" +#~ msgstr "Verwalte ein Deployment-Rollout" + +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "Führe ein Rolling-Update des gegebenen ReplicationControllers aus" + +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "Ersetze eine Resource von einem Dateinamen oder stdin" + +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" +#~ msgstr "" +#~ "Setze eine neue Größe für ein Deployment, ReplicaSet, " +#~ "ReplicationController oder Job" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Setze die Annotation Last-Applied-Configuration auf einem Live-Objekt auf " +#~ "den Inhalt einer Datei." + +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "Setze einen Cluster-Eintrag in der kubeconfig" + +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "Setze einen Kontext-Eintrag in der kubeconfig" + +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "Setze einen User-Eintrag in der kubeconfig" + +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "Setze einen einzelnen Value in einer kubeconfig-Datei" + +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "Setze den aktuellen Kontext in einer kubeconfig-Datei" + +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" +#~ msgstr "" +#~ "Nehme einen Replication Controller, Service, Deployment oder Pod und " +#~ "biete ihn als neuen Kubernetes-Service an" + +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "Der Name des zu verwendenden API-Generators. Siehe http://kubernetes.io/" +#~ "docs/user-guide/kubectl-conventions/#generators für eine Übersicht." + +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "" +#~ "Der Name des zu verwendenden API-Generators. Zur Zeit gibt es nur einen " +#~ "Generator." + +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "Der Name des zu verwendenden Generators, um einen Service zu erstellen. " +#~ "Wird nur benutzt, wenn --expose true ist" + +#~ msgid "" +#~ "The port that this container exposes. 
+#~ "the port used by the service that is created."
+#~ msgstr ""
+#~ "Der Port, den der Container anbietet. Wenn --expose true ist, ist es "
+#~ "auch der Port, den der zu erstellende Service verwendet"
+
+#~ msgid ""
+#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is "
+#~ "'ClusterIP'."
+#~ msgstr ""
+#~ "Typ für diesen Service: ClusterIP, NodePort oder LoadBalancer. Standard "
+#~ "ist 'ClusterIP'."
+
+#~ msgid "Update field(s) of a resource using strategic merge patch"
+#~ msgstr "Aktualisiere Felder einer Resource mit einem Strategic-Merge-Patch"
+
+#~ msgid "Update image of a pod template"
+#~ msgstr "Aktualisiere das Image eines Pod-Templates"
+
+#~ msgid ""
+#~ "View latest last-applied-configuration annotations of a resource/object"
+#~ msgstr ""
+#~ "Zeige die aktuelle Annotation Last-Applied-Configuration einer Resource / "
+#~ "eines Objekts"
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.mo
new file mode 100644
index 0000000000000000000000000000000000000000..77b13524a32b4bc7d99134bcba351ecb07b553c3
GIT binary patch
literal 153024
[153024 bytes of base85-encoded binary data for the compiled k8s.mo catalog omitted]

[the diff header of the file that follows was lost along with the binary data; its hunk resumes mid-header below]
+"Language-Team: \n"
+"Language: en\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: Poedit 1.8.12\n"
+"X-Poedit-SourceCharset: UTF-8\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:138
+msgid ""
+"\n"
+"\t\t\t# Approve CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate approve csr-sqgzp\n"
+"\t\t"
+msgstr ""
+"\n"
+"\t\t\t# Approve CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate approve csr-sqgzp\n"
+"\t\t"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:182
+msgid ""
+"\n"
+"\t\t\t# Deny CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate deny csr-sqgzp\n"
+"\t\t"
+msgstr ""
+"\n"
+"\t\t\t# Deny CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate deny csr-sqgzp\n"
+"\t\t"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:43
+msgid ""
+"\n"
+"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set "
+"current-context my-context\"\n"
+"\n"
+"\t\t\tThe loading order follows these rules:\n"
+"\n"
+"\t\t\t1. If the --"
+msgstr ""
+"\n"
+"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set "
+"current-context my-context\"\n"
+"\n"
+"\t\t\tThe loading order follows these rules:\n"
+"\n"
+"\t\t\t1. If the --"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:44
+msgid ""
+"\n"
+"\t\t # Create a cluster role binding for user1, user2, and group1 using the "
+"cluster-admin cluster role\n"
+"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-"
+"admin --user=user1 --user=user2 --group=group1"
+msgstr ""
+"\n"
+"\t\t # Create a cluster role binding for user1, user2, and group1 using the "
+"cluster-admin cluster role\n"
+"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-"
+"admin --user=user1 --user=user2 --group=group1"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:58
+msgid ""
+"\n"
+"\t\t # Create a new config map named my-config based on folder bar\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config with specified keys instead "
+"of file basenames on disk\n"
+"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1."
+"txt --from-file=key2=/path/to/bar/file2.txt\n"
+"\n"
+"\t\t # Create a new config map named my-config with key1=config1 and "
+"key2=config2\n"
+"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-"
+"literal=key2=config2\n"
+"\n"
+"\t\t # Create a new config map named my-config from the key=value pairs in "
+"the file\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config from an env file\n"
+"\t\t kubectl create configmap my-config --from-env-file=path/to/bar.env"
+msgstr ""
+"\n"
+"\t\t # Create a new config map named my-config based on folder bar\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config with specified keys instead "
+"of file basenames on disk\n"
+"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1."
+"txt --from-file=key2=/path/to/bar/file2.txt\n"
+"\n"
+"\t\t # Create a new config map named my-config with key1=config1 and "
+"key2=config2\n"
+"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-"
+"literal=key2=config2\n"
+"\n"
+"\t\t # Create a new config map named my-config from the key=value pairs in "
+"the file\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config from an env file\n"
+"\t\t kubectl create configmap my-config --from-env-file=path/to/bar.env"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:43
+msgid ""
+"\n"
+"\t\t # Create a role binding for user1, user2, and group1 using the admin "
+"cluster role\n"
+"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --"
+"user=user2 --group=group1"
+msgstr ""
+"\n"
+"\t\t # Create a role binding for user1, user2, and group1 using the admin "
+"cluster role\n"
+"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --"
+"user=user2 --group=group1"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:56
+msgid ""
+"\n"
+"\t\t # If you don't already have a .dockercfg file, you can create a "
+"dockercfg secret directly by using:\n"
+"\t\t kubectl create secret docker-registry my-secret --docker-"
+"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
+"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n"
+"\n"
+"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n"
+"\t\t kubectl create secret docker-registry my-secret --from-file=."
+"dockerconfigjson=path/to/.docker/config.json"
+msgstr ""
+"\n"
+"\t\t # If you don't already have a .dockercfg file, you can create a "
+"dockercfg secret directly by using:\n"
+"\t\t kubectl create secret docker-registry my-secret --docker-"
+"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
+"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n"
+"\n"
+"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n"
+"\t\t kubectl create secret docker-registry my-secret --from-file=."
+"dockerconfigjson=path/to/.docker/config.json"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62
+msgid ""
+"\n"
+"\t\t # Show metrics for all nodes\n"
+"\t\t kubectl top node\n"
+"\n"
+"\t\t # Show metrics for a given node\n"
+"\t\t kubectl top node NODE_NAME"
+msgstr ""
+"\n"
+"\t\t # Show metrics for all nodes\n"
+"\t\t kubectl top node\n"
+"\n"
+"\t\t # Show metrics for a given node\n"
+"\t\t kubectl top node NODE_NAME"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:45
+msgid ""
+"\n"
+"\t\t# !!!Important Note!!!\n"
+"\t\t# Requires that the 'tar' binary is present in your container\n"
+"\t\t# image. If 'tar' is not present, 'kubectl cp' will fail.\n"
+"\t\t#\n"
+"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n"
+"\t\t# file mode preservation, consider using 'kubectl exec'.\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\ttar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- "
+"tar xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar "
+"xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in "
+"the default namespace\n"
+"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific "
+"container\n"
+"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
+msgstr ""
+"\n"
+"\t\t# !!!Important Note!!!\n"
+"\t\t# Requires that the 'tar' binary is present in your container\n"
+"\t\t# image. If 'tar' is not present, 'kubectl cp' will fail.\n"
+"\t\t#\n"
+"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n"
+"\t\t# file mode preservation, consider using 'kubectl exec'.\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\ttar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- "
+"tar xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar "
+"xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in "
+"the default namespace\n"
+"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific "
+"container\n"
+"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:119
+msgid ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. "
+"dir/kustomization.yaml\n"
+"\t\tkubectl apply -k dir/\n"
+"\n"
+"\t\t# Apply the JSON passed into stdin to a pod\n"
+"\t\tcat pod.json | kubectl apply -f -\n"
+"\n"
+"\t\t# Note: --prune is still in Alpha\n"
+"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx "
+"and delete all other resources that are not in the file and match label "
+"app=nginx\n"
+"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+"\n"
+"\t\t# Apply the configuration in manifest.yaml and delete all the other "
+"config maps that are not in the file\n"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"ConfigMap"
+msgstr ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. "
+"dir/kustomization.yaml\n"
+"\t\tkubectl apply -k dir/\n"
+"\n"
+"\t\t# Apply the JSON passed into stdin to a pod\n"
+"\t\tcat pod.json | kubectl apply -f -\n"
+"\n"
+"\t\t# Note: --prune is still in Alpha\n"
+"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx "
+"and delete all other resources that are not in the file and match label "
+"app=nginx\n"
+"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+"\n"
+"\t\t# Apply the configuration in manifest.yaml and delete all the other "
+"config maps that are not in the file\n"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"ConfigMap"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:48
+#, c-format
+msgid ""
+"\n"
+"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and "
+"10, no target CPU utilization specified so a default autoscaling policy will "
+"be used\n"
+"\t\tkubectl autoscale deployment foo --min=2 --max=10\n"
+"\n"
+"\t\t# Auto scale a replication controller \"foo\", with the number of pods "
+"between 1 and 5, target CPU utilization at 80%\n"
+"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
+msgstr ""
+"\n"
+"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and "
+"10, no target CPU utilization specified so a default autoscaling policy will "
+"be used\n"
+"\t\tkubectl autoscale deployment foo --min=2 --max=10\n"
+"\n"
+"\t\t# Auto scale a replication controller \"foo\", with the number of pods "
+"between 1 and 5, target CPU utilization at 80%\n"
+"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
+
+#: pkg/kubectl/cmd/convert/convert.go:51
+msgid ""
+"\n"
+"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n"
+"\t\tkubectl convert -f pod.yaml\n"
+"\n"
+"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the "
+"latest version\n"
+"\t\t# and print to stdout in JSON format.\n"
+"\t\tkubectl convert -f pod.yaml --local -o json\n"
+"\n"
+"\t\t# Convert all files under current directory to latest version and create "
+"them all.\n"
+"\t\tkubectl convert -f . | kubectl create -f -"
+msgstr ""
+"\n"
+"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n"
+"\t\tkubectl convert -f pod.yaml\n"
+"\n"
+"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the "
+"latest version\n"
+"\t\t# and print to stdout in JSON format.\n"
+"\t\tkubectl convert -f pod.yaml --local -o json\n"
+"\n"
+"\t\t# Convert all files under current directory to latest version and create "
+"them all.\n"
+"\t\tkubectl convert -f . | kubectl create -f -"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:41
+msgid ""
+"\n"
+"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform "
+"\"get\", \"watch\" and \"list\" on pods\n"
+"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --"
+"resource=pods\n"
+"\n"
+"\t\t# Create a cluster role named \"pod-reader\" with ResourceName "
+"specified\n"
+"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --"
+"resource-name=readablepod --resource-name=anotherpod\n"
+"\n"
+"\t\t# Create a cluster role named \"foo\" with API Group specified\n"
+"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs."
+"extensions\n"
+"\n"
+"\t\t# Create a cluster role named \"foo\" with SubResource specified\n"
+"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods,"
+"pods/status\n"
+"\n"
+"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n"
+"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/"
+"*\n"
+"\n"
+"\t\t# Create a cluster role name \"monitoring\" with AggregationRule "
+"specified\n"
+"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example."
+"com/aggregate-to-monitoring=true\""
+msgstr ""
+"\n"
+"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform "
+"\"get\", \"watch\" and \"list\" on pods\n"
+"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --"
+"resource=pods\n"
+"\n"
+"\t\t# Create a cluster role named \"pod-reader\" with ResourceName "
+"specified\n"
+"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --"
+"resource-name=readablepod --resource-name=anotherpod\n"
+"\n"
+"\t\t# Create a cluster role named \"foo\" with API Group specified\n"
+"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs."
+"extensions\n"
+"\n"
+"\t\t# Create a cluster role named \"foo\" with SubResource specified\n"
+"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods,"
+"pods/status\n"
+"\n"
+"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n"
+"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/"
+"*\n"
+"\n"
+"\t\t# Create a cluster role name \"monitoring\" with AggregationRule "
+"specified\n"
+"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example."
+"com/aggregate-to-monitoring=true\""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:43
+msgid ""
+"\n"
+"\t\t# Create a job\n"
+"\t\tkubectl create job my-job --image=busybox\n"
+"\n"
+"\t\t# Create a job with a command\n"
+"\t\tkubectl create job my-job --image=busybox -- date\n"
+"\n"
+"\t\t# Create a job from a cron job named \"a-cronjob\"\n"
+"\t\tkubectl create job test-job --from=cronjob/a-cronjob"
+msgstr ""
+"\n"
+"\t\t# Create a job\n"
+"\t\tkubectl create job my-job --image=busybox\n"
+"\n"
+"\t\t# Create a job with a command\n"
+"\t\tkubectl create job my-job --image=busybox -- date\n"
+"\n"
+"\t\t# Create a job from a cron job named \"a-cronjob\"\n"
+"\t\tkubectl create job test-job --from=cronjob/a-cronjob"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:44
+msgid ""
+"\n"
+"\t\t# Create a new resource quota named my-quota\n"
+"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,"
+"replicationcontrollers=2,resourcequotas=1,secrets=5,"
+"persistentvolumeclaims=10\n"
+"\n"
+"\t\t# Create a new resource quota named best-effort\n"
+"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort"
+msgstr ""
+"\n"
+"\t\t# Create a new resource quota named my-quota\n"
+"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,"
+"replicationcontrollers=2,resourcequotas=1,secrets=5,"
+"persistentvolumeclaims=10\n"
+"\n"
+"\t\t# Create a new resource quota named best-effort\n"
+"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:44
+#, c-format
+msgid ""
+"\n"
+"\t\t# Create a pod disruption budget named my-pdb that will select all pods "
+"with the app=rails label\n"
+"\t\t# and require at least one of them being available at any point in "
+"time\n"
+"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-"
+"available=1\n"
+"\n"
+"\t\t# Create a pod disruption budget named my-pdb that will select all pods "
+"with the app=nginx label\n"
+"\t\t# and require at least half of the pods selected to be available at any "
+"point in time\n"
+"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%"
+msgstr ""
+"\n"
+"\t\t# Create a pod disruption budget named my-pdb that will select all pods "
+"with the app=rails label\n"
+"\t\t# and require at least one of them being available at any point in time\n"
+"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-"
+"available=1\n"
+"\n"
+"\t\t# Create a pod disruption budget named my-pdb that will select all pods "
+"with the app=nginx label\n"
+"\t\t# and require at least half of the pods selected to be available at any "
+"point in time\n"
+"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:76
+msgid ""
+"\n"
+"\t\t# Create a pod using the data in pod.json\n"
+"\t\tkubectl create -f ./pod.json\n"
+"\n"
+"\t\t# Create a pod based on the JSON passed into stdin\n"
+"\t\tcat pod.json | kubectl create -f -\n"
+"\n"
+"\t\t# Edit the data in docker-registry.yaml in JSON then create the resource "
+"using the edited data\n"
+"\t\tkubectl create -f docker-registry.yaml --edit -o json"
+msgstr ""
+"\n"
+"\t\t# Create a pod using the data in pod.json\n"
+"\t\tkubectl create -f ./pod.json\n"
+"\n"
+"\t\t# Create a pod based on the JSON passed into stdin\n"
+"\t\tcat pod.json | kubectl create -f -\n"
+"\n"
+"\t\t# Edit the data in docker-registry.yaml in JSON then create the resource "
+"using the edited data\n"
+"\t\tkubectl create -f docker-registry.yaml --edit -o json"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:43
+msgid ""
+"\n"
+"\t\t# Create a priority class named high-priority\n"
+"\t\tkubectl create priorityclass high-priority --value=1000 --description="
+"\"high priority\"\n"
+"\n"
+"\t\t# Create a priority class named default-priority that is considered as "
+"the global default priority\n"
+"\t\tkubectl create priorityclass default-priority --value=1000 --global-"
+"default=true --description=\"default priority\"\n"
+"\n"
+"\t\t# Create a priority class named high-priority that cannot preempt pods "
+"with lower priority\n"
+"\t\tkubectl create priorityclass high-priority --value=1000 --description="
+"\"high priority\" --preemption-policy=\"Never\""
+msgstr ""
+"\n"
+"\t\t# Create a priority class named high-priority\n"
+"\t\tkubectl create priorityclass high-priority --value=1000 --description="
+"\"high priority\"\n"
+"\n"
+"\t\t# Create a priority class named default-priority that is considered as "
+"the global default priority\n"
+"\t\tkubectl create priorityclass default-priority --value=1000 --global-"
+"default=true --description=\"default priority\"\n"
+"\n"
+"\t\t# Create a priority class named high-priority that cannot preempt pods "
+"with lower priority\n"
+"\t\tkubectl create priorityclass high-priority --value=1000 --description="
+"\"high priority\" --preemption-policy=\"Never\""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:46
+msgid ""
+"\n"
+"\t\t# Create a role named \"pod-reader\" that allows user to perform \"get"
+"\", \"watch\" and \"list\" on pods\n"
+"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --"
+"resource=pods\n"
+"\n"
+"\t\t# Create a role named \"pod-reader\" with ResourceName specified\n"
+"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-"
+"name=readablepod --resource-name=anotherpod\n"
+"\n"
+"\t\t# Create a role named \"foo\" with API Group specified\n"
+"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.extensions\n"
+"\n"
+"\t\t# Create a role named \"foo\" with SubResource specified\n"
+"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status"
+msgstr ""
+"\n"
+"\t\t# Create a role named \"pod-reader\" that allows user to perform \"get"
+"\", \"watch\" and \"list\" on pods\n"
+"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --"
+"resource=pods\n"
+"\n"
+"\t\t# Create a role named \"pod-reader\" with ResourceName specified\n"
+"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-"
+"name=readablepod --resource-name=anotherpod\n"
+"\n"
+"\t\t# Create a role named \"foo\" with API Group specified\n"
+"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.extensions\n"
+"\n"
+"\t\t# Create a role named \"foo\" with SubResource specified\n"
+"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:61
+msgid ""
+"\n"
+"\t\t# Create a service for a replicated nginx, which serves on port 80 and "
+"connects to the containers on port 8000\n"
+"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n"
+"\n"
+"\t\t# Create a service for a replication controller identified by type and "
+"name specified in \"nginx-controller.yaml\", which serves on port 80 and "
+"connects to the containers on port 8000\n"
+"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n"
+"\n"
+"\t\t# Create a service for a pod valid-pod, which serves on port 444 with "
+"the name \"frontend\"\n"
+"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n"
+"\n"
+"\t\t# Create a second service based on the above service, exposing the "
+"container port 8443 as port 443 with the name \"nginx-https\"\n"
+"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-"
+"https\n"
+"\n"
+"\t\t# Create a service for a replicated streaming application on port 4100 "
+"balancing UDP traffic and named 'video-stream'.\n"
+"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-"
+"stream\n"
+"\n"
+"\t\t# Create a service for a replicated nginx using replica set, which "
+"serves on port 80 and connects to the containers on port 8000\n"
+"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n"
+"\n"
+"\t\t# Create a service for an nginx deployment, which serves on port 80 and "
+"connects to the containers on port 8000\n"
+"\t\tkubectl expose deployment nginx --port=80 --target-port=8000"
+msgstr ""
+"\n"
+"\t\t# Create a service for a replicated nginx, which serves on port 80 and "
+"connects to the containers on port 8000\n"
+"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n"
+"\n"
+"\t\t# Create a service for a replication controller identified by type and "
+"name specified in \"nginx-controller.yaml\", which serves on port 80 and "
+"connects to the containers on port 8000\n"
+"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n"
+"\n"
+"\t\t# Create a service for a pod valid-pod, which serves on port 444 with "
+"the name \"frontend\"\n"
+"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n"
+"\n"
+"\t\t# Create a second service based on the above service, exposing the 
" +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:64 +msgid "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." +"com/bar to svc\n" +"\t\t# svc1:8080 with a tls secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-cert" +"\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\n\t\t\t--annotation ingress.annotation1=foo \\n\t\t\t--" +"annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\n\t\t\t--rule=\"foo." +"com/=svc:port\" \\n\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\n\t\t\t--rule=\"foo." +"com/path*=svc:8080\" \\n\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\n\t\t --rule=\"foo.com/" +"=svc:https,tls\" \\n\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\n\t\t --rule=\"foo." +"com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\n\t\t --default-" +"backend=defaultsvc:http \\n\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." 
+"com/bar to svc\n" +"\t\t# svc1:8080 with a tls secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-cert" +"\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\n\t\t\t--annotation ingress.annotation1=foo \\n\t\t\t--" +"annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\n\t\t\t--rule=\"foo." +"com/=svc:port\" \\n\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\n\t\t\t--rule=\"foo." +"com/path*=svc:8080\" \\n\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\n\t\t --rule=\"foo.com/" +"=svc:https,tls\" \\n\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\n\t\t --rule=\"foo." +"com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\n\t\t --default-" +"backend=defaultsvc:http \\n\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:74 +msgid "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" +msgstr "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# 
Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:74 +msgid "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. " +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" +msgstr "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. 
" +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:51 +msgid "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name)\n" +"\t\tkubectl describe pods frontend" +msgstr "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name)\n" +"\t\tkubectl describe pods frontend" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:76 +msgid "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" +msgstr "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:138 +msgid "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication controller, replica set, job, daemon set or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" +msgstr "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication controller, replica set, job, daemon set or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:55 +msgid "" +"\n" +"\t\t# Edit the service named 
'docker-registry'\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +msgstr "" +"\n" +"\t\t# Edit the service named 'docker-registry'\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:44 +msgid "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:48 +msgid "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. 
-i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. -i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:65 +msgid "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" 
+"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." +"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # Kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"" +msgstr "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" +"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." +"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # Kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:105 +msgid "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." 
+"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7" +msgstr "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." +"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:72 +msgid "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" +msgstr "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod 
selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:87 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" +msgstr "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:58 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" +msgstr "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:83 +msgid "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +msgstr "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all 
commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:44 +msgid "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" +msgstr "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:56 +msgid "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" +msgstr "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:53 +msgid "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a 
deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" +msgstr "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:49 +msgid "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" +msgstr "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:75 +msgid "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a 
file\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:62 +msgid "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --env=" +"\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and \"env=prod" +"\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --labels=" +"\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... 
" +msgstr "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --env=" +"\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and \"env=prod" +"\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --labels=" +"\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... " + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:73 +msgid "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. 
the pods API available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" +msgstr "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. the pods API available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:80 +msgid "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-\n" +"\n" +"\t\t# Add a taint with key 'dedicated' on nodes having label mylabel=X\n" +"\t\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\n" +"\n" +"\t\t# Add to node 'foo' a taint with key 'bar' and no value\n" +"\t\tkubectl taint nodes foo bar:NoSchedule" +msgstr "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-\n" +"\n" +"\t\t# Add a taint with key 'dedicated' on nodes having label mylabel=X\n" +"\t\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\n" +"\n" +"\t\t# Add to node 'foo' a taint with key 'bar' and no value\n" +"\t\tkubectl taint nodes foo bar:NoSchedule" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:95 +msgid "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type 
and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists\n" +"\t\t# Does not require the --overwrite flag\n" +"\t\tkubectl label pods foo bar-" +msgstr "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists\n" +"\t\t# Does not require the --overwrite flag\n" +"\t\tkubectl label pods foo bar-" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:53 +msgid "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +msgstr "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:61 +msgid "" +"\n" +"\t\t# Wait for the pod \"busybox1\" to contain the status condition of type " +"\"Ready\"\n" +"\t\tkubectl wait --for=condition=Ready pod/busybox1\n" +"\n" +"\t\t# The default value of status condition is true; you can set it to " +"false\n" +"\t\tkubectl wait --for=condition=Ready=false pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to be deleted, with a timeout of 60s, " +"after having issued the \"delete\" command\n" +"\t\tkubectl delete pod/busybox1\n" +"\t\tkubectl wait --for=delete pod/busybox1 --timeout=60s" +msgstr "" +"\n" +"\t\t# Wait for the pod \"busybox1\" to contain the status condition of type " +"\"Ready\"\n" +"\t\tkubectl wait --for=condition=Ready pod/busybox1\n" +"\n" +"\t\t# The default value of status condition is true; you can set it to " +"false\n" +"\t\tkubectl wait --for=condition=Ready=false pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to be deleted, with a timeout of 60s, " +"after having issued the \"delete\" command\n" +"\t\tkubectl delete pod/busybox1\n" +"\t\tkubectl wait --for=delete pod/busybox1 --timeout=60s" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:110 +msgid "" +"\n" +"\t\tApply a configuration to a resource by file name or stdin.\n" +"\t\tThe resource name must be specified. 
This resource will be created if it " +"doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." +msgstr "" +"\n" +"\t\tApply a configuration to a resource by file name or stdin.\n" +"\t\tThe resource name must be specified. This resource will be created if it " +"doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:126 +msgid "" +"\n" +"\t\tApprove a certificate signing request.\n" +"\n" +"\t\tkubectl certificate approve allows a cluster admin to approve a " +"certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tissue a certificate to the requestor with the attributes requested in " +"the CSR.\n" +"\n" +"\t\tSECURITY NOTICE: Depending on the requested attributes, the issued " +"certificate\n" +"\t\tcan potentially grant a requester access to cluster resources or to " +"authenticate\n" +"\t\tas a requested identity. Before approving a CSR, ensure you understand " +"what the\n" +"\t\tsigned certificate can do.\n" +"\t\t" +msgstr "" +"\n" +"\t\tApprove a certificate signing request.\n" +"\n" +"\t\tkubectl certificate approve allows a cluster admin to approve a " +"certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tissue a certificate to the requestor with the attributes requested in " +"the CSR.\n" +"\n" +"\t\tSECURITY NOTICE: Depending on the requested attributes, the issued " +"certificate\n" +"\t\tcan potentially grant a requester access to cluster resources or to " +"authenticate\n" +"\t\tas a requested identity. Before approving a CSR, ensure you understand " +"what the\n" +"\t\tsigned certificate can do.\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:28 +msgid "" +"\n" +"\t\tConfigure application resources.\n" +"\n" +"\t\tThese commands help you make changes to existing application resources." +msgstr "" +"\n" +"\t\tConfigure application resources.\n" +"\n" +"\t\tThese commands help you make changes to existing application resources." + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. 
If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:41 +msgid "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist beforehand. The public key " +"certificate must be .PEM encoded and match\n" +"\t\tthe given private key." +msgstr "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist beforehand. The public key " +"certificate must be .PEM encoded and match\n" +"\t\tthe given private key." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:41 +msgid "" +"\n" +"\t\tCreate a cluster role binding for a particular cluster role." +msgstr "" +"\n" +"\t\tCreate a cluster role binding for a particular cluster role." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:38 +msgid "" +"\n" +"\t\tCreate a cluster role." +msgstr "" +"\n" +"\t\tCreate a cluster role." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:46 +msgid "" +"\n" +"\t\tCreate a config map based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single config map may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a config map based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a config map based on a directory, each file whose " +"basename is a valid key in the directory will be\n" +"\t\tpackaged into the config map. Any directory entries except regular " +"files are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" +"\n" +"\t\tCreate a config map based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single config map may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a config map based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a config map based on a directory, each file whose " +"basename is a valid key in the directory will be\n" +"\t\tpackaged into the config map. Any directory entries except regular " +"files are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:40 +msgid "" +"\n" +"\t\tCreate a cron job with the specified name." +msgstr "" +"\n" +"\t\tCreate a cron job with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:40 +msgid "" +"\n" +"\t\tCreate a job with the specified name." +msgstr "" +"\n" +"\t\tCreate a job with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCreate a namespace with the specified name." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:41 +msgid "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running:\n" +"\t\t\t'$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +"\tThat produces a ~/.dockercfg file that is used by subsequent 'docker push' " +"and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they must have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." +msgstr "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running:\n" +"\t\t\t'$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +"\tThat produces a ~/.dockercfg file that is used by subsequent 'docker push' " +"and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they must have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:41 +msgid "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods." +msgstr "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:40 +msgid "" +"\n" +"\t\tCreate a priority class with the specified name, value, globalDefault " +"and description." +msgstr "" +"\n" +"\t\tCreate a priority class with the specified name, value, globalDefault " +"and description." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:71 +msgid "" +"\n" +"\t\tCreate a resource from a file or from stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" +"\n" +"\t\tCreate a resource from a file or from stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:41 +msgid "" +"\n" +"\t\tCreate a resource quota with the specified name, hard limits, and " +"optional scopes." +msgstr "" +"\n" +"\t\tCreate a resource quota with the specified name, hard limits, and " +"optional scopes." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:40 +msgid "" +"\n" +"\t\tCreate a role binding for a particular role or cluster role." +msgstr "" +"\n" +"\t\tCreate a role binding for a particular role or cluster role." 
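+# Illustrative invocations of the secret, pod disruption budget, and role binding
+# commands above; a sketch with hypothetical names (regcred, my-pdb, dev-rb, jane):
+#   kubectl create secret docker-registry regcred --docker-server=registry.example.com --docker-username=user --docker-password=pass --docker-email=user@example.com
+#   kubectl create poddisruptionbudget my-pdb --selector=app=nginx --min-available=2
+#   kubectl create rolebinding dev-rb --clusterrole=view --user=jane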
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with a single rule." +msgstr "" +"\n" +"\t\tCreate a role with a single rule." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:61 +msgid "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key or you " +"wish to choose your own, you may specify\n" +"\t\tan alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files are " +"ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key or you " +"wish to choose your own, you may specify\n" +"\t\tan alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files are " +"ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCreate a service account with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:67 +msgid "" +"\n" +"\t\tCreates a proxy server or application-level gateway between localhost " +"and\n" +"\t\tthe Kubernetes API server. It also allows serving static content over a " +"specified\n" +"\t\tHTTP path. All incoming data enters through one port and gets forwarded " +"to\n" +"\t\tthe remote Kubernetes API server port, except for the path matching the " +"static content path." +msgstr "" +"\n" +"\t\tCreates a proxy server or application-level gateway between localhost " +"and\n" +"\t\tthe Kubernetes API server. It also allows serving static content over a " +"specified\n" +"\t\tHTTP path. All incoming data enters through one port and gets forwarded " +"to\n" +"\t\tthe remote Kubernetes API server port, except for the path matching the " +"static content path." + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:42 +msgid "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a Kubernetes cluster.\n" +"\n" +"\t\tLooks up a deployment, replica set, stateful set, or replication " +"controller by name and creates an autoscaler that uses the given resource as " +"a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed."
+msgstr "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a Kubernetes cluster.\n" +"\n" +"\t\tLooks up a deployment, replica set, stateful set, or replication " +"controller by name and creates an autoscaler that uses the given resource as " +"a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:57 +msgid "" +"\n" +"\t\tDebug cluster resources using interactive debugging containers.\n" +"\n" +"\t\t'debug' provides automation for common debugging tasks for cluster " +"objects identified by\n" +"\t\tresource and name. Pods will be used by default if no resource is " +"specified.\n" +"\n" +"\t\tThe action taken by 'debug' varies depending on what resource is " +"specified. Supported\n" +"\t\tactions include:\n" +"\n" +"\t\t* Workload: Create a copy of an existing pod with certain attributes " +"changed,\n" +"\t for example changing the image tag to a new version.\n" +"\t\t* Workload: Add an ephemeral container to an already running pod, for " +"example to add\n" +"\t\t debugging utilities without restarting the pod.\n" +"\t\t* Node: Create a new pod that runs in the node's host namespaces and can " +"access\n" +"\t\t the node's filesystem.\n" +msgstr "" +"\n" +"\t\tDebug cluster resources using interactive debugging containers.\n" +"\n" +"\t\t'debug' provides automation for common debugging tasks for cluster " +"objects identified by\n" +"\t\tresource and name. Pods will be used by default if no resource is " +"specified.\n" +"\n" +"\t\tThe action taken by 'debug' varies depending on what resource is " +"specified. Supported\n" +"\t\tactions include:\n" +"\n" +"\t\t* Workload: Create a copy of an existing pod with certain attributes " +"changed,\n" +"\t for example changing the image tag to a new version.\n" +"\t\t* Workload: Add an ephemeral container to an already running pod, for " +"example to add\n" +"\t\t debugging utilities without restarting the pod.\n" +"\t\t* Node: Create a new pod that runs in the node's host namespaces and can " +"access\n" +"\t\t the node's filesystem.\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:45 +msgid "" +"\n" +"\t\tDelete resources by file names, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. Only one type of argument may be " +"specified: file names,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource, you must specify the " +"--force flag.\n" +"\t\tNote: only a subset of resources support graceful deletion. 
In the absence " +"of such support,\n" +"\t\tthe --grace-period flag is ignored.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod's processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification which may lead\n" +"\t\tto data corruption or inconsistency. Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods, the scheduler may place new pods on " +"those nodes before the node\n" +"\t\thas released those resources, causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone submits an\n" +"\t\tupdate to a resource right when you submit a delete, their update will " +"be lost along with the\n" +"\t\trest of the resource." +msgstr "" +"\n" +"\t\tDelete resources by file names, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. Only one type of argument may be " +"specified: file names,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource, you must specify the " +"--force flag.\n" +"\t\tNote: only a subset of resources support graceful deletion. In the absence " +"of such support,\n" +"\t\tthe --grace-period flag is ignored.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod's processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification which may lead\n" +"\t\tto data corruption or inconsistency. Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods, the scheduler may place new pods on " +"those nodes before the node\n" +"\t\thas released those resources, causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone submits an\n" +"\t\tupdate to a resource right when you submit a delete, their update will " +"be lost along with the\n" +"\t\trest of the resource." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:175 +msgid "" +"\n" +"\t\tDeny a certificate signing request.\n" +"\n" +"\t\tkubectl certificate deny allows a cluster admin to deny a certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller\n" +"\t\tnot to issue a certificate to the requestor.\n" +"\t\t" +msgstr "" +"\n" +"\t\tDeny a certificate signing request.\n" +"\n" +"\t\tkubectl certificate deny allows a cluster admin to deny a certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller\n" +"\t\tnot to issue a certificate to the requestor.\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:53 +msgid "" +"\n" +"\t\tDiff configurations specified by file name or stdin between the current " +"online\n" +"\t\tconfiguration, and the configuration as it would be if applied.\n" +"\n" +"\t\tThe output is always YAML.\n" +"\n" +"\t\tKUBECTL_EXTERNAL_DIFF environment variable can be used to select your " +"own\n" +"\t\tdiff command. Users can use external commands with params too, example:\n" +"\t\tKUBECTL_EXTERNAL_DIFF=\"colordiff -N -u\"\n" +"\n" +"\t\tBy default, the \"diff\" command available in your path will be\n" +"\t\trun with the \"-u\" (unified diff) and \"-N\" (treat absent files as " +"empty) options.\n" +"\n" +"\t\tExit status:\n" +"\t\t 0\n" +"\t\tNo differences were found.\n" +"\t\t 1\n" +"\t\tDifferences were found.\n" +"\t\t >1\n" +"\t\tKubectl or diff failed with an error.\n" +"\n" +"\t\tNote: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that " +"convention." +msgstr "" +"\n" +"\t\tDiff configurations specified by file name or stdin between the current " +"online\n" +"\t\tconfiguration, and the configuration as it would be if applied.\n" +"\n" +"\t\tThe output is always YAML.\n" +"\n" +"\t\tKUBECTL_EXTERNAL_DIFF environment variable can be used to select your " +"own\n" +"\t\tdiff command. Users can use external commands with params too, example:\n" +"\t\tKUBECTL_EXTERNAL_DIFF=\"colordiff -N -u\"\n" +"\n" +"\t\tBy default, the \"diff\" command available in your path will be\n" +"\t\trun with the \"-u\" (unified diff) and \"-N\" (treat absent files as " +"empty) options.\n" +"\n" +"\t\tExit status:\n" +"\t\t 0\n" +"\t\tNo differences were found.\n" +"\t\t 1\n" +"\t\tDifferences were found.\n" +"\t\t >1\n" +"\t\tKubectl or diff failed with an error.\n" +"\n" +"\t\tNote: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that " +"convention." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:39 +msgid "" +"\n" +"\t\tDisplay Resource (CPU/Memory) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Metrics Server to be correctly configured and " +"working on the server.
" +msgstr "" +"\n" +"\t\tDisplay Resource (CPU/Memory) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Metrics Server to be correctly configured and " +"working on the server. " + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:54 +msgid "" +"\n" +"\t\tDisplay merged kubeconfig settings or a specified kubeconfig file.\n" +"\n" +"\t\tYou can use --output jsonpath={...} to extract specific values using a " +"jsonpath expression." +msgstr "" +"\n" +"\t\tDisplay merged kubeconfig settings or a specified kubeconfig file.\n" +"\n" +"\t\tYou can use --output jsonpath={...} to extract specific values using a " +"jsonpath expression." + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:92 +msgid "" +"\n" +"\t\tDisplay one or many resources.\n" +"\n" +"\t\tPrints a table of the most important information about the specified " +"resources.\n" +"\t\tYou can filter the list using a label selector and the --selector flag. " +"If the\n" +"\t\tdesired resource type is namespaced you will only see results in your " +"current\n" +"\t\tnamespace unless you pass --all-namespaces.\n" +"\n" +"\t\tUninitialized objects are not shown unless --include-uninitialized is " +"passed.\n" +"\n" +"\t\tBy specifying the output as 'template' and providing a Go template as " +"the value\n" +"\t\tof the --template flag, you can filter the attributes of the fetched " +"resources." +msgstr "" +"\n" +"\t\tDisplay one or many resources.\n" +"\n" +"\t\tPrints a table of the most important information about the specified " +"resources.\n" +"\t\tYou can filter the list using a label selector and the --selector flag. " +"If the\n" +"\t\tdesired resource type is namespaced you will only see results in your " +"current\n" +"\t\tnamespace unless you pass --all-namespaces.\n" +"\n" +"\t\tUninitialized objects are not shown unless --include-uninitialized is " +"passed.\n" +"\n" +"\t\tBy specifying the output as 'template' and providing a Go template as " +"the value\n" +"\t\tof the --template flag, you can filter the attributes of the fetched " +"resources." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:57 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." +msgstr "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:67 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." +msgstr "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:37 +msgid "" +"\n" +"\t\tDisplay the current-context." +msgstr "" +"\n" +"\t\tDisplay the current-context." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:113 +msgid "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the API server supports\n" +"\t\t[eviction](https://kubernetes.io/docs/concepts/workloads/pods/" +"disruptions/). Otherwise, it will use normal\n" +"\t\tDELETE to delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are daemon set-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tdaemon set-managed pods, because those pods would be immediately " +"replaced by the\n" +"\t\tdaemon set controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by a replication " +"controller,\n" +"\t\treplica set, daemon set, stateful set, or job, then drain will not " +"delete any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](https://kubernetes.io/images/docs/kubectl_drain.svg)" +msgstr "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the API server supports\n" +"\t\t[eviction](https://kubernetes.io/docs/concepts/workloads/pods/" +"disruptions/). Otherwise, it will use normal\n" +"\t\tDELETE to delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are daemon set-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tdaemon set-managed pods, because those pods would be immediately " +"replaced by the\n" +"\t\tdaemon set controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by a replication " +"controller,\n" +"\t\treplica set, daemon set, stateful set, or job, then drain will not " +"delete any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](https://kubernetes.io/images/docs/kubectl_drain.svg)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:31 +msgid "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand-line tools. 
It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:31 +msgid "" +"\n" +"\t\tEdit the latest last-applied-configuration annotations of resources from " +"the default editor.\n" +"\n" +"\t\tThe edit-last-applied command allows you to directly edit any API " +"resource you can retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. 
The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" +"\n" +"\t\tEdit the latest last-applied-configuration annotations of resources from " +"the default editor.\n" +"\n" +"\t\tThe edit-last-applied command allows you to directly edit any API " +"resource you can retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:49 +msgid "" +"\n" +"\t\tExperimental: Wait for a specific condition on one or many resources.\n" +"\n" +"\t\tThe command takes multiple resources and waits until the specified " +"condition\n" +"\t\tis seen in the Status field of every given resource.\n" +"\n" +"\t\tAlternatively, the command can wait for the given set of resources to be " +"deleted\n" +"\t\tby providing the \"delete\" keyword as the value to the --for flag.\n" +"\n" +"\t\tA successful message will be printed to stdout indicating when the " +"specified\n" +" condition has been met. You can use the -o option to change the output " +"destination." +msgstr "" +"\n" +"\t\tExperimental: Wait for a specific condition on one or many resources.\n" +"\n" +"\t\tThe command takes multiple resources and waits until the specified " +"condition\n" +"\t\tis seen in the Status field of every given resource.\n" +"\n" +"\t\tAlternatively, the command can wait for the given set of resources to be " +"deleted\n" +"\t\tby providing the \"delete\" keyword as the value to the --for flag.\n" +"\n" +"\t\tA successful message will be printed to stdout indicating when the " +"specified\n" +" condition has been met. You can use the -o option to change the output " +"destination."
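+# Illustrative invocations of 'kubectl wait' as described above; a sketch with a
+# hypothetical pod name (my-pod):
+#   kubectl wait --for=condition=Ready pod/my-pod --timeout=60s
+#   kubectl wait --for=delete pod/my-pod --timeout=60s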
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:47 +msgid "" +"\n" +"\t\tExpose a resource as a new Kubernetes service.\n" +"\n" +"\t\tLooks up a deployment, service, replica set, replication controller or " +"pod by name and uses the selector\n" +"\t\tfor that resource as the selector for a new service on the specified " +"port. A deployment or replica set\n" +"\t\twill be exposed as a service only if its selector is convertible to a " +"selector that service supports,\n" +"\t\ti.e. when the selector contains only the matchLabels component. Note " +"that if no port is specified via\n" +"\t\t--port and the exposed resource has multiple ports, all will be re-used " +"by the new service. Also if no\n" +"\t\tlabels are specified, the new service will re-use the labels from the " +"resource it exposes.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\n" +"\t\t" +msgstr "" +"\n" +"\t\tExpose a resource as a new Kubernetes service.\n" +"\n" +"\t\tLooks up a deployment, service, replica set, replication controller or " +"pod by name and uses the selector\n" +"\t\tfor that resource as the selector for a new service on the specified " +"port. A deployment or replica set\n" +"\t\twill be exposed as a service only if its selector is convertible to a " +"selector that service supports,\n" +"\t\ti.e. when the selector contains only the matchLabels component. Note " +"that if no port is specified via\n" +"\t\t--port and the exposed resource has multiple ports, all will be re-used " +"by the new service. Also if no\n" +"\t\tlabels are specified, the new service will re-use the labels from the " +"resource it exposes.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:46 +msgid "" +"\n" +"\t\tList all available plugin files on a user's PATH.\n" +"\n" +"\t\tAvailable plugin files are those that are:\n" +"\t\t- executable\n" +"\t\t- anywhere on the user's PATH\n" +"\t\t- begin with \"kubectl-\"\n" +msgstr "" +"\n" +"\t\tList all available plugin files on a user's PATH.\n" +"\n" +"\t\tAvailable plugin files are those that are:\n" +"\t\t- executable\n" +"\t\t- anywhere on the user's PATH\n" +"\t\t- begin with \"kubectl-\"\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:35 +msgid "" +"\n" +"\t\tList the fields for supported resources.\n" +"\n" +"\t\tThis command describes the fields associated with each supported API " +"resource.\n" +"\t\tFields are identified via a simple JSONPath identifier:\n" +"\n" +"\t\t\t<type>.<fieldName>[.<fieldName>]\n" +"\n" +"\t\tAdd the --recursive flag to display all of the fields at once without " +"descriptions.\n" +"\t\tInformation about each field is retrieved from the server in OpenAPI " +"format." +msgstr "" +"\n" +"\t\tList the fields for supported resources.\n" +"\n" +"\t\tThis command describes the fields associated with each supported API " +"resource.\n" +"\t\tFields are identified via a simple JSONPath identifier:\n" +"\n" +"\t\t\t<type>.<fieldName>[.<fieldName>]\n" +"\n" +"\t\tAdd the --recursive flag to display all of the fields at once without " +"descriptions.\n" +"\t\tInformation about each field is retrieved from the server in OpenAPI " +"format." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:30 +msgid "" +"\n" +"\t\tManage the rollout of a resource." +msgstr "" +"\n" +"\t\tManage the rollout of a resource."
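+# Illustrative invocations of the expose, explain, and plugin commands above; a
+# sketch with a hypothetical deployment name (my-dep):
+#   kubectl expose deployment my-dep --port=80 --target-port=8080
+#   kubectl explain pods.spec.containers --recursive
+#   kubectl plugin list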
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMark node as schedulable." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tMark node as unschedulable." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:57 +msgid "" +"\n" +"\t\tMark the provided resource as paused.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller.\n" +"\t\tUse \"kubectl rollout resume\" to resume a paused resource.\n" +"\t\tCurrently only deployments support being paused." +msgstr "" +"\n" +"\t\tMark the provided resource as paused.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller.\n" +"\t\tUse \"kubectl rollout resume\" to resume a paused resource.\n" +"\t\tCurrently only deployments support being paused." + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:46 +msgid "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tDetailed instructions on how to do this are available here:\n" +"\n" +" for macOS:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-" +"shell-autocompletion\n" +"\n" +" for linux:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-" +"shell-autocompletion\n" +"\n" +" for windows:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" +"#enable-shell-autocompletion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2." +msgstr "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tDetailed instructions on how to do this are available here:\n" +"\n" +" for macOS:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-" +"shell-autocompletion\n" +"\n" +" for linux:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-" +"shell-autocompletion\n" +"\n" +" for windows:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" +"#enable-shell-autocompletion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2." + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:49 +msgid "" +"\n" +"\t\tPrint the logs for a container in a pod or specified resource. \n" +"\t\tIf the pod has only one container, the container name is optional." +msgstr "" +"\n" +"\t\tPrint the logs for a container in a pod or specified resource. \n" +"\t\tIf the pod has only one container, the container name is optional." 
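+# Illustrative invocations of the cordon, pause, and logs commands above; a sketch
+# with hypothetical names (node-1, my-dep, my-pod, my-container):
+#   kubectl uncordon node-1
+#   kubectl rollout pause deployment/my-dep
+#   kubectl logs my-pod -c my-container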
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:37 +msgid "" +"\n" +"\t\tProvides utilities for interacting with plugins.\n" +"\n" +"\t\tPlugins provide extended functionality that is not part of the major " +"command-line distribution.\n" +"\t\tPlease refer to the documentation and examples for more information " +"about how to write your own plugins.\n" +"\n" +"\t\tThe easiest way to discover and install plugins is via the kubernetes " +"sub-project krew.\n" +"\t\tTo install krew, visit [krew.sigs.k8s.io](https://krew.sigs.k8s.io/docs/" +"user-guide/setup/install/)" +msgstr "" +"\n" +"\t\tProvides utilities for interacting with plugins.\n" +"\n" +"\t\tPlugins provide extended functionality that is not part of the major " +"command-line distribution.\n" +"\t\tPlease refer to the documentation and examples for more information " +"about how to write your own plugins.\n" +"\n" +"\t\tThe easiest way to discover and install plugins is via the kubernetes " +"sub-project krew.\n" +"\t\tTo install krew, visit [krew.sigs.k8s.io](https://krew.sigs.k8s.io/docs/" +"user-guide/setup/install/)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:47 +msgid "" +"\n" +"\t\tRenames a context from the kubeconfig file.\n" +"\n" +"\t\tCONTEXT_NAME is the context name that you want to change.\n" +"\n" +"\t\tNEW_NAME is the new name you want to set.\n" +"\n" +"\t\tNote: If the context being renamed is the 'current-context', this field " +"will also be updated." +msgstr "" +"\n" +"\t\tRenames a context from the kubeconfig file.\n" +"\n" +"\t\tCONTEXT_NAME is the context name that you want to change.\n" +"\n" +"\t\tNEW_NAME is the new name you want to set.\n" +"\n" +"\t\tNote: If the context being renamed is the 'current-context', this field " +"will also be updated." + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:48 +msgid "" +"\n" +"\t\tReplace a resource by file name or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +"the\n" +"\t\tcomplete resource spec must be provided. This can be obtained by\n" +"\n" +"\t\t $ kubectl get TYPE NAME -o yaml" +msgstr "" +"\n" +"\t\tReplace a resource by file name or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +"the\n" +"\t\tcomplete resource spec must be provided. This can be obtained by\n" +"\n" +"\t\t $ kubectl get TYPE NAME -o yaml" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:57 +msgid "" +"\n" +"\t\tRestart a resource.\n" +"\n" +"\t Resource rollout will be restarted." +msgstr "" +"\n" +"\t\tRestart a resource.\n" +"\n" +"\t Resource rollout will be restarted." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:58 +msgid "" +"\n" +"\t\tResume a paused resource.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller. By resuming a\n" +"\t\tresource, we allow it to be reconciled again.\n" +"\t\tCurrently only deployments support being resumed." +msgstr "" +"\n" +"\t\tResume a paused resource.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller. By resuming a\n" +"\t\tresource, we allow it to be reconciled again.\n" +"\t\tCurrently only deployments support being resumed." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:55 +msgid "" +"\n" +"\t\tRoll back to a previous rollout." +msgstr "" +"\n" +"\t\tRoll back to a previous rollout."
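+# Illustrative invocations of the context, replace, and rollout commands above; a
+# sketch with hypothetical names (old-ctx, new-ctx, pod.yaml, my-dep):
+#   kubectl config rename-context old-ctx new-ctx
+#   kubectl replace -f pod.yaml
+#   kubectl rollout undo deployment/my-dep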
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_cluster.go:47 +msgid "" +"\n" +"\t\tSet a cluster entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." +msgstr "" +"\n" +"\t\tSet a cluster entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_context.go:44 +msgid "" +"\n" +"\t\tSet a context entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." +msgstr "" +"\n" +"\t\tSet a context entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:40 +msgid "" +"\n" +"\t\tSet a new size for a deployment, replica set, replication controller, or " +"stateful set.\n" +"\n" +"\t\tScale also allows users to specify one or more preconditions for the " +"scale action.\n" +"\n" +"\t\tIf --current-replicas or --resource-version is specified, it is " +"validated before the\n" +"\t\tscale is attempted, and it is guaranteed that the precondition holds " +"true when the\n" +"\t\tscale is sent to the server." +msgstr "" +"\n" +"\t\tSet a new size for a deployment, replica set, replication controller, or " +"stateful set.\n" +"\n" +"\t\tScale also allows users to specify one or more preconditions for the " +"scale action.\n" +"\n" +"\t\tIf --current-replicas or --resource-version is specified, it is " +"validated before the\n" +"\t\tscale is attempted, and it is guaranteed that the precondition holds " +"true when the\n" +"\t\tscale is sent to the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_authinfo.go:70 +#, c-format +msgid "" +"\n" +"\t\tSet a user entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values.\n" +"\n" +"\t\t Client-certificate flags:\n" +"\t\t --%v=certfile --%v=keyfile\n" +"\n" +"\t\t Bearer token flags:\n" +"\t\t\t --%v=bearer_token\n" +"\n" +"\t\t Basic auth flags:\n" +"\t\t\t --%v=basic_user --%v=basic_password\n" +"\n" +"\t\tBearer token and basic auth are mutually exclusive." +msgstr "" +"\n" +"\t\tSet a user entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values.\n" +"\n" +"\t\t Client-certificate flags:\n" +"\t\t --%v=certfile --%v=keyfile\n" +"\n" +"\t\t Bearer token flags:\n" +"\t\t\t --%v=bearer_token\n" +"\n" +"\t\t Basic auth flags:\n" +"\t\t\t --%v=basic_user --%v=basic_password\n" +"\n" +"\t\tBearer token and basic auth are mutually exclusive." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f <file>' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f <file>' was run,\n" +"\t\twithout updating any other parts of the object."
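+# Illustrative invocations of the kubeconfig, scale, and set-last-applied commands
+# above; a sketch with hypothetical names (my-ctx, my-cluster, my-user, my-dep, deploy.yaml):
+#   kubectl config set-context my-ctx --cluster=my-cluster --user=my-user
+#   kubectl scale deployment/my-dep --replicas=3
+#   kubectl apply set-last-applied -f deploy.yaml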
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:67 +#, c-format +msgid "" +"\n" +"\t\tSet the selector on a resource. Note that the new selector will " +"overwrite the old selector if the resource had one prior to the invocation\n" +"\t\tof 'set selector'.\n" +"\n" +"\t\tA selector must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\tIf --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used.\n" +" Note: currently selectors can only be set on Service objects." +msgstr "" +"\n" +"\t\tSet the selector on a resource. Note that the new selector will " +"overwrite the old selector if the resource had one prior to the invocation\n" +"\t\tof 'set selector'.\n" +"\n" +"\t\tA selector must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\tIf --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used.\n" +" Note: currently selectors can only be set on Service objects." + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:39 +msgid "" +"\n" +"\t\tShow details of a specific resource or group of resources.\n" +"\n" +"\t\tPrint a detailed description of the selected resources, including " +"related resources such\n" +"\t\tas events or controllers. You may select a single object by name, all " +"objects of that\n" +"\t\ttype, provide a name prefix, or label selector. For example:\n" +"\n" +"\t\t $ kubectl describe TYPE NAME_PREFIX\n" +"\n" +"\t\twill first check for an exact match on TYPE and NAME_PREFIX. If no such " +"resource\n" +"\t\texists, it will output details for every resource that has a name " +"prefixed with NAME_PREFIX." +msgstr "" +"\n" +"\t\tShow details of a specific resource or group of resources.\n" +"\n" +"\t\tPrint a detailed description of the selected resources, including " +"related resources such\n" +"\t\tas events or controllers. You may select a single object by name, all " +"objects of that\n" +"\t\ttype, provide a name prefix, or label selector. For example:\n" +"\n" +"\t\t $ kubectl describe TYPE NAME_PREFIX\n" +"\n" +"\t\twill first check for an exact match on TYPE and NAME_PREFIX. If no such " +"resource\n" +"\t\texists, it will output details for every resource that has a name " +"prefixed with NAME_PREFIX." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:47 +msgid "" +"\n" +"\t\tShow the status of the rollout.\n" +"\n" +"\t\tBy default 'rollout status' will watch the status of the latest rollout\n" +"\t\tuntil it's done. If you don't want to wait for the rollout to finish " +"then\n" +"\t\tyou can use --watch=false. Note that if a new rollout starts in-between, " +"then\n" +"\t\t'rollout status' will continue watching the latest revision. If you want " +"to\n" +"\t\tpin to a specific revision and abort if it is rolled over by another " +"revision,\n" +"\t\tuse --revision=N where N is the revision you need to watch for." +msgstr "" +"\n" +"\t\tShow the status of the rollout.\n" +"\n" +"\t\tBy default 'rollout status' will watch the status of the latest rollout\n" +"\t\tuntil it's done. If you don't want to wait for the rollout to finish " +"then\n" +"\t\tyou can use --watch=false. Note that if a new rollout starts in-between, " +"then\n" +"\t\t'rollout status' will continue watching the latest revision. 
If you want " +"to\n" +"\t\tpin to a specific revision and abort if it is rolled over by another " +"revision,\n" +"\t\tuse --revision=N where N is the revision you need to watch for." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:41 +#, c-format +msgid "" +"\n" +"\t\tSpecify compute resource requirements (CPU, memory) for any resource " +"that defines a pod template. If a pod is successfully scheduled, it is " +"guaranteed the amount of resource requested, but may burst up to its " +"specified limits.\n" +"\n" +"\t\tFor each compute resource, if a limit is specified and a request is " +"omitted, the request will default to the limit.\n" +"\n" +"\t\tPossible resources include (case insensitive): %s." +msgstr "" +"\n" +"\t\tSpecify compute resource requirements (CPU, memory) for any resource " +"that defines a pod template. If a pod is successfully scheduled, it is " +"guaranteed the amount of resource requested, but may burst up to its " +"specified limits.\n" +"\n" +"\t\tFor each compute resource, if a limit is specified and a request is " +"omitted, the request will default to the limit.\n" +"\n" +"\t\tPossible resources include (case insensitive): %s." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:50 +msgid "" +"\n" +"\t\tUpdate environment variables on a pod template.\n" +"\n" +"\t\tList environment variable definitions in one or more pods, pod " +"templates.\n" +"\t\tAdd, update, or remove container environment variable definitions in one " +"or\n" +"\t\tmore pod templates (within replication controllers or deployment " +"configurations).\n" +"\t\tView or modify the environment variable definitions on all containers in " +"the\n" +"\t\tspecified pods or pod templates, or just those that match a wildcard.\n" +"\n" +"\t\tIf \"--env -\" is passed, environment variables can be read from STDIN " +"using the standard env\n" +"\t\tsyntax.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" +"\n" +"\t\tUpdate environment variables on a pod template.\n" +"\n" +"\t\tList environment variable definitions in one or more pods, pod " +"templates.\n" +"\t\tAdd, update, or remove container environment variable definitions in one " +"or\n" +"\t\tmore pod templates (within replication controllers or deployment " +"configurations).\n" +"\t\tView or modify the environment variable definitions on all containers in " +"the\n" +"\t\tspecified pods or pod templates, or just those that match a wildcard.\n" +"\n" +"\t\tIf \"--env -\" is passed, environment variables can be read from STDIN " +"using the standard env\n" +"\t\tsyntax.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:71 +msgid "" +"\n" +"\t\tUpdate existing container image(s) of resources.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" +"\n" +"\t\tUpdate existing container image(s) of resources.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:78 +msgid "" +"\n" +"\t\tUpdate fields of a resource using strategic merge patch, a JSON merge " +"patch, or a JSON patch.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" +"\n" +"\t\tUpdate fields of a resource using strategic merge patch, a JSON merge " +"patch, or a JSON patch.\n" +"\n" +"\t\tJSON and YAML formats are accepted." 
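+# Illustrative invocations of the set and patch commands above; a sketch with a
+# hypothetical deployment name (my-dep) and container name (nginx):
+#   kubectl set resources deployment my-dep --limits=cpu=200m,memory=512Mi
+#   kubectl set env deployment/my-dep LOG_LEVEL=debug
+#   kubectl set image deployment/my-dep nginx=nginx:1.21
+#   kubectl patch deployment my-dep --type merge -p '{"spec":{"replicas":2}}'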
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:83 +msgid "" +"\n" +"\t\tUpdate the annotations on one or more resources.\n" +"\n" +"\t\tAll Kubernetes objects support the ability to store additional data with " +"the object as\n" +"\t\tannotations. Annotations are key/value pairs that can be larger than " +"labels and include\n" +"\t\tarbitrary string values such as structured JSON. Tools and system " +"extensions may use\n" +"\t\tannotations to store their own data.\n" +"\n" +"\t\tAttempting to set an annotation that already exists will fail unless --" +"overwrite is set.\n" +"\t\tIf --resource-version is specified and does not match the current " +"resource version on\n" +"\t\tthe server the command will fail." +msgstr "" +"\n" +"\t\tUpdate the annotations on one or more resources.\n" +"\n" +"\t\tAll Kubernetes objects support the ability to store additional data with " +"the object as\n" +"\t\tannotations. Annotations are key/value pairs that can be larger than " +"labels and include\n" +"\t\tarbitrary string values such as structured JSON. Tools and system " +"extensions may use\n" +"\t\tannotations to store their own data.\n" +"\n" +"\t\tAttempting to set an annotation that already exists will fail unless --" +"overwrite is set.\n" +"\t\tIf --resource-version is specified and does not match the current " +"resource version on\n" +"\t\tthe server the command will fail." + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:87 +#, c-format +msgid "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label key and value must begin with a letter or number, and may " +"contain letters, numbers, hyphens, dots, and underscores, up to %[1]d " +"characters each.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." +msgstr "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label key and value must begin with a letter or number, and may " +"contain letters, numbers, hyphens, dots, and underscores, up to %[1]d " +"characters each.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:70 +#, c-format +msgid "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* The value is optional. 
If given, it must begin with a letter or " +"number, and may contain letters, numbers, hyphens, dots, and underscores, up " +"to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." +msgstr "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* The value is optional. If given, it must begin with a letter or " +"number, and may contain letters, numbers, hyphens, dots, and underscores, up " +"to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:36 +msgid "" +"\n" +"\t\tView previous rollout revisions and configurations." +msgstr "" +"\n" +"\t\tView previous rollout revisions and configurations." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:47 +msgid "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. You can use " +"the -o option\n" +"\t\tto change the output format." +msgstr "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. You can use " +"the -o option\n" +"\t\tto change the output format." 
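+# Illustrative invocations of the taint, label, and rollout history commands above;
+# a sketch with hypothetical names (node-1, my-pod, my-dep):
+#   kubectl taint nodes node-1 dedicated=gpu:NoSchedule
+#   kubectl label pods my-pod env=prod --overwrite
+#   kubectl rollout history deployment/my-dep --revision=2
+#   kubectl apply view-last-applied deployment/my-dep -o yaml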
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:47 +msgid "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" +msgstr "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:74 +msgid "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from an env file\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/bar.env" +msgstr "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from an env file\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/bar.env" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:45 +msgid "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx 
image with 3 " +"replicas\n" +"\tkubectl create deployment my-dep --image=nginx --replicas=3\n" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image and expose " +"port 5701\n" +"\tkubectl create deployment my-dep --image=busybox --port=5701" +msgstr "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx image with 3 " +"replicas\n" +"\tkubectl create deployment my-dep --image=nginx --replicas=3\n" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image and expose " +"port 5701\n" +"\tkubectl create deployment my-dep --image=busybox --port=5701" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:351 +msgid "" +"\n" +"\t# Create a new ExternalName service named my-ns\n" +"\tkubectl create service externalname my-ns --external-name bar.com" +msgstr "" +"\n" +"\t# Create a new ExternalName service named my-ns\n" +"\tkubectl create service externalname my-ns --external-name bar.com" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:50 +msgid "" +"\n" +"\t# Set deployment nginx-deployment's service account to serviceaccount1\n" +"\tkubectl set serviceaccount deployment nginx-deployment serviceaccount1\n" +"\n" +"\t# Print the result (in YAML format) of updated nginx deployment with the " +"service account from local file, without hitting the API server\n" +"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-" +"run=client -o yaml\n" +"\t" +msgstr "" +"\n" +"\t# Set deployment nginx-deployment's service account to serviceaccount1\n" +"\tkubectl set serviceaccount deployment nginx-deployment serviceaccount1\n" +"\n" +"\t# Print the result (in YAML format) of updated nginx deployment with the " +"service account from local file, without hitting the API server\n" +"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-" +"run=client -o yaml\n" +"\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:42 +msgid "" +"\n" +"\tCreate a deployment with the specified name." +msgstr "" +"\n" +"\tCreate a deployment with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:61 +msgid "" +"\n" +"\tCreate an ingress with the specified name." +msgstr "" +"\n" +"\tCreate an ingress with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." 
+msgstr "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:44 +msgid "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." +msgstr "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:39 +msgid "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." +msgstr "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:43 +msgid "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" +msgstr "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:40 +msgid "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." +msgstr "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:68 +msgid "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), replicaset (rs)" +msgstr "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), replicaset (rs)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:63 +msgid "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." +msgstr "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. 
The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:233 +msgid "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" +msgstr "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:274 +msgid "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:95 +msgid "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " 
+"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" +msgstr "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " +"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:230 +msgid "" +"\n" +" Create a ClusterIP service with the specified name." +msgstr "" +"\n" +" Create a ClusterIP service with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Create a LoadBalancer service with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:271 +msgid "" +"\n" +" Create a NodePort service with the specified name." +msgstr "" +"\n" +" Create a NodePort service with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:93 +msgid "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." +msgstr "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:40 +msgid "" +"\n" +" Display addresses of the control plane and services with label kubernetes." +"io/cluster-service=true.\n" +" To further debug and diagnose cluster problems, use 'kubectl cluster-info " +"dump'." +msgstr "" +"\n" +" Display addresses of the control plane and services with label kubernetes." +"io/cluster-service=true.\n" +" To further debug and diagnose cluster problems, use 'kubectl cluster-info " +"dump'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:49 +msgid "" +" environment variable is set, then it is used as a list of paths (normal " +"path delimiting rules for your system). These paths are merged. When a value " +"is modified, it is modified in the file that defines the stanza. When a " +"value is created, it is created in the first file that exists. If no files " +"in the chain exist, then it creates the last file in the list.\n" +"\t\t\t3. Otherwise, " +msgstr "" +" environment variable is set, then it is used as a list of paths (normal " +"path delimiting rules for your system). These paths are merged. When a value " +"is modified, it is modified in the file that defines the stanza. When a " +"value is created, it is created in the first file that exists. If no files " +"in the chain exist, then it creates the last file in the list.\n" +"\t\t\t3. Otherwise, " + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:48 +msgid "" +" flag is set, then only that file is loaded. The flag may only be set once " +"and no merging takes place.\n" +"\t\t\t2. If $" +msgstr "" +" flag is set, then only that file is loaded. The flag may only be set once " +"and no merging takes place.\n" +"\t\t\t2. If $" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:50 +msgid " is used and no merging takes place." +msgstr " is used and no merging takes place." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L61 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L60 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L63 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L106 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. 
If empty (the default) infer the selector from " +"the replication controller or replica set.)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L111 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:178 +msgid "Allocate a TTY for the debugging container." +msgstr "Allocate a TTY for the debugging container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L119 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:173 +msgid "Annotations to apply to the pod." +msgstr "Annotations to apply to the pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:173 +msgid "Apply a configuration to a resource by file name or stdin" +msgstr "Apply a configuration to a resource by file name or stdin" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Approve a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:106 +msgid "" +"Attach to a process that is already running inside an existing container." +msgstr "" +"Attach to a process that is already running inside an existing container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/attach.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Attach to a running container" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:107 +msgid "" +"Auto-scale a deployment, replica set, stateful set, or replication controller" +msgstr "" +"Auto-scale a deployment, replica set, stateful set, or replication controller" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L115 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole this ClusterRoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole this RoleBinding should reference" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:32 +msgid "Commands for features in alpha" +msgstr "Commands for features in alpha" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:170 +msgid "Container image to use for debug container." +msgstr "Container image to use for debug container." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:166 +msgid "Container name to use for debug container." +msgstr "Container name to use for debug container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/convert.go#L67 +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Convert config files between different API versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:105 +msgid "Copy files and directories to and from containers" +msgstr "Copy files and directories to and from containers" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cp.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Copy files and directories to and from containers." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:248 +msgid "Create a ClusterIP service" +msgstr "Create a ClusterIP service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:323 +msgid "Create a LoadBalancer service" +msgstr "Create a LoadBalancer service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:286 +msgid "Create a NodePort service" +msgstr "Create a NodePort service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L214 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Create a TLS secret" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:81 +msgid "Create a cluster role" +msgstr "Create a cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:87 +msgid "Create a cluster role binding for a particular cluster role" +msgstr "Create a cluster role binding for a particular cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:124 +msgid "Create a config map from a local file, directory or literal value" +msgstr "Create a config map from a local file, directory or literal value" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:167 +msgid "Create a copy of the target Pod with this name." +msgstr "Create a copy of the target Pod with this name." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:90 +msgid "Create a cron job with the specified name" +msgstr "Create a cron job with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:100 +msgid "Create a deployment with the specified name" +msgstr "Create a deployment with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:91 +msgid "Create a job with the specified name" +msgstr "Create a job with the specified name" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Create a namespace with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:95 +msgid "Create a pod disruption budget with the specified name" +msgstr "Create a pod disruption budget with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:92 +msgid "Create a priority class with the specified name" +msgstr "Create a priority class with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:91 +msgid "Create a quota with the specified name" +msgstr "Create a quota with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:106 +msgid "Create a resource from a file or from stdin" +msgstr "Create a resource from a file or from stdin" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:89 +msgid "Create a role binding for a particular role or cluster role" +msgstr "Create a role binding for a particular role or cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:161 +msgid "Create a role with single rule" +msgstr "Create a role with single rule" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L143 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Create a secret for use with a Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:137 +msgid "Create a secret from a local file, directory, or literal value" +msgstr "Create a secret from a local file, directory, or literal value" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L34 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Create a secret using specified subcommand" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:50 +msgid "Create a secret using specified subcommand." +msgstr "Create a secret using specified subcommand." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Create a service account with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:48 +msgid "Create a service using a specified subcommand" +msgstr "Create a service using a specified subcommand" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:49 +msgid "Create a service using a specified subcommand." +msgstr "Create a service using a specified subcommand." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:363 +msgid "Create an ExternalName service" +msgstr "Create an ExternalName service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:145 +msgid "Create an ingress with the specified name" +msgstr "Create an ingress with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:60 +msgid "Create and run a particular image in a pod." +msgstr "Create and run a particular image in a pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:149 +msgid "Create debugging sessions for troubleshooting workloads and nodes" +msgstr "Create debugging sessions for troubleshooting workloads and nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:137 +msgid "" +"Delete resources by file names, stdin, resources and names, or by resources " +"and label selector" +msgstr "" +"Delete resources by file names, stdin, resources and names, or by resources " +"and label selector" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Delete the specified cluster from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:43 +msgid "Delete the specified cluster from the kubeconfig." +msgstr "Delete the specified cluster from the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Delete the specified context from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:43 +msgid "Delete the specified context from the kubeconfig." +msgstr "Delete the specified context from the kubeconfig." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:64 +msgid "Delete the specified user from the kubeconfig" +msgstr "Delete the specified user from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:65 +msgid "Delete the specified user from the kubeconfig." +msgstr "Delete the specified user from the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L121 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Deny a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Describe one or many contexts" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:142 +msgid "Diff the live version against a would-be applied version" +msgstr "Diff the live version against a would-be applied version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:65 +msgid "Display cluster information" +msgstr "Display cluster information" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Display clusters defined in the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:42 +msgid "Display clusters defined in the kubeconfig." 
+msgstr "Display clusters defined in the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "Display merged kubeconfig settings or a specified kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:50 +msgid "Display one or many contexts from the kubeconfig file." +msgstr "Display one or many contexts from the kubeconfig file." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/get.go#L107 +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Display one or many resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:50 +msgid "Display resource (CPU/memory) usage" +msgstr "Display resource (CPU/memory) usage" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:81 +msgid "Display resource (CPU/memory) usage of nodes" +msgstr "Display resource (CPU/memory) usage of nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:100 +msgid "Display resource (CPU/memory) usage of pods" +msgstr "Display resource (CPU/memory) usage of pods" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:51 +msgid "Display the current-context" +msgstr "Display the current-context" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:60 +msgid "Display users defined in the kubeconfig" +msgstr "Display users defined in the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:61 +msgid "Display users defined in the kubeconfig." +msgstr "Display users defined in the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L176 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparation for maintenance" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:74 +msgid "Dump relevant information for debugging and diagnosis" +msgstr "Dump relevant information for debugging and diagnosis" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Edit a resource on the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:67 +msgid "Edit latest last-applied-configuration annotations of a resource/object" +msgstr "" +"Edit latest last-applied-configuration annotations of a resource/object" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L159 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email for Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:169 +msgid "Environment variables to set in the container." +msgstr "Environment variables to set in the container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/exec.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Execute a command in a container" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:90 +msgid "Execute a command in a container." +msgstr "Execute a command in a container." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:115 +msgid "Experimental: Wait for a specific condition on one or many resources" +msgstr "Experimental: Wait for a specific condition on one or many resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:378 +msgid "External name of service" +msgstr "External name of service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/portforward.go#L75 +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Forward one or more local ports to a pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:79 +msgid "Get documentation for a resource" +msgstr "Get documentation for a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/help.go#L36 +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Help about any command" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:151 +msgid "" +"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." +msgstr "" +"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/annotate.go#L135 +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L132 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:164 +msgid "" +"If specified, everything after -- will be passed to the new container as " +"Args instead of Command." +msgstr "" +"If specified, everything after -- will be passed to the new container as " +"Args instead of Command." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:198 +msgid "If true, run the container in privileged mode." +msgstr "If true, run the container in privileged mode." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:174 +msgid "If true, suppress informational messages." +msgstr "If true, suppress informational messages." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:165 +msgid "" +"If true, wait for the container to start running, and then attach as if " +"'kubectl attach ...' were called. 
Default false, unless '-i/--stdin' is " +"set, in which case the default is true." +msgstr "" +"If true, wait for the container to start running, and then attach as if " +"'kubectl attach ...' were called. Default false, unless '-i/--stdin' is " +"set, in which case the default is true." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:173 +msgid "" +"Keep stdin open on the container(s) in the pod, even if nothing is attached." +msgstr "" +"Keep stdin open on the container(s) in the pod, even if nothing is attached." + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:90 +msgid "List all visible plugin executables on a user's PATH" +msgstr "List all visible plugin executables on a user's PATH" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:54 +msgid "Manage the rollout of a resource" +msgstr "Manage the rollout of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Mark node as schedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Mark node as unschedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_pause.go#L73 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Mark the provided resource as paused" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L35 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Modify certificate resources." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Modify kubeconfig files" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L110 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:43 +msgid "No alpha commands are available in this version of kubectl" +msgstr "No alpha commands are available in this version of kubectl" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/completion.go#L97 +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Output shell completion code for the specified shell (bash or zsh)" + +#: pkg/kubectl/cmd/convert/convert.go:105 +msgid "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." 
+msgstr "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L157 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Password for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L226 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Path to PEM encoded public key certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L227 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Path to private key associated with given certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L82 +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Print the client and server version information" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:74 +msgid "" +"Print the client and server version information for the current context." +msgstr "" +"Print the client and server version information for the current context." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Print the list of flags inherited by all commands" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L86 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Print the logs for a container in a pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:97 +msgid "Print the supported API resources on the server" +msgstr "Print the supported API resources on the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:98 +msgid "Print the supported API resources on the server." +msgstr "Print the supported API resources on the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:58 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" +msgstr "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:59 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." +msgstr "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:62 +msgid "Provides utilities for interacting with plugins" +msgstr "Provides utilities for interacting with plugins" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:45 +msgid "Rename a context from the kubeconfig file" +msgstr "Rename a context from the kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:115 +msgid "Replace a resource by file name or stdin" +msgstr "Replace a resource by file name or stdin" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:87 +msgid "Restart a resource" +msgstr "Restart a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_resume.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Resume a paused resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L56 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Role this RoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L94 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Run a particular image on the cluster" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/proxy.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Run a proxy to the Kubernetes API server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L161 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Server location for Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_cluster.go:73 +msgid "Set a cluster entry in kubeconfig" +msgstr "Set a cluster entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_context.go:61 +msgid "Set a context entry in kubeconfig" +msgstr "Set a context entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:114 +msgid "Set a new size for a deployment, replica set, or replication controller" +msgstr "" +"Set a new size for a deployment, replica set, or replication controller" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_authinfo.go:152 +msgid "Set a user entry in kubeconfig" +msgstr "Set a user entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:74 +msgid "Set an individual value in a kubeconfig file" +msgstr "Set an individual value in a kubeconfig file" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Set specific features on objects" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/use_context.go:52 +msgid "Set the current-context in a kubeconfig file" +msgstr "Set the current-context in a kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:101 +msgid "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file" +msgstr "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_selector.go#L81 
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "Set the selector on a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/describe.go#L80 +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Show details of a specific resource or group of resources" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_status.go#L57 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Show the status of the rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Synonym for --target-port" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:134 +msgid "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes service" +msgstr "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "The image for the container to run." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L116 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:172 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:112 +msgid "" +"The maximum number or percentage of unavailable pods this budget requires." +msgstr "" +"The maximum number or percentage of unavailable pods this budget requires." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"The minimum number or percentage of available pods this budget requires." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L113 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "The name for the newly created object." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L98 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L99 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "The network protocol for the service to be created. Default is 'TCP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:182 +msgid "The port that this container exposes." +msgstr "The port that this container exposes." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L131 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L130 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:190 +msgid "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]." +msgstr "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L87 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "The type of secret to create" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:33 +msgid "" +"These commands correspond to alpha features that are not enabled in " +"Kubernetes clusters by default." +msgstr "" +"These commands correspond to alpha features that are not enabled in " +"Kubernetes clusters by default." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:150 +msgid "" +"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. " +"Default is 'ClusterIP'." +msgstr "" +"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. " +"Default is 'ClusterIP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_undo.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "Undo a previous rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:59 +msgid "Unset an individual value in a kubeconfig file" +msgstr "Unset an individual value in a kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:154 +msgid "Update environment variables on a pod template" +msgstr "Update environment variables on a pod template" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:115 +msgid "Update fields of a resource" +msgstr "Update fields of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_resources.go#L101 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Update resource requests/limits on objects with pod templates" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Update the annotations on a resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:110 +msgid "Update the image of a pod template" +msgstr "Update the image of a pod template" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L109 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "Update the labels on a resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:102 +msgid "Update the service account of a resource" +msgstr "Update the service account of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/taint.go#L88 +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Update the taints on one or more nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:99 +msgid "" +"Update the user, group, or service account in a role binding or cluster role " +"binding" +msgstr "" +"Update the user, group, or service account in a role binding or cluster role " +"binding" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L155 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Username for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_history.go#L51 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "View rollout history" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:77 +msgid "" +"View the latest last-applied-configuration annotations of a resource/object" +msgstr "" +"View the latest last-applied-configuration annotations of a resource/object" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:171 +msgid "" +"When used with '--copy-to', a list of name=image pairs for changing " +"container images, similar to how 'kubectl set image' works." 
+msgstr "" +"When used with '--copy-to', a list of name=image pairs for changing " +"container images, similar to how 'kubectl set image' works." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:168 +msgid "When used with '--copy-to', delete the original Pod." +msgstr "When used with '--copy-to', delete the original Pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:176 +msgid "" +"When used with '--copy-to', enable process namespace sharing in the copy." +msgstr "" +"When used with '--copy-to', enable process namespace sharing in the copy." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:175 +msgid "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." +msgstr "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:177 +msgid "" +"When using an ephemeral container, target processes in this container name." +msgstr "" +"When using an ephemeral container, target processes in this container name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L45 +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:108 +msgid "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." +msgstr "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "dummy restart flag)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:107 +msgid "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." +msgstr "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cmd.go#L217 +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl controls the Kubernetes cluster manager" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:45 +msgid "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" +msgstr "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:109 +msgid "" +"preemption-policy is the policy for preempting pods with lower priority." +msgstr "" +"preemption-policy is the policy for preempting pods with lower priority." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:41 +msgid "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" +msgstr "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:106 +msgid "the value of this priority class." +msgstr "the value of this priority class." 
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..9c0c708fcd319ccb43d0fe75c347c4da1da0f923 GIT binary patch literal 153024 zcmeFa37n)?S?~XLtHBJL0s#UdZ+e0~6S}&mXR-{PRA81)GGvyfrzeXcgzD<*>B>}h zb*idorV}FDMMW;SpjW(jQAG5jpk5V0uZRnZ7f=-Ry09o-P!9=lygfN$%zQ-)~QnSA2Dn zG%rt*_YEh>mvPfe2V1Rg6T$zSsRpN%KUzfk_Sb|=X*c>az(NiszFuer*< zKjfMuxsvZU?M;#n@cyYJIrue6^4aT>WbL9Pxn_Toe4cjRbW@W2F2BF-NRm7O*qTX_ zms0P`XOpB&y^lMdB(Dd)Y%WP|<@d>3lH^Og_tM*v4-%J>VR`|W~J@kX$KLC6u-@og7ljP~Z+Zg3bfv*BKDF3rSNS{3U zRg53-2#_I2P6Kz+{_BBsIeG7^ljL^b-XBa7NR@mK@C5J^K-K&7|CS^pz~=)`Q}2%e zujKosKb$1b1dpS@`+z_2TIkI0m%I+TJ|syV`D01)x0L(Lo08-YdH&}=1wHuw3;%<@ z0blX6N%AzF@A)~#n(xbRh91CA0iO)~-CszOE1=h>fnN{2^p|-T`abp7lH?J*_ZPqE z{PuSc;f<7k$Gei`Mc{GIyOZQ{zW@DuoZm0~og{&EldFO62c7_m-aCJn@#Xv1{C<-B z3h%uf_$9s{rqD-$H~azo2Yl)uCJ9w1tHABRYyUSi2i_In-vZVD7t*;p@L&FzI)Nwu zl)r(?A8>q6eTcE<`@Kx6R|0Dvf!^TpB|5EfnE9CF^NYYT&-eYM!{>jT`N;RTeUd&< z?%98nB)<)OC6FOXp8aXY9{45T^BC9J&oCGH{@%|f$>(V2>7Qdh@qN$dljIEF|2uG; z@994<|M-3kIL7y9eUb46ehctDJpVipk|)0oQ{D}H{y#bYe-g;lN%sG<%aPXtX)>vP z+1ok(uU`JQf$yN6J;_k=S@8NIjqRn}ry$O?z{@WhO0EGu7x+E&?^VFJ^8Kk74<+lo z_fa5IDtXRBhip854|oUPKM#B+@L3NVN-Dq~1ilUUWgsk({PM$x5=2k(qDKrRKLz|L zU<$nRkweMTfZqaq3h=GKTY(=2O8#B_bwkOMfG+?Z0lo&f2K;NF_Q24$G_#EInfO>!XV4d$LndEt0E#|!p!nq5fKLSeDp2@+5UBPZ@&s>h4^aJH1M0mu z0yRJ11r$Gj8mRJ5e4^KLBXE@ORp1Wb%YdTm&jYUlei*2F9`g-D$&J9>K#kMAK;ikl zz!dl!z{`Li1xh|#^rWHW@xYsbs&@gH0$&b%Ebym*nvcH;d@k^+ZyZWq4g4>_?+5OF zvh(q~fNEzfb-eBXYCgOKco=y7WzL5$1EzfcRiN7a2vGd>k3h-2OGdmOQ^3dZ{Wf3) zxC)#Cz7N<2K79L7vLEN z{x6_@|2v?@`?7BuN{#|&fyaQa1={aG(fzVfr`OHENAbM_RDZq=DE|Fnpyug2fuif* z05!gk-Qo0~3h>!Ljn}sT#h8H}{v0SeJax?R?Eq_he>L!9z@gGmG7bDIpy*#3_x^k* z@ZEfWGw{8@7fu9uwrj}rvmXSi{a*ym1D`bMx_$&W2mIPSKF)UnAH(;T0&fPs2Kap7CxMcCcVFr9@rQtti=P0h z+%;DXB`x5Kflc5SfRbm6S9^c&1!}xM2)rHmuxp%Ni$L}3r-7FNAGLQV`Su}n3ZUfh z+nzRLdgw!@hD;B=8Te;B|0Ga)=)11< zJ@n2)L#Bs*;F&|Fhn_k-WP0fA%#i7!GtU|_J@mV0{rfBD=r`~G!HFT$LodFKdHyx< z*Eh2sT*UmpYsmD_r!5Yd9y)P~`A7LLFAtd>dg&_ji~hV0_uTC6kt{x%*;5545{ zkm;e1yl=?#(02f(hrZ_pL#Btm@g+V_U-wdn9|lSfz4SYVOy7L%%fq?_JPuv1euY2( z4&YDF-aCQPLtEc9WP0cmzHi9%(DeJ^D}MjttB@!3=h^>t$n?-R{Lql;p{HLrWO~u< zKQ?4~=v8lkPbt5PN(KM`DW+CKLLu5hkpS%K))t|OMJiM7oAVu0lbCp-}p;IriZQppU(Gp14n=l z{hvdohmHfqpRa!#^9MZs6bLDkr@S3HgXe316}moz`S=?{rib1~C!~iq-UB^&Z~uFT zOb>m+?;!_x{&wK~Jpa=Bpa<~G`+fd=^B=JOK(Bj&(nDYWCqt%(zU9M1riZTlALqCG zKQ?4~=*-8F7vS+PK*{45eA4Ij>w%IxzYcsq@FPI!p>O^a-G)===W8*O|9`cF6S5e+F*n_ji77$n?-p1^B$b_x?Zp^FyYGzVr)>KX`oZAK-W3 zm%ixu{=+{GnI3u(4`0dizxy)u29K8{!!`~d93D1&roU>~^w8f4@SztCn;v@TYlm%J z`1rPA(?g&9&|%Xk(`gWl7(Es%0Vbd?pY#%ng=x2b}@ZMhorH5XA`LM0KZ~3NS(?eHB zhi(1)-NilYVXZJ@%Kl7YWJeu&KHwF$^9ju)|*!WRsQWj)%z)+@E+ac<9`G2dcLm#RnN}@ zRsPR_(jy*7YX5!! zsCw6cs`quk5#alPqR;1ndx4L>X4ux*!$9G;1XO#k1ggEa1EnVo?e+Ke0yRHx0g9jR z1*-gy1GQeiA1Hn4GeE7&k3;yVAI|_v54{_x_J0^Cz2;Yd8mEh<9M2S(^8Fa_GT({QO%MGQV1@6uV0?-mZv_gk zzXHA$`1RKhn;!Zup!o1{Pxtx{1J&*_Pu}v1*#vv1(Y88QK0JoXQ1@Z=R9NB z^w1vw?&tf%_qqI-0g7I42=EVqM&JED5BCDaUq1o73HYZ#jqmmYPS+Da{eC}Cm|^+!PIp~2Wq^w z-QfH=3)J}E15AOh3E$rid=B6L8Yn)#`9_xqcLVkPM}Zpe-vCyD9|EeqeK!p!F9Ozp zF9v=b_)K8qkkjEUK;fG_bJ+CIQQ*h;`f}hjuzj=B{}+Mk&l3+1n;yCk_+Gw0{78^z zz;EFDj-%fGG;p5pZvbi>A9l?7x;WQ0A-o``LWNXUlx_ zPrMfYY5lk%egQs>&p(Fme-hyT0e(I8iO0*p8KCrZtyN#e_df?p|G+(DdcrG!ckt0a z@k#OD=fjuB0>vZW&gZZA{4k&R@B8>e<9aWj$5HPe0WlSl9|0cc^Cmu;C)e^hz~{q! 
z{+iF%@_81Y-{BMgUCn#?hY4Xi-xvA&>-k_}+Q0Yn->2}m`1rr@nd0y71!AHkKMA~q zPy9E@AK%I6FZtZe?>`RI{K3pMzFy~V{abO6?C1Ge{(fHguDPRs4|kCK9q>t%(Z7cS zp8(MR{P!CEcsw6W zB$EXn3WeHVYM@;SrjZ^FYT@{Z=_!+`qt z20kyezwqty>}$f`0)JoldxF1T6uut~?_!=Mm~7d<*YVej`P>+u{6>J^0lbaRBPsh+ zz_;*uFP{dVALH{6eDv=l4s31GpAX~nRNk2d-lqrryN1t2q0N8g@5h9{Z{qJdAN|vs zf=eX(_tX4!6`vpEQz|^ai|} z?$7e~WB8op^9y|9zbEs@?ePon5qzG`J1+qKI-mIO7|%Y%m&XEsH$41i;J5Jk7@sfk z!6Z)}%18g^`0V2I^ZLTSLk^Ok0}fH;_=hNZ&$AJfVel-x6fvu6@r(_4;$HSK? z;A{E(H$Io~T>pN}L9&zYe-ZwE6n{ULzfXkkkLLU1!rubVT0Wbx|SX|KAVnK+pSXjo|E^kj_Su#YxA{g zr;%1`txkQhUafR$X(bfTN>VV?bg;IvzSU|LRJN~ z+Vz!_X?tzH+FV|)tSq$CM*UPR9mx>%!tGiot**6NwUth(+Fa?>&UVt}_4wz=HuY(4 zsg^dHl?7GUY%SDU>0-0dXr5`Oouyj4mbTVtaLS5Kj;Dtf^*Ak+)WtO2o*rn`j0$PB zvDWVJcB|Q_rStU_t8lT|0@qq=a?HT)u@q#}ldb03>SXAIstfOyDhtc?mH5qSvR1NQ zd0U~h8j9_7S5PVyij;eQ6Fy}_if&|IcJsyXDm!+4{adM>Nui;}V7aoIu2hz53xSw= zHeYG;y}2UliHP%+)(MJRys=)o#upI@1^iLFgR21ET!$$ zS{2R&l~c9#c3NLyOe)~DXpCc63iGxwU8uKDT?mfUJXxM7U^*exC&xQ*SFtaY+udJo zx5`~m->h99f=S~eYrr4uKF^Lc>hNl%QSPcUAYb@xcR5yno`azgwV6T#<qkE>uXW68l z+{z3`q^&pC(uL;AXeX^SkTDDE=~9IippuR+G^?j-t?Hr+-Z3j#g^1NC_VNPl8V@_n zs+QJ>^>lvSCEU~?LdU{ZeW_JDS#Nh*>kq)3p^Zqy;Z7Me)t_=&^&-cOBh#WLiyc zPcOUdvO~+O%~q$f(n*gtJ2f8I53{v<*5C}Ss+Eq3fYDB+HL8`e!ob(t=_v#5m?ckr zucWQoGK5Q4n+y7G%hhV7T1&5MHtGc}!ZFxV^SzzeB1(55Z2~P_?lj@00#f$- z0HR9iTBt2n)*79hzy-{!s~JVD3MakjHDyD{Sn2s7B5i$SD^XjicJX?oUIB-~cxCOU zr7DkI-Km;NYm?>r2ZmF|U41+=m^yvMCI*md5K8CEOHiYi|NENK#EQe?;s{%YmB>_V zlb>rXo3WZ#45kTau}ekY3LV6z;kD9YIzADLc^|w!1Qi0RiZBbk$ayaaEg2K1um_sU z^?R+w@%74bgBeyEKRK=?a-#p&{`OB<$zsdu$uo~1JBl*ZmcCtI0sD3bTcg+w?6SOj zxj@KzRq`q&o~M}qtF5(_nj~<$Be6+c`x>iD74R`*a3ca@chwhb?T&ZN#QJijQ^jaX z8s+mAnSLmgQMj1<1XK#)Dv5u4WTTZ-ahpv3qV9T{X5**O(c5 zcRi&zI2o#~Dw>j^2x?b>Vs&+`w2JZzb<2BqIrSL~w9!sFD!HIJJ*tVYNoYOUIculQH<6~D}iw4mT}38p|;k_XTz7!e@7 zEV-}&4&!Mq!onL+&KxisxnEp8AA!1<78Ot$fr1P@jmWVhTR}s^;HvlO2$O)RGjc}a zYs9EEqA55cFM(!Haq#+lFe(w6flTnge3|;jRKb|l$kRA>Wvk7J7cHpZpK-_00L*Ph z&jw+66cZn>m!;D4*9> zG52|R4pO$-AgIlQIw?jtTj88+z)GC`$+2Y&&DMH( z@AKzs_7vhMOl^Ft+iFt zFXq=4r2qJ6L8H}$d0!~Zf6zu_t=ghti-2>ER#+*vEu0HM(FIjsVq5OxNLgtbwF(MB z6TRA3?`0G)=}nc>yeILhLKQ+Dt4-+%QhMvl@`z?if)fjbb9@830a1Eu2sT^g93fM% zN?9xA$sT5X@Ce|Vy=nRz6JV**Sd99MhADNF(JWbh(Hk>Fw@MBMXmIx(o!;(Wwd*MZ zIKaI$8cC*eMZ^cU=&kD+fB{Be#>XON7WYtfgH1EYvw=lcP-lFcrWfk&9E*qr?`Cw6 zx~&yeH=4{mv1&d}&ESU8qZ$=LZB^ILT=Z>V{iTv#OM|Z0OTbp0HKIeXu7M32=FU?6 z=S8DHk$MJ;G%UCt2-C=eJ%8Kz^kPn=3TNythZaeO>U`Y-BdGm7Ds$= zf!3B+&E#*pkV4Bv!`*6+x@+#~&L-nQxA57jX>7HUTZHA2qcxO~-eucjA_H{W2U#n( zW|)=z>bTfUnz^*{`1e+n^NkQcFQt4_wBn>S(*V@s_XnVj7m+?sE!m0Q@iS;r<%+r1vJvZ^d4wS)z7tzCpx@wGwsS2zmg$(1XeZ#`2;{$W0xO=6q%qvAE{4}OWQPWaV zx_AY(LL7DFm3y@2*cbhsyXZ7_VYrq$oz?aL++*uezcHp#Ej?W2kE}aB9#&yrk4(Ss zpAiIKYZY8`D5$u;qx&t+i7TaCvsMy2Q;`z~XVOlqvPc3@#)1p!=xI2#S@N0)dgZEY zJ~RABmZmT{tYWPq%}%r0Y?M`V7?WacTkVC@$hEsL-5H|J20ZkDXFAG{dABcgrW4z< z_j4cVR|wO*fh}O%D?}M_tQG#t!h{-y3k?Z0+OErd$N4F@nukByh}e79u)IuYvWA#( zuc)THpr@-Q^MI36dv`(mV25|GqCyaT1kGNVWyM(#H00R4Hw%xrE1}RxIjeb$D$7|_ z-I5kPT+v>Oa(IU@&qgelXT(i0F}Uyzlb%|qY~o7R9tuP_V45#edc@pe0l7`kwq}Ey z-*n(|@x`n=hKD+0P4;MoE7=&h{5XSaUh1E@XWCQo*YW(J`@#cuh39Q03-y>ep;WAr zAy#S$3Vt~%s~gbVQB9w{LMk2Jo(!Ww_>`qsY^CJx@_eJxJ?neB&zjtp_*`nD&}nd% ziRISeNkU_tr2!t>*h4QznZ6zd%(_`CAC8E5_wSZ-Goe!UmyyH%+I;vjY=wW@I-sRP zxX;;BQ>xes`(1*jAq|RcW0QS?uk?Wp_SS_rS)U;KFl3H zPBN988A_-C9n+K|zeIX><%%*+By=v;l#ywjHAQBV(V`Yfh(TRUQyzB@hV{=DJ_!Nu z5bhbj-^;e0)X zmb58b#Ub1fp-`X{DvL5gL=^RnxdzaE6U@6;9Zh!SCssR(f-ThM*G{rhlJLtCB(>N* zHh6-h*@@uwWwKH0_+|-KS2~>vLLMPrx3JdsbVqQ^x!pg#O1w$Qj|Xy{evu^JH3HDQ zgjXsuHO-hl=b61b3p6v7Li;7TN;Y*nd+E%C2tE_<7Rr#%9bD3uFpw(DN<@xHm3Q2; 
zpFrR%*O}M_h}vr2Jz;I*kZ)@CRRby@Yc%hkBp64eHnEU~@xWurSc&Brx7>2FfhrEp z*6FGFPaB}4g*_5(Q!J`W6=_#4lY+C~am>bH)Ak3nXIKO?5InMac%HVGwuX&VkNia& zR#TT^ydqzioqA5vf!(KBl-s~~S8NdRIFF&pDAtSL1<=WZFm$0#u4oZYd8+wLS?|W= zkt5Tt9H2k#Cgiz90n&gVh*I*-EeGa`hCB)=V?pYRq=}}vX%aG^+RC=e*JM2(CDlg( z?scLY%l;;EV!647m_h<|(g~I7Bb4x(SY9{u28jumdIv*hjJ(i=g>(}WpMt&a>X4{1 zz+D}xkA9`ESkh9&z_ONn5W9NTCj6r2+m@)>S|KT{P0)IwR;n#`_KsJ_eF4Jx zHf~D5N~IAgpVbQ@U3i2agx`eO)PvqgH}jyufl3##S=G%Bp>8Ht{JppGL6j+A)KgqY z%7b?fR>>;Zh(kgFEhttcxoc!dyK@j}HX5B;qnYQk$Dz2RMcSxE8Oc`ik=>pu94cWb zo{WJzT$A!hN_#=beOUl!s84l$B2A_H>8_`i z9SH6Vo`Go@kZ^>;Y+=rOq%6-IlG&lkLf+6p;{gY;AV8Lh{^CJ^~4~x(=d7`EDniTG0{`T=hO*dhfHf=E3DuL!B)@focWkyivO?n0NS+w4;O2&vW>_kZV{~~PFe(PM#VI#)Pfw;wYhtj_)q64Ko$=F? zlje=*6rt)vr;_ds695$_{t<5-^m25rm2mp+yopYd7H}6681N9=3 z*9aQ<7h&TG$tBgTSOL}f9KBKOy4G2R;h4P3(vVi9M{B;DHhL{OilYd2+&wC`|lJ`k4^(>FeulK4k;dTX7}!-N5~oN4`=cqe`n9Wq2(otA`5OIvd%f zmkn4)1Gg1AY}-zv;Z(@j>h0bp7{FfV8ktN)X`4-Gt(P;9=7skEpXHDu!TP(v4k9@? z6#v3!S`XUTVFl7PA7M6PqKoOoT01ah&Nlgcr9d{ZT&8I)WB6R8v2aT}2{};P<2LKa z5Za%3Mmx&jaUsjnQzd7T-dhtVvzNukA@2!Er?%p!bB z9%V>GH|A5f2Ub|{Mq4#eX8U%fE)kj6Ij&7)xc_5}rz za4u&RGC4<4%DOAFNCMk#*)-}qrI#z~=}L{JDhqBb<-L=@iIO4`HrqxPIv24nyk%o? zeR}y0qntwL3KH#{sCBBGnb2OkD=#~~z$P<0uVb4}?#+kvFdDt0O-?Y?RTvA&B*~V% zzc7Kl!RUnH6GxgpQh0BaAW!uacBxk^id(12X=QIRMh#LQ2^$IpL#w6a0nj+isT{`V z1L#mitui&z2!f*^C_jGK2jf_4$n_!~bLLO4+cBUTy)79@Yqyfr$iW@r*_?*_Njp?t zag$TxJk*B`VJa9}K4iydE}Jwoxj{?m^(lMA^&>mPBb(d%xH=Rc7rJEat#k-Vhp#$g z$Bs;I+=47CHIb$x>Gd#?mmYwBsBcgKs@USl+}mEdWAaYe z;{wNq_ibp><3ZdFplZejp)rjV;S-(bB5#+?GE=#WNRkpZ zl}Aobj%54f!V)+_K!9Ma%xOD&hT)1srQQ}Qmr=cSC$SU(Ou)dqvyO~~53 zlFdq!d+x4XQ|OnwO*iu!=H#SJ^B~Hu9@!cO`ToBF9g2#9`Ow%OSJ+Kw%V_@w;w^74 z7FA2d&Be1dqMw{{uoCwAvilbk-;ZqxgOqm^LiLl9um~Cj0+e4TV2+%es6+9`@_qM> zGbPy>HEu23e}942vA>Wmp9>H*L`6vtTU%Z!I-J5xz|>~bh(?_e^};MbR=9lN*wMLt zhmKCqPW37|ckH@zTvKc8(2;#NP7nOqVps(PB?JnS!HgfD4L!kzP7~C$!x`+)AuU?f zED}&?$+nFUYL#S&WHFyHwQ}A;SL~#hBCV>(k1b=FeiPCEYI3Q2U|i4BeuG}3+tLNP`9uZf+EgvZByKWWb80H@ z{)3N=PvdOsiPC4gOqBvVccGa}o^BgtQ=~oLa}eReI_Sy3GqQi<-15PaBBZD8R~fN9 z7Lz1{@ef115%Q4(a=S!{sFt5mSfFxZr+fJ~aKORadVD_Ps>-JH)}V3qreHZuDO6*4 z+>T-?Fxl4IB=Y1nq`SuLe+BN^8WuKI7X3B#&is<-?1yH$*#1H=%4!~)+%-P=wDH}O znq|!QtMvrpqTS)Psv^lzAVU4hqsj+bdUR&WD z0?vj)0yH;#b!}zv%e}A8U{=)`bWVm^mCKJ+&2WerFc<|qJxN|M2|6bcA#$K|M30?v zvM%9cg2j_K+qT9R+25X9g{St0m?GJ6tLf-{BdTI#>b{XKOh=})#q|FBN1aI@WbEX= z-wJk@j|#b|ZTNr+RGcEkdaZU2%z88vgj(0y(7P{y;r0M$u-pfdyANf8W}>ORc^=d} z5l1-lEFj-(O~oDF?Qyu{3SW$6WrkEO;+SNHD9}(>@&5lj*jwVDh<`~#SRU0W3#0U=JPO{;)sd-mM9K$&IGt1=YekPHL`fYt z7B;6$?3!?-DP!tePftdD3eVW#6wzIZs$!c2Sr;XcF--}0cDbOMw#BPmd|4AQ3f{1c z1@j;UOZL4=md2wE!dg`9d-$m&ZLhap4`EmhHa@LrF`x^Fcu+iILTz#iK!GXP=A|6L z-a6ZBtCFS2b=euAP!XZZYF%5z^ls?rtkWCj(-7rB2vdNh+x3U6!w?NRI;gugjBZdt ztcs53oA**^B_zINjOi923UCakfn=O_vDfL2PUWO|yiZ?QAY9a^Lo6di2%m>4seJ#5 zXYVl1nj+R=e>{8V)c6&bPYg|+zH(x;a70HxayM8eN_dXTI;1?~SD zeYb#ER-=c;yNT=L91NKD3P=GcwL^GbxlnLez_+@hscJ6G@a<*Bgqnwu=WKoev63 zYMhIw>&-R75+2Re>`_=O?`aRAW%y0@5|jlvgiPf%;CBjaPxrIT%Rv+O9ckQ)zI^5jxq)p@lQOQIkS;ES71Q$4%Nv5sMv912V{mu&EB?2!IVC|Z_5@pT&gDpnt7o~0d2P`rz2!Zk z)ZyDgx6mXCU@Y!)_mGFK^4o`dd5Y*lX{J71*f7@K)Yg9TQgQ zdET62ZTEyYw9_ElP7}&ooJvC(95kEJ^eXo3+L@dH?N&2zulLkzm(QveQ@7kYA4QaH zWw_$2B!S8M61af*6X#+H5kkhsg4P9caN=Qf#?x^v6vg})JiVoE#h|-~B?`S$ZR=vI z5}UU=v2rb$PCgrS@7r{UD)$wxoFv?cl&EPNMK@ZOcZVb?cq6yexb)SDK+*@f^VcGZ zq_yTMM4oJ<$RTl>g(mV<;j4JMaqTQ#uPq^7pc>rhjTCmd`4S!jz5U1Mf*1;zImI<} zAy>twi)EP?aL|dmlnq20bbfEKrf@7T_RI$LfUP*yel5$@e|%=&f$5EE56Aq{LKcS> zn^{^^QJV=!rZdM5+M&OqNnVcLY)|`$ppF~reL9N}%o3mTyHeW%QZ7%&BOEP0(R>~@yp=}N0EKMXre4<0>!*TG{)@M6b<{6pS)90D8H zxC6`FgmwP`<1U8Q4EowBw=pHwxG)P)DF 
z8{SyCyRosX8OQe^X9hE@*LvtRumYDxv{%{wFQ=0%#^(Ipb{>SR2=p|UH^C#d+(TsN z&Bz<^+d^xd?TRZ5qb2Bdu|&qrp6N@#dB_qug zKiQI7LPu@1a3HnIqxWev>bJP@E4KncIvfaUm~Z!`B%ZOo1r;2*0=9UhuCNa#9Z(|NQljh$;C1pEi zrpRw1RYu05wH}Un4sH!6upCQqyIsudN_UXrHA!c2)SNuJGq@Va#|m$5iK^*!s^oeD zyRSFO>B-S>EVI*Y5UN^Hcz-Y&Fli z-zOMeH)pxL#`kjLuyiho)oYD*@rW{i6{{A{iRYn|!y2E}YbKAFHikL|)Lv04osKrQ z6nT8Vw$sX{$L-|(>9&F3n$SABsIWJiw?|PIW$>U-%X`P>-CdItV4@Rqwb~T3$Rna> z&YUTuJKA*}#E?sT&k&cQRmbU<<{$15;C+il3nvt_VA1rh%oBpA?BEm86lnyJvG z0vk04AXmUAcx7h?BP35vhE&*_*fTI&Ax3CqAE||1!_iZfoXn0bce+T5yT5RG-C+5) zflbM`6?P<+Z%YdUz;H~>kaJjT^-^DWFbxWX@5c)B+F6UU$Bi2XHtvH?*#bqoSX>%M zfx|^h@6_np%2JK(Kuhc1jDB&h8}(>fZY%f;HYvqo*^33IY?x3Rw)e>FvJbQ!2$5g1``N-b^{G*10{p{CiBy%;+nSw$NAeL7$^WiK^bs zlA1$ZM~bKC-j({lIWQG0>q8F=D ztT0A+;<>1OLyU7<*RR+_sTB5T!f?jCnc~W+El`-Py3w}~0+)t=aTpb48$1@ml}f{o z#9Qh_9G#VO$4!q{DEDl5IuIi=p#hi9-4kwhN6=|1?e+u}ylAE^J2PEC?k>Mt?kW_Vk*O(RzPwQHMXc6Z`^v> ziVncx9AsdNn}^PHD4tYco37XF7??SPJXYVRF({I`(l|pjzUVj1 zYGto|NK-L7*B~}z4R2~k;%M+Tanhhe)jV zc1W?az1wHvO1L?8vt z3GpZ?q}X75P0}JHDR&eGvhMYirFD@z^nn4jan>VtFWSBYG|%uWQz&>ez{9$GeEPtN z*+X-;bNAWYp##%X9M`bQ{!Yy~;g(m-t9gCeVItz&q%0r~ijpL&Yz4!rL^oX5erptV zO&OyxyeHn#_ArEDyk<1JJx6lW(%uQCW5P1TRH+?t+52%d@2rm#ca>u%5Z|;C-O$Pj zmMd&vv#oFN6B|sCeK2ukp)f3TsDQbd<_k?tUGd9-Fz2oE>wq?*SoW+5}+>dVsage`k``slWvK$u-)zCaGcVD~`hap?- zf~_1Ssck>evJq|Ps9S^{jHkzxJpso`^pJAxxj}S{H(XQGyP;MrI85GPJrN^Sgp z&o4He@FJ{gQxcdl13cW<%X~Q}gqvI4xG;+Ci+e&~2u%}Jr(J1r$5k=a98yoX?qgTA z+WWU=N6fpdC{)vjmB^*xGFFPp6uhA4D(nlsYxtT-K2h|mx2pG`de57tE6}XOqu>A7 zRUfkfUaYTBMcpp_&3E28UF4QvvG*e)48im0SYei1G}Mqeqk{(W9ti{Wz?;`}5rWy9 zPL6pA9{8bnmH5X0pQVV5(<8OC>c zOB+XidCmiSX$P3!MCRd~sS7u&$H^)QVQ?ZFipkLT%qp-}XpD?3LL zZFP;kVit{ng3`4l1G`$G5ND;>eW~%j1_^dFI%!P02KVC8%fpQeO4Wf=HoVoAZVbB4 zc9VN-`!BFHg>+6zAgb8Hnw@E;4TuX}clyGuzUSO12flDpTZ#uNNI06s`tELy{7Ivc z9oV2Ho8I*jL(?DNQ+T3D=IbqLMhl58;d zs5rXzrRx$)qi#A?*uX?XP}Hmim!CQ91EWh<@FO!+z->vx!Tj+YyMyYdW9GVNpp4}F zUKqklb5g#2owcH^)M}1J*;2ObOtCe#mSuO+P;I-Xp>X*IOPFr^*sukY;A7!V zje{ZN78;B9d34ttwK@0KhW)h+kEsK}CWL%ju%}ELjEY58t>1UFA;P*jh|IfX9Mevf z$cm&Y=?bztn(XT zqbcx}UgHnBijb)ceM`J(EeU+4j!0$-TSUlbZu87ap8^0EZAk^_?{k6nUyFf575qeLy; zJnF6k!w}ghZyeF#pbDC2J$DRI<4tpOGw8LQCFm)UjzW)w3;PvW&9G^a&txQUr;yZM zi+xH&VO%NXU|SPSc?jr*d$aS^2UE_%vSO{HUbFc_4XcYiNE>>&yn>XbL)f*d zEKjzKv3fPB1FKiQ0C*d1Qw5b{3!KG1uZ=(G{KbJV#WSJwyRX}v}0Vhw3>nu0S zbx&fboNdhM7QXJ&`!Hx|5Ml-?kNjz+{1oC!8|vFnq1~8J)U}5T?6L(+#5LVJoly#5 zG|;1(7R@AFo zT_d*{u>i%<|AOPp54u#Mw*@=Qc3}D-cCt75MH_ESh}s2j;aE)Q?_MVj87>FxbGWry zpQoqD6P*R9-7`BhIK|^MciPFGQf8#B#hg56g6I|LEK$2&ER~7}Ynp@5q_nI;e&pz1 zF|lG&;mpo(1gGLdlXCs~j!LTP>Qa-a(F#oYEEvyNR?bv=^kk8h23rw&; zyx6Gxu=unHLU8>MZgS|`Xc8XQeio*VPnjZxu{_QDT09In4rfpmW#C0;k<(dY#|RR- z)m%M_Z+;4fr7gB>$UL@?TdW0%>vC}r{~SW3@B%w)VhUU+EEmlNgQ|#6@b1{0I&@^_ z*zDZCqjOW~4ThKx1GE(T=IQMgt&@;6w~``HqM8X5wM1d$2uqe?2@I~;wDVjFFHABX zjdu`9ws4E7ojfXkSN>jbXb>mh&iG73!5baRkX2I|pRkBgJIkSkt=a=AwCJjP-T+G+ zCpa;OPM%Zx}&Po#cdz+}FwafyXUY?G+m1mPQF?BIuGkT&-w*`eccjSqngzlcqZ zKwwjbbrvBvk+NStUQJ7frNMG(rOxN561g#{>5OUQwE<IAN^eqMm_O zoq^Z|h20!koSB5y15vZCvTRENAUaf^bwk9A{l?$YgsC+>_Mxnk;bk?O)Qf@YAYJYK zkU_?&CUo1L3nk;V6FdLjQ)W~47rB7A84dj>j^a5O2AfZX9|r@^)_i2cTN@5U+F8E zb|VR5>P=>miY;Sg9NEsirP8jt@&HBGV~h0ZctD-a36H@BHl0Ffe~x-X_)DG(Pcu+9 zfYDy(QXiN*eAo1CbJMd&_Z`0L;Gr9CV8QL*{9*Hw>P_K?F0N)}ZVr%@;Gy@nU4J}1 z!IG&9+B`DHrg(Xf^X>f3Km?^K%ktMUV}mL!$Mis~$|Ibkgx^%Hl#ZsQHM?VIe|F@q z@hrI|{GMf&6r73Z4(7;|%}e8$jl^P091kg}745h;55^Ql*a_ z-1z6~^}7uJYNuf~Cq+r|0fk_<$e7{0R}V|NelU=RCKok={GnJ5*{%{t4Cf+ub4p>8 z=Nk6n^m(n;K8?eM;n^sAb{v>FF>!=duSLh#+AN61OX4(bmWpyKOkXVq#xIg^x$de~ z+G~DWuf%kaw)`O>94Z9KSX<^!KT;zSRc)uobykKY8^D!Xsze>z3~n12>6S!sJT)^c 
z#;(VWr#@SKBeWgvLa-eWC2?t zqKiKw?v@sVYbFzMNCr!6V$;$T+cp_Nlt$$-t)CxJBmFc@rb6~wG1(}X9dKQ~)zB$) zf`FH$P_Y4+z;)OYcHCywR-Q!EeOUQ2w-}t>iH$Ng|>@ct0?b!<`a#5`k(Dp|hT;sb&Ii z+V)eXx=-EwQV$j%X0=v3vw_seGHxDJX)dwNrDxbii-8(fD2laCF|=KWF{y7rTzJt65T|61ENIZfm-!1e?=a>r<=MO1{;3Km40+_I(o&~O?%IDL3}Zd$zw8n5v3 zLGqIV7XVR;ZGuR<uq*Bj8n)_RZe^1q%Y$*Eq=X3aU892(-KU6 zhGf(t`59j7$Zr3U&5=rcqTjWh#FgI z;eH?Uf%;WH^lXqRg-C#V$h*e|30I|XUHPbK!H0G3xqcpnBUmxm6{pqW);i}eo_ z!7l1V54nU}Y)@?BR*`7D+JxC8+LfRom*TImDZr^R*O~ObXy~;S-NM9e7QxFnHi^y zMf6;L+fmRN|3$WBXmD!^O|z)w+qFhn?-hAKklB91bu7!KsrVyV74qgHlFs zfiy+8nx&V7zf(asbV54>7lC~Ckd5R0~?B5G|%22Cq)V&G2a zK{HmwU`Rf*W8E=j-X~$6O9rZi*xa85#zfytDs%_mA`e9OYqa=GMp{CY_spH`Ho4__ z7QQ&hz;4}9jnjvxq7Lye8jM~yRslVXZP|PdzA_UBQuB>QRO`HMVMpo;=7qi9V3EVD zinYYrO?lJ|C>AQ7pR>s}ED(#;F_CpAlkkj8;q=N_P+z;ZV}(iHqj&#@Nd5nTNS!P2 z!GDOY{}5X&DPIY(g-fx8Zb5oVV=B$I5N1v^OPE6JNZq#o<2o^Oq1US=c8$`=H@jr2 zPA=fg!Dl*l9{kIFTG$%ik}n##l(J(7k7HkOkYsi%5o;goS$AHLiOGHWWsVK^8e~|> zEt7>fC1w5fzGy5$Kes_a1=}ol3hBLUJ0zKm7}WSJ3%;S~$1vk1gn+4pjTCE^X(5|} ztadqJGoBu_X~xc2&{}FCA2pj1YFkVl$p}U)mShak3-^heDVd8iZm|(!=iFd3eU<}< zQ8}|kNqzH-7`d~fkSQ7PfS==Ks|_T zVTJA}SJESyD@x(F%o(>}G8{b2qx7Um0wb^288jyASf>4Cc%e}_uEBBM#9|kDV*$CE zEqg58!HH}{(l{TKh_`NX-DwGBGFAP07uiI4o_b?VyC>ZEL8?`xxwlAenAtaXlZ%nT zWo4SVPbYd*6fY`)rEb^@N8gtq^g6Saved2NE7Weq=k4%XOJK9Z#EFBHMl8e1kku=G zjhiua;27+$tlvQ+G_LcD0`b_}t=T?l!rM5&o7K8y4q0QCA(K~Ew6kS_-C$S8nf)0v z4rKIk=@!gPXadU1NSaYVE-TP7LSKBHa5oo`P%@ zUASvj6_N=P#16TYjUANr*a^=RfElIK$pfdJ`ibJg`cUD%7mt*LTx;&Zlc5(K2jPG- zlcXMrf(4G@CJ-51!7@euh`GXS+_Vo^O@dG^5ksMP{{d5{;AZLnvCCJN|130oRVj?C zmfYUvJA^vR`e zrMjw1Ezd!eSOxFJ4VEQ>iYH)7P9bzBqZvGG-$)tkIM= z*OuMtR!*-P-)-~N;TWg>q$?sL$&S+M7M4u^JDW91>tK66 zvasuj2wTl)PqL+W+}?8KZdwS_$o;GcYCqyY4srT6V`)|NmsE+U`{Fn*+*B&1Kjexy z#(h`Tx!BfZ(pg900$l}pi(Qd$AcKo;6Ph9p=!-^GIj^SKgjo{bk?7abCBo(rF(q1Y zNJdkKai*uZJ=DaGygW9=>=BN{I!Z)(hy6~HQas$E;4asjg}s`hngB9A&TYRWMvCiiTm_SMP4 zG{{(YuK9+ES&wqj^QgcKPOUO*wvqtOe3HUmshY2`urDCiZ}TZ%7IKZzW@xss#06MR zO-LM;hvvMrZ+bnyF0Dnf`toc^(Mbw(x6e$ccKsnvV%^uL6>!$m^b4I-ccP@{S5E_s zYjHF&z*yP+toWf4CXVDu_tI~axlnZ=-S63`wwp=jKu)DIv}I{T@?DzQTTm-P#OOQ} z0xAam&=qv2ELuW`;$0Ad0M^~(mTDfd!<99TO^-Our-SwTxVz+StP89kcUst{5d7ZC z5c6xF#O*L{ToAb`??sCUh57ma3_@l+=`eiB`%U&=4K}aMYXp~N zTeF<1ew9FBqnxdoVgp~re2WDQdeqby!`(BHcqnULi77-@D>IWLy@Qkp=&+2nOy~D0 zkR4FMjLX(bOE{3k);S2K>g8FyaICk(@8r@be^2>vT2^3N9-dqyPXn>Vlr^C!Q3byt zW`pBusmYbtcJx+eZ5J!G{6PAx_hh5iS_NhhRsL;R(+DZSallS`E)~xlk-6kKiu_(G zJ>_&+zgk?!zTuJ5mngUP*HB5dENkOd_AXM%3eVfGciFGm8?3NZJC|TUdHr^m5yaoO z@9|6l)zH!J(a`QD+<^{|m85NS$yvYqu%kw2LEM)F#vN-K%(8ELUiIeHDdK3ry|SUeAHt17-K>gH<4U}k(Mc7IGxm~n(hw}w zf|o?4HTsScLNa?5scZf-Ev@X2Fu9xkD2KInj=|FwX*Mygk<<#VL9lEX4X;t)gZ zl`Cg=&CE{UaOgIPB^$DE?T@x!_&O@RQ5n82HW(Ju>m;?L+lAL{7aFcr+wd1@LzS*| zC@4u zR%)5ok+Rm~Gj^e6eL1rh3HFl5!la0Fc zUKV>O847-4Wl?}3kK9ckt1%^5?%Yy1g3UB|)i=0n;zPU|giwmR88roq zhuT&VSD*~5dO$#v%L|)6fgy^Y9PC`N5OxfZnV532VIC`sA-*CxFM%R|rdLuvDg}du z?x^$TEQ*&H)-Vhjb>llV;$EM%5D zcX^VNI83gFn7ZtFf&L7v@;H>lRM=Og{RREH6xPAA4v#6y8_@Gp1$kv2j<=LtoVg_K zn5?$2NEU7J6H;vLu=(p(0(JM3ro|3cRAkGPLLZXYpnl>+2G$r_cG4Om&g(H_0_y5F ztIV36Ij^8W`V>F8<%Dm+8kh_w3Eo3tO-FOx4|)Nipedq-)pFzI`9VBn$%qKS!f4sz zhNoO>pEO~Ug?6TJ(LuEJcEBvQlyL;yB%#Ax7;G_^sL%brTEFwVrP}7IwG)4>7wnSp zM67e-B)YK_1Z{vNM8+PY1&aBYLknxZizY^dyS2}@5PfmEUD5b2RLRw~;bz8<*c~%z ziCbXeCIFpJ*}p?7v-p*P>E4**a|aI{#e1j~QGA44T3OIhKfEtA*NM!Xy#T)AT}k!G zRyt--O=P6-6Qp*u*_YI}w4WCPcQ+KR;9~qD^lsqgy{y%Kre)Xt$qaMxVp{9|7Th$2 z4vUn8ed;w*>~&vlu7@W{=1usLcDG4^0}&C3I7K0xj8rSb(g_1LgxJ7H~2Rmq@> zRFo@Uz(8l_Ak9tNMP5<^mQ(TcLPlR3t2|)98I0@@*CX~cdlc5JPY*t-=Ip`=IY$}T z;AAkZfhFIg1{{#;M~04*cZg%((1$uj52Kh_0C5?1t%W^LsIxZ>f#~w4E`j0>Fm(tl4X$2e1y#V=ghFxt( 
zubX|Lv2k15=2>ttyDS%0wlaq4qOyVPCVC^gSl_Q8mhY;f&Xv-z8118v@888fv8(T7 zq+CIOQF324uP<6c(z&XD8JBd)G0lbTI39xzY+cDz>d^^&t&KWo7X(lGI27Y98mc0?*)RG>&$fHX=3X5RCD*EO-t5r;EVAFY z1t%{!sN#TcR%@}t;Nq_K{O~}Q2R44|C{k|z7|?Kd{bY^EG>%tV4^K!(B*USkVbA)j zDh3S1W+>%)x*?~qsdQ(ArFB^n);AMuG%%ZE)^0Bmowbv@ivv?%xU1E!Fj{#ZtfU?F z-CuB=GrrHaW7INPM#V%&aSoEW=i%-$@gsk90pt#cY?k|K4_ztzUTkRVgAzB(pbgB2 zgVKHF(m)3lA~bRV=o`0eX-r~A6bx8hYLSdrpxf5FV<~{rCst0Igt`End9{zr$9Xt- z^4ZS1ct)=A@Pbbo`=JyL&mD+{l?Vj}yhD2h0aq}lv#HKA*USkSxU(o;Q)&C}wq^IL z2hmojgL$pD*}S?;9&tIm2hR^~(!{6o4lGyj{41s*85;^K4Q^nQKKY%Re!`yNu_rnm zFv)s^_r^W+T4agX)>LjXzYtuy=<5od=Q~?^tl!kTWt3uNo?L79^mdQA7PaYcjLK0t zw`K3Jkuztrm3|V$GO`DC{)#ATi-=BWiw|u{(F!0dz#2WynpdEnkxMKRQk+R`jTJ|Z zl|AYBgeM$X68kO-virx)D(@*eX(N*-O$9??2TNDsP8)M{xCy{sT5O@S#U`@#z87l~ z5Pqqngp=Q7gl>^8;TSZ71zkf4oqAD+x^;DlC4_}ajTLBPSDN?s0b+YArBe63yHatK z^07_+oC7*w1~EGr^~?#XF_g*^i?0l3i@ej_Z$i(3Y-HM~pH9PIN@~V4dt^`I5s@8& zi!CnE+b%XuR1UUyNSkST7^j4uY@c5^U`z*~H+9^t&aVnb;6w_+vgR5wlajTDQyg%+ zORvL(MA=0RBxB4EDWx4$O(SGuy_*7zm7%v$ryC}Ma3|Oi@c+%>6!PI5IL5G74x(@u9pssl%{-_B zLLUA5BZ3!7q15iC7#{LbEzWGyx3_8JIEj441MOkcn8Q_k4*k>gVxhTXwYf7pDq8+9 z`MqWFA}V&Ys{EEORp}1@TMzAk2@T+mg?3(nJ!rK_s*J(+2njK9d6}m+fRx2OT#VbM z7EOH%+%Q+-D-oU`j>mlToPgX?j#DDX52suH#k{hX5fLg*XNz^vEj#4ZBlNE(eVM7jc)BLJ&fn)yz+CjY{cfIHv4fZU;ACHr^Gr;jS6Jv=TjSx{P zTB%C5TA5>(%wX*wCuc&_H>=DUEu2A%NS9@#R8F151|O%k)y;rMx0}NP&mwSJo_sRohw8Fd9RAt znBTO|I?HE1zD&0%-rMhijUJ^lGm3ThHHc0sI&DB>d9O@arY($?9umi<4suLf%!SEB zgJmGvgy}4xv30lNW6oJ#i);nr?KoJQ*EA75H-XS4Yh&}8!P$LLrR;Ww@&@%-MO@LU z-Y9S>J~Stg&1X)7JZCp(j2CkcP`j&Nc=wTQ(RZR#)vV$Tu<06=UV70`0lOe}#x#hf z5MbJJx8op&)km|x=d$EHj;52~v)x#JewxIAYa64>oH)V7P}*@Y8ffVf&X0pFRD)I^ zQe+}er=3W7)K*JXsEoi+Ym{}SuX2=Aku0G`JeLY0>weq0z#peAv?m4`PIQI zqe7A2!+Wb9V7+efutD#h&A)#3+(10OR_p>#53`fPg6*QL@>J`p= zv;2;*XvoB5AKkKc3GUBQ>d-zB`Fpr zvtRgnQkD#|0WXae^B*Metl{}J5?!b}EE@8EgbW%o6|x3nyi9EE%KuwDnZ4klN=|pkG{pU>bEiC)YP7^<3E3wJ<3K-HnPssowQb6$;y)^)~3r zb4==}YT8Dl*fCa!rcQq)W{czrN@-C5g|WU?asf@>N=wOB1xS0M!`g!E$NlESWUrZ1 z9#dCVMNSYSmZXlzXL{_xxPjUIE(}ek8=I{oHwlSf;=!PEDfA6*gye-R;YGJ7XZnn* zgJp&Cm1{glw&hxjJ_NS5>H?O7l~wkIHY}Js#)HhCQFM8$~<@l*Pz$hEh2Uev*_>dnw3){wT63E zZ&g!v4<<>S$ZNE1JJdOb)#LZ*^0D}aISDMgPMQ(Z#EB&>(rH~~#hOc>DN?*)t zbXglSe+%;r5-%x3!wTf&@#-7{(^Ragu=I&siVa`9r|W#pBy)!_okLYY-C;I+1#S4W zqFaY#z=em*COJbMZ zAnVQe_`)0cm3yknr;Qt_NO|TjeKUui<@fuQhT+x&GFI7FgbPbPtHQR3a748y*oqso zvk|t?LYT!D-h@(R>@ae_)+H$IJGGiIzm1*aW(#wB>&f7JO?g?vFB-1#oe2q~mT-#q zGV=;!QAejRvsRhA*_od=XVPbkgw=j{v>7P zvbB>SEj3XJaEqO|*-Lo~&ZBv6Kx(DAQnI0l;>P9)xMkI--To?O7{9I>xt!UMi|Fg&UVV+>?Q<+bqhEGy|~^n{FNV+08FyLI;!4oLSdr!BKt%R|mj&rw$B%ADq?MfS;wi-8KfebV@b0UmPx^k11{i=@#H{^*yRHrhBp6ckQI~v+H0rezvKx_`Lk+J$#8bp20u;7+pvL_ zbi)lX_}2HryB>W~RLJOoLI(3SjK&x(b@siI5sj8W;7wlQ7{4mk0HK3&%HDJBXrD1B zvVvpU8|ON0Myl#LF89uB&ZnWPwl;IRf|+^QO)de?3kj1Rdm_(3(vNvjDdVIb#ruyo zztXty+LkUT&dYClEpT2e6VYLeOiVK*_SMC7Mqly9##~^(^5$SpvR}=N2z&3Ocjst? 
zTqd!WrnMEUq&>G^_ZQbOq#N*+$+!&|E41c0N%=UD*&K|vzLqvFT9~R)BZ!#`){D(p zE4~nuqt1njXJu?wI>%;k>^|$5l#opW+j&X53#)s^y9}0kUYUb5cbbcd^-j~=Yn;*fP$vq@1p8_HUc{z7i( z$!Zux3YVVU!(r=Q-f;1?D1${pr$l^Wn<*#aUP`+VX)9&4Xv)ypQibLsAq!~v4#)lIndluWY^291VQt9PlI9=MGU{ZJN5;N46^98W@SzUH6n z!78IKLF+wd0afw956Gnw&*n4LN}LgvsN{w%W-KH((!cIwwM3qrbctn4`9WGPJ}lQW z!Jd=$?8Y8{X|#$8u9?BX^pzc!U2M#YG1OKyr5X1hIyjt4K1Y>f5f^lh78z2A$*{my zG5_#s6pU%JBaJ^i!=SQgJ7LlDK|V5$QX&nrdiz?Ah&UNFR?cs819U7M^`lS@&5ZUm zpgIcilQqwnF~rSHW}BVC*djuxw)|hOgn0=6_nb)+@rpizV~Ri$&a9JUArGbZbX}He zwl^zQ(%X486!u&F`{R}AKx~C&s)mq8g{ymYHcvLxerJ{shi8sub4A|YeJ#A~c}8J+ zz-N{9+_#%4Da0ror-2#f zDNTJHpcMmho}K~*%yai2fHtF_dBMX|u6g5>1m)T&M@Bx@^q?o}=$;SVfD|&>MoaYx zE=tqQ*2Lm0PK}d@Lr<3K#6RrVS@kz~?~K9ZtWR#Pk)Um_>6#er0_vuV_WA~ah3R6W zg7T=@gAFF%2xhQ#0NfJWTP|0tjv3GixPCAmZj*TKEa3)8HZ3IK9NXkv2boe}@FAw% zKXe^n>OZN&Ve;pviHJY^*s|n^ocE|OxB%>$<)Q{U9)HbM4EGNtN3rK*w)){PnTW7- zlV0jUOFYoRBS=!+3a4v%+yI7WBxuI+$(?_y&QMTwb7#|W_ zi)^BLy=>S8WCdud>lkClx4>PzRy^)YC`F+h$8tRF_to~U0L0LTop?1FIGxm>2O5`yV#fD=f&JOlMm=G4Ugk#*ok`+Pa zX&0I{&J}qII*_E0*_(xOSsbM6E+&&Pan7OcE?7Tgdjex-xm6In$w}OVqX% z5Xbh|c|OH@7QC$RmMkS--pvIN#5belfmdr#l#agQA^6_2YjD=h0- zEThHPR|S5L59y?xdab2*(9u-M@pTM<^<@)a5C0C>_Dl5X2b;4h=6SbKvC+F2;cYlv&l@kJTnCQErlCx}}*$ zjoKo~SGqD<;Z5lVIt{?}Wd%_chtE9R7UN{}^93QOg)F+^H{prs%+dV!Lcz(fTsd1` zUR#bjm&je?q8ZF%f*B2+s8oK1WMecQ{^l)2ebc>l_uyub(Y{3^Nv3 zv_OMdX6>E3m;&q?qildYLTxY7H!(+jMW?n!-pM0#&qQsG8Y_&pT8M$Pj3 zSbSeraJ7Wyh=}>kf6@L6Z_;{xq86er?7XtKQVz2<+gCIl*q0E)-*Rsz3%%KW)z#jJ3%mB<5Z(~>y zun*H@D$k+-RXiOt%-OLnf1Ab6qC4;?6SR9W9f(;NOsNyxAS%tR=ZFxV%dLj9ZfiUQc ztmH1cfI_UYSSaYJau63QDi#UuwQkKO^g!)|amhCL!!5ZAUaZJYYcX761VN{YxwF4e zpVY_)Wp7+^f|_h?8K6J++Rt`U43S>Diw4=H_o8W+B(Lrc$)_%Az zT0LbODhZCb2PG^veaR=XmNR9a*<$Hw1Z|QC&AS%8ajWc0-mLj}KxR-SL*_)DH391x zqd^;Em#j8(5f>fm*@A->;_nCan1%=(Q>?NwhO*TK8$v)*?c(jwmP~`JM7i-aBt~Jh zG%BAFg2*-{A!IH`dyI zk~f`D5J0D}1C)Sg)5yH#)+anUwtpD4n6)wb=`tr58QAO-V*cB*AXmoD4@*I#6Dx|Sj3>dZXIcO=%ZnAviDpVr3JLOz)WPqP&KHrdn~UeF}ip-rracMT(cGK?fML|9LRbYX>zWpE3F zNX#2=>Vn(6OXrPA@Ag`|vb_hrJcAB|6QSkykEzcf8k_gQH5ZDmRw*4?DBa?b>M4|B zjwd|ZDV=O`fJH_3G__0Y$8R*Z&DvhV(vkMb`a)^{+R1ilt~r%XAK7>4@V1#_rP*+* zZs{N)#i?}nuE}dkyY`eOuS|DOPVL!KVpHd?ZHL(!R+>X)Y9k1nt*LZBHVT8A(p;^wJe6kOr%(j89XWJlI`6~e_^xdS z+{P|x>7Qc3JlmPTt5aXO)=nmeKIId0Hf3XC^+fKLbc4%tRw~59DXr1 zSsKF@t6>)%CR=PR))9gN)^2`+f>P2YD)S^Evfg#hVRm~N(@HsnnlQa?747qN7t#pZ z*!?UsrOc~B_t$b*7KdBdghC~>O$mNq-DJrB)#=UZV%N^#IQyB+-I-Zh|0iu*MQgW5 zn#?~k89Jf)5#B9T7BGn8H`_07tz=~PM8r^94aGdb?h06nT_seJa_?`#r))^kjqJ;A zz8GF*$F8q`@|^^-E|W*-cA^tgr1mU%R<`0%WBo_1c@3%{fA#;iclOP397meR{b_%S zS;!8MJ`NyKl2=Q66i1Pi&9Fo=NNBwdtwVt~lCbc$7=je7^?mmHJx^v;SNHS)14>@s zUF_cNikR-M%F4>h%J*zbbScS0)z+fSSRcbzKw#;ys(x|;^xTe)rW2Hc+}90}f9hbq zkhgplj4sSo6K;WYOJm>Oe2q1?T?_Bs>C%MDdw2AnD6dC7tE|oQ25`4+-SPzS!<}m` zq}1N(jX73etPFomfUBa;1X0a0(=#l!pap@kYl5s#24(23g+qg+aD`eY>{?sTXUl4Y z^}%=_2C&+}Z%=%D(Ld?sw|b~=uX4hX36-Q5b^A-S{i_`(18fpL@*T##7ZHzr%WtX> zY)FU_5bF!6QioKC{SD)#*|fTyE}XIQvcDz|_9Yq$0mv|gd_oPScqZ8ihV|gd{XaZ> z_U*HWzkBq2``Mqqeg5#-zdn4%`tmic*~&(~_%$zdPZgpd&tn
    AmOA|M=wDgYJz3 zUGe2{NE2fupU_qAmw%hK{`Xd*Yby?ole*t?@ftA5t!#mBG^L5xGcYn+;o3I0)7F`K zj$!bTZ_k=(4<5C-(yxUbEq<2$uKiNGjpfV9<0lUux{8se{$TP?|MX9fFm0WaQ}^1o zFMjZYXA4;`8wl?kieXkYA&Z4BZ6v+M>3<+G(hU5)b;!_Q;zPcW`2{go1(7hp)m!o` zTUodyv_AF#NI+x|5xBO8j1RRq3_}`cn$hO(j!{Iu;};@HxQTe=smJ>->MK~bnhG{K>Mhf)=`ULT)rDM!4Ezn7-wpSd8kcG((^)uUPJ z1AqIvQk{p%Z|A4S2VKdA&j1M&PWGs_-3|PoCHes95B4V0!^!ky;miDi89w6#NVS}N zuTiG=D<~p~A<)4=TliYoy`-aed{=fNAXsVo;UZQ9>07u>;<9R^EkgZ#1gWipR}tJm zzIRqJ>LCL4!q?rhSNeSnNvrQ{69>}Ldcj@;L$hCX{>gNYpEHoeBCQ=(pZz$PDl!&m zW*j>UuWgN>Y7C_7(h}Gh(*MF#I#^|Uf*nOdoJH)`c&>BBW=7QwXxPTWW`XE5Tws*u z*xV|w#TsI~Vt9nLM%F_|d%2NPCcdNf1ZA84Y90{<18Mrb`EpZ6m9536i}QcyH`|%D z5~nKaLW7Sv2R#DDnq?3|(=D%K2z6D{81k{(Pf>~z2dDF==`X=G0dDTe-VHof2EoKA z0x@$OzQ6<%OS66FSdDcMt|QNu`DRh2QLfmu$>}1BdVd`jd-imu*K}sJ`p#nxM_j~J zu7W-5!!cF1~;U_G>_>TtL{cw_%D) z?ll&%`Rkdwnc)ZAy6@S(+Br*-CVQ~-Pz0$ZhjFqF^{+>5OCvBN;xM{E!uTQZ)*TR1 zc*mN4I-d@ z0*LtZrs>(9efnWwpUprTeLYcOHWU7auA4Nc^Ca>tAX|)F3iVQ?sY*U6slGDy93xBr zxH72mPzD#JIch_}5_YNUmvUa3?Loxu1L7|umOg>;z@p>|23X^sDo#ia82+E8%AyTO7wxDiHup>e$iI5*Rio9Sh4Zz?=;*O;xnCazv5A z5YJV53Dsf;kmz6Ua~g&n=?dB!Hdm`BM(p4)^!BehWeKb1*Qc6fSmel*)s z0O!`Pt_J4#l!jw?M^0XreS1k-;!Ddf&qxtn6{v0;z}e}>n97toTR-33*<}* z4->v)x4_Za{%%|pmTMvi(qYk>A}B{UtOQDO=I{Vo&;`^Fd}(Cp5IGp6B&qsf#_aoo zi$J9=piN>UE|0TQ$q5o13F%*4G!u95W(ZvZ%4)OJ%d7*)0@k$7qJnb0^YhaoASC-M z5b$c}h}yLgHfJ0L$Y?HHzqoTNpu*WI=wO4!mmlB^m;cF!zZ{`}@E&O=Ri0YN2ex9Z zt5RApjS(fV(y@?P{T>=_Ts30{mS|*qb&gcYoGRG@me$Q;APrrsh;F=CEwTEj9L>hF zBu63G);br7i?xl8PtyST7gN?7BCzk8kF)dY@THru*tfnoQStbOH^28Xht}R<9blc{ zCX`^dt~VvPdRV*b8fIR8&lrIKBy|;(sSF^n+=s0YCN^;JN5DYUZps%|LBBA=U;+I& z1YcsqWfg_0Ou5p^`dIU_FvO<`RM7yIRD((3zHHq5SFNZ&@?bfa><7y#K?XGtfuhBV zwi{mC%VgW1SSt54oXSOUTx2H6-03g*?^T5JWq@BxD8Ce}a%g`-X}kscno_tw{n6t0 zH~=e(-K&G38>N-E2b?OU8An{osva2xa6{h}rJaS|3V*Oekw5*=S2^tBgqRfUJCWBS z8OZikdyex>B1t-}K2r^_B9yC0Z(%rq^uo^Kga&@#Grns$**Ku`J9MJggyP>JotmxfG3&OZH#*UKF;I-i)s`>kKMDpvy0J^X74)E|HRaVL(6 z^P@^-)#z5#&OPakSOC)yQD<~kK3&)r&|fQW_HTNU=5dhb#*v7tmO!$L=^C!I0!PMF z^y$YRf8;z@{h@!Gcj(0cJka;*_3N|w2<(M6dPJpB22>S4X~eK~PCeklilS@{0=E53 z^+e!&C7ekMK$Ik)%sO9KKLoUd>sqgE^7RAi17xnOEK70oZYZLU>S`Vdr`OoYh=*KF zNgN(nw@73V%rtWDNjuuAs|Xy0beG3nAwWxL=trM)9FXoo^uCmiZ_nR%HqyTnq}>E7 zKs;ok5Cy6~|CftO&99aCPBw`8BkP>p4KBk36!j#ZVGOHusqEq9n4%O$h5H<<;vR9T7QBKw! 
z%d;M;7{43d(1Zo|U7unR`lCcb1dkFYx^8n=4s{9Go;J<=mVsyOhjlrILksbg1F2A< z`)!q|x0vR;dXRU^)6MS3u!ZMW&F^ys3k}qqGIoB=7Av#_C)2D5t4nYmpich%git!H zJr$xccxDnTR(bQzZ1-rVf9i+Jhkee)_g|f=pb?u>`vAC3@bVSLdxV)StS}+#lUBDK z1@iuL$J}h`E&t2t@V_qKz6!12C(xP&=tdCi&JuyRo>IQ?@cZQp$t_ciPk*OI*P)Sr z<;AG4$$Eb=9MZ7FDgMxJ-MOp#S}uhw=40Vx2D~tM14z5|+_fbl@DU@CYGM*f(*F;QbQF`?rn` zyUI}Ry;k|Mg;I7rm9mQ^NdH#|KRhP0@es!s&gv}I!liR)x7G*5%y+>@2a|`TT+#hb zrq9%_v`Q7CW*W=fk=}Itk>K>BnZ5b;*$G;#ZrP~eA7t7+NgGO zR>!B#etz?NjepkiwOXnY%&rqZq2pn}BekVg^HE1MybRT+K zEiRRAtlh=zRVIm#E}v-;Zj+G3g~1kf4~Z3R;gqGC|Gn$g z-I7IaZ|8iL|BYY!h`fpwpd?2n@fG;Mc27C8ji1mLaT5`VB?;*=*{-SF$@-o0NOo|$ zBLSi-yNLDr`1ArcC8C~e>8%AsDQuG7Z~70Bc#CWzxL(%VjEu$ZBGt|4MOjVksrazH zp}9K4Yevk~Ief7*{oR6=3IvW?icRdTtY<0jFD`fBy5{pTBhdi8`9M#IRH;0t>&Eh; zH+ZtXyYts{^dpnDl6#KH5XBpY=KEJghO9D01U_jF7{r%5y9e}qt)g}}^t_A>Z;cjV z^>yA?;*2Y>21VFhfLz9zI)bMj?X)45-Iy7?a&r1M0R7^WnhbhhwOnH)pOt;}Ue9>m zq693Af`MSqQNbN7ki76Zz8=mvoUM?vx_imFvp7vY(e!f%s-B+!-W;d({sG1GUw^u} zw;2}@#DHl4cKJA1tB>FOyM`%y>=yX2GlW?wX_ptH~dMUsZf(sqo9Qf5aN^eRbtZh@ug?M1xnS4Rf| zdsc*S1SGp6H(Q?h@RW6tLsC05SjG)CHBU3L47UYWfnAlP&nlwNJ0%B8gWArH+{b23 z>I{PuE`LhWxD}~>P*EJma?v+9{${G)b>R}MQjf{O>CWCg*q98B(MpMbVvPGQ0x>ex z6eqt!Js*XiL!WjGe2_&1%zxPpTeCKM6#c zhJ`WN4~bf_HFzh^)YpQJ`~S#=QmrfKgHr|T|KaP;AAbAr!K3Xb&t~f<1TL+2f$?%~ zE*-Q≻h7|M4GcIA*Y1cRN#2MPW^B7P9&fV!qjYb9-}F)+X+MjnoUAt8+A#Ii~9V z@{I0$E-7TLWE_boq#lF^01-_|cYvivD4o@12uL?#zXfeJB$sH$*zy%HT%6J0iCiOd zoWpxuYGmp(gf#jaXHN*6lIO6g?Fvq=MpQ*Rta7AE*>H|*R0cxwD0ChiydX_ML)CXv z1uJDNb#nCl#i80Qb#JA_AfSpgwucBt4sJ(0scSSE%i`L0BOBDNorvbH>s&IDw4w8h z_PZd)=iJlL+>GwkWcs@IEmcXm*?z-f)h4$FYHuJ5z49LSM_Pm+0!!-WY|Iv;(55C{ z&4Hwc3vtGz3rVdEyfZ|ckNCl4h@KAdLJ9}Jaz8UM-4W?bTb%B^+{BR$yf`?LxBIzh zk`3Y$FeYd9l>ru%I$OwONCw=aNo@@TPCE%*v=^~8T;3%RKwekvOl46@v&q?pl)?FG zBF6r&rH~e3i%TGbNScfMU)W3!QyTk{Krf$1yps4tUre?x=1uMU;YRU7Jy=$1=DN$+ zzF=wTaLO2{4^*eZw0s(RyYds?Dx(Y<7qDDCtr+r3;t8=-rKY6`31Rn!vn{5FH*Y~5 zA2@f>{l|2xQM-z*nJZi2#&qfqR`V!)eo)fSJhwDI>PyVG2$d(Kjm3Jbdq;%E!_&B$ zDYzpcVSaH=h%4MWlh*=r8>c|fV2tp7D1^>;=m({6aEVq??GrEB^=a#pX_lz_<_Ig- zjl<1@O;0WZ4GJjeS4{uw$$Iz0dMlWzV zesFWOC+bIoTO^U=clodP160C99ikrkmh%}mr+cQLbgl-87irjsLYH|IQR=ZXSBwZ5 z-NdJ5j}F*aqdiRF*%7h1WXw&O7BK&p0EZ9*9-Sbt9y!;;2Or*y!!Np}Dp6+d$@ApP zugFTIV8&a4=963a>gQq@A8p>@sHU7Bs;s`{WemgI3bQ5!3y>RtFcuTil*sH%_cXIng zk(NiA?>2CS?0tt`8UqXCUNkYSfX5z$9 zkirj47#@u|mRd24A{q-^O>xBm=zyyxEVy|-ra zJlWj{Rx26gWZEf`$yCop(JMa37iYQbk?;A8O^?dtW_d;>i^YY6jCC5g}!CLD+3(?k;+x$adTwlEfu^l-J+fUazIj z5emgL)^P~ntaYVSWeKreWvjH>W^gIhdbzJn2cpa3C1x_&A`F9JQz5rl%4!sBHog7P zUk?7cSyz}%SC717j0m<{4T3N!D*ta}K}9gI4Na}p>g^C4xtKsq0{?31#lq^eO)h>G zJqlb}Ry#uWFCubNwX4bf5yBQxSxQ0b=~fS&h(qP!?1vvVIg+n0=9`}3Z+~lWUDgZg z!l)wZWh2uoN`cvnRLUKXDoc&W6{gfToIbD;%!9>IrCOYRgF$wAg0MsMI^? z`A&uyIo}id(a#;}w^i(~=1cvJ2{})P2g4`0R+!U#@`=LmbUCJyayu~_^I^YWPrI@5`^??*j3gY$FeRW8izl>S_XWffKD$_AgbdtnhUB! 
z_T*FkXLQ4X+bh1#zG|?$`f6-nV=72aM=IM(A0RR}M7H{zc%Jt6IVJPShnxO?4Rx&s zh4;$dUyFCPOCqyBditXwsw3aJ{o&^Af7!fqd-KjmtH6h$4OpL68=AT}p$-AHq1c?$rJwfN{WKpBKYgl3wQM?-p`z(ly6#H^3`zlp zz?bxRaWg>0QSH5=M31wE-9XE?Qv`9&eaAPm`E5)fNbKKznm$FM6x|CT<20*ZUIUw10a=;0X<2~3`A)?g-nTe>^Q*XwrDbNQ7Jf`3 zLu9BsRPe=45(L%c7U0v>U-Jb90M@zZ>vzBT0YA|3!TMcSiA=|{xa7f9hu_a`ts6q# zQI2Z-`)_})6ta9xiP4>s0w`C2tsTk>kd}+@e&_t0(&SgcKh+k_HdJL{gVoG7fcx}p z{qAI4!Uv8xRVt9SqMx>eNtGMRZXbi~0=?^L;CNw8i z02)|H^HLFx;h5vigPQMl&JL9#=B8Eg@w32pUca3a;*zA%+@i74LH*HCy18OttHR@T z<6fFsGVrAmW@vy2z)2ChY|6PSSe)FTQ->|yZ$54iP~_?KWni9?V)CDel8X1=`u7{A zth?BC{f~dYd3WR|dNUO%BDYuv&;Zd={B92s$_1P*Q&|mo&I=z*KIfX3fu`ggY3~hhGLykPysMoY zpMa>2a9gc3f%TRjk5(paN=sda*$3H4xT@F3TsIK9TINuono>Wme@>!&xZQKDN zA$2PGs-aVtZPbd4z|_ks*~8qW;W4UlthxkPxfLT(tSmt%Q+UVT!R+>{g`vA)cN|24 zA0RF_Ra5=`>`+zH&EnAxn{fZd888*&yyL*nzdJme5~44TOV`I~29mckFFz6^zQvu1 zVk|;&iZbDY)$w>M9+>);4_?n&z%soZK9o)s%vnZvr+X{H&_tJhae6!W)N}O#C{*X`s0br=Lu)nXo|pN7-ur&tGGuF}iG42PJeXh-?E+w90msPYD8(^(Co*^Ha{^ z6vj=&#=h=_rWhSuP*jrp${s}-Jbk5Q6{mZ+C7RV~w09z_Dal)}%iy{)1AX4~Slh#N zq%XW&+Qc91s99|fEXUo!Ng8Y9N|Q@o-Aa`H#Ebt8tVnlcCqi_=M)Dlur`bQn%P-e> zscwAmJyx%k0B^7cHO3@OmRnrm#nHQ1J{Q2awtQ`C>oQiw$Hh_R4GgNN%e#pr1po}| z_qHQNJ5$l(l5v2xKAp+b|NQB_`wy3mo@IV@#Y%3MA6jG<8SjfgPM$t_;G)06N%tNv zelS@;f);Mj7i1OzSOcFshMkxg2EOLmv1=B)fm3INug#QA$kW@#_U{P5MzUk{FG z!E9HJSF7u+9IOZgt9j#C<39Q44_lE&b96(LdFn4)^jq=z2`8(2ag%$H%q4yP(|sLqxORudR@ zF@Lvv`aNeTh_w4V2bsCdOp8RJs-m$887w>(w>fd#07KLPkGV%P-LAxSO>Pk4b(_Uv zsCoJLW||s^D9?F=>$1pm{0~#t^`FH zfT+FS?j2L)JE77>1~|b?x2b{Y?!8hTQt|pjjpa4nYiGGS-Cq9SXx^HJ<5RR+EGH3% zGRr%@j%$@iM0?5Tw4tD2JZE}+uk|25ugl#nhSn%q5PNM`)XT8^rg6qJfi2m9=%=5xxToCYs$+md2v5RV25A3BXp=rDl-NM1NrfHzIlNf2;* zupcdE!_99e1il$}1d0a~VkCNOvhn!zdH5x8M~I$L|9XQuXjLPNSL8=MNgLOmB+8h` zHhvqjku~u;amhHRq1mGVSPPNCmpI)5@XT)~kas({lJM!9Kun$~w!wmse+3FgR;nvX zc0#KPtwC=oa1A_Zm3=l2keF=+DsvMD8?y>Ba*}(57WTGdFqDpYppu?0BHgeT9>4Q{ zXa6=jm1OTsV4VHiq}c$raaoW#pFLwF{{$optf17ZO~VYXo+rM zO41qXnUj$6MiVA;9bLBM<&$;DwWz?M)1Fns2h0Kow3u#@Z^5T)thYK@v%WEWaVwPp zi-)JwMP1lpjN59z;)6mLQ)Td`af{Gah_?!^RBZvnq{kN9LOL!Df4hzHvW@SBU}ezE zNNjquc7~gQRj9LUL^u#TvZW1|%sp^7hau?A;lbO!C8)8YSy~QadXLGADQKW@EStA) zfhzHCajgHiqhQ+f?1>`QQ{E?55SEe_24gnPf_@^2PZ460T1A-a&&URS*Jg6R)u>LV zq?JOxKJ!~5@U1dMAYdD+E1E)Z3G-1iJ~rjOn$P_iTQ&&%ZHdf}X~h$q_T{m~fQ{_?oaXg?wud%0cy^qoxyP z8rEHfgiRzs3lM1-EdpN{<}h4Ud*8_hCKZ|aChrbh24)*3kJh(g0#%PBxpf$f#M&d?V8++-K(dfy(TnR$X-h`+z+!DW_l;XX;gEOSAO;MaXtW59> zQDKKoqy?$=N%8#Z#qt8C7eNDuuiYC<#!J?r{Zs6>8h}UEk(P9ELhFUOY0~o$V&|q2 zZ`JZZ!&ZB|?AQcZY!XsUy$e+wyt}n!I+<@GODcKuqmS?W^T(UHmu~FIZZ_>c=Z7zy zwkLLnKJUd8{k?q;G{@J`a)CjjiVi^o$qhP)-pq|xMIbrNiSAcW zNIPNqOxz-alwY4RoR>Fvm)V7vTmu{Geb!Ipj5;p?$upmFgwmn`9A578hxfmJ_GtT0 z^gi2ubpPR9$~Byk->F+Y9s1p;-FV6{5%PT~8^8fkoaBsDFtkc!!*|NJMqt;GF^-`< z(GFV-+y#w=Scyh=fjz?Bai*ifvc(LkjJWzeD{%49*Y7s*yUH+=$hTB7VO>1vk^Xxvx{AJd~zj8^!{b665R@4F2LDOGHJ&wb!zgS>otKB1-kA^tO``}9l%fBBy3A#R}Rc;Ly z3U}X-_&Q3+uCAe67-RQJT0|Zb4X3QMl5y3BnuF@CkbQ*kxxaUtd*61!E1F>zpc1;Y zT_&X>OhF6!T`$Ll9}>O}QXCXLjal^_SRZ_8S%GQA9;5$bZ#+%|w0L;J5Qnlsa~{bs z^BMy$GE3AF5eD!Y3u{ify`cu?tqf4ziUfgH5Yy|p2*CPfpkr4|IgzdlCDGEMlcn85 zKkPnZx#x$loP+NrHo*K|nv?_q6T+&wh~eb4n0L|#QnnCSA3nA%fb|3s$wvn=79@@2n9|X zJP6W^U)7p6z6q@Gk(pO}A5Mx#dwd|%co*K6FBKcQqI1O2&MwFmvo`_^ip0bNd$aHo zwNj{9sH-X)+De~{LI-Mt`?HN-WXA=C>Od(=yV|mhv39nX^s!xRU~7SNTS6eB*!~3+ z0DQGcZRzO?weki;Y|H^Klr)#%fe4bNS-kczb9BEP9o2#jy0Tf_5E(iK1rfoB4Py-( zWzM)fc4+;@_XA$j-TaAg@vZDj+}}v$bFc=9OWew=vXh078^kow6VKI|j9XZzlgYv) z=6%J268kFg&FQJT2WnhmiH!;cQbIhh76dP}EQPwqz{No!Sr-|Lu~}4;z{EsgwBeuo zCgqN3Eh|K!5;54&B@Kh^@s!;`^i!F+{t1+l-1UXQrL`{!Vab`4ZU4x^N^o2*Wru9J 
zrq~6kWz`_fu=Bx0rH_Uyfi9oH9Q@WKnqNG~60nkkn2c&*jvwxiT%@_M*Jge#+hcJc zbwbd$f<9$wum~1)*ywk(6ea2$MC9EuhUvgcAw@!!PT`URU@yJib*)PZTz<{_4Z~7W zG--v^pCTFG!mV1kdI*{wTfXG3&!K+IJ--YZ9f8019sejqgeo%Vg6&=L!nHW?+e26~ zN7x>rIJu{9PZk6KsAwe-U~$b2?*A^=f6Tb~kG%N@Y-ZUBZ)jDwNO=Cz>&9+0YTERO zN@_;f=D!;T=v<(3bU131YTH*j`gV*sZ6E?S%+p2Os`5M#TXdr04vIRifNSk_5F}47zQ3t(U zw`D#L;8O}Kw>uzNwEa?!~$$Wj83uV7KiLgi^uT|~nfnfOo{gX$J>@3TF*X-E(v z1__V+({F{pcxMjw{V3-x0YyDW3((aIOvttDJ$b4S!lIxTF|DXcm^|^O)vsWd^^P#8 z+t5ofn_?EfdM#UOi4Rna{u72B)DHq{z%OfMK0a# znl6?S#PHy1){*?;>rnbQcx~-&>Fz;=iCPui{}uYOab%bp$|hKYtVCmCG1TWKv3 zakWs{TM8hh2DsUYH`bp5$PvV~U!``L>752zC)N!6;$R(Ia*#p}-e^jia5GDB)B0qb z2bT(aDzGwO0QDGqLxUy(Ewq%wEX>AX=RJa|73E@jXv!1&px89nz+(mx-&k4n5!n2B zP`-@Em00XVvPx@34LMP!c=3cdsKIse8MudX`j5gtR-{Xl+Y0g;79<#(Sg<`e7{oAe zxiXT~_SW8iw{sO(r|nPQLDv6$1gz(ZFRE{`(hKzgQNlGXr1;B*vnX__KyN!ctUGXg zU_13oo+$CgO!URh#Syxdt6YNrusUhWFgc)~L#=y-gnGQR~Y$dn_5X>yVPw3F*3XeScf38 z+2K?B2x}Ib?bZaxiZ|CfWwQ-o02M3>Db`74hsCCa6XFFoSotg*oj5Wr zhS5B2eQk>&m+eeZQ3_r$76qQQckCdpTbY+wH9rleAe$o+oXU z8H#4&W%vkJmV5~?DC@MlG=&!=iDuc3qfeN43tdcIc~thU?z=Q-;3t6BdBYs^u@7r; z$26v@bP~cI*P-23^RW|#xNn8Gn}92hT1T8AL>^4+l#ddbP>#$+F6GxaW=BeRBGAVL zCiPwG6k90BHbHq)eBJsbM`WYQBgO$tK)qr?9}v<4#t86-SRbX zld@%^*yp z*Bi<;qZ~>x5b+%QaB{DqqQS;$%tUr0u-lMD%_O8AkXo0@wXItW)}isd3?Z|u8+*$s zM6Kg-4`H1MFOO`lR~*&JUHyGVkewU14*&8sZ36bb3Hf1^=@@Bc9dPP%SdWutxmA1Y z%R(rr;)6Ye6T+Ni`Bd6s%a>cUS-w}fgaS2p1 zJ}}jQ+5%rnwwRE|-Jh*K$Dkbg8p89=F~<;3zKF9Q((5YIUE0vsE0nB0CE- zs_JcS_woYrh21scujG{~-RMAByr~+g$g+6G;q9DT2JMa2IVgHR+tbHZx;7^?1{wG` zrE7niant=J%SD74C>p@HS6%A++h2bB@ZYu{K6`xc%Woe%`r->N+{NS%pG$_fzz<$R z%&KM%fED9m^>(IiPQK>KRD-tg%#jo?3$l`j)7ffy8>iIHvFx>+*c7E@m>z{{woV-- z?53J!`gk(EaF3zSYsp>HEU_g#uaPB0^FZaxmnt>y!APaD3npq1WxZn`t@@SHMFO39M>g%(a?{`f@&or!t-*>Sg(+ zA+AI+eA}X(iNePoEPwo&o=fqkdZH*tBN5^Q2&uP7nW5fmxFthA7{#IaqIQsf$d^O3 ztJo3S`GVe@64+$9hP?0^h( zcdQS~5-ToMGxIVIIc{?jr*#hJ$D)3KLmOwE@EcxEBr3^<4rZuDO#82B{|)4ySYDy> z;xwPAT!xG*vmeND_?uwl07#85+VEaD8Ozm7!f<|}O@WCjsIZ6p;u%%cW?`~{C0Ebj zWFwI%iaT#eCw0>m5v!7-HyS5^YWSlEYA>p4-VXV*pOq!CMoz8YJW}XVTxul_vFKSw zvqn>6oiOY-!2-S^iiy7f9-0=LYn2He0+_kPaU8+;Uim0pEiY|ty z?DY#gs8s4{HnAn|igX{_801RyM2 z#X^|94))+eoX7|+S*!9j+jok9vxD;jl{kqalI-ip`5#ArHA6RR%r-UmJX!LZQ z4UVSvF=^B3ryTV-x^6*XWrWLzQ?1*svb$r}zu;Ic;@5!1)$2a!;7&9IptW6|n!c@?SxSol7V7KvIa073K)eTOJaxL(ID}QP>PuF=K0$3SBfpxE_vC6nye{ zR_In1kTA2`1SC^lv25smx4)k2=iO0$mVS`acv_%@?zhS~K=XR)q zn56b348~eZ-ty)^?#bamABjN3->9@a&eIesOI^Jr4Q4ZuI}lRydW@zXT*R<=@_+h9 zR`wT2kIQ&QgfpmV&b-tB|JK{MIe+u=rfv&&plP(z8iZHe3Zz_Nda2ix{;as@Us2&C zMG*8m5|_m3>i(4r^;}XUEYxM!Yn*Khv zfkvwzRyL+e!4jYz;_hkNm#tE0SH58>_)FD2*W(S?12+a)ak`TJcu4)lw}>;HbmsL{ z`$$SPFdOoX>{JZL1*}p`P0=foePk<^JywD85d@Q~X5fejkEqg+%p|3}CAeAt?O~@_ z$i>{sd?J(3iiG2{Q;1C`O7SWT4ja~$b$EI)XK&QzhcbDQJY%a)*`Scv9x9!JW5>B7 zSv%vdG(i(TTQyOOKi-@&h|si+%q=P@LTk#pd+iVy-7fHIPXFVAc)1tmnUzv*74U+8 zD|h^?(qADCIl7_2CM?v3JW({s$>|<4 zjqG_SI$;N53~*r)7Aq)K3bfl*PzlGYi^w4cd*rjOh6P|{I1aKZ&I!&!6oIcwg8H+cj+KXeAb0;aOa1?VrEWKL@ZW6fzu6X7 z%HM-+VNyItwjey^=+IV4F8@j3d%g(RH=L~nQ|cOUmzKQ72@@?O`>@hGeY6)Kr!s*6*D z#^(#(SeSmNg8~Y=S!Wc|cQqZ7NJbQD*CEjF;c(CU$I45(fEk2DiVH_tNT(pGT}IeS z(_=|9uCbuI)Gi+lUlH~55;{^5hA)<24B-p&NvJ8+#92~oB-_Ousedg=uzC1B1&0wi z>!l<~G9qGR&JMGJo$F{MM);Dru&T85RK{`5QWS+&)(Zxo3PBa4 zTzSWmcR-u-SD~VmiD060Fm#VA_U!2Nod{pkH&8NBk+`Ccm-m+g5o#DCB5|N@N>i*8 zBjd0c5anw^Hf+n%2Web2%;ocR-#Z*dJ=mK4E$>U`wg~2_wDry?`cm2Cmg~Vx9(Rwvh+4k(kVTKnj9Hp8={3uRA)@&i}fk!o^IaKqDk+rJMs(#5p5?@Bcbn@+K) z$X`U?6xTw`()Uxu-FKW?-eqhdycylE@49%c9cnHnjvr*Y?g%Rbt52ehn+ZB82K?7` zr$lI~vwIG5j<(5hX!2_e;HP_s2(9ULa)eAbT&rhG1N%bUHFmE=97yS-=@!I{YXZzm zNqQ=WTz%o(i2m7c6^r%R=5IaoV=*2 
z>tzuUDFD?-sFMY*W&~Khe$@1P2~SDJTI;F+L&b2A1v3>o zh2(w$g&1}1QRISB>_@Q{YAe6yG!)XBlx09Zgvq%kLj04@V!AqqzP%rA8@QbLPsb=( zX$C6o9_VIDP<6Q#|T0t#|W7t&clkTEjN{Ohy!pq%4MzF%Y6@N)sX*liN*f4 z0)Dsfvmed3G%M#~n#;n(y>~j_`T$=9%y4RiO%*`gA)!Aj_OXlafAp*#_f#vL78BtjDTyODq~Pn5}JErmezp(7N%XR_Ru&c-3!`{TzC|JeIb(w9GK-v`Ec87cB^*D^U^ z3l2~43s9tPonk|9wmSi;UtTJiuq^^Yw5v= zLX?kJTY&O>{$B%-l_z3hQorpavO)gUxO#1?5mZ)FvjVHpN}x%U^PMR&h+8aDEMWiP zH8o*`d8Qx_VJ!x+K;%(5nUwTSE|K7Hjh*vrp}Jb2gcDcqO9vc?W2+8=MZGSH7mAH} zcup*h;`bB}r)veW<;Ry7#A(2`II?DnVpY%^Y&H#7uTG`csPtCVw%b7+ADCBHs0Ix& z2rEix_^%^PBT|gxzAHUX+nOVCLPd-s&!^L0zM1K%{W|^&i!kG!vE-x?wDz5gyX1vEl$K5Y;Zei=@}@$toLJa=doM1hdU zh5}eItxZe+%8ITNN$?T(CpuvNY$fR~$`cGU*4%`VP%j`B@u&<^iz$~@%Zn>8FS0N- z*-yKXa(FtQs7!~}dvx&G1~)O|U!2IvSG3j6C5B@=oL~~L44LK+H*gON)oujy>aNVz z+M^G{ppq6QGrz9%qX$O&ts`L`(r@JmB87%)8N(xIGfAH!C{*c{+6Md}ZUgMrAvR2v zSTT!{DwQ+YQ7m;5WyoegYz!Egm2pHBvXz2uzJYNmUf>_E-jPYCcIW%2CGaN7M-d;Y z91HSIf$o>c@x*eG(t*N*C)7d(YjVTm7sLl|XOpBGc&uH?|D72v)HbTQ{b2D0hIQUjJ91sKR+L`j*fe1X+gX*aNK z`qBHw8f4)qb*ZQkHaQ9Nom^JzU!8FF?bBxuzj*X-VoTn#Z2P11i?~N6H!8w6*aq8T zxlUY5vR!`eEHq53PWUUhVaSj=x;VLNPmc|hVirD%@(o)+;DOY?-cs2PAD?zgiIXM= z?^<|1au192WM*3<=_eaStkgBHlCoa$TQ|B~93&ddaNjh z@`?Do7>Z<@7-aTX0tOe|W631~sXQ?pN+Q?vu`$p{@coc)rDhWW)^O>JpqGzgPIAoC zuWupXMi9T~OQBMI>WUyIQJ72(C3HFB0^>KJ>ike*Q<+z#{DMVU3hm%n#ba82!<11~ zkWbVK!6CUAa|zrrueMvH8v3F_iti4e-)ITcUnf(G9Q>-#R)xaYk}{zlQIUZ=hM5Dk z1|(>`L`;BPQ-Uh9c-p7eavDs28Ql2mNWmHn1``J#U|7e|ru(rEV9Giox>y|>FUt?x zSA!8@g3WIEawE-Lcb_C-^Fr4YZVg1M&jVy-rb-b|lb8-?VO(P{tM9Z7-QQg~*11i! zuK4R!klPZ0SXaU%yxF!}Y9UD3W48dY+jDT?>3cXa68@q*+s6A6e0xXxzrRPUuD6>L zzwRD0lPO(bN)mwTQ!ZvmRgGT}m|=~1zWw0Q;|xI)9l@7Q_EqYK?*-;NK>z+d=!$Q} z)eBh_T{HzFc#1-YYME z1v5>vV4<=tJ`E+br=W1LkY(;cBY)^Dc13b1z6X!Bt11b$0V>0n!S_SeBVo;%u)b6E zX38Qvij{95P|X~;d8YhHH9>P~s~19ON~{Wp0W}!O5Z6!SG=~?~+GjW)HF9k@A>(Lg zWU$o0l^@&t2UyoeER|J}=qc-{R_e{T7h18e zK_&VTFv?M@3yigl*-Mm?MvR}M+}on#;=)n(nDcan>h4w47HN$R7FK}nib@kZJI~Uz zH=%0-T^BhLk{VF?X3XQF7ZUAey@g!7_Fv)44^dHdrL-*(hVhToPq0(#n5cBf6X(lG z=Z#BCKn7JcniEaBxR!Tm*pA^bWneF*iqsoh*jkScsa=qk^i8A%deP7_gdkT<@DiM+2(N(9EdEOPzWVi4J;C%#ecH!LiTic zRAX496k}Y3vva5L55z=n*XDa{(wIqS%J9XfzQvPG{lL|x6WadF-($cDqNqe0u)%rj zPa{y&XLP~oUK|@Z;jN>#3kH@jwP!#JrWH1T#BY@%rSr$g#QFKl13c3hUYR|AAsmqi zhpD!`u2(~hY{X?CRVFS5CT)i5BC;hCekKfw!$vZ}vW>YMvibZK-dR`fZa3y||J(EV z4!hN@!Ed@Q_u`A26zsn4!WiZD3=tE-UHoPWOKGKdM>4T-cGdo zrj%qEq=DTmlpZ&iHabO!oa8;gH?^#^CuJdW2AsV*Cm656+v>}42%zMNlQ-s2?*TKP zE>QUN_CVBOkIO17;2hER9Y4)kO1IG$pe_$q(vLUz9_ym`%DSB!e&aiZT zRE!R&WCP~Ce1$QJG!dt!(#iaNVCmsE1iFZKc6e-b>RlP6GFSxHx=(L^&vmIi%rYvE zW!#p&L)dgjW+#g%DDOxgRQ(lU)|ZGXw3Q!yrRWBb7T`&rbLTbKGrB|~8RAT6YZX@6vIaM9X+B&7$c|a2(|+Gwi8u=R__^#k z_f=p9HaiY_(RaMx5_!t&C*x$%ExP|D_#7o8OQTVphRqb$EH!(iPht^~9@1RVKmVB} zQ5o1WYdB1&D5peTInOT(j1f%u>Qv=!Rlh1r;1mo&vNnxarR1)miUVeM$#tlZ2)l@Z zM2tm|QszO_G$N1Q^)bL+8N6kS23@~bcbD`JWv`cF_D)VFaY$lk13O~)bA>8n8&X@Z zviOEh<0%v;9EIC}jifw-JmwN+6RoPbxF)i>$sLuOF%P^x2cA51(!S zsRmp@4pAsTHfY3WAK`EdSda^EJe&*!*tDNH`Ak|%g~UfTys6~|@&i`Y) zlvSx2cAm-@QbGCGdtZM2u(%+^heG#id?9Qn2?U=bGdM5Qf#1biwb4WzD6kY-z}QBG zgx5P%r*I7-P@($Tzy6r3IvSs(;VX?vRtAtgUr*0>-cEPlsRV=*Xi@*daSEbM=I?ZB z7ng1t8#7t09^#|T{F;n_|2MZ&z=sn;xE%691n$;AUX5(FppLja7V{(03ME%+e=4?z zcvSnDeSC*gLvKVSv<2-U(-Ong{to^plb;F98)v6CYp9a^AR=5l*Ul$5&i8oMT~(7C z@o)X;0u$Q68~gKa0RM14C#bSG-^+h-F{ubQe5muV1RHl!i;lh>H=LF7N%`;B`0+T8 z5v12?%H>FO@L_c8{;*Zn5fM4?NzeS8JNQsHLcwlW=qELK(uJpYUH>S9cJ}x$T>`6S5C%l$OK%08<^97Q>b<$xb6y%!G0CGlR+ixBHKY#d6SnWr$Mbgzl~Hd9qbc}) z=ldw$EJag=MuT)z@0PH6Ei&H-%927h^3km}{y_}S6YpLS=)%}}X~_QJcp|Gxb;7|a zd>&#cUPXR^$d>N;dNeK9Ikq{kjE(NtMyOYY_5~K$9+lgdL|y9SHvJ`H**f~Yi%BYA 
zC>4n*ANuy{Vn9JVxNlQd-e$ZaYC6%VEQ}lCRMlVc*}{2JC~X-~?)7iPjga*HwuEdA zz=^}pr4-~S_lqU%kn92CO2{<}%%kc`tLPA7WQpqteU@V%lmzCmyT}((WMk_(GLxW| zhz3*UlIt6LLGdrwB^M)Ye=&8a7ogjCXYiQ=%^T1!_~{6 zRmj@dTpNvxauA*>{4o)R5XS{dX_rS^aeCavtWB~Pfn-q}(tmN(9K@%?a!!M?a9%+* z$hGbw;x^2}e|&2#$F+PnCx6sb?t@8CC;S>|+eMv=TfO{UOg{E+1m(N3TNuz4uJzZQ z#nD1js6meTXE&C@7rPOI*OtiN=A41zSF*SGg$_M2TtHw+#Y5%QCv?d-{IidG^)=V3 zIfUb!L#4Pw+))fLhfa6Ybw~L6sB*=>B$d z`Pk*|oKe?}xLyLg+(9;`ar7jMBzC&A+|y8TY)Y8D7!hl;K@UDhF%-8n%&rGStdduR z2}?GsCM}{DU@K~@wGmdBAy4u9Pod}s7bB-qIY>&O{ibd+&bNN5tFzf#zl_Ig%F3Eg zv|XDw*9ahWz$wF7RTNv{~WsbbZ9$=ZqB(2TgreC^l^T1TO z(bE8WKp^!!nQUDN(xHhGfX~UqtsiwW1dVp90jQJHlc~3&up6HfV5`Gu-o+uFm6oAC z5PByMfCveJpfvW~IXnDUJj;?#9Fi$;yYNHiso^0SgL{P+a)4e4NZhyOxafG9Q|J;B z4Y)ypz9gwaNNeo4(ep-(Il{Ns?qTzJnI-lWOn5&o54J}Gp^O35hO8D*&vIw7Q7j=F zb&nwW5kU=<>Hq`vx7J7r3hk zLbVLIb9DA7EYtl|ZAI1I70tc7k-KBq$^Tu%kcREvy`%|v_mV!jv;lbcs)TVuk4pce z%Uc>ZpRLrOxO@N7w!qzMl||55;`#&Rm~Q@$LhBtNwpk-2x0yE`y4 zmPw?gW9=7rCWG$Ri?0jFC3IyTw}h~AwunjU_MOP)RN5A1TK;NtRP%t=C>^iDOu6SL zZwHS!ixM1l6)0D~-9a<%yRUOg2)Wd;-MtA<$AQv2@xg;79)%2$B+^+h(o++P6&^GK z;0~xqe(KPNTH8Mr0<1`D4{%O3DP=!#$Hp>ze=`cDn%4Emz67(aYk#o0Z)bF#8pYA!9$QG)1kYG4cy)>>>i;*tQf!0d; zljH-^CL;<0ktJW@!zig%_M@?q4WCi9JL{vT*4l1af0#>NWwH6Lo%HY1p{I^eT(~VN zf_wJ2!}ne{ar;@}!9rnRqWt1y%B_-1>4r$FyrWA~iq1*{+C+jIq7E#$OJ$7Q-1Fx5_jeyK^C%5#!4e*sgye?gA5n1 zi89~n9}icRfk=hrsHRI}ht|ES%~Q8JdS(?uQG`T2E3*FHJIBi21pVKSQ{|OMx|w7e zW4Ii#;6sGcv9oN1q9~fzWhViD8WfTU7Y_T6)UN``tT?KnIkLz6xj+hg^iG z00HMb><56&?pG^#{z{uSsw61ZMlmw7u}&UDu#WnCr~^_)vTaNcw`i26&er(i?5D;^ zMA4I1hxk8Sc2?_6>zxTq#`?8C91x)G&(tPHSwMZfDAzX`7OIPP1>sSrhl3#72x_nw zpn6))EtjcP$w8`sn-y%ky0cEb=+&`ZyQ+{WFN_y4?LX)hFpWoAa321ynuzEl%9gEt zCF4CJ3?=}#S#BlJ&GNJpF~UBu_BgFrQ5>coMTeUVu?H@(KnsrKr0NPMJ4qFuq~G???%||(}!Qxa8Xipj@TapTU#>Gxbbdg0r>@{>Xk7LcpK{Cv$ouq zKq^eR?B(WU(N05pHQY;{s&2PgYnH~3*|~^S~rPq<=g8^=YPN91m3j@tgb~x&%`Brvs6FfO}pb7 zd2Na9{@FqmNxmJg3Ay&HnvM0ZrS{@y<5q^kQNuvFmT#S(eviv8Y~FvUP9al4%J|PM z>f7k(c#DzqzxEtq6$(q+=ScCq@L!bDuu5~$%_4B3E$ZE4f zw7>Z-Z23r`(~HI@#+bM$zH*`I(&5^ zdW6F3*513`SbS+VFjkss?fE+tfOp4cV8n;o!0i45Yuh-ceFVlf3SUhGJM@CV*;g5M zrFldr0EHts3_+C9P-uRkvU$jfq~$`4=7km>H;>n?;83%PXjS)QT}%zqRJfrjIq?+=eJjgTh1Y4_TXtY!ixU&%<9ad3`I}9zjc*ub4I8;@H+#CinMa$u?esf!o1eYTPonS!3XY_rG zU>y@JK32b@PY3}i+6~|K00~^;nxiMAgnE^c58I3IQzmDzP#AUD*gLzJ-TCmt<6B`MoPBcp&R2)*7_}ad@V!1S2D^v6*N9osk0tW5($tdE z;Sr0@e?|YxFM+(Ps0Hhz)S}p$znW0Y*1E57I=Yth;inrwC$eHNlW*=FQRkg7@FyoSzLnJ8NkVW^^tg85dHGsw zr8C(j2xZ9jrMSk~U~4A}5Nez>uk|IHT!EhFzEszt2OFEq)mjWo@gS(GSQz`8@gzq6 zB)M^GUo(>LmJ#`Rs6V?>44z*1qT#d?nfh+cKL!qdw7uQIm%8&}+cqedLNLbk^(ps? zgw6=~6u7h4lnBog=x#dD3)~6ui(^a6phGIB>7rC?hiZYdsa|ypFu-x?z_c%$Kl?>% zr3B;>;D~up%;M6I;vnlfQ}mfH%LpTw6GUk1TCv8Rk(Yeg^rb+ir3Lo{mE_PxO+b1k zG;m{bsiSF@oA!90Hnl?pQ^!a kx1?)BGEA>^%k}|yT?8EfuL-Qse{=}62W9%$+|9NB0}^|2yZ`_I literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po new file mode 100644 index 000000000..d07da117b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po @@ -0,0 +1,5077 @@ +# Test translations for unit tests. +# Copyright (C) 2016 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR brendan.d.burns@gmail.com, 2016. 
+
+msgid ""
+msgstr ""
+"Project-Id-Version: gettext-go-examples-hello\n"
+"Report-Msgid-Bugs-To: EMAIL\n"
+"POT-Creation-Date: 2021-07-07 20:15+0200\n"
+"PO-Revision-Date: 2017-03-14 21:33-0800\n"
+"Last-Translator: Brendan Burns <brendan.d.burns@gmail.com>\n"
+"Language-Team: \n"
+"Language: en\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: Poedit 1.6.10\n"
+"X-Poedit-SourceCharset: UTF-8\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:138
+msgid ""
+"\n"
+"\t\t\t# Approve CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate approve csr-sqgzp\n"
+"\t\t"
+msgstr ""
+"\n"
+"\t\t\t# Approve CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate approve csr-sqgzp\n"
+"\t\t"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:182
+msgid ""
+"\n"
+"\t\t\t# Deny CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate deny csr-sqgzp\n"
+"\t\t"
+msgstr ""
+"\n"
+"\t\t\t# Deny CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate deny csr-sqgzp\n"
+"\t\t"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:43
+msgid ""
+"\n"
+"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set "
+"current-context my-context\"\n"
+"\n"
+"\t\t\tThe loading order follows these rules:\n"
+"\n"
+"\t\t\t1. If the --"
+msgstr ""
+"\n"
+"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set "
+"current-context my-context\"\n"
+"\n"
+"\t\t\tThe loading order follows these rules:\n"
+"\n"
+"\t\t\t1. If the --"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:44
+msgid ""
+"\n"
+"\t\t # Create a cluster role binding for user1, user2, and group1 using the "
+"cluster-admin cluster role\n"
+"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-"
+"admin --user=user1 --user=user2 --group=group1"
+msgstr ""
+"\n"
+"\t\t # Create a cluster role binding for user1, user2, and group1 using the "
+"cluster-admin cluster role\n"
+"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-"
+"admin --user=user1 --user=user2 --group=group1"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:58
+msgid ""
+"\n"
+"\t\t # Create a new config map named my-config based on folder bar\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config with specified keys instead "
+"of file basenames on disk\n"
+"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1."
+"txt --from-file=key2=/path/to/bar/file2.txt\n"
+"\n"
+"\t\t # Create a new config map named my-config with key1=config1 and "
+"key2=config2\n"
+"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-"
+"literal=key2=config2\n"
+"\n"
+"\t\t # Create a new config map named my-config from the key=value pairs in "
+"the file\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config from an env file\n"
+"\t\t kubectl create configmap my-config --from-env-file=path/to/bar.env"
+msgstr ""
+"\n"
+"\t\t # Create a new config map named my-config based on folder bar\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config with specified keys instead "
+"of file basenames on disk\n"
+"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1."
+"txt --from-file=key2=/path/to/bar/file2.txt\n" +"\n" +"\t\t # Create a new config map named my-config with key1=config1 and " +"key2=config2\n" +"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-" +"literal=key2=config2\n" +"\n" +"\t\t # Create a new config map named my-config from the key=value pairs in " +"the file\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config from an env file\n" +"\t\t kubectl create configmap my-config --from-env-file=path/to/bar.env" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:43 +msgid "" +"\n" +"\t\t # Create a role binding for user1, user2, and group1 using the admin " +"cluster role\n" +"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1" +msgstr "" +"\n" +"\t\t # Create a role binding for user1, user2, and group1 using the admin " +"cluster role\n" +"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:56 +msgid "" +"\n" +"\t\t # If you don't already have a .dockercfg file, you can create a " +"dockercfg secret directly by using:\n" +"\t\t kubectl create secret docker-registry my-secret --docker-" +"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n" +"\n" +"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n" +"\t\t kubectl create secret docker-registry my-secret --from-file=." +"dockerconfigjson=path/to/.docker/config.json" +msgstr "" +"\n" +"\t\t # If you don't already have a .dockercfg file, you can create a " +"dockercfg secret directly by using:\n" +"\t\t kubectl create secret docker-registry my-secret --docker-" +"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n" +"\n" +"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n" +"\t\t kubectl create secret docker-registry my-secret --from-file=." +"dockerconfigjson=path/to/.docker/config.json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:45 +msgid "" +"\n" +"\t\t# !!!Important Note!!!\n" +"\t\t# Requires that the 'tar' binary is present in your container\n" +"\t\t# image. 
If 'tar' is not present, 'kubectl cp' will fail.\n"
+"\t\t#\n"
+"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n"
+"\t\t# file mode preservation, consider using 'kubectl exec'.\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\ttar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- "
+"tar xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar "
+"xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in "
+"the default namespace\n"
+"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific "
+"container\n"
+"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
+msgstr ""
+"\n"
+"\t\t# !!!Important Note!!!\n"
+"\t\t# Requires that the 'tar' binary is present in your container\n"
+"\t\t# image. If 'tar' is not present, 'kubectl cp' will fail.\n"
+"\t\t#\n"
+"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n"
+"\t\t# file mode preservation, consider using 'kubectl exec'.\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\ttar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- "
+"tar xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar "
+"xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in "
+"the default namespace\n"
+"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific "
+"container\n"
+"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:119
+msgid ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. "
+"dir/kustomization.yaml\n"
+"\t\tkubectl apply -k dir/\n"
+"\n"
+"\t\t# Apply the JSON passed into stdin to a pod\n"
+"\t\tcat pod.json | kubectl apply -f -\n"
+"\n"
+"\t\t# Note: --prune is still in Alpha\n"
+"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx "
+"and delete all other resources that are not in the file and match label "
+"app=nginx\n"
+"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+"\n"
+"\t\t# Apply the configuration in manifest.yaml and delete all the other "
+"config maps that are not in the file\n"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"ConfigMap"
+msgstr ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. 
" +"dir/kustomization.yaml\n" +"\t\tkubectl apply -k dir/\n" +"\n" +"\t\t# Apply the JSON passed into stdin to a pod\n" +"\t\tcat pod.json | kubectl apply -f -\n" +"\n" +"\t\t# Note: --prune is still in Alpha\n" +"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx " +"and delete all other resources that are not in the file and match label " +"app=nginx\n" +"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +"\n" +"\t\t# Apply the configuration in manifest.yaml and delete all the other " +"config maps that are not in the file\n" +"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +"ConfigMap" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:48 +#, c-format +msgid "" +"\n" +"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used\n" +"\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +"\n" +"\t\t# Auto scale a replication controller \"foo\", with the number of pods " +"between 1 and 5, target CPU utilization at 80%\n" +"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +msgstr "" +"\n" +"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used\n" +"\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +"\n" +"\t\t# Auto scale a replication controller \"foo\", with the number of pods " +"between 1 and 5, target CPU utilization at 80%\n" +"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#: pkg/kubectl/cmd/convert/convert.go:51 +msgid "" +"\n" +"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the " +"latest version\n" +"\t\t# and print to stdout in JSON format.\n" +"\t\tkubectl convert -f pod.yaml --local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f . | kubectl create -f -" +msgstr "" +"\n" +"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the " +"latest version\n" +"\t\t# and print to stdout in JSON format.\n" +"\t\tkubectl convert -f pod.yaml --local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f . | kubectl create -f -" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:41 +msgid "" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" with ResourceName " +"specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --" +"resource-name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with API Group specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs." 
+"extensions\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with SubResource specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods," +"pods/status\n" +"\n" +"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n" +"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/" +"*\n" +"\n" +"\t\t# Create a cluster role name \"monitoring\" with AggregationRule " +"specified\n" +"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example." +"com/aggregate-to-monitoring=true\"" +msgstr "" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" with ResourceName " +"specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --" +"resource-name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with API Group specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs." +"extensions\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with SubResource specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods," +"pods/status\n" +"\n" +"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n" +"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/" +"*\n" +"\n" +"\t\t# Create a cluster role name \"monitoring\" with AggregationRule " +"specified\n" +"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example." +"com/aggregate-to-monitoring=true\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:43 +msgid "" +"\n" +"\t\t# Create a job\n" +"\t\tkubectl create job my-job --image=busybox\n" +"\n" +"\t\t# Create a job with a command\n" +"\t\tkubectl create job my-job --image=busybox -- date\n" +"\n" +"\t\t# Create a job from a cron job named \"a-cronjob\"\n" +"\t\tkubectl create job test-job --from=cronjob/a-cronjob" +msgstr "" +"\n" +"\t\t# Create a job\n" +"\t\tkubectl create job my-job --image=busybox\n" +"\n" +"\t\t# Create a job with a command\n" +"\t\tkubectl create job my-job --image=busybox -- date\n" +"\n" +"\t\t# Create a job from a cron job named \"a-cronjob\"\n" +"\t\tkubectl create job test-job --from=cronjob/a-cronjob" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:44 +msgid "" +"\n" +"\t\t# Create a new resource quota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resource quota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +msgstr "" +"\n" +"\t\t# Create a new resource quota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resource quota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:44 +#, c-format +msgid "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in 
time\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +msgstr "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in time\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:76 +msgid "" +"\n" +"\t\t# Create a pod using the data in pod.json\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in docker-registry.yaml in JSON then create the resource " +"using the edited data\n" +"\t\tkubectl create -f docker-registry.yaml --edit -o json" +msgstr "" +"\n" +"\t\t# Create a pod using the data in pod.json\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in docker-registry.yaml in JSON then create the resource " +"using the edited data\n" +"\t\tkubectl create -f docker-registry.yaml --edit -o json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:43 +msgid "" +"\n" +"\t\t# Create a priority class named high-priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\"\n" +"\n" +"\t\t# Create a priority class named default-priority that is considered as " +"the global default priority\n" +"\t\tkubectl create priorityclass default-priority --value=1000 --global-" +"default=true --description=\"default priority\"\n" +"\n" +"\t\t# Create a priority class named high-priority that cannot preempt pods " +"with lower priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\" --preemption-policy=\"Never\"" +msgstr "" +"\n" +"\t\t# Create a priority class named high-priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\"\n" +"\n" +"\t\t# Create a priority class named default-priority that is considered as " +"the global default priority\n" +"\t\tkubectl create priorityclass default-priority --value=1000 --global-" +"default=true --description=\"default priority\"\n" +"\n" +"\t\t# Create a priority class named high-priority that cannot preempt pods " +"with lower priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\" --preemption-policy=\"Never\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:46 +msgid "" +"\n" +"\t\t# Create a role named \"pod-reader\" that allows user to perform \"get" +"\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a role named 
\"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-" +"name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a role named \"foo\" with API Group specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.extensions\n" +"\n" +"\t\t# Create a role named \"foo\" with SubResource specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status" +msgstr "" +"\n" +"\t\t# Create a role named \"pod-reader\" that allows user to perform \"get" +"\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a role named \"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-" +"name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a role named \"foo\" with API Group specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.extensions\n" +"\n" +"\t\t# Create a role named \"foo\" with SubResource specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:61 +msgid "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +msgstr "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the 
" +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:64 +msgid "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." +"com/bar to svc\n" +"\t\t# svc1:8080 with a tls secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-cert" +"\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\n\t\t\t--annotation ingress.annotation1=foo \\n\t\t\t--" +"annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\n\t\t\t--rule=\"foo." +"com/=svc:port\" \\n\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\n\t\t\t--rule=\"foo." +"com/path*=svc:8080\" \\n\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\n\t\t --rule=\"foo.com/" +"=svc:https,tls\" \\n\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\n\t\t --rule=\"foo." +"com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\n\t\t --default-" +"backend=defaultsvc:http \\n\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." 
+"com/bar to svc\n" +"\t\t# svc1:8080 with a tls secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-cert" +"\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\n\t\t\t--annotation ingress.annotation1=foo \\n\t\t\t--" +"annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\n\t\t\t--rule=\"foo." +"com/=svc:port\" \\n\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\n\t\t\t--rule=\"foo." +"com/path*=svc:8080\" \\n\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\n\t\t --rule=\"foo.com/" +"=svc:https,tls\" \\n\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\n\t\t --rule=\"foo." +"com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\n\t\t --default-" +"backend=defaultsvc:http \\n\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:74 +msgid "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" +msgstr "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# 
Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:74 +msgid "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. " +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" +msgstr "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. 
" +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:51 +msgid "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name)\n" +"\t\tkubectl describe pods frontend" +msgstr "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name)\n" +"\t\tkubectl describe pods frontend" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:76 +msgid "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" +msgstr "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:138 +msgid "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication controller, replica set, job, daemon set or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" +msgstr "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication controller, replica set, job, daemon set or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:55 +msgid "" +"\n" +"\t\t# Edit the service named 
'docker-registry'\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +msgstr "" +"\n" +"\t\t# Edit the service named 'docker-registry'\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:44 +msgid "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:48 +msgid "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. 
-i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. -i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:65 +msgid "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" 
+"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." +"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # Kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"" +msgstr "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" +"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." +"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # Kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:105 +msgid "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." 
+"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7" +msgstr "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." +"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:72 +msgid "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" +msgstr "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod 
selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:87 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" +msgstr "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:58 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" +msgstr "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:83 +msgid "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +msgstr "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all 
commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:44 +msgid "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" +msgstr "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:56 +msgid "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" +msgstr "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:53 +msgid "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a 
deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" +msgstr "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:49 +msgid "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" +msgstr "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:75 +msgid "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a 
file\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:62 +msgid "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --env=" +"\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and \"env=prod" +"\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --labels=" +"\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... 
" +msgstr "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --env=" +"\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and \"env=prod" +"\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --labels=" +"\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... " + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:73 +msgid "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:47 +msgid "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" +msgstr "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:74 +msgid "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from an env file\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/bar.env" +msgstr "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from an env file\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/bar.env" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:45 +msgid "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx 
image with 3 " +"replicas\n" +"\tkubectl create deployment my-dep --image=nginx --replicas=3\n" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image and expose " +"port 5701\n" +"\tkubectl create deployment my-dep --image=busybox --port=5701" +msgstr "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx image with 3 " +"replicas\n" +"\tkubectl create deployment my-dep --image=nginx --replicas=3\n" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image and expose " +"port 5701\n" +"\tkubectl create deployment my-dep --image=busybox --port=5701" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:351 +msgid "" +"\n" +"\t# Create a new ExternalName service named my-ns\n" +"\tkubectl create service externalname my-ns --external-name bar.com" +msgstr "" +"\n" +"\t# Create a new ExternalName service named my-ns\n" +"\tkubectl create service externalname my-ns --external-name bar.com" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:50 +msgid "" +"\n" +"\t# Set deployment nginx-deployment's service account to serviceaccount1\n" +"\tkubectl set serviceaccount deployment nginx-deployment serviceaccount1\n" +"\n" +"\t# Print the result (in YAML format) of updated nginx deployment with the " +"service account from local file, without hitting the API server\n" +"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-" +"run=client -o yaml\n" +"\t" +msgstr "" +"\n" +"\t# Set deployment nginx-deployment's service account to serviceaccount1\n" +"\tkubectl set serviceaccount deployment nginx-deployment serviceaccount1\n" +"\n" +"\t# Print the result (in YAML format) of updated nginx deployment with the " +"service account from local file, without hitting the API server\n" +"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-" +"run=client -o yaml\n" +"\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:42 +msgid "" +"\n" +"\tCreate a deployment with the specified name." +msgstr "" +"\n" +"\tCreate a deployment with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:61 +msgid "" +"\n" +"\tCreate an ingress with the specified name." +msgstr "" +"\n" +"\tCreate an ingress with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." 
+msgstr "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:44 +msgid "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." +msgstr "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:39 +msgid "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." +msgstr "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:43 +msgid "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" +msgstr "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:40 +msgid "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." +msgstr "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:68 +msgid "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), replicaset (rs)" +msgstr "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), replicaset (rs)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:63 +msgid "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." +msgstr "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. 
The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:233 +msgid "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" +msgstr "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:274 +msgid "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:95 +msgid "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " 
+"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" +msgstr "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " +"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:230 +msgid "" +"\n" +" Create a ClusterIP service with the specified name." +msgstr "" +"\n" +" Create a ClusterIP service with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Create a LoadBalancer service with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:271 +msgid "" +"\n" +" Create a NodePort service with the specified name." +msgstr "" +"\n" +" Create a NodePort service with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:93 +msgid "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." +msgstr "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:40 +msgid "" +"\n" +" Display addresses of the control plane and services with label kubernetes." +"io/cluster-service=true.\n" +" To further debug and diagnose cluster problems, use 'kubectl cluster-info " +"dump'." +msgstr "" +"\n" +" Display addresses of the control plane and services with label kubernetes." +"io/cluster-service=true.\n" +" To further debug and diagnose cluster problems, use 'kubectl cluster-info " +"dump'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:49 +msgid "" +" environment variable is set, then it is used as a list of paths (normal " +"path delimiting rules for your system). These paths are merged. When a value " +"is modified, it is modified in the file that defines the stanza. When a " +"value is created, it is created in the first file that exists. If no files " +"in the chain exist, then it creates the last file in the list.\n" +"\t\t\t3. Otherwise, " +msgstr "" +" environment variable is set, then it is used as a list of paths (normal " +"path delimiting rules for your system). These paths are merged. When a value " +"is modified, it is modified in the file that defines the stanza. When a " +"value is created, it is created in the first file that exists. If no files " +"in the chain exist, then it creates the last file in the list.\n" +"\t\t\t3. Otherwise, " + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:48 +msgid "" +" flag is set, then only that file is loaded. The flag may only be set once " +"and no merging takes place.\n" +"\t\t\t2. If $" +msgstr "" +" flag is set, then only that file is loaded. The flag may only be set once " +"and no merging takes place.\n" +"\t\t\t2. If $" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:50 +msgid " is used and no merging takes place." +msgstr " is used and no merging takes place." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L61 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L60 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L63 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L106 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. 
If empty (the default) infer the selector from " +"the replication controller or replica set.)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L111 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:178 +msgid "Allocate a TTY for the debugging container." +msgstr "Allocate a TTY for the debugging container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L119 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:173 +msgid "Annotations to apply to the pod." +msgstr "Annotations to apply to the pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:173 +msgid "Apply a configuration to a resource by file name or stdin" +msgstr "Apply a configuration to a resource by file name or stdin" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Approve a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:106 +msgid "" +"Attach to a process that is already running inside an existing container." +msgstr "" +"Attach to a process that is already running inside an existing container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/attach.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Attach to a running container" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:107 +msgid "" +"Auto-scale a deployment, replica set, stateful set, or replication controller" +msgstr "" +"Auto-scale a deployment, replica set, stateful set, or replication controller" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L115 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole this ClusterRoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole this RoleBinding should reference" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:32 +msgid "Commands for features in alpha" +msgstr "Commands for features in alpha" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:170 +msgid "Container image to use for debug container." +msgstr "Container image to use for debug container." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:166 +msgid "Container name to use for debug container." +msgstr "Container name to use for debug container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/convert.go#L67 +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Convert config files between different API versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:105 +msgid "Copy files and directories to and from containers" +msgstr "Copy files and directories to and from containers" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cp.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Copy files and directories to and from containers." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:248 +msgid "Create a ClusterIP service" +msgstr "Create a ClusterIP service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:323 +msgid "Create a LoadBalancer service" +msgstr "Create a LoadBalancer service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:286 +msgid "Create a NodePort service" +msgstr "Create a NodePort service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L214 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Create a TLS secret" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:81 +msgid "Create a cluster role" +msgstr "Create a cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:87 +msgid "Create a cluster role binding for a particular cluster role" +msgstr "Create a cluster role binding for a particular cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:124 +msgid "Create a config map from a local file, directory or literal value" +msgstr "Create a config map from a local file, directory or literal value" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:167 +msgid "Create a copy of the target Pod with this name." +msgstr "Create a copy of the target Pod with this name." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:90 +msgid "Create a cron job with the specified name" +msgstr "Create a cron job with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:100 +msgid "Create a deployment with the specified name" +msgstr "Create a deployment with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:91 +msgid "Create a job with the specified name" +msgstr "Create a job with the specified name" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Create a namespace with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:95 +msgid "Create a pod disruption budget with the specified name" +msgstr "Create a pod disruption budget with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:92 +msgid "Create a priority class with the specified name" +msgstr "Create a priority class with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:91 +msgid "Create a quota with the specified name" +msgstr "Create a quota with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:106 +msgid "Create a resource from a file or from stdin" +msgstr "Create a resource from a file or from stdin" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:89 +msgid "Create a role binding for a particular role or cluster role" +msgstr "Create a role binding for a particular role or cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:161 +msgid "Create a role with single rule" +msgstr "Create a role with single rule" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L143 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Create a secret for use with a Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:137 +msgid "Create a secret from a local file, directory, or literal value" +msgstr "Create a secret from a local file, directory, or literal value" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L34 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Create a secret using specified subcommand" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:50 +msgid "Create a secret using specified subcommand." +msgstr "Create a secret using specified subcommand." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Create a service account with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:48 +msgid "Create a service using a specified subcommand" +msgstr "Create a service using a specified subcommand" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:49 +msgid "Create a service using a specified subcommand." +msgstr "Create a service using a specified subcommand." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:363 +msgid "Create an ExternalName service" +msgstr "Create an ExternalName service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:145 +msgid "Create an ingress with the specified name" +msgstr "Create an ingress with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:60 +msgid "Create and run a particular image in a pod." +msgstr "Create and run a particular image in a pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:149 +msgid "Create debugging sessions for troubleshooting workloads and nodes" +msgstr "Create debugging sessions for troubleshooting workloads and nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:137 +msgid "" +"Delete resources by file names, stdin, resources and names, or by resources " +"and label selector" +msgstr "" +"Delete resources by file names, stdin, resources and names, or by resources " +"and label selector" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Delete the specified cluster from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:43 +msgid "Delete the specified cluster from the kubeconfig." +msgstr "Delete the specified cluster from the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Delete the specified context from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:43 +msgid "Delete the specified context from the kubeconfig." +msgstr "Delete the specified context from the kubeconfig." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:64 +msgid "Delete the specified user from the kubeconfig" +msgstr "Delete the specified user from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:65 +msgid "Delete the specified user from the kubeconfig." +msgstr "Delete the specified user from the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L121 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Deny a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Describe one or many contexts" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:142 +msgid "Diff the live version against a would-be applied version" +msgstr "Diff the live version against a would-be applied version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:65 +msgid "Display cluster information" +msgstr "Display cluster information" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Display clusters defined in the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:42 +msgid "Display clusters defined in the kubeconfig." 
+msgstr "Display clusters defined in the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "Display merged kubeconfig settings or a specified kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:50 +msgid "Display one or many contexts from the kubeconfig file." +msgstr "Display one or many contexts from the kubeconfig file." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/get.go#L107 +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Display one or many resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:50 +msgid "Display resource (CPU/memory) usage" +msgstr "Display resource (CPU/memory) usage" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:81 +msgid "Display resource (CPU/memory) usage of nodes" +msgstr "Display resource (CPU/memory) usage of nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:100 +msgid "Display resource (CPU/memory) usage of pods" +msgstr "Display resource (CPU/memory) usage of pods" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:51 +msgid "Display the current-context" +msgstr "Display the current-context" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:60 +msgid "Display users defined in the kubeconfig" +msgstr "Display users defined in the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:61 +msgid "Display users defined in the kubeconfig." +msgstr "Display users defined in the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L176 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparation for maintenance" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:74 +msgid "Dump relevant information for debugging and diagnosis" +msgstr "Dump relevant information for debugging and diagnosis" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Edit a resource on the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:67 +msgid "Edit latest last-applied-configuration annotations of a resource/object" +msgstr "" +"Edit latest last-applied-configuration annotations of a resource/object" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L159 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email for Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:169 +msgid "Environment variables to set in the container." +msgstr "Environment variables to set in the container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/exec.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Execute a command in a container" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:90 +msgid "Execute a command in a container." +msgstr "Execute a command in a container." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:115 +msgid "Experimental: Wait for a specific condition on one or many resources" +msgstr "Experimental: Wait for a specific condition on one or many resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:378 +msgid "External name of service" +msgstr "External name of service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/portforward.go#L75 +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Forward one or more local ports to a pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:79 +msgid "Get documentation for a resource" +msgstr "Get documentation for a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/help.go#L36 +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Help about any command" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:151 +msgid "" +"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." +msgstr "" +"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/annotate.go#L135 +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L132 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:164 +msgid "" +"If specified, everything after -- will be passed to the new container as " +"Args instead of Command." +msgstr "" +"If specified, everything after -- will be passed to the new container as " +"Args instead of Command." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:198 +msgid "If true, run the container in privileged mode." +msgstr "If true, run the container in privileged mode." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:174 +msgid "If true, suppress informational messages." +msgstr "If true, suppress informational messages." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:165 +msgid "" +"If true, wait for the container to start running, and then attach as if " +"'kubectl attach ...' were called. 
Default false, unless '-i/--stdin' is " +"set, in which case the default is true." +msgstr "" +"If true, wait for the container to start running, and then attach as if " +"'kubectl attach ...' were called. Default false, unless '-i/--stdin' is " +"set, in which case the default is true." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:173 +msgid "" +"Keep stdin open on the container(s) in the pod, even if nothing is attached." +msgstr "" +"Keep stdin open on the container(s) in the pod, even if nothing is attached." + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:90 +msgid "List all visible plugin executables on a user's PATH" +msgstr "List all visible plugin executables on a user's PATH" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:54 +msgid "Manage the rollout of a resource" +msgstr "Manage the rollout of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Mark node as schedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Mark node as unschedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_pause.go#L73 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Mark the provided resource as paused" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L35 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Modify certificate resources." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Modify kubeconfig files" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L110 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:43 +msgid "No alpha commands are available in this version of kubectl" +msgstr "No alpha commands are available in this version of kubectl" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/completion.go#L97 +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Output shell completion code for the specified shell (bash or zsh)" + +#: pkg/kubectl/cmd/convert/convert.go:105 +msgid "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." 
+msgstr "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L157 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Password for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L226 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Path to PEM encoded public key certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L227 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Path to private key associated with given certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L82 +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Print the client and server version information" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:74 +msgid "" +"Print the client and server version information for the current context." +msgstr "" +"Print the client and server version information for the current context." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Print the list of flags inherited by all commands" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L86 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Print the logs for a container in a pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:97 +msgid "Print the supported API resources on the server" +msgstr "Print the supported API resources on the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:98 +msgid "Print the supported API resources on the server." +msgstr "Print the supported API resources on the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:58 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" +msgstr "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:59 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." +msgstr "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:62 +msgid "Provides utilities for interacting with plugins" +msgstr "Provides utilities for interacting with plugins" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:45 +msgid "Rename a context from the kubeconfig file" +msgstr "Rename a context from the kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:115 +msgid "Replace a resource by file name or stdin" +msgstr "Replace a resource by file name or stdin" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:87 +msgid "Restart a resource" +msgstr "Restart a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_resume.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Resume a paused resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L56 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Role this RoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L94 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Run a particular image on the cluster" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/proxy.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Run a proxy to the Kubernetes API server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L161 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Server location for Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_cluster.go:73 +msgid "Set a cluster entry in kubeconfig" +msgstr "Set a cluster entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_context.go:61 +msgid "Set a context entry in kubeconfig" +msgstr "Set a context entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:114 +msgid "Set a new size for a deployment, replica set, or replication controller" +msgstr "" +"Set a new size for a deployment, replica set, or replication controller" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_authinfo.go:152 +msgid "Set a user entry in kubeconfig" +msgstr "Set a user entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:74 +msgid "Set an individual value in a kubeconfig file" +msgstr "Set an individual value in a kubeconfig file" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Set specific features on objects" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/use_context.go:52 +msgid "Set the current-context in a kubeconfig file" +msgstr "Set the current-context in a kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:101 +msgid "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file" +msgstr "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_selector.go#L81 
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "Set the selector on a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/describe.go#L80 +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Show details of a specific resource or group of resources" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_status.go#L57 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Show the status of the rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Synonym for --target-port" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:134 +msgid "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes service" +msgstr "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "The image for the container to run." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L116 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:172 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:112 +msgid "" +"The maximum number or percentage of unavailable pods this budget requires." +msgstr "" +"The maximum number or percentage of unavailable pods this budget requires." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"The minimum number or percentage of available pods this budget requires." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L113 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "The name for the newly created object." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L98 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L99 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "The network protocol for the service to be created. Default is 'TCP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:182 +msgid "The port that this container exposes." +msgstr "The port that this container exposes." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L131 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L130 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:190 +msgid "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]." +msgstr "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L87 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "The type of secret to create" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:33 +msgid "" +"These commands correspond to alpha features that are not enabled in " +"Kubernetes clusters by default." +msgstr "" +"These commands correspond to alpha features that are not enabled in " +"Kubernetes clusters by default." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:150 +msgid "" +"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. " +"Default is 'ClusterIP'." +msgstr "" +"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. " +"Default is 'ClusterIP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_undo.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "Undo a previous rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:59 +msgid "Unset an individual value in a kubeconfig file" +msgstr "Unset an individual value in a kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:154 +msgid "Update environment variables on a pod template" +msgstr "Update environment variables on a pod template" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:115 +msgid "Update fields of a resource" +msgstr "Update fields of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_resources.go#L101 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Update resource requests/limits on objects with pod templates" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Update the annotations on a resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:110 +msgid "Update the image of a pod template" +msgstr "Update the image of a pod template" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L109 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "Update the labels on a resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:102 +msgid "Update the service account of a resource" +msgstr "Update the service account of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/taint.go#L88 +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Update the taints on one or more nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:99 +msgid "" +"Update the user, group, or service account in a role binding or cluster role " +"binding" +msgstr "" +"Update the user, group, or service account in a role binding or cluster role " +"binding" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L155 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Username for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_history.go#L51 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "View rollout history" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:77 +msgid "" +"View the latest last-applied-configuration annotations of a resource/object" +msgstr "" +"View the latest last-applied-configuration annotations of a resource/object" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:171 +msgid "" +"When used with '--copy-to', a list of name=image pairs for changing " +"container images, similar to how 'kubectl set image' works." 
+msgstr "" +"When used with '--copy-to', a list of name=image pairs for changing " +"container images, similar to how 'kubectl set image' works." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:168 +msgid "When used with '--copy-to', delete the original Pod." +msgstr "When used with '--copy-to', delete the original Pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:176 +msgid "" +"When used with '--copy-to', enable process namespace sharing in the copy." +msgstr "" +"When used with '--copy-to', enable process namespace sharing in the copy." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:175 +msgid "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." +msgstr "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:177 +msgid "" +"When using an ephemeral container, target processes in this container name." +msgstr "" +"When using an ephemeral container, target processes in this container name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L45 +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:108 +msgid "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." +msgstr "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "dummy restart flag)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:107 +msgid "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." +msgstr "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cmd.go#L217 +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl controls the Kubernetes cluster manager" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:45 +msgid "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" +msgstr "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:109 +msgid "" +"preemption-policy is the policy for preempting pods with lower priority." +msgstr "" +"preemption-policy is the policy for preempting pods with lower priority." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:41 +msgid "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" +msgstr "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:106 +msgid "the value of this priority class." +msgstr "the value of this priority class." 
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..5a22e42887671a565d9af384359c088ddcaf69a7 GIT binary patch literal 1233 zcmZ`&yKWOf6kQ-ZWI7rekVXgz0vcmmJi>-VByoZ)#llJ)pr9G=-d#`FomtJyqC`|w zd;uK=qQXDG54eGbnu?#GgN8F+2OCgUIy#;^_ug}7=J@-Cg|`8|bHF9wIdC3$2l($Z z&;Y(n`6u`?aPveE+yHmLYhVYy2>t}V3jPXy1N;Cu)~`+lK^;5-Ujcsu`}i;LQ}E(K z5HRp-u+RGdMrnZU;4E+&Kxp!vobs6|``LpwW#ovtAu(G}Boj$7i?Y%>GAuDVpKQG- z2NY?Q$n=;3tsEV=V+w3!LX>=m(?UV>0cJK>~O=%&!kkkzG$DCilz zPm$xDm_H$7krA>K!Z-#`HKzlS7ih+Zlx4cMLxnbucWo+TzFwx5_jSlNyK9~8TJJI6 zp_kIP;B5$ptkr1V6^r*m`L&-h)B4 ztjG0%4?IkBA+vg<^Hr3pv=k}vK8ZYJf7T(MsMhUtH)m&P)*H1A{~QWphXtnbN4kYq zk!qPmLt${eD|`JdzBJ48LnUPRriyTo+r*s9ii zcXFK@Ro{?RjeA*XM8>zY$!*9~F\n" +"Language-Team: \n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.6.10\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Supprimer le cluster spécifié du kubeconfig" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Supprimer le contexte spécifié du kubeconfig" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Décrire un ou plusieurs contextes" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Afficher les cluster définis dans kubeconfig" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "" +"Afficher les paramètres fusionnés de kubeconfig ou d'un fichier kubeconfig " +"spécifié" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Modifier des fichiers kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Mettre à jour les annotations d'une ressource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go#L98 +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "" +#~ "Appliquer une configuration à une ressource par nom de fichier ou depuis " +#~ "stdin" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48 +#~ msgid "Displays the current-context" +#~ msgstr "Affiche le contexte actuel" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67 +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr 
"Définit un cluster dans kubeconfig" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57 +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "Définit un contexte dans kubeconfig" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103 +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "Définit un utilisateur dans kubeconfig" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59 +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "Définit une valeur individuelle dans un fichier kubeconfig" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48 +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "Définit le contexte courant dans un fichier kubeconfig" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47 +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "Supprime une valeur individuelle dans un fichier kubeconfig" + +#~ msgid "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgid_plural "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgstr[0] "" +#~ "watch n'est compatible qu'avec les ressources individuelles et les " +#~ "collections de ressources. - %d ressource a été trouvée. " +#~ msgstr[1] "" +#~ "watch n'est compatible qu'avec les ressources individuelles et les " +#~ "collections de ressources. - %d ressources ont été trouvées. " diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..afe881717e17b7ab914289896ad3f4d0a9ef067f GIT binary patch literal 20017 zcmc(nYm6jUb;rvwuN}<8#5}^YYy-24r+Xj9_OkW@v$JD7_Uw9?S(`Vern{!6;_j}d zs(N;3lY9W70O3Od5sDH-Vw8M9B8s925<)^=4nYb?QKTe7D2jpzA|Ox_A*2K)D8K)? z_f}W;%&gZ2iI!{6R`-3Jd*0{Vv;LRoJn5qmpDmsj^Q>MQMIQwI{8RXY&m&KZqRYTf z06z_U@u4WX3;6fI4d659-1{c5b4?We7Vy)+Yp;u@VOq$Ht+=Sr9TrzY#F@~_$puuYy*GAzyCS#81LWq@I^0l@2f!3;TM3~?-8Kp z{~2%*_$~kaEia0qBfPHwHLe7*RPQda1Mro=?*c{d8*X%Xp90>@dk4r*(ffgtgHHlq1N@?YzXoFI z+%Zt&PXXTxd?)Z8;Mag!|5f~$0^bPy2Jnx7%fOFw*aY}>;Fo~QAn|v=>zO1u{~A!| z{~_=+@OdCF`dk2Vv}hNo^?w~GI{XC?k&3JB9r%9U-vV?0 z68JUXBJj6aWDWQYp!U0cA&Nf4_zL(xKnao7`2l~hMD!|z=r-U6Q2T!nC^~-zIM4WR z0^iB|vk*$j?GSi`@Ba<>A>N<)YGe)g8^HhM`xmg-oxC5qBZ@}AW#Bu2e+cXYZ#apJ z0N(@rTi{(V<1@ggo^ta2d7#ey9iZs+5R3gH@O?nOik^;9ke=KP)cGz0K{5I)@HxP5 z0VUV}3)Hw*G6>Y7w*qy}Uj|CwzTn~afI9b$uZ^M?0nY(NmkhWD{0Q(Rz^?-#VRX&w z7z=CzUkqFWN^S-~&HFG=>wFF;h%4z6nG`qaOm^70Ixck$@cb)f$9 z`TR{d>HPX|1p6%Vr({7N(L8(*8Iw)Pr9L-0V1M4kpNIWBM=@IKRWb@mBE-hdcLcb} z^H!eM^E{VliwE-C2k|xg@N6I|VxQ|h>;T`%BfhXFe4Y~i0(^)^vi}Yq)I7qJ)Sq?#x2FLmCr{@=B%|wi z!smYea-K(WKgT0Ih~^F*isPH(jqUtmJV>h|>y+_UUc^bi9}n|xTH1hz$D3)V>c>?+ zvTx@OoEG=8opk61Y3bRS@%j7DE7`LW$%gSL=j$skn8f8M?POb7$1S5xk~nN}z?yB% zX?JoE%Z6!DI;7W%Y*@ux{iIjM*>F28vMTMyo4ZD}PCgibZxhRWq*F~z)|or~j1$}8 z%Cy)?iNMnHw85f1d2!r%e)xgpZZ6c%z<#;s83!$^PwoA~)*UiR6fH9bN!Udo4% z+_<=v^$@>)TE?4cbumR%y4luNYD{i&tR0`ptL^wLi|1BZK=Oa@#{K6d7lQ;vND7pq z(@95gx-IftkUTlbi>u^e*4HjVb%7lUv^Oty7h*K~f%Vlm5pPH1YQg9REeyS^;@c|s zo3JPv;zcFsj-x}Xm5(c?2SGAbXvZsPZBq2m=rAiQu>@6Pa$c~keHG+%L9%VzvvWVG z(z2dj9}E_l0J>e{iy6jg6ZS|A!H9`v)#XPCtSIJB(2nmPLX~6~kIOV}<+0O4FknV! 
zI~n#;&GoBd>#A&M_NQ%3D^jq*MAQPw#<0Y+%vlol){oP}i@cw@k!3dQ@%LifpBW-; z&t%p*oqUXTUuoTQN%63mQ!%aVY^U8ZV(3?#`f)s*{dvP1S5MY{QdX^GG{Wq3r2xp{ zxNrtH8Nw9%D}uQjmuUqpde~eM2#M>sb2F*J9F@*?O2XPEd+-@UijAmk#ufJ3OttiQ z)P=k#ZV5&6ac?`G4@O-m?#8XH_%7}6+WGh*gNpHRfxSd3aoVP9on*KhqbV5AQBpX} z#G!oiXOVevM(%B?=4E=Z!A~)-8xM9THb=t)8XY%j!v5yK{T7~OYrVYxq2>9#&nA9E z5D0dfnZthJ5W{%+Qk53Nq<bRCPwDZ@c}Dlc!D|J9#W@dUiY*#hnlXII3XlWS2nN z8NKPP+?ZB2+{)uFv*$eeS6=C;=y0^kk9x|tY>z9i49?DSwrc-!`hazsI9Onb+*%-E zGIBM?U^6C8^ps%FvBl0sXosc4Th+R13m<@uCf2N(r577D7V(Z}LBm_+E=7mViYiC-9F2PZDZ5ThvmccD!k#a_h@Uag+^7K&Ta!HCly7Lh#9 zk6`*RkVdx}(XApM*f&Lr7xm^&P7$ZyXE3Mszepn6N20}UH&g7N^e5(ZWv#ZZhZPEu zEhIh1r2CMwVwe&H9WkS95uPVzZRuU0CE~J@JTGdQ- z3V!ZTh!}pFtpux=lI56R4%TXA4IT_3MxQ9y1)(|NT7e0!5u42NZr4IVVaR{OeAqJb zE-0Mf3*^JFt(Oti_b##Sgu+Xo#(1HYa3D-cyn`?iK#Z~nUE1XGI?x@-M<{f|JXgy2 zWH8IDH&nnNVI$Ow7E65-?~-B0`Nd(p$*ZyQ%jGAzBrQ&yP``;T-EPF9v>F$opj~Vk3YwWmh#cA z&xVxY1V566kfa-%!qwEn8`tLa!s-U+r_4pI`pZUb$YWVr-6 z168HQQ74m41nSM2m;{FeDuv2Csz_<`5mq+wsf%3AfFDUr6puyASTr^YQx#PP#Y&54 znW24S`z)`QFQpv}sR-utXg1PIP~E%p;-Xw%Krkls)7T3AC(K_RhGU_HZK@J0rED4$9;G%Omg&kC< z;eo;^%$WamMhD91yfQinJsb~+1Zr83fCWLW1rlx$mI;sGj=TwXc2vP1GSxwVh>ffQ zne^L{u??#^E`~@i1k!Ja5%T!@Y z-0q~UDg*bU?gfq9m3tE@*wd;-c$k#wXx9kKCz`irwump6+ee}` z9OcCv?bGlX)s=HZ0xKVoG1et@Ys=^2bf`_b@o2nBz!pE8PO+q2kENEhqjPHjTjiaM zx32RGu4>ufB-5y<<@ilIhENK;4W+Mkyf=QCRjn`apk>2Fw!I+?Wv%#?(bf zs{hpW#_&r!I%8Ja0H;YlkC8>5oXiT_z9QgiU$wT>jE7u3Lh+G)4_~sdE>>)1vi7kR z&h%E0FW?`dja^*A?!bsm4Zy0WEopmnfl-byVua>26hBuvtlbzhQpsYJBjOVWQjfQU zp`UJ1by1q5{GgUJE8!>$L0KnZh|d=rlIF7_RKow}sV_7KIIh8X5R4Y+j#6%I@S9>Q zw=6sOT-7YCc;t3>c_DSb#u(ak^G5>UsJ2FY(#O;`PpVIRrRt;*x z4hkE%%**i?IN-rhyH3UhW(fIUDOJ6Mz9~A8l9xw~R!zg~957xdru3#taau-xwkSV|Z*5Y;VmAi) zKy%unH-1-U4x{A_d=jL{-Y&nR2at-|D$RJ6*|6imjLQXo#25>!8@K39W+Na8)}9+P zlNY=HL-sIIF26@q=Uej%mbl`vn=CSX&R1N_$W0wF3DoKYd6(rW(-fWUUD*X!o(q?J z5oO=Ot}Gkw#NARKi7e3M0$&;eiZU=${xsyBt`=1g3*@>#X{~fyzPNJAaynO>np;n$ z@vU=ZFYC6>jC*D4LVhYlB9ap_L1B=T^=wPmXrHeQa(?S?>^(xFtA+xLvA_T5rLfx+*M4*G@lh z;qKPS$vB}-t)#WA5EJ`#Dn7ZHRdZ{7F4g)i?9TvSFdW%ydHTei+;r%d(}#!gji=+| zNA8?^bL$PR-lpTP<*72)uE>3vGg2$eIq}i%6HiCYU5)8DuilgcT}7i0y$f zQ!O4(Xx+dfWNPoa%^R25mXF!pL_rv@6G)cIao!ZDP^sBxk1KS3?6(J`U<*$RlLB|m zVXojbH^!@vyZHn2Eb-z(TXI^*3QkR=!(AGuFf$YJf!pnPBOi)pD&JFAHoYbvP>$z1 z9b8G?Iwe*tw?;;DF>Vgso78)FPMS`KM_;9o>wyng3VC1gFsZI0^Goaum~Yh zG|py1fknK0)|mpeXz->eWm*%i1N|s)XF00$P8|#j-(%qpP399n`pH^^F|@+D6<6Xyo>;TS8a>ftB3Yg{O#Hv!$UH z^>~dg#<^pInKPBrav9N7pdRQJ4pDk&9Ju20^8JLeX%8c5qJg9s^9lZYMM9NwQym>L zKTD0V5?pHr^rOX~y)#ybn4vCJN*|SpBB&0AW}e0nQrSt%i4m(6?&v|r%|&uKCjrd> zzk}Bm*STma3B|F)h#9BDMROD4OuY%U8c>!pxK%qYIF|H!S#E)w`kAf@2O|XPcW@WV zRu1hXK`fa<#W7}5Eh%L>ybOANOQ@CfEB;hFia&qe;|`QE9Fz^M1SS>ZdBJbAOA}6N7AeC50EUq_csH z=0P+-Eq+(-Ntx;jxyp5)V3&xq^N+9h1#t^OSgpa7d~uLhcf2ci!AO{;gS2W8&(&7N zE&l{hSubOEe`><~QF~#g?8KCi9b_jcBcS_bMt_LJLhUe^;8r(j?PLW$(IShfwiu~v zm-G>3op=zd)y35H%MN7nCYib&U7zw1rp~@W<}o&u_ObAYVpg5noR;>gXNDldZLc_* zHXU$ayJpV}tGj5jZY#mNHO!DR1>i;H&3Dfz3Y-K**eRJjnJX?*$epu;I&0dZT^MC_ zayuWmVNib;c<$MfYVCdvJk&CX^>(Z?>98+M9)Sa0b*?FRXrO=_Kd5;o{2&>ocV?xw z_w-~0b1yeb%WQ_@r1@gl;9ui9yfEXwSudhC%be9Pu~k$8j2v5>8TwdYh@_%M5JQuI-IfyECeAzrt=*0n77Zj|^U~#WD&H{@Y zKZIVK>G0>^saouyIWCKPLTP8I&Kx6$<*PCr{$CJ?gf2yu{i!L=={UMOI7Fyox^wEU zY9FHU`z1g1NMAr^k09buQm@5v$fvmTwM=M{sG`V_|ahVx?UgCR|3g()1=n5O-4bXihzG4Ae z&`@B7KzQQB4DGZ+4v_$nlm&?ny$7>WHk^o8shXE$7Y+N~9Y``6&pwdT#GvtIfy zL7l_n9mugw1a+#3N5do`O=(-}%GhTtO2^CZfemd8T&S6P9yZOkKr_7v3&U3%(1vmOT+TzQtj)Zk_!c}ZnV`decD|FF6N0Su$Sj=)&QZj?in_15x47t&Px*BDo8{nP{yZdBYt zgo2UMm8iB-Hxl+4MR(T|`fm1~LvrRik(^b+e!~SxqFH%Avp-Mv!qwShU%7<6xL`u^ 
zY)AC54)wgjUe8?B8$_Kyb$3p+!mV3X6&5xsJGNLOh>B_#qV6U&C{dA=a!?!XssCNh z)O%Pz*0xhUls+XNQbLmZrdpMvwY-!Vy9+T3I2faYpmpo6*|-jDg7v@+Qd^<~hW;sd z4Yt93LX}iCo_zcND?9 z9G^FNk(F^q@XRX)$HiC$VYSpYuL<;n;z2@<6>8qjv=aR3X!_@=Vw!Fm%$N#GT@zV_ zS}7vY7u@j}gbpXKfV9Ad+ZY`xwWE3iHo4K{q=3c3=1yu&PL$2-SG72;&X0$VkM7?# z6Ku+&V?}aBA(o4Z-SStUR7c<_D@{&}s0)rseA9$K$xRDfb>oUjLF`t@GheZYvJ7iX zSIZM*3^uFbf>?J7O;H=gwZ12$8bnB4_JnF8hfD0Pw65!gG8xvPbV4{#AIyBMy&>wc zWmirN8@%C$NH-`8%$i|v60Nv4B2{vod3V{k`M?l6NWHx5Sytd8UTQOo+?JMfnq*WE z4(x>D(uh+)(KrAvJ<73wls)IuZ*lPsFwRghEQO!j6MH9}FI&v!%3+CBcw zd5sUkh=qeys1D?JOPMKfhf*}7P*&G|VvjFExMQnzkY7;3$-&Xb_MY>`bX%xf3fZCg zD@%9}9aB&E)WHG&cn^Ldb0!y~h$zZVdC}vaU=7WS%Gv3%69qw3Y2~pt(nWvyG(#Z^ z%2U@>g_bqFhWKOFEA&-G^%~8D!ia8;bn?3|Ax^uOdNw6)j63~Q)OZ?ss+v(;#A@M{ z(=yV7>0nFlwT`xYsMW|X5i507!NI2QIjeu`75s=27}sf~t_Fq7<|X7Dl$%MK-6Wl%0)mY~%Ov9UcahzH8?BNZTHV-}Qq_@2QGoup XDN;wV%C22iaaJ@{C+b-vT8RE1AL5_v literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po new file mode 100644 index 000000000..ca119f64f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po @@ -0,0 +1,3249 @@ +# Italian translation. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR evolution85@gmail.com, 2017. +# +msgid "" +msgstr "" +"Project-Id-Version: kubernetes\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2017-08-28 15:20+0200\n" +"Last-Translator: Luca Berton \n" +"Language-Team: Luca Berton \n" +"Language: it_IT\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"X-Generator: Poedit 1.8.7.1\n" +"X-Poedit-SourceCharset: UTF-8\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Mostra metriche per tutti i nodi\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Mostra metriche per un determinato nodo\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Ottieni la documentazione della risorsa e i relativi campi\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Ottieni la documentazione di un campo specifico di una risorsa\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Stampa i flag ereditati da tutti i comandi\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Stampa le versioni client e server per il current context\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" 
+"\n" +"\t\t# Stampa le versioni API supportate\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Mostra metriche di tutti i pod nello spazio dei nomi predefinito\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Mostra metriche di tutti i pod nello spazio dei nomi specificato\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Mostra metriche per un pod e i suoi relativi container\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Mostra metriche per i pod definiti da label name = myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvertire i file di configurazione tra diverse versioni API. Sono\n" +"\t\taccettati i formati YAML e JSON.\n" +"\n" +"\t\tIl comando prende il nome di file, la directory o l'URL come input e lo " +"converte nel formato\n" +"\t\tdi versione specificata dal flag -output-version. Se la versione di " +"destinazione non è specificata o\n" +"\t\tnon supportata, viene convertita nella versione più recente.\n" +"\n" +"\t\tL'output predefinito verrà stampato su stdout nel formato YAML. Si può " +"usare l'opzione -o\n" +"\t\tper cambiare la destinazione di output." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCreare un namespace con il nome specificato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tCrea un ruolo con una singola regola." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCreare un service account con il nome specificato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tContrassegna il nodo come programmabile." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tContrassegnare il nodo come non programmabile." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." 
+msgstr "" +"\n" +"\t\tImposta le annotazioni dell'ultima-configurazione-applicata impostandola " +"in modo che corrisponda al contenuto di un file.\n" +"\t\tCiò determina l'aggiornamento dell'ultima-configurazione-applicata come " +"se 'kubectl apply -f ' fosse stato eseguito,\n" +"\t\tsenza aggiornare altre parti dell'oggetto." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Crea un nuovo namespace denominato my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Crea un nuovo service account denominato my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\tCrea un servizio ExternalName con il nome specificato.\n" +"\n" +"\tIl servizio ExternalName fa riferimento a un indirizzo DNS esterno \n" +"\tsolo pod, che permetteranno agli autori delle applicazioni di utilizzare i " +"servizi di riferimento\n" +"\tche esistono fuori dalla piattaforma, su altri cluster, o localmente.." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp fornisce assistenza per qualsiasi comando nell'applicazione.\n" +"\tBasta digitare kubectl help [path to command] per i dettagli completi." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Creare un nuovo servizio LoadBalancer denominato my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump dello stato corrente del cluster verso stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump dello stato corrente del cluster verso /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump di tutti i namespaces verso stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump di un set di namespace verso /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Crea un servizio LoadBalancer con il nome specificato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"Un insieme delimitato-da-virgole di quota scopes che devono corrispondere a " +"ciascun oggetto gestito dalla quota." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"Un insieme delimitato-da-virgola di coppie risorsa = quantità che " +"definiscono un hard limit." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"Un label selector da utilizzare per questo budget. Sono supportati solo i " +"selettori equality-based selector." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"Un selettore di label da utilizzare per questo servizio. Sono supportati " +"solo equality-based selector. Se vuota (default) dedurre il selettore dal " +"replication controller o replica set.)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Indirizzo IP esterno aggiuntivo (non gestito da Kubernetes) da accettare per " +"il servizio. 
Se questo IP viene indirizzato a un nodo, è possibile accedere " +"da questo IP in aggiunta al service IP generato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"Un override JSON inline per l'oggetto generato. Se questo non è vuoto, viene " +"utilizzato per ignorare l'oggetto generato. Richiede che l'oggetto fornisca " +"un campo valido apiVersion." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Approva una richiesta di firma del certificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Assegnare il proprio ClusterIP o impostare su 'None' per un servizio " +"'headless' (nessun bilanciamento del carico)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Collega a un container in esecuzione" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP da assegnare al servizio. Lasciare vuoto per allocare " +"automaticamente o impostare su 'None' per creare un servizio headless." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole a cui questo ClusterRoleBinding fa riferimento" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole a cui questo RoleBinding fa riferimento" + +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Convertire i file di configurazione tra diverse versioni APIs" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Copiare file e directory da e verso i container." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Crea un secret TLS" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Crea un namespace con il nome specificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Crea un secret da utilizzare con un registro Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Crea un secret utilizzando un subcommand specificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Creare un account di servizio con il nome specificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Elimina il cluster specificato dal kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Elimina il context specificato dal kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Nega una richiesta di firma del certificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Descrive uno o più context" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Mostra i cluster definiti nel kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "" +"Visualizza le impostazioni merged di kubeconfig o un file kubeconfig " +"specificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Visualizza una o più risorse" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparazione alla manutenzione" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Modificare una risorsa sul server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email per il registro Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Esegui un comando in un contenitore" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Inoltra una o più porte locali a un pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Aiuto per qualsiasi comando" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"Se non è vuoto, impostare l'affinità di sessione per il servizio; Valori " +"validi: 'None', 'ClientIP'" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." 
+msgstr "" +"Se non è vuoto, l'aggiornamento delle annotazioni avrà successo solo se " +"questa è la resource-version corrente per l'oggetto. Valido solo quando si " +"specifica una singola risorsa." + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"Se non vuoto, l'aggiornamento delle label avrà successo solo se questa è la " +"resource-version corrente per l'oggetto. Valido solo quando si specifica una " +"singola risorsa." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Contrassegnare il nodo come programmabile" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Contrassegnare il nodo come non programmabile" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Imposta la risorsa indicata in pausa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Modificare le risorse del certificato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Modifica i file kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Nome o numero di porta nel container verso il quale il servizio deve " +"dirigere il traffico. Opzionale." + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Restituisce solo i log dopo una data specificata (RFC3339). Predefinito " +"tutti i log. È possibile utilizzare solo uno tra data-inizio/a-partire-da." + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "" +"Codice di completamento shell di output per la shell specificata (bash o zsh)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Password per l'autenticazione al registro di Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Percorso certificato di chiave pubblica codificato PEM." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Percorso alla chiave privata associata a un certificato specificato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Prerequisito per la versione delle risorse. Richiede che la versione " +"corrente delle risorse corrisponda a questo valore per scalare." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Stampa per client e server le informazioni sulla versione" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Stampa l'elenco flag ereditati da tutti i comandi" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Stampa i log per container in un pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Riprendere una risorsa in pausa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Ruolo di riferimento per RoleBinding" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Esegui una particolare immagine nel cluster" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Eseguire un proxy al server Kubernetes API" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Posizione del server per il Registro Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Imposta caratteristiche specifiche sugli oggetti" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "Impostare il selettore di una risorsa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Mostra i dettagli di una specifica risorsa o un gruppo di risorse" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Mostra lo stato del rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Sinonimo di --target-port" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "L'immagine per il container da eseguire." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"La politica di pull dell'immagine per il container. Se lasciato vuoto, " +"questo valore non verrà specificato dal client e predefinito dal server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"Il numero minimo o la percentuale di pod disponibili che questo budget " +"richiede." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "Il nome dell'oggetto appena creato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"Il nome dell'oggetto appena creato. Se non specificato, verrà utilizzato il " +"nome della risorsa di input." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. 
There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"Il nome del generatore API da utilizzare. Ci sono 2 generatori: 'service/v1' " +"e 'service/v2'. L'unica differenza tra loro è che la porta di servizio in v1 " +"è denominata \"predefinita\", mentre viene lasciata unnamed in v2. Il valore " +"predefinito è 'service/v2'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "" +"Il protocollo di rete per il servizio da creare. Il valore predefinito è " +"'TCP'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"La porta che il servizio deve servire. Copiato dalla risorsa esposta, se non " +"specificata" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"I limiti delle richieste di risorse per questo contenitore. Ad esempio, " +"'cpu=200m,memory=512Mi'. Si noti che i componenti lato server possono " +"assegnare i limiti a seconda della configurazione del server, ad esempio " +"intervalli di limiti." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"La risorsa necessita di richieste di requisiti per questo pod. Ad esempio, " +"'cpu = 100m, memoria = 256Mi'. Si noti che i componenti lato server possono " +"assegnare i requisiti a seconda della configurazione del server, ad esempio " +"intervalli di limiti." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "Tipo di segreto da creare" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "Annulla un precedente rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Aggiorna richieste di risorse/limiti sugli oggetti con pod template" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Aggiorna annotazioni di risorsa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "Aggiorna label di una risorsa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Aggiorna i taints su uno o più nodi" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Nome utente per l'autenticazione nel registro Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "Visualizza la storia del rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. 
If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Dove eseguire l'output dei file. Se vuota o '-' utilizza lo stdout, " +"altrimenti crea una gerarchia di directory in quella directory" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "flag di riavvio finto)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "Kubectl controlla il gestore cluster di Kubernetes" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using " +#~ "the cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Creare un ClusterRoleBinding per user1, user2 e group1 utilizzando " +#~ "il cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Crea un RoleBinding per user1, user2, and group1 utilizzando " +#~ "l'admin ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys " +#~ "instead of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Crea un nuovo configmap denominato my-config in base alla " +#~ "cartella bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Crea un nuovo configmap denominato my-config con le chiavi " +#~ "specificate anziché i nomi dei file su disco\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Crea un nuovo configmap denominato my-config con key1 = config1 e " +#~ "key2 = config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" + +#~ msgid "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Se non si dispone ancora di un file .dockercfg, è possibile " +#~ "creare un secret dockercfg direttamente utilizzando:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ 
"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Applica la configurazione pod.json a un pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Applicare il JSON passato in stdin a un pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Nota: --prune è ancora in in Alpha\n" +#~ "\t\t# Applica la configurazione manifest.yaml che corrisponde alla label " +#~ "app = nginx ed elimina tutte le altre risorse che non sono nel file e " +#~ "nella label corrispondente app = nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Applica la configurazione manifest.yaml ed elimina tutti gli altri " +#~ "configmaps non presenti nel file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, no target CPU utilization specified so a default autoscaling " +#~ "policy will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of " +#~ "pods between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Auto scale un deployment \"foo\", con il numero di pod compresi " +#~ "tra 2 e 10, utilizzo della CPU target specificato in modo da utilizzare " +#~ "una politica di autoscaling predefinita:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale un controller di replica \"foo\", con il numero di pod " +#~ "compresi tra 1 e 5, utilizzo dell'utilizzo della CPU a 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . 
| kubectl create -f -" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Converte 'pod.yaml' alla versione più recente e stampa in stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Converte lo stato live della risorsa specificata da 'pod.yaml' " +#~ "nella versione più recente.\n" +#~ "\t\t# e stampa in stdout nel formato json.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Converte tutti i file nella directory corrente alla versione più " +#~ "recente e li crea tutti.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un ClusterRole denominato \"pod-reader\" che consente " +#~ "all'utente di eseguire \"get\", \"watch\" e \"list\" sui pod\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Crea un ClusterRole denominato \"pod-reader\" con ResourceName " +#~ "specificato\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un ruolo denominato \"pod-reader\" che consente all'utente di " +#~ "eseguire \"get\", \"watch\" e \"list\" sui pod\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Crea un ruolo denominato \"pod-reader\" con ResourceName " +#~ "specificato\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea una nuova resourcequota chiamata my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Creare una nuova resourcequota denominata best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl 
create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un pod disruption budget chiamato my-pdb che seleziona tutti i " +#~ "pod con label app=rails\n" +#~ "\t\t# e richiede che almeno uno di essi sia disponibile in qualsiasi " +#~ "momento.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Crea un pod disruption budget con nome my-pdb che seleziona tutti i " +#~ "pod con label app=nginx\n" +#~ "\t\t# e richiede che almeno la metà dei pod selezionati sia disponibile " +#~ "in qualsiasi momento.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un pod utilizzando i dati in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Crea un pod basato sul JSON passato in stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Modifica i dati in docker-registry.yaml in JSON utilizzando il " +#~ "formato API v1 quindi crea la risorsa utilizzando i dati modificati.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers
on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un servizio per un nginx replicato, che serve nella porta 80 e " +#~ "si collega ai container sulla porta 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Crea un servizio per un controller di replica identificato per tipo " +#~ "e nome specificato in \"nginx-controller.yaml\", che serve nella porta 80 " +#~ "e si collega ai container sulla porta 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Crea un servizio per un pod valid-pod, che serve nella porta 444 " +#~ "con il nome \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Crea un secondo servizio basato sul servizio sopra, esponendo la " +#~ "porta container 8443 come porta 443 con il nome \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Crea un servizio per un'applicazione di replica in porta 4100 che " +#~ "bilanci il traffico UDP e denominato \"video stream\".\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Crea un servizio per un nginx replicato utilizzando l'insieme di " +#~ "replica, che serve nella porta 80 e si collega ai contenitori sulla porta " +#~ "8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Crea un servizio per una distribuzione di nginx, che serve nella " +#~ "porta 80 e si collega ai contenitori della porta 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Elimina un pod utilizzando il tipo e il nome specificati in pod." 
+#~ "json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Elimina un pod in base al tipo e al nome del JSON passato in " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Elimina i baccelli ei servizi con gli stessi nomi \"baz\" e \"foo" +#~ "\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Elimina i baccelli ei servizi con il nome dell'etichetta = " +#~ "myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Eliminare un pod con un ritardo minimo\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Forza elimina un pod in un nodo morto\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Elimina tutti i pod\n" +#~ "\t\tkubectl delete pods --all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Descrive un nodo\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Descrive un pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Descrive un pod identificato da tipo e nome in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Descrive tutti i pod\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Descrive i pod con label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Descrivere tutti i pod gestiti dal controller di replica \"frontend" +#~ "\" (rc-created pods\n" +#~ "\t\t# ottiene il nome del rc come un prefisso del nome pod).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", anche se ci sono i baccelli non gestiti da " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet o StatefulSet su di " +#~ "esso.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# Come sopra, ma interrompere se ci sono i baccelli non gestiti da " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, e " +#~ "utilizzare un periodo di grazia di 15 minuti.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API 
format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Modifica il servizio denominato 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Usa un editor alternativo\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Modifica il lavoro 'myjob' in JSON utilizzando il formato API v1:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Modifica la distribuzione 'mydeployment' in YAML e salvare la " +#~ "configurazione modificata nella sua annotazione:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ottieni l'output dalla 'data' di esecuzione del pod 123456-7890, " +#~ "utilizzando il primo contenitore per impostazione predefinita\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Ottieni l'output dalla data di esecuzione in ruby-container del pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Passare alla modalità raw terminal, invia stdin a 'bash' in ruby-" +#~ "container del pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ottieni l'output dal pod 123456-7890 in esecuzione, utilizzando il " +#~ "primo contenitore per impostazione predefinita\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Ottieni l'output dal ruby-container del pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Passa alla modalità raw terminal, invia stdin a 'bash' in ruby-" +#~ "container del pod 123456-7890\n" +#~ "\t\t# e invia stdout/stderr da 'bash' al client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Ottieni l'output dal primo pod di una ReplicaSet denominata nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac 
using homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source if from ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Installa il completamento di bash su un Mac utilizzando homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Carica il codice di completamento kubectl per bash nella shell " +#~ "corrente\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Scrive il codice di completamento bash in un file e lo carica da ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Carica il codice di completamento kubectl per zsh [1] nella shell " +#~ "corrente\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Elenca tutti i pod in formato output ps.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# Elenca tutti i pod in formato output ps con maggiori informazioni " +#~ "(ad esempio il nome del nodo).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# Elenca un controller di replica singolo con NAME specificato nel " +#~ "formato di output ps.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# Elenca un singolo pod nel formato di uscita JSON.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Elenca un pod identificato per tipo e nome specificato in \"pod.yaml" +#~ "\" nel formato di uscita JSON.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Restituisce solo il valore di fase del pod specificato.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# Elenca tutti i controller e servizi di replica insieme in formato " +#~ "output ps.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# Elenca una o più risorse per il tipo e per i nomi.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Elenca tutte le risorse con tipi diversi.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ascolta localmente le porte 5000 e 6000, inoltrando i dati da/verso " +#~ "le porte 5000 e 6000 nel pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Ascolta localmente la porta 8888, inoltra a 5000 nel pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Ascolta localmente una porta casuale, inoltra a 5000 nel pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Ascolta localmente una porta casuale, inoltra a 5000 nel pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Segna il nodo \"foo\" come programmabile.\n" +#~ "\t\t$ Kubectl uncordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Segna il nodo \"foo\" come non programmabile.\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using 
strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aggiorna parzialmente un nodo utilizzando merge patch strategica\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Aggiorna parzialmente un nodo identificato dal tipo e dal nome " +#~ "specificato in \"node.json\" utilizzando merge patch strategica\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Aggiorna l'immagine di un contenitore; spec.containers[*].name è " +#~ "richiesto perché è una chiave di merge\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Aggiorna l'immagine di un contenitore utilizzando una patch json " +#~ "con array posizionali\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Stampa l'indirizzo dei servizi master e cluster\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Sostituire un pod utilizzando i dati in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Sostituire un pod usando il JSON passato da stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Aggiorna la versione dell'immagine (tag) di un singolo container di " +#~ "pod a v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Forza la sostituzione: elimina e poi ricrea la risorsa\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t#
Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Restituisce snapshot log dal pod nginx con un solo container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Restituisce snapshot log dei pod definiti dalla label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Restituisce snapshot log del container ruby terminato nel pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Iniziare a trasmettere i log del contenitore ruby nel pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Visualizza solo le ultime 20 righe di output del pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Mostra tutti i log del pod nginx scritti nell'ultima ora\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Restituisce snapshot log dal primo contenitore di un lavoro " +#~ "chiamato hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Restituisce snapshot logs del container nginx-1 del deployment " +#~ "chiamato nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. 
the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Esegui un proxy verso kubernetes apiserver sulla porta 8011, che " +#~ "fornisce contenuti statici da ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Esegui un proxy verso kubernetes apiserver su una porta locale " +#~ "arbitraria.\n" +#~ "\t\t# La porta selezionata per il server verrà inviata a stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Esegui un proxy verso kubernetes apiserver, cambiando il prefisso " +#~ "api in k8s-api\n" +#~ "\t\t# Questo comporta, ad es., pod api disponibili presso localhost:8001/" +#~ "k8s-api/v1/pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Scala un replicaset denominato 'foo' a 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scala una risorsa identificata per tipo e nome specificato in \"foo." +#~ "yaml\" a 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# Se la distribuzione corrente di mysql è 2, scala mysql a 3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scalare più controllori di replica.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scala il lavoro denominato 'cron' a 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Imposta l'ultima-configurazione-applicata di una risorsa che " +#~ "corrisponda al contenuto di un file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Esegue set-last-applied per ogni file di configurazione in una " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Imposta la configurazione dell'ultima applicazione di una risorsa " +#~ "che corrisponda al contenuto di un file, creerà l'annotazione se non " +#~ "esiste già.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services 
with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Spegni foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop di tutti i pod e servizi con label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Spegni il servizio definito in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Spegni tutte le risorse nella directory path/to/resources\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ...
\n" +#~ "\n" +#~ "\t\t# Start the perl container to compute Ï€ to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute Ï€ to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Avviare un'unica istanza di nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Avviare un'unica istanza di hazelcast e lasciare che il container " +#~ "esponga la porta 5701.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Avviare una singola istanza di hazelcast ed imposta le variabili " +#~ "ambiente \"DNS_DOMAIN=cluster\" e \"POD_NAMESPACE=default\" nel " +#~ "container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Avviare un'istanza replicata di nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Stampare gli oggetti API corrispondenti senza crearli.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Avviare un'unica istanza di nginx, ma overload le spec del " +#~ "deployment con un insieme parziale di valori analizzati da JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Avviare un pod di busybox e tenerlo in primo piano, non riavviarlo " +#~ "se esce.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Avviare il container nginx utilizzando il comando predefinito, ma " +#~ "utilizzare argomenti personalizzati (arg1 .. argN) per quel comando.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Avviare il container nginx utilizzando un diverso comando e " +#~ "argomenti personalizzati.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Avviare il contenitore perl per calcolare Ï€ a 2000 posti e " +#~ "stamparlo.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Avviare il cron job per calcolare Ï€ a 2000 posti e stampare ogni 5 " +#~ "minuti.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aggiorna il nodo \"foo\" con un marcatore con il tasto 'dedicated' " +#~ "e il valore 'special-user' ed effettua 'NoSchedule'.\n" +#~ "\t\t# Se un marcatore con quel tasto e l'effetto già esiste, il suo " +#~ "valore viene sostituito come specificato.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Rimuove dal nodo 'foo' il marcatore con il tasto 'dedicated' ed " +#~ "effettua 'NoSchedule' se esiste.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Rimuovi dal nodo 'foo' tutti i marcatori con chiave 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aggiorna il pod 'foo' con l'etichetta 'unhealthy' e il valore " +#~ "'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Aggiorna il pod 'foo' con l'etichetta 'status' e il valore " +#~ "'unhealthy', sovrascrivendo qualsiasi valore esistente.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aggiorna tutti i pod nello spazio dei nomi\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aggiorna un pod identificato dal tipo e dal nome in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aggiorna il pod 'foo' solo se la risorsa è invariata dalla versione " +#~ "1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Aggiorna il pod 
'foo' rimuovendo un'etichetta denominata 'bar' se " +#~ "esiste.\n" +#~ "\t\t# Non richiede la flag --overwrite.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aggiorna i pod di frontend-v1 usando i dati del replication " +#~ "controller in frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Aggiorna i pod di frontend-v1 usando i dati JSON passati da stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Aggiorna i pod di frontend-v1 in frontend-v2 solo cambiando " +#~ "l'immagine e modificando\n" +#~ "\t\t# il nome del replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Aggiorna i pod di frontend solo cambiando l'immagine e mantenendo " +#~ "il vecchio nome.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Interrompe e inverte un rollout esistente in corso (da " +#~ "frontend-v1 a frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Visualizza le annotazioni dell'ultima-configurazione-applicata per " +#~ "tipo/nome in YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# Visualizza le annotazioni dell'ultima-configurazione-applicata " +#~ "per file in JSON.\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274."
+#~ msgstr "" +#~ "\n" +#~ "\t\tApplicare una configurazione a una risorsa per nomefile o stdin.\n" +#~ "\t\tQuesta risorsa verrà creata se non esiste ancora.\n" +#~ "\t\tPer utilizzare 'apply', creare sempre la risorsa inizialmente con " +#~ "'apply' o 'create --save-config'.\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML.\n" +#~ "\n" +#~ "\t\tDisclaimer Alpha: la funzionalità --prune non è ancora completa. Non " +#~ "utilizzare a meno che non si sia a conoscenza di quale sia lo stato " +#~ "attuale. Vedi https://issues.k8s.io/34274." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\n" +#~ "Crea un ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un ClusterRoleBinding per un ClusterRole particolare." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un RoleBinding per un particolare Ruolo o ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un TLS secret dalla coppia di chiavi pubblica/privata.\n" +#~ "\n" +#~ "\t\tLa coppia di chiavi pubblica/privata deve esistere prima. Il " +#~ "certificato chiave pubblica deve essere .PEM codificato e corrispondere " +#~ "alla chiave privata data." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreare un configmap basato su un file, una directory o un valore " +#~ "literal specificato.\n" +#~ "\n" +#~ "\t\tUn singolo configmap può includere una o più coppie chiave/valore.\n" +#~ "\n" +#~ "\t\tQuando si crea una configmap basata su un file, il valore predefinito " +#~ "sarà il nome di base del file e il valore sarà\n" +#~ "\t\tpredefinito per il contenuto del file. Se il nome di base è una " +#~ "chiave non valida, è possibile specificare un tasto alternativo.\n" +#~ "\n" +#~ "\t\tQuando si crea un configmap basato su una directory, ogni file il cui " +#~ "nome di base è una chiave valida nella directory verrà\n" +#~ "\t\tpacchettizzata nel configmap. Le voci di directory tranne i file " +#~ "regolari vengono ignorati (ad esempio sottodirectory,\n" +#~ "\t\tsymlinks, devices, pipes, ecc)." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreare un nuovo secret per l'utilizzo con i registri Docker.\n" +#~ "\n" +#~ "\t\tDockercfg secrets vengono utilizzati per autenticare i registri " +#~ "Docker.\n" +#~ "\n" +#~ "\t\tQuando utilizzi la riga di comando Docker per il push delle immagini, " +#~ "è possibile eseguire l'autenticazione eseguendo correttamente un " +#~ "determinato registry\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " Questo produce un file ~ / .dockercfg che viene utilizzato dai " +#~ "successivi comandi \"docker push\" e \"docker pull\"\n" +#~ "\t\tper autenticarsi nel registry. L'indirizzo email è facoltativo.\n" +#~ "\n" +#~ "\t\tDurante la creazione di applicazioni, è possibile avere un Docker " +#~ "registry che richiede l'autenticazione. Affinché i \n" +#~ "\t\tnodi eseguano pull di immagini per vostro conto, devono avere le " +#~ "credenziali. È possibile fornire queste informazioni \n" +#~ "\t\tcreando un dockercfg secret e collegandolo al tuo account di servizio." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un pod disruption budget con il nome specificato, selector e il " +#~ "numero minimo di pod disponibili" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea una risorsa per nome file o stdin.\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea una resourcequota con il nome specificato, hard limits e gli " +#~ "scope opzionali" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. 
Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un secret basato su un file, una directory o un valore specifico " +#~ "literal.\n" +#~ "\n" +#~ "\t\tUn singolo secret può includere una o più coppie chiave/valore.\n" +#~ "\n" +#~ "\t\tQuando si crea un secret basato su un file, la chiave per " +#~ "impostazione predefinita sarà il nome di base del file e il valore sarà\n" +#~ "\t\tpredefinito al contenuto del file. Se il nome di base è una chiave " +#~ "non valida, è possibile specificare una chiave alternativa.\n" +#~ "\n" +#~ "\n" +#~ "\t\tQuando si crea un secret basato su una directory, ogni file il cui " +#~ "nome di base è una chiave valida nella directory verrà\n" +#~ "\t\tpacchettizzato in un secret. Le voci di directory tranne i file " +#~ "regolari vengono ignorate (ad esempio sottodirectory,\n" +#~ "\t\tsymlinks, devices, pipes, ecc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea ed esegue un'immagine particolare, eventualmente replicata.\n" +#~ "\n" +#~ "\t\tCrea un deployment o un job per gestire i container creati." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un autoscaler che automaticamente sceglie e imposta il numero di " +#~ "pod che vengono eseguiti in un cluster di kubernetes.\n" +#~ "\n" +#~ "\t\tEsegue una ricerca di un Deployment, ReplicaSet o " +#~ "ReplicationController per nome e crea un autoscaler che utilizza la " +#~ "risorsa indicata come riferimento.\n" +#~ "\t\tUn autoscaler può aumentare o diminuire automaticamente il numero di " +#~ "pod distribuiti all'interno del sistema se necessario." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period.
To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tCancella risorse secondo nomi di file, stdin, risorse e nomi, o per " +#~ "selettori di risorse e etichette.\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML. È possibile specificare un solo " +#~ "tipo di argomenti: nome file,\n" +#~ "\t\trisorse e nomi, o risorse e selettore di etichette.\n" +#~ "\n" +#~ "\t\tAlcune risorse, come i pod, supportano la cancellazione corretta. Queste " +#~ "risorse definiscono un periodo di default\n" +#~ "\t\tprima che siano forzatamente terminate (il grace period) ma si può " +#~ "sostituire quel valore con\n" +#~ "\t\til flag --grace-period, o passare --now per impostare il grace-period " +#~ "a 1. Poiché queste risorse spesso\n" +#~ "\t\trappresentano entità del cluster, la cancellazione potrebbe non essere " +#~ "presa in carico immediatamente. Se il nodo\n" +#~ "\t\tche ospita un pod è spento o non raggiungibile da API server, " +#~ "la terminazione può richiedere molto più tempo\n" +#~ "\t\tdel grace period. Per forzare la cancellazione di una risorsa,\tdevi " +#~ "obbligatoriamente indicare un grace\tperiod di 0 e specificare\n" +#~ "\t\til flag --force.\n" +#~ "\n" +#~ "\t\tIMPORTANTE: Forzare la cancellazione dei pod non attende conferma che " +#~ "i processi del pod siano\n" +#~ "\t\tterminati, il che può lasciare questi processi in esecuzione fino a " +#~ "quando il nodo rileva la cancellazione e la\n" +#~ "\t\tcompleta correttamente. Se i tuoi processi utilizzano " +#~ "l'archiviazione condivisa o parlano con un'API remota e\n" +#~ "\t\tdipendono dal nome del pod per identificarsi, la forzata eliminazione " +#~ "di questi pod può comportare\n" +#~ "\t\tpiù processi in esecuzione su macchine diverse che utilizzano la " +#~ "stessa identificazione, il che può portare a\n" +#~ "\t\tcorruzione o inconsistenza dei dati. 
Forza l'eliminazione dei pod solo quando si è " +#~ "sicuri che il pod sia\n" +#~ "\t\tterminato, o se la tua applicazione può tollerare più copie dello " +#~ "stesso pod in esecuzione contemporaneamente.\n" +#~ "\t\tInoltre, se forzi l'eliminazione dei pod, lo scheduler può " +#~ "posizionare nuovi pod su quei nodi prima che il nodo\n" +#~ "\t\tabbia liberato quelle risorse, provocando l'immediata eviction di " +#~ "tali pod.\n" +#~ "\n" +#~ "\t\tNotare che il comando di eliminazione NON verifica la versione " +#~ "delle risorse, quindi se qualcuno\n" +#~ "\t\tinvia un aggiornamento a una risorsa proprio quando invii " +#~ "un'eliminazione, il loro aggiornamento\n" +#~ "\t\tsarà perso insieme al resto della risorsa." + +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tDeprecated: chiudere correttamente una risorsa per nome o nome file.\n" +#~ "\n" +#~ "\t\tIl comando stop è deprecato, tutte le sue funzionalità sono coperte " +#~ "dal comando delete.\n" +#~ "\t\tVedere 'kubectl delete --help' per ulteriori dettagli.\n" +#~ "\n" +#~ "\t\tTenta di arrestare ed eliminare una risorsa che supporta la corretta " +#~ "terminazione.\n" +#~ "\t\tSe la risorsa è scalabile, verrà scalata a 0 prima dell'eliminazione." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\tVisualizza l'utilizzo di risorse (CPU/Memoria/Storage) dei nodi.\n" +#~ "\n" +#~ "\t\tIl comando top-node consente di visualizzare il consumo di risorse " +#~ "dei nodi." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\tVisualizza l'utilizzo di risorse (CPU/Memoria/Storage) dei pod.\n" +#~ "\n" +#~ "\t\tIl comando \"top pod\" consente di visualizzare il consumo delle " +#~ "risorse dei pod.\n" +#~ "\n" +#~ "\t\tA causa del ritardo della pipeline delle metriche, potrebbero non essere " +#~ "disponibili per alcuni minuti\n" +#~ "\t\tdal momento della creazione dei pod." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\tVisualizza l'utilizzo di risorse (CPU/Memoria/Storage).\n" +#~ "\n" +#~ "\t\tIl comando top consente di visualizzare il consumo di risorse per " +#~ "nodi o pod.\n" +#~ "\n" +#~ "\t\tQuesto comando richiede che Heapster sia configurato correttamente e " +#~ "che funzioni sul server."
+ +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tDrain node in preparazione alla manutenzione.\n" +#~ "\n" +#~ "\t\tIl nodo indicato verrà contrassegnato unschedulable per impedire che " +#~ "nuovi pod arrivino.\n" +#~ "\t\t'drain' evict i pod se l'APIServer supporta eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Altrimenti, usa il " +#~ "normale DELETE\n" +#~ "\t\tper eliminare i pod.\n" +#~ "\t\tIl 'drain' evicts o la cancellazione di tutti all pod tranne mirror " +#~ "pods (che non possono essere eliminati\n" +#~ "\t\tattraverso API server). Se ci sono i pod gestiti da DaemonSet, " +#~ "drain non procederà\n" +#~ "\t\tsenza --ignore-daemonsets e, a prescindere da ciò, non cancellerà " +#~ "alcun\n" +#~ "\t\tpod gestitto da DaemonSet,poiché questi pods verrebbero " +#~ "immediatamente sostituiti dal\n" +#~ "\t\tDaemonSet controller, che ignora le marcature unschedulable. Se ci " +#~ "sono\n" +#~ "\t\tpod che non sono né mirror pod né gestiti dal ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet o Job, allora drain non cancellerà " +#~ "alcun pod finché non\n" +#~ "\t\tuserai --force. --force permetterà alla cancellazione di procedere " +#~ "se la risorsa gestita da uno\n" +#~ "\t\to più pod è mancante.\n" +#~ "\n" +#~ "\t\t'drain' attende il termine corretto. Non devi operare sulla macchina " +#~ "finché\n" +#~ "\t\til comando non viene completato.\n" +#~ "\n" +#~ "\t\tQuando sei pronto per riportare il nodo al servizio, utilizza kubectl " +#~ "uncordon, per\n" +#~ "\t\trimettere il nodo schedulable nuovamente.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. 
It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\tModificare una risorsa dall'editor predefinito.\n" +#~ "\n" +#~ "\t\tIl comando di modifica consente di modificare direttamente qualsiasi " +#~ "risorsa API che è possibile recuperare tramite gli\n" +#~ "\t\tstrumenti di riga di comando. Apre l'editor definito dalle variabili " +#~ "d'ambiente\n" +#~ "\t\tKUBE_EDITOR o EDITOR, o ripiega su 'vi' per Linux o 'notepad' per " +#~ "Windows.\n" +#~ "\t\tÈ possibile modificare più oggetti, anche se le modifiche vengono " +#~ "applicate una alla volta. Il comando\n" +#~ "\t\taccetta sia nomi di file che argomenti da riga di comando, anche se i " +#~ "file a cui si fa riferimento devono\n" +#~ "\t\tessere versioni delle risorse salvate in precedenza.\n" +#~ "\n" +#~ "\t\tLa modifica viene eseguita con la versione API utilizzata per " +#~ "recuperare la risorsa.\n" +#~ "\t\tPer modificare utilizzando una specifica versione API, qualifica " +#~ "completamente la risorsa, la versione e il gruppo.\n" +#~ "\n" +#~ "\t\tIl formato predefinito è YAML. Per modificare in JSON, specificare \"-" +#~ "o json\".\n" +#~ "\n" +#~ "\t\tIl flag --windows-line-endings può essere utilizzato per forzare i " +#~ "fine linea Windows,\n" +#~ "\t\taltrimenti verrà utilizzato il default per il sistema operativo.\n" +#~ "\n" +#~ "\t\tNel caso in cui si verifichi un errore durante l'aggiornamento, verrà " +#~ "creato un file temporaneo sul disco\n" +#~ "\t\tche contiene le modifiche non applicate. L'errore più comune durante " +#~ "l'aggiornamento di una risorsa\n" +#~ "\t\tè una modifica da parte di un altro editor della risorsa sul server. " +#~ "Quando questo si verifica, dovrai\n" +#~ "\t\tapplicare le modifiche alla versione più recente della risorsa o " +#~ "aggiornare la tua copia\n" +#~ "\t\ttemporanea salvata per includere l'ultima versione della risorsa." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands.
This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tProduce in output il codice di completamento per la shell " +#~ "specificata (bash o zsh).\n" +#~ "\t\tIl codice di shell deve essere valutato per fornire il completamento\n" +#~ "\t\tinterattivo dei comandi kubectl. Questo può essere eseguito " +#~ "richiamandolo\n" +#~ "\t\tda .bash_profile.\n" +#~ "\n" +#~ "\t\tNota: questo richiede il framework bash-completion, che non è " +#~ "installato\n" +#~ "\t\tper impostazione predefinita su Mac. Questo può essere installato " +#~ "utilizzando homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tUna volta installato, bash_completion deve essere valutato. Ciò può " +#~ "essere fatto aggiungendo la\n" +#~ "\t\tseguente riga al file .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNota per gli utenti zsh: [1] i completamenti zsh sono supportati solo " +#~ "nelle versioni di zsh >= 5.2" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tEseguire un rolling update del ReplicationController specificato.\n" +#~ "\n" +#~ "\t\tSostituisce il replication controller specificato con un nuovo " +#~ "replication controller aggiornando un pod alla volta per usare il\n" +#~ "\t\tnuovo PodTemplate. Il new-controller.json deve specificare lo stesso " +#~ "namespace del\n" +#~ "\t\tcontroller di replica esistente e sovrascrivere almeno un'etichetta " +#~ "(comune) nella sua replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tSostituire una risorsa per nome file o stdin.\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML. Se si sostituisce una risorsa " +#~ "esistente,\n" +#~ "\t\tè necessario fornire la specifica completa della risorsa.
Questa può " +#~ "essere ottenuta con\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tFare riferimento ai modelli in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html per verificare se un campo è mutabile." + +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." +#~ msgstr "" +#~ "\n" +#~ "\t\tImposta una nuova dimensione per Deployment, ReplicaSet, Replication " +#~ "Controller, o Job.\n" +#~ "\n" +#~ "\t\tScale consente anche agli utenti di specificare una o più condizioni " +#~ "preliminari per l'azione di scala.\n" +#~ "\n" +#~ "\t\tSe --current-replicas o --resource-version sono specificati, vengono " +#~ "convalidati prima di\n" +#~ "\t\ttentare la scala, ed è garantito che la precondizione sia valida quando\n" +#~ "\t\tla scala viene inviata al server." + +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tPer fare da proxy a tutte le kubernetes api e nient'altro, " +#~ "utilizzare:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tPer fare da proxy solo a una parte delle kubernetes api e anche ad " +#~ "alcuni file statici:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tQuanto sopra consente 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tPer fare da proxy a tutte le kubernetes api su una radice diversa, " +#~ "utilizzare:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tQuanto sopra ti permette 'curl localhost:8001/custom/api/v1/pods'" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tAggiorna i campi di una risorsa utilizzando la merge patch " +#~ "strategica\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML.\n" +#~ "\n" +#~ "\t\tSi prega di fare riferimento ai modelli in https://htmlpreview.github." +#~ "io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/" +#~ "v1/definitions.html per verificare se un campo è mutabile."
+ +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." +#~ msgstr "" +#~ "\n" +#~ "\t\tAggiorna le label di una risorsa.\n" +#~ "\n" +#~ "\t\t* Una label deve iniziare con una lettera o un numero e può contenere " +#~ "lettere, numeri, trattini, punti e sottolineature, fino a %[1]d " +#~ "caratteri.\n" +#~ "\t\t* Se --overwrite è true, le label esistenti possono essere " +#~ "sovrascritte, altrimenti il tentativo di sovrascrivere una label genererà " +#~ "un errore.\n" +#~ "\t\t* Se --resource-version è specificato, gli aggiornamenti useranno " +#~ "questa versione della risorsa, altrimenti verrà utilizzata la resource-" +#~ "version esistente." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d " +#~ "characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." +#~ msgstr "" +#~ "\n" +#~ "\t\tAggiorna i marcatori su uno o più nodi.\n" +#~ "\n" +#~ "\t\t* Un marcatore è costituito da una chiave, un valore e un effetto. " +#~ "Come argomento qui, viene espresso come chiave=valore:effetto.\n" +#~ "\t\t* La chiave deve iniziare con una lettera o un numero e può contenere " +#~ "lettere, numeri, trattini, punti e sottolineature, fino a %[1]d " +#~ "caratteri.\n" +#~ "\t\t* Il valore deve iniziare con una lettera o un numero e può contenere " +#~ "lettere, numeri, trattini, punti e sottolineature, fino a %[2]d " +#~ "caratteri.\n" +#~ "\t\t* L'effetto deve essere NoSchedule, PreferNoSchedule o NoExecute.\n" +#~ "\t\t* Attualmente il marcatore può essere applicato solo al nodo." + +#~ msgid "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." +#~ msgstr "" +#~ "\n" +#~ "\t\tVisualizza le annotazioni dell'ultima-configurazione-applicata per " +#~ "tipo/nome o file.\n" +#~ "\n" +#~ "\t\tL'output predefinito verrà stampato su stdout nel formato YAML. Si " +#~ "può usare l'opzione -o\n" +#~ "\t\tper cambiare il formato di output." + +#~ msgid "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image.
If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" +#~ msgstr "" +#~ "\n" +#~ "\t # !!!Nota importante!!!\n" +#~ "\t # Richiede che il binario 'tar' sia presente nell'immagine del tuo\n" +#~ "\t # container. Se 'tar' non è presente, 'kubectl cp' fallirà.\n" +#~ "\n" +#~ "\t # Copia la directory locale /tmp/foo_dir in /tmp/bar_dir in un pod " +#~ "remoto nel namespace predefinito\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copia il file locale /tmp/foo in /tmp/bar in un pod remoto in un " +#~ "container specifico\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copia il file locale /tmp/foo in /tmp/bar in un pod remoto nel " +#~ "namespace \n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copia /tmp/foo da un pod remoto in /tmp/bar localmente\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # Crea un nuovo secret TLS denominato tls-secret con la coppia di " +#~ "chiavi fornita:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Crea un nuovo secret denominato my-secret con le chiavi per ogni file " +#~ "nella cartella bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Crea un nuovo secret denominato my-secret con le chiavi specificate " +#~ "anziché i nomi sul disco\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Crea un nuovo secret denominato my-secret con key1 = supersecret e " +#~ "key2 = topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Crea un nuovo servizio ExternalName denominato my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # Creare un nuovo servizio clusterIP denominato my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Creare un nuovo servizio clusterIP denominato my-cs (in modalità " +#~ "headless)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # Crea una nuovo deployment chiamato my-dep che esegue l'immagine " +#~ "busybox.\n" +#~ " kubectl create deployment my-dep --image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # Creare un nuovo servizio nodeport denominato my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if " +#~ "it exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" +#~ msgstr "" +#~ "\n" +#~ " # Aggiorna il pod 'foo' con annotazione 'description'e il valore 'my " +#~ "frontend'.\n" +#~ " # Se la stessa annotazione è impostata più volte, verrà applicato " +#~ "solo l'ultimo valore\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Aggiorna un pod identificato per tipo e nome in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Aggiorna pod 'foo' con la annotazione 'description' e il valore 'my " +#~ "frontend 
running nginx', sovrascrivendo qualsiasi valore esistente.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Aggiorna tutti i pod nel namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Aggiorna il pod 'foo' solo se la risorsa è invariata dalla versione " +#~ "1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Aggiorna il pod 'foo' rimuovendo un'annotazione denominata " +#~ "'description' se esiste.\n" +#~ " # Non richiede il flag --overwrite.\n" +#~ " kubectl annotate pods foo description-" + +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Crea un servizio clusterIP con il nome specificato." + +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Creare un deployment con il nome specificato." + +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Creare un servizio nodeport con il nome specificato." + +#~ msgid "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or " +#~ "specify --all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." +#~ msgstr "" +#~ "\n" +#~ " Dump delle informazioni di cluster idonee per il debug e la " +#~ "diagnostica di problemi di cluster. Per impostazione predefinita, esegue " +#~ "il dump di tutto\n" +#~ " verso stdout. È possibile specificare opzionalmente una directory con " +#~ "--output-directory. Se si specifica una directory, kubernetes\n" +#~ " creerà un insieme di file in quella directory. Per impostazione " +#~ "predefinita, esegue il dump solo dei dati del namespace \"kube-system\", ma è\n" +#~ " possibile passare a un namespace diverso con il flag --namespaces o " +#~ "specificare --all-namespaces per il dump di tutti i namespace.\n" +#~ "\n" +#~ " Il comando esegue il dump anche dei log di tutti i pod del cluster; " +#~ "questi log vengono scaricati in directory differenti\n" +#~ " basate sul namespace e sul nome del pod." + +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Visualizza gli indirizzi del master e dei servizi con label kubernetes." +#~ "io/cluster-service=true\n" +#~ " Per ulteriore debug e diagnosi dei problemi del cluster, utilizzare " +#~ "'kubectl cluster-info dump'." + +#~ msgid "A schedule in the Cron format the job should be run with." +#~ msgstr "Una pianificazione in formato Cron con cui il job deve essere eseguito." + +#~ msgid "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field.
Only used if --expose is true." +#~ msgstr "" +#~ "Un override JSON inline per l'oggetto di servizio generato. Se questo non " +#~ "è vuoto, viene utilizzato per ignorare l'oggetto generato. Richiede che " +#~ "l'oggetto fornisca un campo valido apiVersion. Utilizzato solo se --" +#~ "expose è true." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "Applica una configurazione risorsa per nomefile o stdin" + +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "Auto-scale a Deployment, ReplicaSet, o ReplicationController" + +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Nome container che avrà la sua immagine aggiornata. Soltanto rilevante " +#~ "quando --image è specificato, altrimenti ignorato. Necessario quando si " +#~ "utilizza --image su un contenitore a più contenitori" + +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "Crea un ClusterRoleBinding per un ClusterRole particolare" + +#~ msgid "Create a LoadBalancer service." +#~ msgstr "Creare un servizio LoadBalancer." + +#~ msgid "Create a NodePort service." +#~ msgstr "Crea un servizio NodePort." + +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "Crea un RoleBinding per un particolare Role o ClusterRole" + +#~ msgid "Create a clusterIP service." +#~ msgstr "Crea un servizio clusterIP." + +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "" +#~ "Crea un configmap da un file locale, una directory o un valore letterale" + +#~ msgid "Create a deployment with the specified name." +#~ msgstr "Creare un deployment con il nome specificato." + +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "Crea un pod disruption budget con il nome specificato." + +#~ msgid "Create a quota with the specified name." +#~ msgstr "Crea una quota con il nome specificato." + +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "Crea una risorsa per nome file o stdin" + +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "" +#~ "Crea un secret da un file locale, una directory o un valore letterale" + +#~ msgid "Create a service using specified subcommand." +#~ msgstr "Crea un servizio utilizzando il subcommand specificato." + +#~ msgid "Create an ExternalName service." +#~ msgstr "Crea un servizio ExternalName." + +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" +#~ msgstr "" +#~ "Elimina risorse selezionate per nomi di file, stdin, risorse e nomi, o " +#~ "per risorsa e selettore di label" + +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "Deprecated: spegne correttamente una risorsa per nome o nome file" + +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "Visualizza l'utilizzo di risorse (CPU/Memoria) per nodo" + +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "Visualizza l'utilizzo di risorse (CPU/Memoria) per pod." + +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "Visualizza l'utilizzo di risorse (CPU/Memoria)." 
+ +#~ msgid "Display cluster info" +#~ msgstr "Visualizza informazioni sul cluster" + +#~ msgid "Displays the current-context" +#~ msgstr "Visualizza il current-context" + +#~ msgid "Documentation of resources" +#~ msgstr "Documentazione delle risorse" + +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "" +#~ "Dump di un sacco di informazioni pertinenti per il debug e la diagnosi" + +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Politica esplicita per il pull delle immagini container. Richiesto quando " +#~ "--image è uguale all'immagine esistente, altrimenti ignorata." + +#~ msgid "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP da assegnare al Load Balancer. Se vuota, un IP effimero verrà creato e " +#~ "utilizzato (specifico per provider cloud)." + +#~ msgid "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" +#~ msgstr "" +#~ "Immagine da utilizzare per aggiornare il replication controller. Deve " +#~ "essere diversa dall'immagine esistente (nuova immagine o nuovo tag " +#~ "immagine). Non può essere utilizzata con --filename/-f" + +#~ msgid "Manage a deployment rollout" +#~ msgstr "Gestisci un deployment rollout" + +#~ msgid "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" +#~ msgstr "" +#~ "Output dell'oggetto formattato con la versione del gruppo fornito (per " +#~ "esempio: 'extensions/v1beta1').)" + +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "Eseguire un rolling update del ReplicationController specificato" + +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "Sostituire una risorsa per nomefile o stdin" + +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" +#~ msgstr "" +#~ "Imposta una nuova dimensione per Deployment, ReplicaSet, Replication " +#~ "Controller, o Job" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Imposta l'annotazione dell'ultima-configurazione-applicata ad un oggetto " +#~ "live per abbinare il contenuto di un file." + +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "Imposta una voce cluster in kubeconfig" + +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "Imposta una voce context in kubeconfig" + +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "Imposta una voce utente in kubeconfig" + +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "Imposta un singolo valore in un file kubeconfig" + +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "Imposta il current-context in un file kubeconfig" + +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" +#~ msgstr "" +#~ "Prende un replication controller, service, deployment o un pod e lo " +#~ "espone come nuovo servizio Kubernetes" + +#~ msgid "" +#~ "The key to use to differentiate between two different controllers, " +#~ "default 'deployment'. 
Only relevant when --image is specified, ignored " +#~ "otherwise" +#~ msgstr "" +#~ "La chiave da utilizzare per distinguere tra due controller diversi, " +#~ "predefinito \"deployment\". Rilevante soltanto quando --image è " +#~ "specificato, altrimenti ignorato" + +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "Il nome del generatore API da utilizzare, si veda http://kubernetes.io/" +#~ "docs/user-guide/kubectl-conventions/#generators per un elenco." + +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "" +#~ "Il nome del generatore API da utilizzare. Attualmente c'è solo 1 " +#~ "generatore." + +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "Il nome del generatore da utilizzare per la creazione di un servizio. " +#~ "Utilizzato solo se --expose è true" + +#~ msgid "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." +#~ msgstr "" +#~ "La porta che questo container espone. Se --expose è true, questa è " +#~ "anche la porta utilizzata dal servizio creato." + +#~ msgid "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to " +#~ "'OnFailure' a job is created, if set to 'Never', a regular pod is " +#~ "created. For the latter two --replicas must be 1. Default 'Always', for " +#~ "CronJobs `Never`." +#~ msgstr "" +#~ "La politica di riavvio per questo Pod. Valori accettati [Always, " +#~ "OnFailure, Never]. Se impostato su 'Always' viene creato un deployment, " +#~ "se impostato su 'OnFailure' viene creato un job, se impostato su 'Never', " +#~ "viene creato un pod regolare. Per gli ultimi due, --replicas deve essere 1. " +#~ "Predefinito 'Always', per i CronJob `Never`." + +#~ msgid "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." +#~ msgstr "" +#~ "Tipo per questo servizio: ClusterIP, NodePort o LoadBalancer. Il " +#~ "predefinito è 'ClusterIP'."
+ +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "Annulla singolo valore in un file kubeconfig" + +#~ msgid "Update field(s) of a resource using strategic merge patch" +#~ msgstr "Aggiornare campo/i risorsa utilizzando merge patch strategici" + +#~ msgid "Update image of a pod template" +#~ msgstr "Aggiorna immagine di un pod template" + +#~ msgid "" +#~ "View latest last-applied-configuration annotations of a resource/object" +#~ msgstr "" +#~ "Visualizza ultime annotazioni dell'ultima configurazione applicata per " +#~ "risorsa/oggetto" + +#~ msgid "external name of service" +#~ msgstr "nome esterno del servizio" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..f917b6c5bf8d5101b436c689223441c1899c7ada GIT binary patch literal 19210 zcmeI33v6UpdB@K-kg{$bEx-Kv1HFG))U63MDj;rEOFwkfc=$RRyT3T4+lp1RCuopdukfAoZ#K zzH`pKGxpk>rL?Ik%_wL8zBA|Xo$vjfd-DBrU-yvWQ=wf(n>bCW`@s*M&OdxU`v#>B zgO7pFftPJm>IU#{z!~tY9{W58mNqE$8Spvqv@?_{^81(1RO)TuOW-wN&slbzcYtr< z`Q6~_>Hh)nPM(jPtyBbl58S}_yWgnPL7pehQ7Q%h9Q+Hu-+r!AJ9z$&^OU*^y#9Rp zf)9Zn_yh2HaMuM&eH~O6D)lVz4e;$?<07T@gO7r5244Xi;5iptI`0JwJbw+`3jP=z z1mALrQY@)%2d@Hsun7K!dwvld;Q7Z6FMX4Jo&bdp9|C2)&w?`k1#l<$BlrE)mnzlA z^EfE|5|F9XCqWH91#SYr1+o?OGw^Qk421V6SOXshH!>KK)ng!2tFM4pg5Lr8PyH+Z zuw`{ILJ)opfC=~@xC=b}tx8=Djyr6C*Yf-rcn|mw;AU{QR%#1)80-a~24&s91AiU- z2`KzN|8h(B5O@X8C6J%gy`bp9li>B>U%KZFFiZBWLFqpPE`jd_Zv}q{%KTUHqYvH= z9s|Dwj)A|+W<9V4eiIynh`$EUVUXzg4?)@ge}co{g%B_NnFZOJYJxKVBcSl%PeD{l z{WBPY8xVr%K?({V?gKx#0euG#^Slk=J_mjg6npWRWCna3l;@v=VlP|!t-U;qvWXr| zFd17@9{{fbKMe}Ke+vqqehP}coQ;y*$@2v;KLSHg?B)C5L7snrvZmnvt=3+?1@7Sa zliRGld>h=&b00$Wz!ZEQ{4V%)@Hels_VTXnR(=~1&VIh%42u5T3yQsb4iq{s9kTW^ z4YHK_Fevu&hv0eOF;MjVUqIRCl@KG(GoaYZhe46UQHMVPg%4N5EJ#sPAVbwhz&pS{ z14ZvHW{}v+Zcyk-!ArmgK@C0)A`{Mrg8JU;~P0yplodNSeg5C|*P6W~4IKY@r&P10HD{t$>NsiUCS z%in{dZ$AZ5RdvybrF$4erBwyg-~*uerKdnxul^Afy*O{w(l-cR!Se*T1lGY@!B;?; z{|1c12TR~F@UKArQ%7&M`}!I9O`iAPV(sM$24BYWE1<~hjpJ5MJ3x_h6O?^C1Iqm0 z0a0!B@1XSGwAI((b0or)(kr_agp+ z4MsllB}kM%(0`C&0J6zcCkMFQ|dh_dalr_Ac5@ zvUDDsid zv;~FgsM~07rR}4MU1y)uvp>N1(XOMthlY77Tu1)d>;8HJcqQ%EXsDPvgO+{v@W&MG zDq0Us^hot=+^F>xdS)S7)HOeigHoa^QLMdcRfkd8PfUjcjXA%RR&^TH&9^=44%73& zK|i#eWa`x;_0*oxvG+~woE(#-uh6&nsZJMsU5-kPnjfZK8bqOvD(1`BPohR#^0gP1 zb&x_?;8)8+QZ}uBs9yDgP}d{AKKX>6PU?Oss01ZDjV$8nY>9Q|EXtT-DT1&d^y9=5 zJsk&Osw-7*KG8wA;KxDgm-SrJaIF;8YS7zBWmK14tr#qOE>#0|Y&Mtp@j*Y#_fqy9Npwe~iBuEA%je0$bg`Az!!BNb#=BrFk`$f0S@~m9-Qa{Ou=Nm%= zMgi@rdNIH`%^@B!LnvZkl9u^V6jnIqc2LxNLb&3Ex{>(05NT_NP{4@Nf)~#FGSbNmbFPl9RWC^kUcHXnDT@K1i;dViTrWf@ z=1&x}sS`hi7xVaBVGtTuvioLGiaScI?-UIy8r?(AI8uB>?lVrZ)|@X>H|k~Bi{U14 zG-}K*=-$k!3yDo#sOTGHg&TYIBAwz!*w0$RDv`7qF7tR{Q)4MO&$<^|%0xoZ-22ga zk&J}5kzAJkVuw6Mcx7E{wtbFt&&Vj*L2dDOc3f-X)nd)3cRv)Iyw_O7^~e;0l~#>m zy=)Vq9y^r!ap+a2pdVRTWm->(@D6MCkA35IO1KJSjFAvu%&dBJYDRnIG7=SWCaLe0 z5qQtWC`2sc6Z`ezLV!vbLnKomcQ9&Ia>~>|MzNVTUnC=wu$V4*DT@UddQ_=s0(dHJ z4d+DQH8>||$4*sedl~|%FqP*3w5^dt>V1Tje1$i>L3M!EKJ{Ph&$U#GAf|_Vl z+N}Gzbj|3y>Yi9M-oU!f_n7%A4g5>lC*)TXmeqV#snA5^m3MhnFD#*VnE-NeSLYHl zXq6&m{i+O}%LXH9x)YGnQhj*awbyMQ+CH#-AX{{_QLF1xmIGL>;OoRML9}&xE0?l; z3PD(jbeYjTj{K9)v|0GDIgK_u&KJxYC!ZObt>-M{@nv|Oc{*t@i6we#5(%Rtr?L$e zV_@4(N%pK*%)UtN@N`70+*ZxR>rf*DbJ48k7YlVv;sQ^*U!^mrBaygLP->bIW>-co;T{lT8M77|} zTOr+sp2eY064Ym$vdQoqGjmUG6D?sbGfCvd3>hv?+$0M9j%FJcB>f>*l4LOvrW;jo zV?!at&?lp%z?%1|a!g##yw&(LG6-Qtl`PmMp%Lj?j0?^QYmaha*QA0%A^(L@STOAE zmvn+J&<{gxK8@a{jF8j~_nb<_ 
zCk`_S=0iysMA=C7)Xqe{(M>8$9W91>#JU1_iWJ4zU`g*(6#BhppPuerAP|zdC%yb^ znT!00PnkPs_C8imQ?WoJ4w4qLrl^Q1AXR%6I*42h6_z2g$6T8ye2*ovQ_s=YHG~Vks)8~3c|ARB_Xe>mSK-e3r-w#^4`CAjiW}Q zdec=yQsHEAB*;>dvNi^nbB}18yVKc;8TQAX)Ao0{=Uy+PZg;gqjbz1N89(gl(Fk`b zHat&u5;sri-jI~Oqnt?^b1rk}?0QPcy&A=3N*ouV=jDWpzjoj9lL=HNRbOX-Js4PL z0EyAbjbPMIN^yW5qUWMi_#(-r%wa>KMuUXxj4!NdBrXPU7Gni`Vh8o-17>)YSNq!bI{hHHJrHp=_vR zm6>7ru^OXm)rftytj7-dB^;?R%+=8>BoCSC-WkjaWYtUvA2cm(g1OLzHT2VoF)X!?g4=YM*`;)N3l_KIavxCZn zlt}`dnv6Ku!E`zzP}U1K=6+pe1BsfFiVjAHjT)Iit_z}InIh*3i8M&dgpMp6ITvpI zs3d>rR0#zlH!>%XUbU!<*zlT-I7E9n!1OAVS|YOSbX4kcy=m{wBU`s_y}qxgNA2m0 zRd6aJ8RfDqV+s|KcF8ZK0km(n4_IVV!ke&yHO*-e5yodY)>X&ziO!eAoO6B8ouKWv zi6oIkHr9ueg+4V+pj?cwKIy*7bR{^VfvHQ#=xd9*>9I-ehq6dn*Bf&rZ2Ew|LL|k! zFK0;yW#7_)rJ_>6Q`yD^durJsNrrKv77;hiHiV>1w%O5FQJ42A|EjIDU+uiQbA;%H?5e~42~D0<|7tHM zJxH1dqv`q9RXe>nX{NBZVP@jACMa>173-iz{+p|}jWx>kilal+F5&`xeOsO#o5O9< zdOZ4!0pp2tX{=HZ`s9a@oc?pJ*FavfqIG78>0m9%)iL6c+dDG{+wMfbo_*!kk_#So z&6A4P<@fAMCYBc~W@f7PhB-KsrkTSyuT?Js(yu27b$b3KFBrA9N|b6g0Z&K5LYjDXqu~vFbVgY z1oabz2w0C{y|25oIFD-*SHt`io@w zHmN1(y7RnK>n8PwIc3g#imi#v{17DEH0R&Bpltlf9ag@I>ntbpu0j!Em>RdJ=_SYT zw88eT5}KVq-h6ONFCn3FMl!D5O=HpvF~5UbOg#mY zn9>QP*PO+{T3cHgDN}TqJ~Cu5_un$MO&Nq(mh%=O;)Iz{mPVz>rbfb_Y;iUVTbvkH zPBuF-?Pf9YX}xe{G=&`(HTgbA&+S|_GZR*jw1Jm-5%GdLv7(8%nd}Umc~_?~a#mmu zA+nccY_CJ(_00&%KjcZQAxSFL8^ePG1GWB|U*ko{@U|_3lL4~OQ<2HR38aQhDnBV~ zB}Nh3ixWlqYC8U+MJZ=Sl%``HD7Ztt}H#} z-E8UD&EU3cfAQVq3;xpkG3TmSimR#67^^O7Us#qy39|KrL4@+W<_|(HyLa!uCyy7u$ zvZ(OcU%VJln_6a)sO1H6lj%98iAxJj@eB5NE_=zRIQFeI#j_Dk>|5$SWfDzZ;ERPo zqBxip@iePD?O9YzE)dE4lfrnp;0{-YaB-=;gD%WR1!q`EK{C3iXRj{~pfH)t2j#-9 z#(YwkjfV8ty@+3GoWdyb8`6UVg9C+uEro$?df@t@ZQHIMxPD-uXF^_1 z&0;1AQjX#weOt8P2YL^Ooi`kiKZ~4u-gG!R9Pv87b|d}6`33=|Fzb7@A)P-D>Gyj* zljD;xjC>yUIfMzW5fGqZ!T z(A_&3`K1bzLU!+Dbnj$zW~(j%Z*=cu$Sp^kaAg%x_fCdnqWL?3?wyS8oeX=mqkGgjX#cm>c8r3jV-Msto4ErcjV~HPuzR_p)av$=$Cn>_@}(!f;)b@CKW)DKaz3DQ zvS(j@@)^T0M^nZDH+M(ZRnxAsljYxUEj@46gPfIwbT5E(FMyc;OD213@IQ1hWyL*@ z*X{zy3e!0D#Qnz~ea>0jRQC!9SA)$}k?s|c?iCPuGyR%e0cmvxABn18Z7o0DS~}W} zi!RogklMWw(!CMVy%ECYzU+FSxn9z}5z@U8GE0TG?v0R_KJmh_-+5AsmgQ$8&tHD3 zwe)yv>Gy1Pb?g(LYAro*{EzP=70Rx8Wa*x{Dt79ZL{9MLkGGaTF2$UbIA3Zle_m?4 xr3Xp7j(;jElU9$RELwiVkhrqn|H9poHP~_F*<*k3x#Q0~x4IDee|n?jzW^5t0Pp|+ literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po new file mode 100644 index 000000000..d6f4aa2c4 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po @@ -0,0 +1,3365 @@ +# Test translations for unit tests. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR girikuncoro@gmail.com, 2017. 
+# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2020-01-05 09:55+0900\n" +"Last-Translator: Kohei Ota \n" +"Language-Team: \n" +"Language: ja\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.2.4\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. 
Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCreate a namespace with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tCreate a role with single rule." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCreate a service account with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMark node as schedulable." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tMark node as unschedulable." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Create a LoadBalancer service with the specified name." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L61 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L60 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L63 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L106 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L111 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L119 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Approve a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." 
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/attach.go#L64
+#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105
+msgid "Attach to a running container"
+msgstr "Attach to a running container"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L115
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161
+msgid ""
+"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or "
+"set to 'None' to create a headless service."
+msgstr ""
+"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or "
+"set to 'None' to create a headless service."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L55
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101
+msgid "ClusterRole this ClusterRoleBinding should reference"
+msgstr "ClusterRole this ClusterRoleBinding should reference"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L55
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104
+msgid "ClusterRole this RoleBinding should reference"
+msgstr "ClusterRole this RoleBinding should reference"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/convert.go#L67
+#: pkg/kubectl/cmd/convert/convert.go:95
+msgid "Convert config files between different API versions"
+msgstr "Convert config files between different API versions"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cp.go#L64
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106
+msgid "Copy files and directories to and from containers."
+msgstr "Copy files and directories to and from containers."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L214
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94
+msgid "Create a TLS secret"
+msgstr "Create a TLS secret"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83
+msgid "Create a namespace with the specified name"
+msgstr "Create a namespace with the specified name"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L143
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134
+msgid "Create a secret for use with a Docker registry"
+msgstr "Create a secret for use with a Docker registry"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L34
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49
+msgid "Create a secret using specified subcommand"
+msgstr "Create a secret using specified subcommand"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85
+msgid "Create a service account with the specified name"
+msgstr "Create a service account with the specified name"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42
+msgid "Delete the specified cluster from the kubeconfig"
+msgstr "指定したクラスターをkubeconfigから削除する"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42
+msgid "Delete the specified context from the kubeconfig" +msgstr "指定ã—ãŸã‚³ãƒ³ãƒ†ã‚­ã‚¹ãƒˆã‚’kubeconfigã‹ã‚‰å‰Šé™¤ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L121 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Deny a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "1ã¤ã¾ãŸã¯è¤‡æ•°ã®ã‚³ãƒ³ãƒ†ã‚­ã‚¹ãƒˆã‚’記述ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "kubeconfigã§å®šç¾©ã•ã‚ŒãŸã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã‚’表示ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "マージã•ã‚ŒãŸkubeconfigã®è¨­å®šã¾ãŸã¯æŒ‡å®šã•ã‚ŒãŸkubeconfigを表示ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/get.go#L107 +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "1ã¤ã¾ãŸã¯è¤‡æ•°ã®ãƒªã‚½ãƒ¼ã‚¹ã‚’表示ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L176 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparation for maintenance" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Edit a resource on the server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L159 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email for Docker registry" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/exec.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Execute a command in a container" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/portforward.go#L75 +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Forward one or more local ports to a pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/help.go#L36 +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Help about any command" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/annotate.go#L135 +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. 
Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L132 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Mark node as schedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Mark node as unschedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_pause.go#L73 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Mark the provided resource as paused" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L35 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Modify certificate resources." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "kubeconfigを変更ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L110 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/completion.go#L97 +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Output shell completion code for the specified shell (bash or zsh)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L157 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Password for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L226 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." 
+msgstr "Path to PEM encoded public key certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L227 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Path to private key associated with given certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L82 +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Print the client and server version information" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Print the list of flags inherited by all commands" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L86 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Print the logs for a container in a pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_resume.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Resume a paused resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L56 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Role this RoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L94 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Run a particular image on the cluster" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/proxy.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Run a proxy to the Kubernetes API server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L161 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Server location for Docker registry" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Set specific features on objects" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_selector.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "リソースã®ã‚»ãƒ¬ã‚¯ã‚¿ãƒ¼ã‚’設定ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/describe.go#L80 +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Show 
details of a specific resource or group of resources" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_status.go#L57 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Show the status of the rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Synonym for --target-port" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "The image for the container to run." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L116 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"The minimum number or percentage of available pods this budget requires." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L113 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "The name for the newly created object." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L98 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L99 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "The network protocol for the service to be created. Default is 'TCP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"The port that the service should serve on. 
Copied from the resource being " +"exposed, if unspecified" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L131 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L130 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L87 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "The type of secret to create" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_undo.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "ç¾åœ¨ã®ãƒ­ãƒ¼ãƒ«ã‚¢ã‚¦ãƒˆã‚’å–り消ã™" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_resources.go#L101 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Update resource requests/limits on objects with pod templates" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "リソースã®ã‚¢ãƒŽãƒ†ãƒ¼ã‚·ãƒ§ãƒ³ã‚’æ›´æ–°ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L109 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "リソースã®ãƒ©ãƒ™ãƒ«ã‚’æ›´æ–°ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/taint.go#L88 +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Update the taints on one or more nodes" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L155 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Username for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_history.go#L51 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "ロールアウトã®å±¥æ­´ã‚’表示ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L45 +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Where to output the files. 
If empty or '-' uses stdout, otherwise creates a "
+"directory hierarchy in that directory"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88
+msgid "dummy restart flag)"
+msgstr "dummy restart flag)"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cmd.go#L217
+#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227
+msgid "kubectl controls the Kubernetes cluster manager"
+msgstr "kubectl controls the Kubernetes cluster manager"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using "
+#~ "the cluster-admin ClusterRole\n"
+#~ "\t\t kubectl create clusterrolebinding cluster-admin --"
+#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using "
+#~ "the cluster-admin ClusterRole\n"
+#~ "\t\t kubectl create clusterrolebinding cluster-admin --"
+#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin "
+#~ "ClusterRole\n"
+#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --"
+#~ "user=user2 --group=group1"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin "
+#~ "ClusterRole\n"
+#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --"
+#~ "user=user2 --group=group1"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config based on folder bar\n"
+#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config with specified keys "
+#~ "instead of file basenames on disk\n"
+#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/"
+#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n"
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config with key1=config1 and "
+#~ "key2=config2\n"
+#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --"
+#~ "from-literal=key2=config2"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config based on folder bar\n"
+#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config with specified keys "
+#~ "instead of file basenames on disk\n"
+#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/"
+#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n"
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config with key1=config1 and "
+#~ "key2=config2\n"
+#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --"
+#~ "from-literal=key2=config2"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t # If you don't already have a .dockercfg file, you can create a "
+#~ "dockercfg secret directly by using:\n"
+#~ "\t\t kubectl create secret docker-registry my-secret --docker-"
+#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
+#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t # If you don't already have a .dockercfg file, you can create a "
+#~ "dockercfg secret directly by using:\n"
+#~ "\t\t kubectl create secret docker-registry my-secret --docker-"
+#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
+#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Apply the configuration in pod.json to a pod.\n"
+#~ "\t\tkubectl apply -f ./pod.json\n"
+#~ "\n"
+#~ "\t\t# Apply the JSON passed into stdin to a pod.\n"
+#~ "\t\tcat pod.json | kubectl apply -f -\n"
+#~ "\n"
+#~ "\t\t# Note: --prune is still in Alpha\n"
+#~ "\t\t# Apply the configuration in manifest.yaml that matches label "
+#~ "app=nginx and delete all the other resources that are not in the file and "
+#~ "match label app=nginx.\n"
+#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+#~ "\n"
+#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
+#~ "configmaps that are not in the file.\n"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "v1/ConfigMap"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Apply the configuration in pod.json to a pod.\n"
+#~ "\t\tkubectl apply -f ./pod.json\n"
+#~ "\n"
+#~ "\t\t# Apply the JSON passed into stdin to a pod.\n"
+#~ "\t\tcat pod.json | kubectl apply -f -\n"
+#~ "\n"
+#~ "\t\t# Note: --prune is still in Alpha\n"
+#~ "\t\t# Apply the configuration in manifest.yaml that matches label "
+#~ "app=nginx and delete all the other resources that are not in the file and "
+#~ "match label app=nginx.\n"
+#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+#~ "\n"
+#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
+#~ "configmaps that are not in the file.\n"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "v1/ConfigMap"
+
+#, c-format
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 "
+#~ "and 10, no target CPU utilization specified so a default autoscaling "
+#~ "policy will be used:\n"
+#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n"
+#~ "\n"
+#~ "\t\t# Auto scale a replication controller \"foo\", with the number of "
+#~ "pods between 1 and 5, target CPU utilization at 80%:\n"
+#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 "
+#~ "and 10, no target CPU utilization specified so a default autoscaling "
+#~ "policy will be used:\n"
+#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n"
+#~ "\n"
+#~ "\t\t# Auto scale a replication controller \"foo\", with the number of "
+#~ "pods between 1 and 5, target CPU utilization at 80%:\n"
+#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n"
+#~ "\t\tkubectl convert -f pod.yaml\n"
+#~ "\n"
+#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to "
+#~ "the latest version\n"
+#~ "\t\t# and print to stdout in json format.\n"
+#~ "\t\tkubectl convert -f pod.yaml --local -o json\n"
+#~ "\n"
+#~ "\t\t# Convert all files under current directory to latest version and "
+#~ "create them all.\n"
+#~ "\t\tkubectl convert -f . | kubectl create -f -"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n"
+#~ "\t\tkubectl convert -f pod.yaml\n"
+#~ "\n"
+#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to "
+#~ "the latest version\n"
+#~ "\t\t# and print to stdout in json format.\n"
+#~ "\t\tkubectl convert -f pod.yaml --local -o json\n"
+#~ "\n"
+#~ "\t\t# Convert all files under current directory to latest version and "
+#~ "create them all.\n"
+#~ "\t\tkubectl convert -f . 
| kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any 
point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ 
"\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ 
"\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get 
output from running 'date' in ruby-container from pod "
+#~ "123456-7890\n"
+#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n"
+#~ "\n"
+#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-"
+#~ "container from pod 123456-7890\n"
+#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n"
+#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Get output from running pod 123456-7890, using the first container "
+#~ "by default\n"
+#~ "\t\tkubectl attach 123456-7890\n"
+#~ "\n"
+#~ "\t\t# Get output from ruby-container from pod 123456-7890\n"
+#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n"
+#~ "\n"
+#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-"
+#~ "container from pod 123456-7890\n"
+#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n"
+#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n"
+#~ "\n"
+#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n"
+#~ "\t\tkubectl attach rs/nginx\n"
+#~ "\t\t"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Get output from running pod 123456-7890, using the first container "
+#~ "by default\n"
+#~ "\t\tkubectl attach 123456-7890\n"
+#~ "\n"
+#~ "\t\t# Get output from ruby-container from pod 123456-7890\n"
+#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n"
+#~ "\n"
+#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-"
+#~ "container from pod 123456-7890\n"
+#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n"
+#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n"
+#~ "\n"
+#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n"
+#~ "\t\tkubectl attach rs/nginx\n"
+#~ "\t\t"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Install bash completion on a Mac using homebrew\n"
+#~ "\t\tbrew install bash-completion\n"
+#~ "\t\tprintf \"\n"
+#~ "# Bash completion support\n"
+#~ "source $(brew --prefix)/etc/bash_completion\n"
+#~ "\" >> $HOME/.bash_profile\n"
+#~ "\t\tsource $HOME/.bash_profile\n"
+#~ "\n"
+#~ "\t\t# Load the kubectl completion code for bash into the current shell\n"
+#~ "\t\tsource <(kubectl completion bash)\n"
+#~ "\n"
+#~ "\t\t# Write bash completion code to a file and source it from ."
+#~ "bash_profile\n"
+#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n"
+#~ "\t\tprintf \"\n"
+#~ "# Kubectl shell completion\n"
+#~ "source '$HOME/.kube/completion.bash.inc'\n"
+#~ "\" >> $HOME/.bash_profile\n"
+#~ "\t\tsource $HOME/.bash_profile\n"
+#~ "\n"
+#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n"
+#~ "\t\tsource <(kubectl completion zsh)"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Install bash completion on a Mac using homebrew\n"
+#~ "\t\tbrew install bash-completion\n"
+#~ "\t\tprintf \"\n"
+#~ "# Bash completion support\n"
+#~ "source $(brew --prefix)/etc/bash_completion\n"
+#~ "\" >> $HOME/.bash_profile\n"
+#~ "\t\tsource $HOME/.bash_profile\n"
+#~ "\n"
+#~ "\t\t# Load the kubectl completion code for bash into the current shell\n"
+#~ "\t\tsource <(kubectl completion bash)\n"
+#~ "\n"
+#~ "\t\t# Write bash completion code to a file and source it from ."
+#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\t\tkubectl port-forward pod/mypod 5000 " +#~ "6000\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in a pod selected by the deployment\t\tkubectl port-" +#~ "forward deployment/mydeployment 5000 6000\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in a pod selected by the service\t\tkubectl port-" +#~ "forward service/myservice 5000 6000\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\t" +#~ "\tkubectl port-forward pod/mypod 8888:5000\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\t" +#~ "\tkubectl port-forward pod/mypod :5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\t\tkubectl port-forward pod/mypod 5000 " +#~ "6000\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in a pod selected by the deployment\t\tkubectl port-" +#~ "forward deployment/mydeployment 5000 6000\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in a pod selected by the service\t\tkubectl port-" +#~ "forward service/myservice 5000 6000\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\t" +#~ "\tkubectl port-forward pod/mypod 8888:5000\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\t" +#~ "\tkubectl port-forward pod/mypod :5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Partially update a node using 
strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of 
previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. 
the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the 
path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... \n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ...
} }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... \n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo
status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha 
Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." +#~ msgstr "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L68 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L43 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L43 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L49 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create.go#L56 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. 
If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. 
To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. 
Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." + +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " + +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). 
Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. 
The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. 
This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." +#~ msgstr "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." + +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d " +#~ "characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." +#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d " +#~ "characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." + +#~ msgid "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." +#~ msgstr "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." + +#~ msgid "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" +#~ msgstr "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. 
If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." 
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if " +#~ "it exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" +#~ msgstr "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend running nginx', overwriting any existing value.\n" +#~ " kubectl 
annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if " +#~ "it exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Create a deployment with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Create a nodeport service with the specified name." + +#~ msgid "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or " +#~ "specify --all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." +#~ msgstr "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or " +#~ "specify --all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." + +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L136 +#~ msgid "A schedule in the Cron format the job should be run with." +#~ msgstr "A schedule in the Cron format the job should be run with." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L134 +#~ msgid "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." +#~ msgstr "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "ファイルåã¾ãŸã¯æ¨™æº–入力ã§ãƒªã‚½ãƒ¼ã‚¹ã«ã‚³ãƒ³ãƒ•ã‚£ã‚°ã‚’é©ç”¨ã™ã‚‹" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L55 +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "Auto-scale a Deployment, ReplicaSet, or ReplicationController" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L101 +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L43 +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "Create a ClusterRoleBinding for a particular ClusterRole" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L181 +#~ msgid "Create a LoadBalancer service." +#~ msgstr "Create a LoadBalancer service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L124 +#~ msgid "Create a NodePort service." +#~ msgstr "Create a NodePort service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L43 +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "Create a RoleBinding for a particular Role or ClusterRole" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L68 +#~ msgid "Create a clusterIP service." +#~ msgstr "Create a clusterIP service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_configmap.go#L59 +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "Create a configmap from a local file, directory or literal value" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "Create a deployment with the specified name." +#~ msgstr "Create a deployment with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L49 +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "Create a pod disruption budget with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#~ msgid "Create a quota with the specified name." +#~ msgstr "Create a quota with the specified name." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create.go#L56 +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "ファイル名または標準入力でリソースを作成する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L73 +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "Create a secret from a local file, directory or literal value" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L36 +#~ msgid "Create a service using specified subcommand." +#~ msgstr "Create a service using specified subcommand." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L240 +#~ msgid "Create an ExternalName service." +#~ msgstr "Create an ExternalName service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/delete.go#L130 +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" +#~ msgstr "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/stop.go#L58 +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "Deprecated: Gracefully shut down a resource by name or filename" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_node.go#L77 +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "Display Resource (CPU/Memory) usage of nodes" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_pod.go#L79 +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "Display Resource (CPU/Memory) usage of pods" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top.go#L43 +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "Display Resource (CPU/Memory) usage." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo.go#L49 +#~ msgid "Display cluster info" +#~ msgstr "クラスターの情報を表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48 +#~ msgid "Displays the current-context" +#~ msgstr "current-contextを表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/explain.go#L50 +#~ msgid "Documentation of resources" +#~ msgstr "リソースの説明を表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L37 +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "Dump lots of relevant info for debugging and diagnosis" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L102 +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L105 +#~ msgid "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)."
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L98 +#~ msgid "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" +#~ msgstr "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout.go#L46 +#~ msgid "Manage a deployment rollout" +#~ msgstr "Manage a deployment rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L115 +#~ msgid "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" +#~ msgstr "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L84 +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "Perform a rolling update of the given ReplicationController" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/replace.go#L70 +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "Replace a resource by filename or stdin" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L71 +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" +#~ msgstr "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67 +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "kubeconfigにクラスターエントリを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57 +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "kubeconfigにコンテキストエントリを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103 +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "kubeconfigにユーザーエントリを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59 +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "kubeconfigに個別の変数を設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48 +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "kubeconfigにcurrent-contextを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L87 +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" +#~ msgstr "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L100 +#~ msgid "" +#~ "The key to use to differentiate between two different controllers, " +#~ "default 'deployment'. Only relevant when --image is specified, ignored " +#~ "otherwise" +#~ msgstr "" +#~ "The key to use to differentiate between two different controllers, " +#~ "default 'deployment'. Only relevant when --image is specified, ignored " +#~ "otherwise" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L113 +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L66 +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "" +#~ "The name of the API generator to use. Currently there is only 1 generator." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L133 +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L121 +#~ msgid "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." +#~ msgstr "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L128 +#~ msgid "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to " +#~ "'OnFailure' a job is created, if set to 'Never', a regular pod is " +#~ "created. 
For the latter two --replicas must be 1. Default 'Always', for " +#~ "CronJobs `Never`." +#~ msgstr "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to " +#~ "'OnFailure' a job is created, if set to 'Never', a regular pod is " +#~ "created. For the latter two --replicas must be 1. Default 'Always', for " +#~ "CronJobs `Never`." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L101 +#~ msgid "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." +#~ msgstr "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47 +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "kubeconfigから変数を個別に削除する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/patch.go#L91 +#~ msgid "Update field(s) of a resource using strategic merge patch" +#~ msgstr "Update field(s) of a resource using strategic merge patch" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_image.go#L94 +#~ msgid "Update image of a pod template" +#~ msgstr "Update image of a pod template" + +#~ msgid "" +#~ "View latest last-applied-configuration annotations of a resource/object" +#~ msgstr "" +#~ "View latest last-applied-configuration annotations of a resource/object" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L253 +#~ msgid "external name of service" +#~ msgstr "external name of service" + +#~ msgid "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgid_plural "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgstr[0] "" +#~ "watchは単一リソース及びリソースコレクションのみサポートしています- %d個の" +#~ "リソースが見つかりました" +#~ msgstr[1] "" +#~ "watchは単一リソース及びリソースコレクションのみサポートしています- %d個の" +#~ "リソースが見つかりました" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..70398367b1e135574b96ee3cb3b88e7651720156 GIT binary patch literal 1274 [base85-encoded binary data omitted] literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.po new file mode 100644 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.po +msgid "" +msgstr "" +"Language-Team: \n" +"Language: ko_KR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.0.6\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: 
staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "kubeconfig에서 지정된 클러스터를 삭제합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "kubeconfig에서 지정된 컨텍스트를 삭제합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "하나 또는 여러 컨텍스트를 설명합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "kubeconfig에 정의된 클러스터를 표시합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "병합된 kubeconfig 설정 또는 지정된 kubeconfig 파일을 표시합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "kubeconfig 파일을 수정합니다" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "자원에 대한 주석을 업데이트합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go#L98 +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "구성을 파일 이름 또는 stdin에 의한 자원에 적용합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48 +#~ msgid "Displays the current-context" +#~ msgstr "현재-컨텍스트를 표시합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67 +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "kubeconfig에서 클러스터 항목을 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57 +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "kubeconfig에서 컨텍스트 항목을 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103 +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "kubeconfig에서 사용자 항목을 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59 +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "kubeconfig 파일에서 단일값을 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48 +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "kubeconfig 파일에서 현재-컨텍스트를 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47 +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "kubeconfig 파일에서 단일값 설정을 해제합니다" + +#~ msgid "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgid_plural "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d 
resources were found" +#~ msgstr[0] "" +#~ "watch는 단일 리소스와 리소스 모음만을 지원합니다 - %d 개 자원을 발견하였습" +#~ "니다" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..e3c52ec8a6385909fecbd80b18650921e47976a6 GIT binary patch literal 19980 [base85-encoded binary data omitted] literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po new file mode 100644 index 000000000..dad451281 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po @@ -0,0 +1,3250 @@ +# Brazilian Portuguese translation. 
+# Copyright (C) 2020 +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR ctadeu@gmail.com, 2020. +# +msgid "" +msgstr "" +"Project-Id-Version: \n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2020-12-11 17:03+0100\n" +"Last-Translator: Carlos Panato \n" +"Language-Team: \n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.4.2\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" +"X-Poedit-KeywordsList: \n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Mostra as métricas para todos os nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Mostra as métricas para um node específico\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Mostra a documentação do recurso e seus campos\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Mostra a documentação de um campo específico de um recurso\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Mostra as opções herdadas por todos os comandos\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Imprime a versão do cliente e do servidor para o contexto atual\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Mostra as versões de API suportadas\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Mostra as métricas para todos os pods no namespace default\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Mostra as métricas para todos os pods em um dado namespace\n" +"\t\tkubectl top pod —namespace=NAMESPACE\n" +"\n" +"\t\t# Mostra as métricas para um dado pod e seus containers\n" +"\t\tkubectl top pod POD_NAME —containers\n" +"\n" +"\t\t# Mostra as métricas para os pods definidos pelo label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. 
Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConverte os arquivos de configuração para diferentes versões de API. " +"Ambos os formatos YAML\n" +"\t\te JSON são aceitos.\n" +"\n" +"\t\tO comando recebe o nome do arquivo, diretório ou URL como entrada, e " +"converte no formato\n" +"\t\tpara a versão especificada pelo parâmetro --output-version. Se a versão " +"desejada não é especificada ou \n" +"\t\tnão é suportada, converte para a última versão disponível.\n" +"\n" +"\t\tA saída padrão é no formato YAML. Pode ser utilizada a opção -o\n" +"\t\tpara mudar o formato de saída." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCria um namespace com um nome especificado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tCria uma role com uma única regra." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCria uma conta de serviço com um nome especificado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tRemove a restrição de execução de workloads no node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tAplica a restrição de execução de workloads no node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\tDefine a annotation last-applied-configuration configurando para ser " +"igual ao conteúdo do arquivo.\n" +"\t\tIsto resulta no last-applied-configuration ser atualizado quando o " +"'kubectl apply -f ' executa,\n" +"\t\tnão atualizando as outras partes do objeto."
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Cria um novo namespace chamado my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Cria um novo service account chamado my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\tCria um serviço do tipo ExternalName com o nome especificado.\n" +"\n" +"\tServiço ExternalName referencia um endereço externo de DNS ao invés de\n" +"\tapenas pods, o que permite aos desenvolvedores de aplicações referenciar " +"serviços\n" +"\tque existem fora da plataforma, em outros clusters ou localmente." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp provê ajuda para qualquer comando na aplicação.\n" +"\tDigite simplesmente kubectl help [caminho do comando] para detalhes " +"completos." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Cria um novo serviço do tipo LoadBalancer chamado my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Coleta o estado corrente do cluster e exibe no stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Coleta o estado corrente do cluster para /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Coleta informação de todos os namespaces para stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Coleta o conjunto especificado de namespaces para /path/to/cluster-" +"state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Cria um serviço do tipo LoadBalancer com o nome especificado."
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"Lista de valores delimitados por vírgulas para um conjunto de escopos de " +"quota que devem corresponder para cada objeto rastreado pela quota." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"Lista de valores delimitados por vírgulas ajusta os pares resource=quantity " +"que define um limite rigído." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"Um seletor de label a ser usado para o PDB. Apenas seletores baseado em " +"igualdade são suportados." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"Um seletor de label para ser utilizado neste serviço. Apenas seletores " +"baseado em igualdade são suportados. Se vazio (por padrão) o seletor do " +"replication controller ou replica set será utilizado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Um IP externo adicional (não gerenciado pelo Kubernetes) para ser usado no " +"serviço. Se este IP for roteado para um nó, o serviço pode ser acessado por " +"este IP além de seu IP de serviço gerado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"Uma substituição inline JSON para o objeto gerado. Se não estiver vazio, ele " +"será usado para substituir o objeto gerado. Requer que o objeto forneça um " +"campo apiVersion válido." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Aprova uma solicitação de assinatura de certificado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Atribuir o seu próprio ClusterIP ou configura para 'None' para um serviço " +"'headless' (sem loadbalancing)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Se conecta a um container em execução" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP que será atribuído ao serviço. Deixe vazio para auto atribuição, " +"ou configure para 'None' para criar um serviço headless." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole que esse ClusterRoleBinding deve referenciar" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole que esse RoleBinding deve referenciar" + +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Converte arquivos de configuração entre versões de API diferentes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Copia arquivos e diretórios de e para containers." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Cria uma secret do tipo TLS" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Cria a namespace com um nome especificado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Cria um secret para ser utilizado com o Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Cria um secret utilizando um sub-comando especificado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Cria uma conta de serviço com um nome especificado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Apaga o cluster especificado do kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Apaga o contexto especificado do kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Rejeita o pedido de assinatura do certificado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Mostra um ou mais contextos" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Mostra os clusters definidos no kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "" +"Mostra a configuração do kubeconfig mescladas ou um arquivo kubeconfig " +"especificado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Mostra um ou mais recursos" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drenar o node para preparação de manutenção" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Edita um recurso no servidor" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email para o Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Executa um comando em um container" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or 
more local ports to a pod" +msgstr "Encaminhar uma ou mais portas locais para um pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Ajuda sobre qualquer comando" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"Se não vazio, configura a afinidade de sessão para o serviço; valores " +"válidos: 'None', 'ClientIP'" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"Se não estiver vazio, a atualização dos annotation só terá êxito se esta for " +"a versão do recurso atual para o objeto. Válido apenas ao especificar um " +"único recurso." + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"Se não estiver vazio, a atualização dos labels só terá êxito se esta for a " +"versão do recurso atual para o objeto. Válido apenas ao especificar um único " +"recurso." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Marca o node como agendável" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Marca o node como não agendável" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Marca o recurso fornecido como pausado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Edita o certificado dos recursos." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Edita o arquivo kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Nome ou o número da porta em um container em que o serviço deve direcionar o " +"tráfego. Opcional." + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Apenas retorna os logs após uma data específica (RFC3339). Padrão para todos " +"os logs. Apenas um since-time / since deve ser utilizado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Saída do autocomplete de shell para um Shell específico (bash ou zsh)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Senha para a autenticação do registro do Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Caminho para a chave pública em formato PEM." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." 
+msgstr "Caminho para a chave private associada a um certificado fornecido." + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Pré-condição para a versão do recurso. Requer que a versão do recurso atual " +"corresponda a este valor para escalar." + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Mostra a informação de versão do cliente e do servidor" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Mostra a lista de opções herdadas por todos os comandos" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Mostra os logs de um container em um pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Retoma um recurso pausado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Role que a RoleBinding deve referenciar" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Executa uma imagem específica no cluster" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Executa um proxy para o servidor de API do Kubernetes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Localização do servidor para o registro do Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Define funcionalidades específicas em objetos" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "Define um seletor em um recurso" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Mostra os detalhes de um recurso específico ou de um grupo de recursos" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Mostra o status de uma atualização dinamica" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Sinônimo para —target-port" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "A imagem para o container executar." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"A política de obtenção de imagens. Se deixado em branco, este valor não será " +"especificado pelo cliente e será utilizado o padrão do servidor" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"Um número mínimo ou porcentagem de pods disponíveis que este budget requer." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "O nome para o objeto recém criado." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"O nome para o objeto recém criado. Se não especificado, o nome do input " +"resource será utilizado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"O nome do gerador de API a ser usado. Existem 2 geradores: 'service/v1' e " +"'service/v2'. A única diferença entre eles é que a porta de serviço na v1 é " +"chamada de 'default', enquanto ela é deixada sem nome na v2. O padrão é " +"'service/v2'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "O protocolo de rede para o serviço ser criado. Padrão é 'TCP'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"A porta para que o serviço possa servir. Copiado do recurso sendo exposto, " +"se não especificado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"O recurso requerido para este container. Por exemplo, 'cpu=200m," +"memory=512Mi'. Observe que os componentes do lado do servidor podem " +"atribuir limites, dependendo da configuração do servidor, como intervalos de " +"limite." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"O recurso requerido de requests para este container. Por exemplo, 'cpu=100m," +"memory=256Mi'. Observe que os componentes do lado do servidor podem " +"atribuir requests, dependendo da configuração do servidor, como intervalos " +"de limite." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "O tipo de segredo para criar" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "Desfazer o rollout anterior" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "" +"Atualizar os recursos de request/limites em um objeto com template de pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Atualizar as anotações de um recurso" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "Atualizar os labels de um recurso" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Atualizar o taints de um ou mais nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Nome de usuário para a autenticação no Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "Visualizar o histórico de rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Onde colocar os arquivos de saída. Se vazio ou '-' usa o stdout do terminal, " +"caso contrário, cria uma hierarquia no diretório configurado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "dummy restart flag)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl controla o gerenciador de cluster do Kubernetes" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using " +#~ "the cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Criar o ClusterRoleBinding para user1, user2, e group1 utilizando " +#~ "o ClusterRole cluster-admin\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin —user=user1 —user=user2 —group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Criar uma RoleBinding para user1, user2, e group1 utilizando o " +#~ "admin ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin —user=user1 —" +#~ "user=user2 —group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys " +#~ "instead of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap 
my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Criar um novo configmap com o nome de my-config baseado na pasta " +#~ "bar\n" +#~ "\t\t kubectl create configmap my-config —from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Cria um novo configmap com o nome my-config, onde cada chave " +#~ "possui o valor especificado em um arquivo distinto no disco\n" +#~ "\t\t kubectl create configmap my-config —from-file=key1=/path/to/bar/" +#~ "file1.txt —from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Criar um novo configmap com o nome de my-config com key1=config1 " +#~ "e key2=config2\n" +#~ "\t\t kubectl create configmap my-config —from-literal=key1=config1 —from-" +#~ "literal=key2=config2" + +#~ msgid "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Se você ainda não tem o arquivo .dockercfg, você pode gerar " +#~ "diretamente o dockercfg secret utilizando o comando:\n" +#~ "\t\t kubectl create secret docker-registry my-secret —docker-" +#~ "server=DOCKER_REGISTRY_SERVER —docker-username=DOCKER_USER —docker-" +#~ "password=DOCKER_PASSWORD —docker-email=DOCKER_EMAIL" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aplica a configuração do arquivo pod.json a um pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Aplica o JSON recebido via stdin para um pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Nota: —prune ainda está em Alpha\n" +#~ "\t\t# Aplica a configuração do manifest.yaml que conter o label app=nginx " +#~ "e remove todos os outros recursos que não estejam no arquivo e não " +#~ "contenham o label.\n" +#~ "\t\tkubectl apply —prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Aplica a configuração do manifest.yaml e remove todos os outros " +#~ "configmaps que não estão no arquivo.\n" +#~ "\t\tkubectl apply —prune -f manifest.yaml —all —prune-whitelist=core/v1/" +#~ "ConfigMap" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, no target CPU utilization specified so a default autoscaling " +#~ "policy will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of " +#~ "pods between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Escala 
automaticamente um deployment \"foo\", com o número de pods " +#~ "entre 2 e 10, sem especificar a utilização da CPU o padrão da política de " +#~ "autoscaling será utilizado:\n" +#~ "\t\tkubectl autoscale deployment foo —min=2 —max=10\n" +#~ "\n" +#~ "\t\t# Escala automaticamente um replication controller \"foo\", com o " +#~ "número de pods entre 1 and 5, e definindo a utilização da CPU em 80%:\n" +#~ "\t\tkubectl autoscale rc foo —max=5 —cpu-percent=80" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" +#~ msgstr "" +#~ "\n" +#~ "\t\t# converte o arquivo 'pod.yaml' para a versão mais atual e imprime a " +#~ "saída para o stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Converte o estado atual do recurso especificado pelo 'pod.yaml' " +#~ "para a versão mais atual\n" +#~ "\t\t# e imprime a saída para o stdout no formato json.\n" +#~ "\t\tkubectl convert -f pod.yaml —local -o json\n" +#~ "\n" +#~ "\t\t# Converte todos os arquivos dentro do diretório atual para a versão " +#~ "mais recente e cria todos.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um ClusterRole com o nome de \"pod-reader\" que permite o " +#~ "usuário realizar \"get\", \"watch\" e \"list\" em pods\n" +#~ "\t\tkubectl create clusterrole pod-reader —verb=get,list,watch —" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Cria a ClusterRole com o nome de \"pod-reader\" com um ResourceName " +#~ "especificado\n" +#~ "\t\tkubectl create clusterrole pod-reader —verb=get,list,watch —" +#~ "resource=pods —resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria uma Role com o nome de \"pod-reader\" que permite o usuário " +#~ "realizar \"get\", \"watch\" e \"list\" em pods\n" +#~ "\t\tkubectl create role pod-reader —verb=get —verb=list —verb=watch —" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Cria uma Role com o nome de \"pod-reader\" com um ResourceName " +#~ "especificado\n" +#~ "\t\tkubectl create role pod-reader —verb=get —verg=list —verb=watch —" +#~ "resource=pods —resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ 
"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um novo resourcequota com o nome de my-quota\n" +#~ "\t\tkubectl create quota my-quota —hard=cpu=1,memory=1G,pods=2,services=3," +#~ "replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Cria um novo resourcequota com o nome de best-effort\n" +#~ "\t\tkubectl create quota best-effort —hard=pods=100 —scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um pod disruption budget com o nome de my-pdb que irá " +#~ "selecionar todos os pods com o label app=rails\n" +#~ "\t\t# e requer que pelo menos um deles esteja disponível a qualquer " +#~ "momento.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb —selector=app=rails —min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Cria um pod disruption budget com o nome de my-pdb que irá " +#~ "selecionar todos os pods com o label app=nginx\n" +#~ "\t\t# e requer pelo menos que metade dos pods selecionados estejam " +#~ "disponíveis em qualquer momento.\n" +#~ "\t\tkubectl create pdb my-pdb —selector=app=nginx —min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um pod utilizando o arquivo pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Cria um pod utilizando o JSON recebido via stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edita o conteúdo do arquivo docker-registry.yaml em JSON utilizando " +#~ "o formato da API v1, criando o recurso com o conteúdo editado.\n" +#~ "\t\tkubectl create -f docker-registry.yaml —edit —output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ 
"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um serviço para um nginx replicado, que escuta na porta 80 e " +#~ "conecta na porta 8000 dos containers.\n" +#~ "\\t\tkubectl expose rc nginx —port=80 —target-port=8000\n" +#~ "\n" +#~ "\t\t# Cria um serviço para um replication controller identificado por " +#~ "tipo e com o nome especificado em \"nginx-controller.yaml\", que escuta " +#~ "na porta 80 e conecta na porta 8000 dos containers.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml —port=80 —target-port=8000\n" +#~ "\n" +#~ "\t\t# Cria um serviço para um pod valid-pod, que escuta na porta 444 com " +#~ "o nome \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod —port=444 —name=frontend\n" +#~ "\n" +#~ "\t\t# Cria um segundo serviço baseado no serviço acima, expondo a porta " +#~ "8443 do container como porta 443 e com nome \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx —port=443 —target-port=8443 —name=nginx-" +#~ "https\n" +#~ "\n" +#~ "\t\t# Cria um serviço para uma aplicação streaming replicada na porta " +#~ "4100 com trafico balanceado UDP e nome 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer —port=4100 —protocol=udp —name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Cria um serviço para um nginx replicado usando o replica set, que " +#~ "escuta na porta 80 e conecta na porta 8000 dos containers.\n" +#~ "\t\tkubectl expose rs nginx —port=80 —target-port=8000\n" +#~ "\n" +#~ "\t\t# Cria um serviço para um deployment nginx, que escuta na porta 80 e " +#~ "conecta na porta 8000 dos containers.\n" +#~ "\t\tkubectl expose deployment nginx —port=80 —target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ 
msgstr "" +#~ "\n" +#~ "\t\t# Remove um pod usando o tipo e nome especificado no arquivo pod." +#~ "json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Remove um pod baseado no tipo e nome no JSON passado na entrada de " +#~ "comando(stdin).\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Remove pods e serviços com os nomes \"baz\" e \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Remove pods e serviços com label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Remove um pod com um mínimo de delay\n" +#~ "\t\tkubectl delete pod foo —now\n" +#~ "\n" +#~ "\t\t# Força a remoção de um pod em um node morto\n" +#~ "\t\tkubectl delete pod foo —grace-period=0 —force\n" +#~ "\n" +#~ "\t\t# Remove todos os pods\n" +#~ "\t\tkubectl delete pods —all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Descreve um node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Descreve um pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Descreve um pod identificado pelo tipo e nome no arquivo \"pod.json" +#~ "\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Descreve todos os pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Descreve os pods com label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Descreve todos os pods gerenciados pelo replication controller " +#~ "'frontend' (rc-created pods\n" +#~ "\t\t# tem o nome de rc como prefixo no nome do pod).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Drena o node \"foo\", mesmo se os pods não são gerenciados por um " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet ou StatefulSet.\n" +#~ "\t\t$ kubectl drain foo —force\n" +#~ "\n" +#~ "\t\t# Mesmo que acima, mas é interrompido se os pods não são gerenciados " +#~ "por um ReplicationController, ReplicaSet, Job, DaemonSet ou StatefulSet, " +#~ "e tem espera por 15 minutos.\n" +#~ "\t\t$ kubectl drain foo —grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ 
"\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Edita o serviço chamado 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Usa um editor alternativo\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edita o Job 'myjob' em JSON utilizando o format da API v1:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edita o deployment 'mydeployment' em YAML e salva a configuração " +#~ "modificada em sua annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml —save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Pega a saída de execução do comando 'date' do pod 123456-7890, " +#~ "usando o primeiro container por padrão\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Pega a saída de execução do comando 'date' no ruby-container do pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Troca para raw terminal mode, envia stdin para o 'bash' no ruby-" +#~ "container do pod 123456-7890\n" +#~ "\t\t# e envia stdout/stderr do 'bash' de volta para o cliente\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t — bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Pega a saída do pod em execução 123456-7890, utilizando o primeiro " +#~ "container por padrão\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Pega a saída do ruby-container do pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Troca para raw terminal mode, envia stdin para o 'bash' no ruby-" +#~ "container do pod 123456-7890\n" +#~ "\t\t# e envia stdout/stderr do 'bash' de volta para o cliente\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Pega a saída do primeiro pod de um ReplicaSet chamado nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac using homebrew\n" +#~ "\t\tbrew 
install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source if from ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Instala o auto completar do bash no Mac utilizando homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew —prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Carrega o código de auto complentar do kubectl para o bash no shell " +#~ "corrente\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Escreve o código de autocompletar do bash no arquivo de perfil e " +#~ "faz o source se é para o .bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Carrega o código de auto complentar do kubectl para zsh[1] no shell " +#~ "em utilização\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Lista todos os pods no formato de saída ps.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# Lista todos os pods no formato de saída ps com mais informações " +#~ "(como o nome do node).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# Lista um único replication controller com o nome especificado no " +#~ "formato de saída ps\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# Lista um único pod e usa o formato de saída JSON.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Lista o pod identificado com o tipo e nome especificado no \"pod." +#~ "yaml\" e usa o formato de saída JSON.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Mostra apenas em que estágio o pod especificado está.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 —template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# Lista todos os replication controllers e services juntos no formato " +#~ "de saída ps.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# Lista um ou mais recursos pelo seu tipo e nomes.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Lista todos os recursos e com tipos diferentes.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Escuta nas portas locais 5000 e 6000, e redireciona os dados de/" +#~ "para as portas 5000 e 6000 no pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Escuta na porta local 8888 localmente, e redireciona para a porta " +#~ "5000 no pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Escuta uma porta local aleatória, e redireciona para a porta 5000 " +#~ "no pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Escuta uma porta local aleatória, e redireciona para a porta 5000 " +#~ "no pod\\n\n" +#~ "\t\tkubectl port-forward mypod 0:5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Remove a restrição de execução de Pods no node \"foo\".\n" +#~ "\t\t$ kubectl uncordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Restringe a execução de novos Pods no node \"foo\".\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# 
Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Atualiza parcialmente um node utilizando strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Atualiza parcialmente um node identificado pelo tipo e nome no " +#~ "arquivo \"node.json\" utilizando strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Atualiza uma imagem em um container; spec.containers[*].name é " +#~ "obrigatório pois é uma merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Atualiza uma imagem em um container utilizando o json patch com " +#~ "positional arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Mostra o endereço do servidor de gerenciamento e dos serviços do " +#~ "cluster\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Substitui um pod utilizando os dados contidos em pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Troca um pod com base no JSON fornecido no stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Atualiza uma versão de imagem (tag) de um pod com um único " +#~ "container para v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Força a troca, removendo e recriando o recurso\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin 
streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Retorna os logs do pod nginx com um único container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Retorna os logs dos pods definidos pelo label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Retorna os logs do container ruby finalizado do pod web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Começa o streaming de logs de um ruby container no pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Mostra apenas as 20 linhas mais recentes de saída do pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Mostra todos os logs do pod nginx escritos na última hora\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Retorna os logs do primeiro container do job chamado hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Retorna os logs do container nginx-1 de um deployment chamado " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. 
the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Executa um proxy para o apiserver do kubernetes na porta 8011, " +#~ "servindo um conteúdo estático do caminho ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Executa um proxy para o apiserver do kubernetes em uma porta local " +#~ "arbitrária.\n" +#~ "\t\t# A porta escolhida para o servidor será exibida no " +#~ "stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Executa um proxy para o apiserver do kubernetes, mudando o prefixo " +#~ "da api para k8s-api\n" +#~ "\t\t# Com isso a api dos pods estará disponível em localhost:8001/k8s-" +#~ "api/v1/pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Escala um replicaset chamado 'foo' para 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Escala um recurso identificado pelo tipo e nome especificado no " +#~ "arquivo \"foo.yaml\" para 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# Se um deployment chamado mysql tem tamanho 2, escala o mysql para " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Escala múltiplos replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Escala um Job chamado 'cron' para 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ajusta o last-applied-configuration de um recurso para corresponder " +#~ "ao conteúdo de um arquivo.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Executa o set-last-applied em todos os arquivos de configuração no " +#~ "diretório.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Ajusta o last-applied-configuration de um recurso para corresponder " +#~ "ao conteúdo de um arquivo, criando a annotation se ela ainda não " +#~ "existir.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop 
replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Termina o replicationcontroller foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Para os pods e serviços com o label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Termina o serviço definido no arquivo service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Termina todos os recursos no caminho do diretório path/to/" +#~ "resources\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Start the perl container to compute Ï€ to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute Ï€ to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Inicia uma única instância de nginx.\n" +#~ "\t\tkubectl run nginx —image=nginx\n" +#~ "\n" +#~ "\t\t# Inicia uma única instância do hazelcast e expõe a porta 5701 do " +#~ "container.\n" +#~ "\t\tkubectl run hazelcast —image=hazelcast —port=5701\n" +#~ "\n" +#~ "\t\t# Inicia uma única instância do hazelcast e seta as variáveis de " +#~ "ambiente \"DNS_DOMAIN=cluster\" e \"POD_NAMESPACE=default\" no " +#~ "container.\n" +#~ "\t\tkubectl run hazelcast —image=hazelcast —env=\"DNS_DOMAIN=cluster\" —" +#~ "env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Inicia uma instância replicada de nginx.\n" +#~ "\t\tkubectl run nginx —image=nginx —replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Mostra os objetos da API correspondente sem criar elas.\n" +#~ "\t\tkubectl run nginx —image=nginx —dry-run\n" +#~ "\n" +#~ "\t\t# Inicia uma única instância de nginx, mas sobrescreve a spec do " +#~ "deployment com um conjunto parcial de valores passeados do JSON.\n" +#~ "\t\tkubectl run nginx —image=nginx —overrides='{ \"apiVersion\": \"v1\", " +#~ "\"spec\": { … } }'\n" +#~ "\n" +#~ "\t\t# Inicia um pod de busybox e mantém ele em primeiro plano, não " +#~ "reinicia se ele já existe.\n" +#~ "\t\tkubectl run -i -t busybox —image=busybox —restart=Never\n" +#~ "\n" +#~ "\t\t# Inicia um container nginx usando o comando padrão, mas utiliza " +#~ "argumentos customizados (arg1 .. 
argN) para o comando.\n" +#~ "\t\tkubectl run nginx —image=nginx — … \n" +#~ "\n" +#~ "\t\t# Inicia um container nginx usando um comando diferente e argumentos " +#~ "customizados.\n" +#~ "\t\tkubectl run nginx —image=nginx —command — … \n" +#~ "\n" +#~ "\t\t# Inicia um container perl para processar Ï€ to 2000 posições e mostra " +#~ "a saída.\n" +#~ "\t\tkubectl run pi —image=perl —restart=OnFailure — perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Inicia um cron job para processar as 2000 posições de Ï€ e mostra a " +#~ "saída a cada 5 minutos.\n" +#~ "\t\tkubectl run pi —schedule=\"0/5 * * * ?\" —image=perl —" +#~ "restart=OnFailure — perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Atualiza a restrição para a chave 'dedicated' e o valor 'special-" +#~ "user' e o efeito 'NoSchedule' para o node 'foo'.\n" +#~ "\t\t# Se o taint com esta chave e efeito já existirem, o seu valor é " +#~ "substituído pelo especificado.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove a restrição com a chave 'dedicated' e efeito 'NoSchedule' do " +#~ "nodo 'foo' se existir.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove o node 'foo' todos os taints com a chave 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Atualiza o pod 'foo' com o label 'unhealthy' e valor 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Atualiza o pod 'foo' com o label 'status' e valor 'unhealthy', " +#~ "sobrescrevendo qualquer valor existente.\n" +#~ "\t\tkubectl label —overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Atualiza todos os pods no namespace corrente\n" +#~ "\t\tkubectl label pods —all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Atualiza o pod identificado pelo tipo e nome em \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ 
"\t\t# Atualiza o pod 'foo' apenas se o recurso não foi modificado na " +#~ "versão 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy —resource-version=1\n" +#~ "\n" +#~ "\t\t# Atualiza o pod 'foo' removendo o label chamado 'bar', se ele " +#~ "existir.\n" +#~ "\t\t# Não necessita a flag —overwrite.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Atualiza os pods de frontend-v1 utilizando os dados do novo " +#~ "replication controller definido em frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Atualiza os pods do frontend-v1 utilizando os dados em JSON " +#~ "passados pelo stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Atualiza os pods do frontend-v1 para frontend-v2 trocando a imagem, " +#~ "e trocando o\n" +#~ "\t\t# nome do replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Atualiza os pods do frontend trocando a imagem, e mantendo o nome " +#~ "antigo.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Cancela e reverte um rollout existente em progresso (de frontend-v1 " +#~ "para frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Visualiza a anotação last-applied-configuration pelo tipo/nome no " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# Visualiza a anotação last-applied-configuration no arquivo JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tAplica a configuração em um recurso usando um nome de arquivo ou " +#~ "stdin.\n" +#~ "\t\tEste recurso será criado se ele não existir.\n" +#~ "\t\tPara utilizar o 'apply', sempre crie o recurso inicialmente com " +#~ "'apply' ou 'create --save-config'.\n" +#~ "\n" +#~ "\t\tFormatos JSON e YAML são aceitos.\n" +#~ "\n" +#~ "\t\tNota Alpha: a funcionalidade --prune não está completa. Não utilize a " +#~ "não ser que você saibe qual é o estado corrente. Veja https://issues.k8s." +#~ "io/34274." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um ClusterRoleBinding para um ClusterRole específico." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria uma RoleBinding para uma Role específica ou ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um TLS secret de uma chave pública/privada fornecida.\n" +#~ "\n" +#~ "\t\tA chave pública/privada deve existir antes. O certificado da chave " +#~ "deve ser codificada como PEM, e ter sido gerada pela chave privada." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um configmap com base em um arquivo, diretório, ou um valor " +#~ "literal especificado.\n" +#~ "\n" +#~ "\t\tUm configmap único pode conter um ou mais pares de chave/valor.\n" +#~ "\n" +#~ "\t\tQuando criar um configmap com base em um arquivo, a chave será por " +#~ "padrão o nome do arquivo, e o valor será\n" +#~ "\t\tpor padrão o conteúdo do arquivo. Se o nome do arquivo for uma chave " +#~ "inválida, você deve especificar uma chave alternativa.\n" +#~ "\n" +#~ "\t\tQuando criar um configmap com base em um diretório, cada arquivo cujo " +#~ "o nome é uma chave válida no diretório será\n" +#~ "\t\tcolocada no configmap. Qualquer entrada de diretório, exceto as com " +#~ "arquivos válidos serão ignorados (por exemplo: sub-diretórios,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um novo secret para utilizar com Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets são utilizados para autenticar Docker registries.\n" +#~ "\n" +#~ "\t\tQuando utilizando a linha de comando do Docker para realizar envio " +#~ "das images, você pode se autenticar para um registro fornecido " +#~ "executando\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER —username=DOCKER_USER —" +#~ "password=DOCKER_PASSWORD —email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " Isso irá gerar um arquivo ~/.dockercfg que será utilizado para os " +#~ "comandos 'docker push' e 'docker pull' \n" +#~ "\t\tse autenticarem no registro. O endereço de email é opcional.\n" +#~ "\n" +#~ "\t\tQuando criar aplicações, você pode ter um Docker registry que requer " +#~ "autenticação. Para que \n" +#~ "\t\tos nodes possam baixar as imagens em seu nome, eles devem ter as " +#~ "credenciais. Você pode prover esta informação\n" +#~ "\t\tcriando um dockercfg secret e anexando-o à sua conta de serviço." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um pod disruption budget com o nome especificado, seletor, e o " +#~ "número mínimo de pode disponíveis" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um recurso por nome de arquivo ou stdin.\n" +#~ "\n" +#~ "\t\tOs formatos JSON e YAML são aceitos." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um resourcequota com o nome especificado, limits rigídos e " +#~ "escopo opcional" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. 
subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um secret com base em um arquivo, diretório, ou um valor literal " +#~ "especificado.\n" +#~ "\n" +#~ "\t\tUm secret único pode conter um ou mais pares de chave/valor.\n" +#~ "\n" +#~ "\t\tQuando criar um secret com base em um arquivo, a chave será por " +#~ "padrão o nome do arquivo, e o valor será\n" +#~ "\t\tpor padrão o conteúdo do arquivo. Se o nome do arquivo for uma chave " +#~ "inválida, você deve especificar uma chave alternativa.\n" +#~ "\n" +#~ "\t\tQuando criar um secret com base em um diretório, cada arquivo cujo o " +#~ "nome é uma chave válida no diretório será\n" +#~ "\t\tcolocada no configmap. Qualquer entrada de diretório, exceto as com " +#~ "arquivos válidos serão ignorados (por exemplo: sub-diretórios,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria e executa uma imagem específica, possivelmente replicada.\n" +#~ "\n" +#~ "\t\tCria um deployment ou job para gerenciar o(s) container(s) criado(s)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um autoscaler que automaticamente escolhe e configura quantos " +#~ "pods irão executar em um cluster kubernetes.\n" +#~ "\n" +#~ "\t\tProcura por um Deployment, ReplicaSet, ou ReplicationController por " +#~ "nome e cria um autoscaler que utiliza o recurso fornecido como " +#~ "referência.\n" +#~ "\t\tUm autoscaler pode automaticamente aumentar ou reduzir o número de " +#~ "pods quando necessário." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. 
If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete —help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\tMostra os Recursos (CPU/Memória/Armazenamento) utilizados nos nodes.\n" +#~ "\n" +#~ "\t\tO comando top-node permite que você veja o consumo de recursos dos " +#~ "nodes." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\tMostra a utilização de recursos dos pods (CPU/Memória/" +#~ "Armazenamento).\n" +#~ "\n" +#~ "\t\tO comando 'top pod' deixa você ver a utilização dos recusrsos dos " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDevido ao atraso da pipeline de métricas, o resultado pode estar " +#~ "indisponível por alguns minutos\n" +#~ "\t\tdesde a criação do pod." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\tMostra a utilização de recursos (CPU/Memória/Armazenamento).\n" +#~ "\n" +#~ "\t\tO comando top deixa você ver a utilização de recursos de nodes e " +#~ "pods.\n" +#~ "\n" +#~ "\t\tEste comando necessita que o Heapster esteja corretamente configurado " +#~ "e rodando no servidor. " + +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. 
If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML.
To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands.
This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tAplica uma atualização contínua em um ReplicationController.\n" +#~ "\n" +#~ "\t\tTroca o replication controller especificado por um novo replication " +#~ "controller atualizando um pod por vez para utilizar o\n" +#~ "\t\tnovo PodTemplate. O new-controller.json deve ser especificado no " +#~ "mesmo namespace que o\n" +#~ "\t\treplication controller existente e sobrescrever pelo menos uma label " +#~ "comum no seu replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tSubstitui um recurso pelo especificado em um arquivo ou via stdin.\n" +#~ "\n" +#~ "\t\tOs formatos JSON e YAML são aceitos. Quando substituindo recursos " +#~ "existentes,\n" +#~ "\t\ta especificação completa do recurso deve ser fornecida. Isto pode ser " +#~ "obtido com\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tConsulte os modelos em https://htmlpreview.github.io/?https://github." +#~ "com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions." +#~ "html para descobrir se um campo é mutável." + +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server."
+#~ msgstr "" +#~ "\n" +#~ "\t\tDefine um novo tamanho para um Deployment, ReplicaSet, Replication " +#~ "Controller, ou Job.\n" +#~ "\n" +#~ "\t\tScale deixa os usuários especificar uma ou mais pré-condições para a " +#~ "ação de scale.\n" +#~ "\n" +#~ "\t\tSe --current-replicas ou --resource-version forem especificados, será " +#~ "validado antes\n" +#~ "\t\tda tentativa de scale, e garante que a pré-condição é verdadeira " +#~ "quando\n" +#~ "\t\to scale é enviado para o servidor." + +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tPara fazer o proxy de todas as apis do kubernetes, utilize:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tPara fazer o proxy de parte da api do kubernetes e alguns arquivos " +#~ "estáticos:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/" +#~ "api/\n" +#~ "\n" +#~ "\t\tCom os comandos acima você pode fazer 'curl localhost:8001/api/v1/" +#~ "pods'.\n" +#~ "\n" +#~ "\t\tPara fazer o proxy de todas as apis do kubernetes em um caminho " +#~ "diferente, utilize:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tCom o comando acima você pode fazer 'curl localhost:8001/custom/api/" +#~ "v1/pods'" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tAtualiza o(s) campo(s) de um recurso usando strategic merge patch\n" +#~ "\n" +#~ "\t\tFormatos JSON e YAML são aceitos.\n" +#~ "\n" +#~ "\t\tConsulte os modelos em https://htmlpreview.github.io/?https://github." +#~ "com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions." +#~ "html para descobrir se um campo é mutável." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used."
+#~ msgstr "" +#~ "\n" +#~ "\t\tAtualiza labels em um recurso.\n" +#~ "\n" +#~ "\t\t* Um label deve começar com uma letra ou número, e pode conter letras, " +#~ "números, hífens, pontos e sublinhados, com no máximo %[1]d caracteres.\n" +#~ "\t\t* Se --overwrite for verdadeiro, então labels podem ser " +#~ "sobrescritos, caso contrário a sobrescrita irá falhar.\n" +#~ "\t\t* Se --resource-version for especificado, então as atualizações " +#~ "usarão esta versão do recurso, caso contrário, a versão do recurso " +#~ "existente será usada." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d " +#~ "characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." +#~ msgstr "" +#~ "\n" +#~ "\t\tAtualiza os taints em um ou mais nodes.\n" +#~ "\n" +#~ "\t\t* Um taint consiste em uma chave, valor e efeito. Como argumento, é " +#~ "expressado como chave=valor:efeito.\n" +#~ "\t\t* Uma chave deve começar com uma letra ou número, e pode conter " +#~ "letras, números, hífens, pontos e sublinhados, com no máximo %[1]d " +#~ "caracteres.\n" +#~ "\t\t* Um valor deve começar com uma letra ou número, e pode conter " +#~ "letras, números, hífens, pontos e sublinhados, com no máximo %[2]d " +#~ "caracteres.\n" +#~ "\t\t* O efeito deve ser NoSchedule, PreferNoSchedule ou NoExecute.\n" +#~ "\t\t* Atualmente taint pode ser aplicado apenas para nodes." + +#~ msgid "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." +#~ msgstr "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." + +#~ msgid "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "<some-namespace>\n" +#~ "\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar" +#~ msgstr "" +#~ "\n" +#~ "\t # !!!Nota Importante!!!\n" +#~ "\t # Necessita que o binário 'tar' esteja presente na imagem do\n" +#~ "\t # container.
Se 'tar' não estiver presente, o 'kubectl cp' irá " +#~ "falhar.\n" +#~ "\n" +#~ "\t # Copia o diretório local /tmp/foo_dir para /tmp/bar_dir no pod " +#~ "remoto no namespace default\n" +#~ "\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n" +#~ "\n" +#~ " # Copia o arquivo local /tmp/foo para /tmp/bar no pod remoto no " +#~ "container específico\n" +#~ "\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n" +#~ "\n" +#~ "\t\t# Copia o arquivo local /tmp/foo para /tmp/bar no pod remoto no " +#~ "namespace <some-namespace>\n" +#~ "\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copia /tmp/foo do pod remoto para /tmp/bar localmente\n" +#~ "\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # Cria um novo segredo TLS chamado tls-secret com o par de chaves " +#~ "fornecido:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +#~ "to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Cria um novo segredo chamado my-secret com as chaves para cada " +#~ "arquivo no diretório bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Cria um novo segredo chamado my-secret com chaves especificadas em " +#~ "vez dos nomes dos arquivos\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Cria um novo segredo chamado my-secret com key1=supersecret e " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Cria um novo serviço do tipo ExternalName chamado my-ns \n" +#~ "\tkubectl create service externalname my-ns —external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # Cria um novo serviço clusterIP chamado my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Cria um novo serviço clusterIP chamado my-cs (em modo headless)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # Cria um novo deployment chamado my-dep que executa uma imagem " +#~ "busybox.\n" +#~ " kubectl create deployment my-dep —image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # Cria um novo serviço nodeport chamado my-ns\n" +#~ " kubectl create service nodeport my-ns —tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if " +#~ "it exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" +#~ msgstr "" +#~ "\n" +#~ " # Atualiza o pod 'foo' com a annotation 'description' e o valor 'my " +#~ "frontend'.\n" +#~ " # Se a mesma annotation é configurada várias vezes, apenas o último " +#~ "valor será utilizado\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Atualiza o pod identificado pelo tipo e nome definido no \"pod.json" +#~ "\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Atualiza o pod 'foo' com a annotation 'description' e o valor 'my " +#~ "frontend running nginx', 
sobrescrevendo qualquer valor existente.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Atualiza todos os pods no namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Atualiza o pod 'foo' apenas se o recurso não foi modificado na " +#~ "versão 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Atualiza o pod 'foo' removendo a annotation chamada 'description' " +#~ "se ela existir.\n" +#~ " # Não necessita da flag --overwrite.\n" +#~ " kubectl annotate pods foo description-" + +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Cria um serviço do tipo clusterIP com o nome especificado." + +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Cria um deployment com o nome especificado." + +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Cria um serviço do tipo nodeport com o nome especificado." + +#~ msgid "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or " +#~ "specify --all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." +#~ msgstr "" +#~ "\n" +#~ " Coleta informações do cluster para debugar e diagnosticar problemas " +#~ "dele. Por padrão, exibe tudo para o\n" +#~ " stdout. Você pode, se quiser, especificar um diretório com --output-" +#~ "directory. Se especificar o diretório, kubernetes irá\n" +#~ " montar um conjunto de arquivos no diretório. Por padrão, apenas " +#~ "coleta informações no namespace 'kube-system', mas você pode\n" +#~ " trocar para um namespace diferente com a flag --namespaces, ou " +#~ "especificar --all-namespaces para todos os namespaces.\n" +#~ "\n" +#~ " O comando também coleta os logs de todos os pods no cluster, estes " +#~ "logs são salvos em outros diretórios\n" +#~ " baseado no namespace e nome do pod." + +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Mostra os endereços dos servidores de gerenciamento e serviços com o " +#~ "label kubernetes.io/cluster-service=true\n" +#~ " Para debugar e diagnosticar outros problemas do cluster, utilize " +#~ "'kubectl cluster-info dump'." + +#~ msgid "A schedule in the Cron format the job should be run with." +#~ msgstr "Agendamento no formato Cron no qual o job deve rodar." + +#~ msgid "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true."
+#~ msgstr "" +#~ "Uma substituição inline JSON para o objeto de serviço gerado. Se não " +#~ "estiver vazio, ele será usado para substituir o objeto gerado. Requer que " +#~ "o objeto forneça o campo apiVersion válido. Usado apenas se --expose for " +#~ "true." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "" +#~ "Aplica a configuração para um recurso utilizando um nome de arquivo ou " +#~ "stdin" + +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "Auto-escala um Deployment, ReplicaSet ou ReplicationController" + +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Nome do contêiner que terá sua imagem atualizada. Relevante apenas quando " +#~ "--image for especificado, caso contrário, ignorado. Obrigatório ao usar --" +#~ "image em um pod com vários contêineres" + +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "Cria um ClusterRoleBinding para um ClusterRole específico" + +#~ msgid "Create a LoadBalancer service." +#~ msgstr "Cria um serviço do tipo LoadBalancer." + +#~ msgid "Create a NodePort service." +#~ msgstr "Cria um serviço do tipo NodePort." + +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "Cria um RoleBinding para uma Role ou ClusterRole específico" + +#~ msgid "Create a clusterIP service." +#~ msgstr "Cria um serviço do tipo clusterIP." + +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "" +#~ "Cria um configmap com base em um arquivo, diretório, ou um valor literal" + +#~ msgid "Create a deployment with the specified name." +#~ msgstr "Cria um deployment com um nome especificado." + +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "Cria um pod disruption budget com um nome especificado." + +#~ msgid "Create a quota with the specified name." +#~ msgstr "Cria uma quota com um nome especificado." + +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "Cria um recurso por nome de arquivo ou stdin" + +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "" +#~ "Cria um secret com base em um arquivo, diretório ou um valor literal" + +#~ msgid "Create a service using specified subcommand." +#~ msgstr "Cria um service utilizando um sub-comando especificado." + +#~ msgid "Create an ExternalName service." +#~ msgstr "Cria um serviço do tipo ExternalName." + +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" +#~ msgstr "" +#~ "Apaga os recursos por nome de arquivos, stdin, recursos e nomes, ou por " +#~ "recursos e seletor de label" + +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "Descontinuado: Termina um recurso por nome ou nome de arquivo" + +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "Mostra a utilização de recursos (CPU/Memória) nos nodes" + +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "Mostra a utilização de recursos (CPU/Memória) nos pods" + +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "Mostra a utilização de recursos (CPU/Memória)."
+ +#~ msgid "Display cluster info" +#~ msgstr "Mostra as informações do cluster" + +#~ msgid "Displays the current-context" +#~ msgstr "Mostra o contexto corrente" + +#~ msgid "Documentation of resources" +#~ msgstr "Documentação dos recursos" + +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "" +#~ "Realiza o dump de muitas informações relevantes para debugging e " +#~ "diagnósticos" + +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Política explícita para quando extrair imagens de contêiner. Obrigatório " +#~ "quando --image for igual à imagem existente, caso contrário, será " +#~ "ignorado." + +#~ msgid "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP para ser alocado no Load Balancer. Se vazio, um IP efêmero será criado " +#~ "e utilizado (específico para cada provedor cloud)." + +#~ msgid "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" +#~ msgstr "" +#~ "Imagem a ser utilizada para atualizar o replication controller. Deve ser " +#~ "diferente da imagem atual (pode ser uma nova imagem ou uma nova tag). Não " +#~ "pode ser utilizada com --filename/-f" + +#~ msgid "Manage a deployment rollout" +#~ msgstr "Gerencia um deployment rollout" + +#~ msgid "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" +#~ msgstr "" +#~ "Imprime o objeto formatado com a dada versão de grupo (por exemplo: " +#~ "'extensions/v1beta1').)" + +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "Executa uma atualização contínua no ReplicationController fornecido" + +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "Substitui um recurso por um nome de arquivo ou stdin" + +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" +#~ msgstr "" +#~ "Define um novo tamanho para um Deployment, ReplicaSet, Replication " +#~ "Controller, ou Job" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Define a anotação last-applied-configuration em um objeto existente para " +#~ "corresponder ao conteúdo do arquivo." + +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "Define um cluster no arquivo kubeconfig" + +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "Define um contexto no arquivo kubeconfig" + +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "Define um usuário no arquivo kubeconfig" + +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "Define um valor individual no arquivo kubeconfig" + +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "Define o current-context no arquivo kubeconfig" + +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" +#~ msgstr "" +#~ "Pega um replication controller, service, deployment ou pod e expõe como um " +#~ "novo Serviço do Kubernetes" + +#~ msgid "" +#~ "The key to use to differentiate between two different controllers, " +#~ "default 'deployment'.
Only relevant when --image is specified, ignored " +#~ "otherwise" +#~ msgstr "" +#~ "A chave utilizada para diferenciar entre dois controladores diferentes, " +#~ "padrão 'deployment'. Apenas relevante quando --image é especificado, é " +#~ "ignorado caso contrário" + +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "O nome do gerador de API a ser usado, veja a lista em http://kubernetes." +#~ "io/docs/user-guide/kubectl-conventions/#generators." + +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "" +#~ "O nome do gerador de API a ser usado. Atualmente existe apenas 1 gerador." + +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "O nome do gerador para ser utilizado quando criando um serviço. Apenas " +#~ "utilizado se --expose é verdadeiro" + +#~ msgid "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." +#~ msgstr "" +#~ "A porta que o container expõe. Se --expose é verdadeiro, esta também é a " +#~ "porta utilizada pelo serviço quando for criado." + +#~ msgid "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to " +#~ "'OnFailure' a job is created, if set to 'Never', a regular pod is " +#~ "created. For the latter two --replicas must be 1. Default 'Always', for " +#~ "CronJobs `Never`." +#~ msgstr "" +#~ "A política de restart para este Pod. Possíveis valores [Always, " +#~ "OnFailure, Never]. Se configurado para 'Always' um deployment é criado, " +#~ "se configurado para 'OnFailure' um job é criado, se configurado para " +#~ "'Never', um pod é criado. Para os dois últimos --replicas deve ser 1. " +#~ "Valor padrão 'Always', para CronJobs `Never`." + +#~ msgid "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." +#~ msgstr "" +#~ "Tipo para este serviço: ClusterIP, NodePort, ou LoadBalancer. Valor " +#~ "padrão é 'ClusterIP'." + +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "Remover um valor individual do arquivo kubeconfig" + +#~ msgid "Update field(s) of a resource using strategic merge patch" +#~ msgstr "" +#~ "Atualizar o(s) campo(s) de um recurso usando a estratégia de merge patch" + +#~ msgid "Update image of a pod template" +#~ msgstr "Atualizar a imagem de um template de pod" + +#~ msgid "" +#~ "View latest last-applied-configuration annotations of a resource/object" +#~ msgstr "" +#~ "Visualizar a última anotação last-applied-configuration de um recurso/" +#~ "objeto" + +#~ msgid "external name of service" +#~ msgstr "nome externo do serviço" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot new file mode 100644 index 000000000..4e60cfc99 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot @@ -0,0 +1,3291 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: \n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2023-06-27 12:09-0400\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" +"Language-Team: LANGUAGE <LL@li.org>\n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:142 +msgid "" +"\n" +"\t\t\t# Approve CSR 'csr-sqgzp'\n" +"\t\t\tkubectl certificate approve csr-sqgzp\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:185 +msgid "" +"\n" +"\t\t\t# Deny CSR 'csr-sqgzp'\n" +"\t\t\tkubectl certificate deny csr-sqgzp\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:43 +msgid "" +"\n" +"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set " +"current-context my-context\".\n" +"\n" +"\t\t\tThe loading order follows these rules:\n" +"\n" +"\t\t\t1. If the --" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:44 +msgid "" +"\n" +"\t\t # Create a cluster role binding for user1, user2, and group1 using the " +"cluster-admin cluster role\n" +"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-" +"admin --user=user1 --user=user2 --group=group1" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:57 +msgid "" +"\n" +"\t\t # Create a new config map named my-config based on folder bar\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config with specified keys instead " +"of file basenames on disk\n" +"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1." +"txt --from-file=key2=/path/to/bar/file2.txt\n" +"\n" +"\t\t # Create a new config map named my-config with key1=config1 and " +"key2=config2\n" +"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-" +"literal=key2=config2\n" +"\n" +"\t\t # Create a new config map named my-config from the key=value pairs in " +"the file\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config from an env file\n" +"\t\t kubectl create configmap my-config --from-env-file=path/to/foo.env --" +"from-env-file=path/to/bar.env" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:56 +msgid "" +"\n" +"\t\t # If you do not already have a .dockercfg file, create a dockercfg " +"secret directly\n" +"\t\t kubectl create secret docker-registry my-secret --docker-" +"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n" +"\n" +"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n" +"\t\t kubectl create secret docker-registry my-secret --from-file=." +"dockerconfigjson=path/to/.docker/config.json" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:63 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:41 +msgid "" +"\n" +"\t\t# !!!Important Note!!!\n" +"\t\t# Requires that the 'tar' binary is present in your container\n" +"\t\t# image.
If 'tar' is not present, 'kubectl cp' will fail.\n" +"\t\t#\n" +"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n" +"\t\t# file mode preservation, consider using 'kubectl exec'.\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"<some-namespace>\n" +"\t\ttar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- " +"tar xf - -C /tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar " +"xf - -C /tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in " +"the default namespace\n" +"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific " +"container\n" +"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"<some-namespace>\n" +"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:153 +msgid "" +"\n" +"\t\t# Apply the configuration in pod.json to a pod\n" +"\t\tkubectl apply -f ./pod.json\n" +"\n" +"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. " +"dir/kustomization.yaml\n" +"\t\tkubectl apply -k dir/\n" +"\n" +"\t\t# Apply the JSON passed into stdin to a pod\n" +"\t\tcat pod.json | kubectl apply -f -\n" +"\n" +"\t\t# Apply the configuration from all files that end with '.json'\n" +"\t\tkubectl apply -f '*.json'\n" +"\n" +"\t\t# Note: --prune is still in Alpha\n" +"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx " +"and delete all other resources that are not in the file and match label " +"app=nginx\n" +"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +"\n" +"\t\t# Apply the configuration in manifest.yaml and delete all the other " +"config maps that are not in the file\n" +"\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/" +"ConfigMap" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:50 +#, c-format +msgid "" +"\n" +"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used\n" +"\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +"\n" +"\t\t# Auto scale a replication controller \"foo\", with the number of pods " +"between 1 and 5, target CPU utilization at 80%\n" +"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +msgstr "" + +#: pkg/kubectl/cmd/convert/convert.go:52 +msgid "" +"\n" +"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the " +"latest version\n" +"\t\t# and print to stdout in JSON format.\n" +"\t\tkubectl convert -f pod.yaml --local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f .
| kubectl create -f -" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:41 +msgid "" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" with ResourceName " +"specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --" +"resource-name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with API Group specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs.apps\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with SubResource specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods," +"pods/status\n" +"\n" +"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n" +"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/" +"*\n" +"\n" +"\t\t# Create a cluster role name \"monitoring\" with AggregationRule " +"specified\n" +"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example." +"com/aggregate-to-monitoring=true\"" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:44 +msgid "" +"\n" +"\t\t# Create a job\n" +"\t\tkubectl create job my-job --image=busybox\n" +"\n" +"\t\t# Create a job with a command\n" +"\t\tkubectl create job my-job --image=busybox -- date\n" +"\n" +"\t\t# Create a job from a cron job named \"a-cronjob\"\n" +"\t\tkubectl create job test-job --from=cronjob/a-cronjob" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:44 +msgid "" +"\n" +"\t\t# Create a new resource quota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resource quota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:44 +#, c-format +msgid "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in time\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:78 +msgid "" +"\n" +"\t\t# Create a pod using the data in pod.json\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in registry.yaml in JSON then create the resource using " +"the edited data\n" +"\t\tkubectl create -f registry.yaml --edit -o json" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:43 +msgid "" +"\n" +"\t\t# Create a priority class named high-priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --" +"description=\"high priority\"\n" +"\n" +"\t\t# Create a priority class named 
default-priority that is considered as " +"the global default priority\n" +"\t\tkubectl create priorityclass default-priority --value=1000 --global-" +"default=true --description=\"default priority\"\n" +"\n" +"\t\t# Create a priority class named high-priority that cannot preempt pods " +"with lower priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --" +"description=\"high priority\" --preemption-policy=\"Never\"" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:43 +msgid "" +"\n" +"\t\t# Create a role binding for user1, user2, and group1 using the admin " +"cluster role\n" +"\t\tkubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1\n" +"\n" +"\t\t# Create a role binding for serviceaccount monitoring:sa-dev using the " +"admin role\n" +"\t\tkubectl create rolebinding admin-binding --role=admin --" +"serviceaccount=monitoring:sa-dev" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:46 +msgid "" +"\n" +"\t\t# Create a role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a role named \"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-" +"name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a role named \"foo\" with API Group specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.apps\n" +"\n" +"\t\t# Create a role named \"foo\" with SubResource specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:67 +msgid "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:64 +msgid "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." 
+"com/bar to svc\n" +"\t\t# svc1:8080 with a TLS secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-" +"cert\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\\n" +"\t\t\t--annotation ingress.annotation1=foo \\\n" +"\t\t\t--annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\\n" +"\t\t\t--rule=\"foo.com/=svc:port\" \\\n" +"\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\\n" +"\t\t\t--rule=\"foo.com/path*=svc:8080\" \\\n" +"\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\\n" +"\t\t --rule=\"foo.com/=svc:https,tls\" \\\n" +"\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\\n" +"\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\\n" +"\t\t --default-backend=defaultsvc:http \\\n" +"\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:78 +msgid "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create an interactive debugging session for the pod in the file pod." 
+"yaml and immediately attach to it.\n" +"\t\t# (requires the EphemeralContainers feature to be enabled in the " +"cluster)\n" +"\t\tkubectl debug -f pod.yaml -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:80 +msgid "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. " +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete resources from all files that end with '.json'\n" +"\t\tkubectl delete -f '*.json'\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:51 +msgid "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe pods -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller\n" +"\t\t# (rc-created pods get the name of the rc as a prefix in the pod name)\n" +"\t\tkubectl describe pods frontend" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:75 +msgid "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:140 +msgid "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication 
controller, replica set, job, daemon set, or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set, or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:60 +msgid "" +"\n" +"\t\t# Edit the service named 'registry'\n" +"\t\tkubectl edit svc/registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config\n" +"\n" +"\t\t# Edit the 'status' subresource for the 'mydeployment' deployment\n" +"\t\tkubectl edit deployment mydeployment --subresource='status'" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:47 +msgid "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:50 +msgid "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. 
-i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:47 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get all the fields in the resource\n" +"\t\tkubectl explain pods --recursive\n" +"\n" +"\t\t# Get the explanation for deployment in supported api versions\n" +"\t\tkubectl explain deployments --api-version=apps/v1\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers\n" +"\t\t\n" +"\t\t# Get the documentation of resources in different format\n" +"\t\tkubectl explain deployment --output=plaintext-openapiv2" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:66 +msgid "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" +"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." 
+"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"\n" +"\n" +"\n" +"\t\t# Load the kubectl completion code for fish[2] into the current shell\n" +"\t\t kubectl completion fish | source\n" +"\t\t# To load completions for each session, execute once:\n" +"\t\t kubectl completion fish > ~/.config/fish/completions/kubectl.fish\n" +"\n" +"\t\t# Load the kubectl completion code for powershell into the current " +"shell\n" +"\t\t kubectl completion powershell | Out-String | Invoke-Expression\n" +"\t\t# Set kubectl completion code for powershell to run on startup\n" +"\t\t## Save completion code to a script and execute in the profile\n" +"\t\t kubectl completion powershell > $HOME\\.kube\\completion.ps1\n" +"\t\t Add-Content $PROFILE \"$HOME\\.kube\\completion.ps1\"\n" +"\t\t## Execute completion code in the profile\n" +"\t\t Add-Content $PROFILE \"if (Get-Command kubectl -ErrorAction " +"SilentlyContinue) {\n" +"\t\t kubectl completion powershell | Out-String | Invoke-Expression\n" +"\t\t }\"\n" +"\t\t## Add completion code directly to the $PROFILE script\n" +"\t\t kubectl completion powershell >> $PROFILE" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:46 +msgid "" +"\n" +"\t\t# List all available plugins\n" +"\t\tkubectl plugin list" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:100 +msgid "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." 
+"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +"\n" +"\t\t# List the 'status' subresource for a single pod\n" +"\t\tkubectl get pod web-pod-13je7 --subresource status" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:73 +msgid "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:89 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:60 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:89 +msgid "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'\n" +"\n" +"\t\t# Update a deployment's replicas through the 'scale' subresource using a " +"merge patch\n" +"\t\tkubectl patch deployment nginx-deployment --subresource='scale' --" +"type='merge' -p '{\"spec\":{\"replicas\":2}}'" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" + +#: 
staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:45 +msgid "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:49 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:35 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:57 +msgid "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/\\1:v4/' " +"| kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:55 +msgid "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multiple containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previously terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:50 +msgid "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/example1 rc/example2 rc/example3\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:75 +msgid "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file\n" +"\t\tkubectl apply 
set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:76 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:63 +msgid "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --" +"env=\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and " +"\"env=prod\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --" +"labels=\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... 
" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:78 +msgid "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. the pods API available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:81 +msgid "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-\n" +"\n" +"\t\t# Add a taint with key 'dedicated' on nodes having label myLabel=X\n" +"\t\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\n" +"\n" +"\t\t# Add to node 'foo' a taint with key 'bar' and no value\n" +"\t\tkubectl taint nodes foo bar:NoSchedule" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:101 +msgid "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists\n" +"\t\t# Does not require the --overwrite flag\n" +"\t\tkubectl label pods foo bar-" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:54 +msgid "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:67 +msgid "" +"\n" +"\t\t# 
Wait for the pod \"busybox1\" to contain the status condition of type " +"\"Ready\"\n" +"\t\tkubectl wait --for=condition=Ready pod/busybox1\n" +"\n" +"\t\t# The default value of status condition is true; you can wait for other " +"targets after an equal delimiter (compared after Unicode simple case " +"folding, which is a more general form of case-insensitivity)\n" +"\t\tkubectl wait --for=condition=Ready=false pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to contain the status phase to be " +"\"Running\"\n" +"\t\tkubectl wait --for=jsonpath='{.status.phase}'=Running pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to be deleted, with a timeout of 60s, " +"after having issued the \"delete\" command\n" +"\t\tkubectl delete pod/busybox1\n" +"\t\tkubectl wait --for=delete pod/busybox1 --timeout=60s" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:144 +msgid "" +"\n" +"\t\tApply a configuration to a resource by file name or stdin.\n" +"\t\tThe resource name must be specified. This resource will be created if it " +"doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:130 +msgid "" +"\n" +"\t\tApprove a certificate signing request.\n" +"\n" +"\t\tkubectl certificate approve allows a cluster admin to approve a " +"certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tissue a certificate to the requester with the attributes requested in " +"the CSR.\n" +"\n" +"\t\tSECURITY NOTICE: Depending on the requested attributes, the issued " +"certificate\n" +"\t\tcan potentially grant a requester access to cluster resources or to " +"authenticate\n" +"\t\tas a requested identity. Before approving a CSR, ensure you understand " +"what the\n" +"\t\tsigned certificate can do.\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:30 +msgid "" +"\n" +"\t\tConfigure application resources.\n" +"\n" +"\t\tThese commands help you make changes to existing application resources." +msgstr "" + +#: pkg/kubectl/cmd/convert/convert.go:41 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:41 +msgid "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist beforehand. The public key " +"certificate must be .PEM encoded and match\n" +"\t\tthe given private key." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:41 +msgid "" +"\n" +"\t\tCreate a cluster role binding for a particular cluster role." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:38 +msgid "" +"\n" +"\t\tCreate a cluster role." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:45 +msgid "" +"\n" +"\t\tCreate a config map based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single config map may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a config map based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a config map based on a directory, each file whose " +"basename is a valid key in the directory will be\n" +"\t\tpackaged into the config map. Any directory entries except regular " +"files are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:41 +msgid "" +"\n" +"\t\tCreate a cron job with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:41 +msgid "" +"\n" +"\t\tCreate a job with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:40 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:41 +msgid "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running:\n" +"\t\t\t'$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +"\tThat produces a ~/.dockercfg file that is used by subsequent 'docker push' " +"and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they must have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:41 +msgid "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:40 +msgid "" +"\n" +"\t\tCreate a priority class with the specified name, value, globalDefault " +"and description." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:73 +msgid "" +"\n" +"\t\tCreate a resource from a file or from stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:41 +msgid "" +"\n" +"\t\tCreate a resource quota with the specified name, hard limits, and " +"optional scopes." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:40 +msgid "" +"\n" +"\t\tCreate a role binding for a particular role or cluster role." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:70 +msgid "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key or you " +"wish to chose your own, you may specify\n" +"\t\tan alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files are " +"ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:61 +msgid "" +"\n" +"\t\tCreate a secret with specified type.\n" +"\t\t\n" +"\t\tA docker-registry type secret is for accessing a container registry.\n" +"\n" +"\t\tA generic type secret indicate an Opaque secret type.\n" +"\n" +"\t\tA tls type secret holds TLS certificate and its associated key." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:72 +msgid "" +"\n" +"\t\tCreates a proxy server or application-level gateway between localhost " +"and\n" +"\t\tthe Kubernetes API server. It also allows serving static content over " +"specified\n" +"\t\tHTTP path. All incoming data enters through one port and gets forwarded " +"to\n" +"\t\tthe remote Kubernetes API server port, except for the path matching the " +"static content path." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:44 +msgid "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a Kubernetes cluster.\n" +"\n" +"\t\tLooks up a deployment, replica set, stateful set, or replication " +"controller by name and creates an autoscaler that uses the given resource as " +"a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:61 +msgid "" +"\n" +"\t\tDebug cluster resources using interactive debugging containers.\n" +"\n" +"\t\t'debug' provides automation for common debugging tasks for cluster " +"objects identified by\n" +"\t\tresource and name. Pods will be used by default if no resource is " +"specified.\n" +"\n" +"\t\tThe action taken by 'debug' varies depending on what resource is " +"specified. Supported\n" +"\t\tactions include:\n" +"\n" +"\t\t* Workload: Create a copy of an existing pod with certain attributes " +"changed,\n" +"\t for example changing the image tag to a new version.\n" +"\t\t* Workload: Add an ephemeral container to an already running pod, for " +"example to add\n" +"\t\t debugging utilities without restarting the pod.\n" +"\t\t* Node: Create a new pod that runs in the node's host namespaces and can " +"access\n" +"\t\t the node's filesystem.\n" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:47 +msgid "" +"\n" +"\t\tDelete resources by file names, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. 
Only one type of argument may be " +"specified: file names,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource, you must specify the " +"--force flag.\n" +"\t\tNote: only a subset of resources support graceful deletion. In the " +"absence of this support,\n" +"\t\tthe --grace-period flag is ignored.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod's processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification, which may lead\n" +"\t\tto data corruption or inconsistency. Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods, the scheduler may place new pods on " +"those nodes before the node\n" +"\t\thas released those resources, causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone submits an\n" +"\t\tupdate to a resource right when you submit a delete, their update will " +"be lost along with the\n" +"\t\trest of the resource.\n" +"\n" +"\t\tAfter a CustomResourceDefinition is deleted, invalidation of discovery " +"cache may take up\n" +"\t\tto 6 hours. If you don't want to wait, you might want to run \"kubectl " +"api-resources\" to refresh\n" +"\t\tthe discovery cache." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:178 +msgid "" +"\n" +"\t\tDeny a certificate signing request.\n" +"\n" +"\t\tkubectl certificate deny allows a cluster admin to deny a certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller\n" +"\t\tnot to issue a certificate to the requester.\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:37 +msgid "" +"\n" +"\t\tDescribe fields and structure of various resources.\n" +"\n" +"\t\tThis command describes the fields associated with each supported API " +"resource.\n" +"\t\tFields are identified via a simple JSONPath identifier:\n" +"\n" +"\t\t\t<type>.<fieldName>[.<fieldName>]\n" +"\n" +"\t\tInformation about each field is retrieved from the server in OpenAPI " +"format."
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:52 +msgid "" +"\n" +"\t\tDiff configurations specified by file name or stdin between the current " +"online\n" +"\t\tconfiguration, and the configuration as it would be if applied.\n" +"\n" +"\t\tThe output is always YAML.\n" +"\n" +"\t\tKUBECTL_EXTERNAL_DIFF environment variable can be used to select your " +"own\n" +"\t\tdiff command. Users can use external commands with params too, example:\n" +"\t\tKUBECTL_EXTERNAL_DIFF=\"colordiff -N -u\"\n" +"\n" +"\t\tBy default, the \"diff\" command available in your path will be\n" +"\t\trun with the \"-u\" (unified diff) and \"-N\" (treat absent files as " +"empty) options.\n" +"\n" +"\t\tExit status:\n" +"\t\t 0\n" +"\t\tNo differences were found.\n" +"\t\t 1\n" +"\t\tDifferences were found.\n" +"\t\t >1\n" +"\t\tKubectl or diff failed with an error.\n" +"\n" +"\t\tNote: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that " +"convention." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:51 +msgid "" +"\n" +"\t\tDisplay events.\n" +"\n" +"\t\tPrints a table of the most important information about events.\n" +"\t\tYou can request events for a namespace, for all namespace, or\n" +"\t\tfiltered to only those pertaining to a specified resource." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:55 +msgid "" +"\n" +"\t\tDisplay merged kubeconfig settings or a specified kubeconfig file.\n" +"\n" +"\t\tYou can use --output jsonpath={...} to extract specific values using a " +"jsonpath expression." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:89 +msgid "" +"\n" +"\t\tDisplay one or many resources.\n" +"\n" +"\t\tPrints a table of the most important information about the specified " +"resources.\n" +"\t\tYou can filter the list using a label selector and the --selector flag. " +"If the\n" +"\t\tdesired resource type is namespaced you will only see results in your " +"current\n" +"\t\tnamespace unless you pass --all-namespaces.\n" +"\n" +"\t\tBy specifying the output as 'template' and providing a Go template as " +"the value\n" +"\t\tof the --template flag, you can filter the attributes of the fetched " +"resources." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:58 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:68 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:39 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Metrics Server to be correctly configured and " +"working on the server. " +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:37 +msgid "" +"\n" +"\t\tDisplay the current-context." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:115 +msgid "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the API server supports\n" +"\t\t[eviction](https://kubernetes.io/docs/concepts/workloads/pods/" +"disruptions/). Otherwise, it will use normal\n" +"\t\tDELETE to delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are daemon set-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tdaemon set-managed pods, because those pods would be immediately " +"replaced by the\n" +"\t\tdaemon set controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by a replication " +"controller,\n" +"\t\treplica set, daemon set, stateful set, or job, then drain will not " +"delete any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](https://kubernetes.io/images/docs/kubectl_drain.svg)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:32 +msgid "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tWhen attempting to open the editor, it will first attempt to use the " +"shell\n" +"\t\tthat has been defined in the 'SHELL' environment variable. If this is " +"not defined,\n" +"\t\tthe default shell will be used, which is '/bin/bash' for Linux or 'cmd' " +"for Windows.\n" +"\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:31 +msgid "" +"\n" +"\t\tEdit the latest last-applied-configuration annotations of resources from " +"the default editor.\n" +"\n" +"\t\tThe edit-last-applied command allows you to directly edit any API " +"resource you can retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:55 +msgid "" +"\n" +"\t\tExperimental: Wait for a specific condition on one or many resources.\n" +"\n" +"\t\tThe command takes multiple resources and waits until the specified " +"condition\n" +"\t\tis seen in the Status field of every given resource.\n" +"\n" +"\t\tAlternatively, the command can wait for the given set of resources to be " +"deleted\n" +"\t\tby providing the \"delete\" keyword as the value to the --for flag.\n" +"\n" +"\t\tA successful message will be printed to stdout indicating when the " +"specified\n" +" condition has been met. You can use -o option to change to output " +"destination." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:53 +msgid "" +"\n" +"\t\tExpose a resource as a new Kubernetes service.\n" +"\n" +"\t\tLooks up a deployment, service, replica set, replication controller or " +"pod by name and uses the selector\n" +"\t\tfor that resource as the selector for a new service on the specified " +"port. A deployment or replica set\n" +"\t\twill be exposed as a service only if its selector is convertible to a " +"selector that service supports,\n" +"\t\ti.e. when the selector contains only the matchLabels component. Note " +"that if no port is specified via\n" +"\t\t--port and the exposed resource has multiple ports, all will be re-used " +"by the new service. Also if no\n" +"\t\tlabels are specified, the new service will re-use the labels from the " +"resource it exposes.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:50 +msgid "" +"\n" +"\t\tList all available plugin files on a user's PATH.\n" +"\n" +"\t\tAvailable plugin files are those that are:\n" +"\t\t- executable\n" +"\t\t- anywhere on the user's PATH\n" +"\t\t- begin with \"kubectl-\"\n" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:31 +msgid "" +"\n" +"\t\tManage the rollout of one or many resources." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:86 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:57 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:59 +msgid "" +"\n" +"\t\tMark the provided resource as paused.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller.\n" +"\t\tUse \"kubectl rollout resume\" to resume a paused resource.\n" +"\t\tCurrently only deployments support being paused." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:47 +msgid "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash, zsh, fish, " +"or powershell).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tDetailed instructions on how to do this are available here:\n" +"\n" +" for macOS:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-" +"shell-autocompletion\n" +"\n" +" for linux:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-" +"shell-autocompletion\n" +"\n" +" for windows:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" +"#enable-shell-autocompletion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:51 +msgid "" +"\n" +"\t\tPrint the logs for a container in a pod or specified resource. \n" +"\t\tIf the pod has only one container, the container name is optional." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:37 +msgid "" +"\n" +"\t\tProvides utilities for interacting with plugins.\n" +"\n" +"\t\tPlugins provide extended functionality that is not part of the major " +"command-line distribution.\n" +"\t\tPlease refer to the documentation and examples for more information " +"about how write your own plugins.\n" +"\n" +"\t\tThe easiest way to discover and install plugins is via the kubernetes " +"sub-project krew.\n" +"\t\tTo install krew, visit [krew.sigs.k8s.io](https://krew.sigs.k8s.io/docs/" +"user-guide/setup/install/)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:47 +msgid "" +"\n" +"\t\tRenames a context from the kubeconfig file.\n" +"\n" +"\t\tCONTEXT_NAME is the context name that you want to change.\n" +"\n" +"\t\tNEW_NAME is the new name you want to set.\n" +"\n" +"\t\tNote: If the context being renamed is the 'current-context', this field " +"will also be updated." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:49 +msgid "" +"\n" +"\t\tReplace a resource by file name or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +"the\n" +"\t\tcomplete resource spec must be provided. This can be obtained by\n" +"\n" +"\t\t $ kubectl get TYPE NAME -o yaml" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:60 +msgid "" +"\n" +"\t\tRestart a resource.\n" +"\n" +"\t Resource rollout will be restarted." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:60 +msgid "" +"\n" +"\t\tResume a paused resource.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller. 
By resuming a\n" +"\t\tresource, we allow it to be reconciled again.\n" +"\t\tCurrently only deployments support being resumed." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:56 +msgid "" +"\n" +"\t\tRoll back to a previous rollout." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_cluster.go:47 +msgid "" +"\n" +"\t\tSet a cluster entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_context.go:45 +msgid "" +"\n" +"\t\tSet a context entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:41 +msgid "" +"\n" +"\t\tSet a new size for a deployment, replica set, replication controller, or " +"stateful set.\n" +"\n" +"\t\tScale also allows users to specify one or more preconditions for the " +"scale action.\n" +"\n" +"\t\tIf --current-replicas or --resource-version is specified, it is " +"validated before the\n" +"\t\tscale is attempted, and it is guaranteed that the precondition holds " +"true when the\n" +"\t\tscale is sent to the server." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_credentials.go:69 +#, c-format +msgid "" +"\n" +"\t\tSet a user entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values.\n" +"\n" +"\t\t Client-certificate flags:\n" +"\t\t --%v=certfile --%v=keyfile\n" +"\n" +"\t\t Bearer token flags:\n" +"\t\t\t --%v=bearer_token\n" +"\n" +"\t\t Basic auth flags:\n" +"\t\t\t --%v=basic_user --%v=basic_password\n" +"\n" +"\t\tBearer token and basic auth are mutually exclusive." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:67 +#, c-format +msgid "" +"\n" +"\t\tSet the selector on a resource. Note that the new selector will " +"overwrite the old selector if the resource had one prior to the invocation\n" +"\t\tof 'set selector'.\n" +"\n" +"\t\tA selector must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\tIf --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used.\n" +" Note: currently selectors can only be set on Service objects." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:39 +msgid "" +"\n" +"\t\tShow details of a specific resource or group of resources.\n" +"\n" +"\t\tPrint a detailed description of the selected resources, including " +"related resources such\n" +"\t\tas events or controllers. You may select a single object by name, all " +"objects of that\n" +"\t\ttype, provide a name prefix, or label selector. For example:\n" +"\n" +"\t\t $ kubectl describe TYPE NAME_PREFIX\n" +"\n" +"\t\twill first check for an exact match on TYPE and NAME_PREFIX. 
If no such " +"resource\n" +"\t\texists, it will output details for every resource that has a name " +"prefixed with NAME_PREFIX." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:48 +msgid "" +"\n" +"\t\tShow the status of the rollout.\n" +"\n" +"\t\tBy default 'rollout status' will watch the status of the latest rollout\n" +"\t\tuntil it's done. If you don't want to wait for the rollout to finish " +"then\n" +"\t\tyou can use --watch=false. Note that if a new rollout starts in-between, " +"then\n" +"\t\t'rollout status' will continue watching the latest revision. If you want " +"to\n" +"\t\tpin to a specific revision and abort if it is rolled over by another " +"revision,\n" +"\t\tuse --revision=N where N is the revision you need to watch for." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:42 +#, c-format +msgid "" +"\n" +"\t\tSpecify compute resource requirements (CPU, memory) for any resource " +"that defines a pod template. If a pod is successfully scheduled, it is " +"guaranteed the amount of resource requested, but may burst up to its " +"specified limits.\n" +"\n" +"\t\tFor each compute resource, if a limit is specified and a request is " +"omitted, the request will default to the limit.\n" +"\n" +"\t\tPossible resources include (case insensitive): %s." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:52 +msgid "" +"\n" +"\t\tUpdate environment variables on a pod template.\n" +"\n" +"\t\tList environment variable definitions in one or more pods, pod " +"templates.\n" +"\t\tAdd, update, or remove container environment variable definitions in one " +"or\n" +"\t\tmore pod templates (within replication controllers or deployment " +"configurations).\n" +"\t\tView or modify the environment variable definitions on all containers in " +"the\n" +"\t\tspecified pods or pod templates, or just those that match a wildcard.\n" +"\n" +"\t\tIf \"--env -\" is passed, environment variables can be read from STDIN " +"using the standard env\n" +"\t\tsyntax.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:79 +msgid "" +"\n" +"\t\tUpdate existing container image(s) of resources.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:82 +msgid "" +"\n" +"\t\tUpdate fields of a resource using strategic merge patch, a JSON merge " +"patch, or a JSON patch.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tNote: Strategic merge patch is not supported for custom resources." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:112 +msgid "" +"\n" +"\t\tUpdate the annotations on one or more resources.\n" +"\n" +"\t\tAll Kubernetes objects support the ability to store additional data with " +"the object as\n" +"\t\tannotations. Annotations are key/value pairs that can be larger than " +"labels and include\n" +"\t\tarbitrary string values such as structured JSON. Tools and system " +"extensions may use\n" +"\t\tannotations to store their own data.\n" +"\n" +"\t\tAttempting to set an annotation that already exists will fail unless --" +"overwrite is set.\n" +"\t\tIf --resource-version is specified and does not match the current " +"resource version on\n" +"\t\tthe server the command will fail." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:93 +#, c-format +msgid "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label key and value must begin with a letter or number, and may " +"contain letters, numbers, hyphens, dots, and underscores, up to %[1]d " +"characters each.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:71 +#, c-format +msgid "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* The value is optional. If given, it must begin with a letter or " +"number, and may contain letters, numbers, hyphens, dots, and underscores, up " +"to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:38 +msgid "" +"\n" +"\t\tView previous rollout revisions and configurations." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:48 +msgid "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. You can use " +"the -o option\n" +"\t\tto change the output format." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:47 +msgid "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:43 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:83 +msgid "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from env files\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/foo.env " +"--from-env-file=path/to/bar.env" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:45 +msgid "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx image with 3 " +"replicas\n" +"\tkubectl create deployment my-dep --image=nginx --replicas=3\n" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image and expose " +"port 5701\n" +"\tkubectl create deployment my-dep --image=busybox --port=5701" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:352 +msgid "" +"\n" +"\t# Create a new ExternalName service named my-ns\n" +"\tkubectl create service externalname my-ns --external-name bar.com" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:58 +msgid "" +"\n" +"\t# List recent events in the default namespace\n" +"\tkubectl events\n" +"\n" +"\t# List recent events in all namespaces\n" +"\tkubectl events --all-namespaces\n" +"\n" +"\t# List recent events for the specified pod, then wait for more events and " +"list them as they arrive\n" +"\tkubectl events --for pod/web-pod-13je7 --watch\n" +"\n" +"\t# List recent events in YAML format\n" +"\tkubectl events -oyaml\n" +"\n" +"\t# List recent only events of type 'Warning' or 'Normal'\n" +"\tkubectl events --types=Warning,Normal" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:51 +msgid "" +"\n" +"\t# Set deployment nginx-deployment's service account to serviceaccount1\n" +"\tkubectl set serviceaccount deployment nginx-deployment 
serviceaccount1\n" +"\n" +"\t# Print the result (in YAML format) of updated nginx deployment with the " +"service account from local file, without hitting the API server\n" +"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-" +"run=client -o yaml\n" +"\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:42 +msgid "" +"\n" +"\tCreate a deployment with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:345 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:61 +msgid "" +"\n" +"\tCreate an ingress with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:44 +msgid "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:39 +msgid "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:44 +msgid "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:41 +msgid "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:76 +msgid "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), statefulset (sts), cronjob (cj), replicaset (rs)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:64 +msgid "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:234 +msgid "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:312 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:275 +msgid "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:103 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:124 +msgid "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " +"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:231 +msgid "" +"\n" +" Create a ClusterIP service with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:309 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:272 +msgid "" +"\n" +" Create a NodePort service with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:94 +msgid "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. 
By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:41 +msgid "" +"\n" +" Display addresses of the control plane and services with label kubernetes." +"io/cluster-service=true.\n" +" To further debug and diagnose cluster problems, use 'kubectl cluster-info " +"dump'." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:49 +msgid "" +" environment variable is set, then it is used as a list of paths (normal " +"path delimiting rules for your system). These paths are merged. When a value " +"is modified, it is modified in the file that defines the stanza. When a " +"value is created, it is created in the first file that exists. If no files " +"in the chain exist, then it creates the last file in the list.\n" +"\t\t\t3. Otherwise, " +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:48 +msgid "" +" flag is set, then only that file is loaded. The flag may only be set once " +"and no merging takes place.\n" +"\t\t\t2. If $" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:50 +msgid " is used and no merging takes place." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:180 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:183 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:193 +msgid "Allocate a TTY for the debugging container." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/util/override_options.go:50 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:191 +msgid "Annotations to apply to the pod." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:203 +msgid "Apply a configuration to a resource by file name or stdin" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:129 +msgid "Approve a certificate signing request" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:264 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:109 +msgid "" +"Attach to a process that is already running inside an existing container." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:108 +msgid "Attach to a running container" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:108 +msgid "" +"Auto-scale a deployment, replica set, stateful set, or replication controller" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:186 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:107 +msgid "ClusterRole this RoleBinding should reference" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:33 +msgid "Commands for features in alpha" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:185 +msgid "Container image to use for debug container." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:181 +msgid "Container name to use for debug container." +msgstr "" + +#: pkg/kubectl/cmd/convert/convert.go:96 +msgid "Convert config files between different API versions" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:98 +msgid "Copy files and directories to and from containers" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:99 +msgid "Copy files and directories to and from containers." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:249 +msgid "Create a ClusterIP service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:324 +msgid "Create a LoadBalancer service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:287 +msgid "Create a NodePort service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:81 +msgid "Create a cluster role" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:87 +msgid "Create a cluster role binding for a particular cluster role" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:121 +msgid "Create a config map from a local file, directory or literal value" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:182 +msgid "Create a copy of the target Pod with this name." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:91 +msgid "Create a cron job with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:100 +msgid "Create a deployment with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:92 +msgid "Create a job with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:84 +msgid "Create a namespace with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:95 +msgid "Create a pod disruption budget with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:92 +msgid "Create a priority class with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:91 +msgid "Create a quota with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:108 +msgid "Create a resource from a file or from stdin" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:92 +msgid "Create a role binding for a particular role or cluster role" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:171 +msgid "Create a role with single rule" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:146 +msgid "Create a secret from a local file, directory, or literal value" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using a specified subcommand" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:48 +msgid "Create a service using a specified subcommand" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:49 +msgid "Create a service using a specified subcommand." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:364 +msgid "Create an ExternalName service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:145 +msgid "Create an ingress with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:61 +msgid "Create and run a particular image in a pod." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:162 +msgid "Create debugging sessions for troubleshooting workloads and nodes" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:194 +msgid "" +"Debugging profile. Options are \"legacy\", \"general\", \"baseline\", " +"\"netadmin\", or \"restricted\"." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:146 +msgid "" +"Delete resources by file names, stdin, resources and names, or by resources " +"and label selector" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:43 +msgid "Delete the specified cluster from the kubeconfig." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:43 +msgid "Delete the specified context from the kubeconfig." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:65 +msgid "Delete the specified user from the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:66 +msgid "Delete the specified user from the kubeconfig." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:177 +msgid "Deny a certificate signing request" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:75 +msgid "Describe one or many contexts" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:136 +msgid "Diff the live version against a would-be applied version" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:66 +msgid "Display cluster information" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:42 +msgid "Display clusters defined in the kubeconfig." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:82 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:53 +msgid "Display one or many contexts from the kubeconfig file." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:164 +msgid "Display one or many resources" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:50 +msgid "Display resource (CPU/memory) usage" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:82 +msgid "Display resource (CPU/memory) usage of nodes" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:101 +msgid "Display resource (CPU/memory) usage of pods" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:51 +msgid "Display the current-context" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:60 +msgid "Display users defined in the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:61 +msgid "Display users defined in the kubeconfig." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:186 +msgid "Drain node in preparation for maintenance" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:75 +msgid "Dump relevant information for debugging and diagnosis" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:83 +msgid "Edit a resource on the server" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:67 +msgid "Edit latest last-applied-configuration annotations of a resource/object" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:184 +msgid "Environment variables to set in the container." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:91 +msgid "Execute a command in a container" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:92 +msgid "Execute a command in a container." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:124 +msgid "Experimental: Wait for a specific condition on one or many resources" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:379 +msgid "External name of service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:110 +msgid "Forward one or more local ports to a pod" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:102 +msgid "Get documentation for a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:179 +msgid "" +"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:185 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:187 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:159 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:179 +msgid "" +"If specified, everything after -- will be passed to the new container as " +"Args instead of Command." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:207 +msgid "If true, run the container in privileged mode." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:189 +msgid "If true, suppress informational messages." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:180 +msgid "" +"If true, wait for the container to start running, and then attach as if " +"'kubectl attach ...' were called. Default false, unless '-i/--stdin' is " +"set, in which case the default is true." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:188 +msgid "" +"Keep stdin open on the container(s) in the pod, even if nothing is attached." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:94 +msgid "List all visible plugin executables on a user's PATH" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:126 +msgid "List events" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:61 +msgid "Manage the rollout of a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:100 +msgid "Mark node as schedulable" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:71 +msgid "Mark node as unschedulable" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:85 +msgid "Mark the provided resource as paused" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:48 +msgid "Modify certificate resources" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +msgid "Modify certificate resources." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:182 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:41 +msgid "No alpha commands are available in this version of kubectl" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:176 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:134 +msgid "" +"Output shell completion code for the specified shell (bash, zsh, fish, or " +"powershell)" +msgstr "" + +#: pkg/kubectl/cmd/convert/convert.go:106 +msgid "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:129 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:80 +msgid "Print the client and server version information" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:81 +msgid "" +"Print the client and server version information for the current context." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:154 +msgid "Print the logs for a container in a pod" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:102 +msgid "Print the supported API resources on the server" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:103 +msgid "Print the supported API resources on the server." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:59 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:60 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:66 +msgid "Provides utilities for interacting with plugins" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:45 +msgid "Rename a context from the kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:119 +msgid "Replace a resource by file name or stdin" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:93 +msgid "Restart a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:89 +msgid "Resume a paused resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:108 +msgid "Role this RoleBinding should reference" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:153 +msgid "Run a particular image on the cluster" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:124 +msgid "Run a proxy to the Kubernetes API server" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_cluster.go:76 +msgid "Set a cluster entry in kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_context.go:62 +msgid "Set a context entry in kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:114 +msgid "Set a new size for a deployment, replica set, or replication controller" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_credentials.go:157 +msgid "Set a user entry in kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:74 +msgid "Set an individual value in a kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:41 +msgid "Set specific features on objects" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/use_context.go:52 +msgid "Set the current-context in a kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:101 +msgid "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:150 +msgid "Show details of a specific resource or group of resources" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:104 +msgid "Show the status of the rollout" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:163 +msgid "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "The image for the container to run." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:187 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:112 +msgid "" +"The maximum number or percentage of unavailable pods this budget requires." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:184 +msgid "The name for the newly created object." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:126 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:176 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:177 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:197 +msgid "The port that this container exposes." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:203 +msgid "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:164 +msgid "The type of secret to create" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:34 +msgid "" +"These commands correspond to alpha features that are not enabled in " +"Kubernetes clusters by default." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:178 +msgid "" +"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. " +"Default is 'ClusterIP'." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:88 +msgid "Undo a previous rollout" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:59 +msgid "Unset an individual value in a kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:156 +msgid "Update environment variables on a pod template" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:126 +msgid "Update fields of a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:153 +msgid "Update the annotations on a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:118 +msgid "Update the image of a pod template" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:139 +msgid "Update the labels on a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:102 +msgid "Update the service account of a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:110 +msgid "Update the taints on one or more nodes" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:99 +msgid "" +"Update the user, group, or service account in a role binding or cluster role " +"binding" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:86 +msgid "View rollout history" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:78 +msgid "" +"View the latest last-applied-configuration annotations of a resource/object" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:186 +msgid "" +"When used with '--copy-to', a list of name=image pairs for 
changing " +"container images, similar to how 'kubectl set image' works." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:183 +msgid "When used with '--copy-to', delete the original Pod." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:191 +msgid "" +"When used with '--copy-to', enable process namespace sharing in the copy." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:190 +msgid "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:192 +msgid "" +"When using an ephemeral container, target processes in this container name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:86 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:108 +msgid "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:89 +msgid "dummy restart flag)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:107 +msgid "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:317 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:51 +msgid "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:109 +msgid "" +"preemption-policy is the policy for preempting pods with lower priority." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:42 +msgid "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:106 +msgid "the value of this priority class." 
+msgstr "" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..42bc2ad6595a487412a153aa1e33f5a7a5d0e07d GIT binary patch literal 18566 zcmd6tdvILmb=a?J$F&&8iR{F2;>5YO+(5Lk;6s)zQ>JacWYH!mnxt$ek{vI0FTk}H zyX$?Bg3@{big=L#0ZDul6bXqhJqUmjEs_91v}3pZqnSzDX{VVuX%idw?jn^%2YvKAWt}X9!oIc>cd=LNe zJNsV8c^WtZybAoSyB+8A!2bl?34H%Dy>AAl?sA+nz^lM_z0Yx)cz^wu9OvV}D)7_5 zW$)K@z6ks%_fG(Sf$@I`{3iFEzw9`9;GY8T;`vv8#c}pyGmc;3?p51OFaa z0p9arrLzZ^;Ql7?e&AmLR|7x#8;--0&ewpS1bVOhzBOqIGeg^yw@O=pH6tE3= z40tz_A=w!Lg4&q?eggPkf&6p+H~+C^=fem=__Y#P0zL;^4}8z>IL@bln=PyWKg0b9 z@O!{N0j>ak#dVxjz^8!?z}JAX?tcOPF7R)F!tY=Gn9{ulcrW)UAg`Prpy=+W1J!k>M>j{!%3 ze*^p>FbTZEAfe}j2;sj0yMf)nZ=w&+qM)~{% zQ1rbFlzp86%Dn#wgr&~^vG5ZQ>9{RGS$D66M}ac#2SAbUp94Pvy!Z18zi8p(K#|*C zpwROIQ1tVM_WjR*-{$@vgx(7*0QUp`7x2Tt?|(t@9RtdGZvcM{_+y~F|65=lxa^DS zm!1ZS{`@Vl5xC+@s_#z#WxxA?-vM3*%KiJ-DgQnP{B77q9lU%M?hKke*jS>=YIl)p2jaLd;}=_O9MszgTM~}r-2pV ze+G)ZJo1S8w=l}cc{T3i@!~7oL|F3W%W+NBz(al`H&-EoPkwFU= zY%xF43EA(ja0&nAcaH)m1^hOb@K0odshAk%0k?AfGp;0;=+nEoc5*dvy^HG!F8RHm zOXMiBk>5Y(k~s8ru3cQ;;F8~Zu3zT*JudNI@zZ~Ze_#1cOe2WV+GkyMe~wQ--pPmrV;vAzJULLN=MnxrIEBmqwU$O9d~LY00D%jVzLM!zC8M z*(7tCQh5l=_VJLf3*?29{i)WKhlu8sBS0H-TvrCqk60o8^^!}3_zG^n2`2b5WNv^Hrec97LdQTt$mB! zEU-vo?g-)WhI|f|8!5JAT2a5OS8|)Z@?H;9NoQJGJR{_|#G2go`SNb}_ttIQ0s_(h zuk3tuo9IPb5<^H9F^W{mDTpXOm^G6itsdh6s!T>Z2LK^mBxFtB6ue9!V8$zcpB(}SW7WebA!WSCc zy$mW=a*eDdtP)9^>4GPjYj?2}oM$0fRLVp``R4DU^CB4uZyO?6dV6E?6yc@aw)VQu zk>Md3DV(#_gnV704JPA-))_?#699-DEnAL`9sXvX@LF@MU-BbBINJVxzlvcLtR( zhDfGB?qJj^WtFLdjEV+rv`9!MVX?eBS!S^ehMsR}aS7mMacejy0>6=DDqCUIBJ1F< zviTHCw2LSo@v=mlVt!8sV|91Soyk>DfzbEakQE~b4c(b(6OAgj7raQiX7=NSq*yfG zKwamz48E2M{w3`Z^0OtDb@|Oxp)L7jdVMmR%%#w~Pyms*v&|(lX`Uj56+RZCV_6m<+G| zUL~0;XUbT6G9!j8%V-`%Jlvfurd>mopwgVErn;UEp)B0^h= z`8M;U=n+M&yOW?u(9bfMUE4RJkxk2;b?J0Qa{pwu?p`-7Ge3XPU#4GWU~kSj^DSQ4fiRj_k&LWrSH zW=nz9>QUvGxEy+`&D)Vd4rXM@f;9=vldctU!4YBgSvKsNR8T17UoM|Z81^7_< z!%!Qc5w72kVuvb))jkq_#(ZKuL?^Lumd26YAS6e)_a!IFk;`JC5a_L+1Wb`uE6+)E9-R^}o< z;#26(!M>c;%Vn`ZBMy=lv!Rmn8DjGl4AjyYAq!XCA*rb`OG}14Sf9zIg)b%Ys%#qe*tFpGNlTA@=QWPAAzx^> z)sR#;VI0YXDM{Kj1{ZOUXdJoIU0ZgtKlWU%KZ`u~Vi|RPs7^JK6@O*?aMInF$6bmI zx00O{+ix3Qk(54G&Xg+6Hgkv%y`$vb*@(-OI4(ku$_X2Pb>H&J1S*rNFET+J1B*-` zFOj5Y@1#Mz;39tB`JMm9n+ zwFET-U8UGo>ywQNM2p2K2?G+O6s6509VuD7fR|0$(nUf|8!stLBp-7&;n7$qoGMvm zXjop+*~HMS5&L{uZ+gm0;YfvHwvJ{Yxd~18;e2tggual#JTin7x*gdD<5%oiFDV+BBQ5BIgkx~4|A zVT0)+ZQkB6ZvokKy{xnKmZt0`kuOyU35Y5qy*72#G9}D5L;lrkeE2K_LKkl$y1L-K zyXj-761R(iAZCD+zP5qav;b`;aUdF)I`AKjt~nKStbTUpyqHs3>nmY}Ek(p>hK^XKS9u4yF@}K;bCdn0-6X21?F0spw#IuF^&(5b1&_Sg6R6LLv>)GNB_3N7jX_ zAC=?}ol2oVA_(!fl%P-=O{r7)vd6T3ie^ zb=-XgmOjnZk~W!9=$vlcJYZbTe(_zma-tta0q9V*V*=7jc1}`_(8r zHqC9(dOZ401B@q*q%luH7!x%i>HgE#s~|5~QJq=I3{Xq5b&PoA`p!&a+ja!B_Z3-7 zBzV}>N-AEE_u)ej%ZC+%nX0{F8fS7>q+TFCI6K=33GHo$+4KNR_cS5a?(AZeGDZX; zb7F>{mv&fFY)nB~7KJ=2UJoGAcvCQBy%xGIQszi~5NVoe;YbyNvDVWNTQ9~m%~nO2 zgnehZFJ!h6xY{ahq0@rgg2z`I;--j|uPl3rxze*V%_Ci1>p~c1DnC4;Ad>to@&`F@ zFIm1OwFF(;&%3i>Qh%5(bLdmFCNlUTNVsYG-;tm+{zMKdTEz~Q&b+NqL>Q*VZE_hU zZFsKL^|un5tv_C|XH^3sI30Viy1`6C_1Ih3`KJ>cehh%JHmQ+}Yqw}jdLibwXO*d^ zU=k>;KpIRh4%TXIWu#2eVaCXm)sg>}xpm4QL|M*Ph&&x;LRmSV%4=#Q{0SFlv#`aA zVJ@;=8@5|4CO$0|j*O89Jk(R%4`B zpiPMIV;S3vX}tR|g7Th9N~|GCN);+=SFc>z*4XB?@gZdG1FKeV%^(YRTi)d01X4pL zm7f&05~GN1(vFZorbSw~T-qymS~B4zLRR6cS-sv-Z{$lHRUnOP(suAs~`BxJKs&T;6Hdjrmu>n*qREBQFU=1%cZ3$LAJgplgIgmUhgr@`R4F} zlDxFS@G9<@H2@QJTv~6^DfUW^oh_WN=eK53%wxv}*+g@$M~}Ui${ayUG_XaGWcKFc 
zxAPcU5qYI~QDuJG@%0QJ7wi^YJgj_ibG~7G1T4YZ^To{Qi~0VC?%|}gzDHFX5)G0p z@rlQL$)dt%f5p8SYExw zmRd9E#QI8WDX}ZR#@)1a-R3RJwm+KKf%v7yNo+)ZYuwcM~{?M{- zCcdmaHQCelyeHM^{Y@*IR`Et}6FbczhYd==8doXcbBFnK6&$}+JvR~bbp*qGwFCRA z`)*WE?hpEVg0nqlL?~JL5KpEn--6|Hg`L*liL-(y+eKRY!ud9FHn zG3c9y7ENknw9Gkbp%eY7=l#9`fAqzm_geMUG5@*#V0gekFv8zpsP^>+!>`3KkW-4X z3jgnQ?b3(Gg8oifP7;CJX54>I&=M?3KM1Eh=uSo6lTcJ zIcF;3g8cdL#6M}wKT{FME(E!7#~*zLs{GL#{)tPnXL~Ft#v)7LQs*h=Al7dLGuJ0f zh8LclkiADtSYnk04pjKVmu9XH_(T1IqArt}8wcTVZR)jP;KkXlf$G#Kx;iuY=1)8J z1&7Z2gU7=26zo!4*}>VH{>?$&i`~?wPt06@MKx?@^1RMPaE)&D)Tlpoq|ps}kJdW6 z%n^#2sbMxG`q6d%t(*P+jW=gIPS&P-+@EwDR$m`=PW-`r!I5zmaFsN_cU+A^HHke$ zWcU~M1uqN-ua98ucFd_!OkGFn@}fS*rXodz7lBdHM%V8>p^HFVFx(Lw9oJEIi?fr* zYoq7boSu`=b*-5|)!81ur(18C;17;5a%O6}dic_u;}bK}qru@DFog*^I=%<79A;D} zPl(;4_ZP?g-c$bM#LV>$22uMwrf`}VC{B-P&qoTUJ$mht=jlA{=|iaUyEYR8&Mm*CAy^SMB|n?+z`BRsdn)t z6au!Pj(-0Q6frn6q{_rr{O&GHi>0l?-8|`!-K_OoT?%RK#aDwdY zSjSFU^6Z3e$TzyB%A=YC2j-rp(6vN7^NvkeB5oc*{rq#nF>_ABA0GElchoxi#RiX$ zsSh>J4C`?z{&=_F)ma<8?oS*Rv$%B7pBTh@;7EMMcKNvYvx!6gjW_(kaghJCqtnXe zRCo2(xtZ$+uzaEvi$QQiBCIs6X3^<5?(3HsH8VLmGj&9rSui*>bL*6Uebhhjve7&g zVs^M=_V6gqE7Blxxq|z+&Zu1*^Kb3{mg^5d(wKzvTgUv-{@}<}P}jyTR42PPVD?sT|^d{Dfl1SyU(Y&%^%n?k}=?^Sww8?)^i~PiXkj(6NNupL@KbZX&uf!q{xb zF@NwHUT*f}VbPGTf!P;N*ZQCFPhAzSI^8upGU<0+5>IG+whn;pwQFZ;*IyC+9UGMJ zWjv>-{bHy!Xm4E03qXR%uc+$rwY#gCG=XyGL!27Nck?ko$&I%jTNK)yl{5M4nQbA~v?y$%Bc>&pJ?^#SUT1BJz+m9nplIr?sj}shxt~z;oo~pCD zRrO$Sur_U-f%vC$C-K0s*{0=!MCX&G{yR)B9IbKvpg3hnz&WRS^JwkXv&u>QEaXFjou|$t z6zA-bVM+#n;&}$l?i-~bR#)n_ej*F2aA%qd?-nw}T zW(RK^F@K-b!&&6Yu_F)*peJZjPfk5APLz_Ah|<|=LosEX)=EL2 zshfI-`~zpOJDf_FFizWAQm4M6^Mhk=ss}d4X`)bkpBm3?`K3gG<&KG(N5vfxu~^~j z%zF_pd5vd^It-yk=>%N5f9>_))ur!4~EawPd2zxpAQ{^7~c5P$Czc?BV;3RfM^;%C-bU3Go{XNq0(WDC_dCP`vwqSV<24y8{=pk!8tllx`Uu{u2pGY6^@(m2=J*qL+YK#FvFs?;d8u>r?A zIKx+DZ@EDPG0~*Si8!tgf`fg0FsNQWT0J7!i8^9U*|2OfD?G0WC6c>G&3Ije0X&SD zqE@KebiB$iD#6hz8OGfbA(UtSZ2esG|cXHwuSs_J{=o=jeO+sW=)bTQ&=j~tX z4uauVA{vE!(+m|o?mBN-Ej*JRr%~_=o%NK-^rcNs>H34!4Z_gTXJ#f}(+;GpafB!~@b- zH3@y#@RyV!pxM3e#vR62DWH0EW><20Z)`nV!$J#;~m-{DKRMlAqM;bZu0zBUo;gr$?u2QTb*{LUVBG26p8 auQ5S1eXRaXUt2BlKxEsWrYVAoI{zQE6Bkba literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po new file mode 100644 index 000000000..29bd5844d --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po @@ -0,0 +1,3236 @@ +# Test translations for unit tests. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR shiywang@redhat.com, 2017. +# FIRST AUTHOR zhengjiajin@caicloud.io, 2017. 
+# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2022-07-04 18:54+0800\n" +"Last-Translator: zhengjiajin \n" +"Language-Team: \n" +"Language: zh\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" +"X-Generator: Poedit 3.0.1\n" +"X-Poedit-SourceCharset: UTF-8\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # 显示所有节点的指标\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # 显示指定节点的指标\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# 获å–资æºåŠå…¶å­—段的文档\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# 获å–资æºæŒ‡å®šå­—段的文档\n" +"\t\tkubectl explain pods.spec.containers" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# 输出所有命令继承的 flags\n" +"\t\tkubectl options" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# 输出当å‰å®¢æˆ·ç«¯å’ŒæœåŠ¡ç«¯çš„版本\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# 输出支æŒçš„ API 版本\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# 显示 default 命å空间下所有 Pods 的指标\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# 显示指定命å空间下所有 Pods 的指标\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# 显示指定 Pod 和它的容器的 metrics\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# 显示指定 label 为 name=myLabel çš„ Pods çš„ metrics\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. 
One can use -" +"o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\t在ä¸åŒçš„ API 版本之间转æ¢é…ç½®æ–‡ä»¶ã€‚æŽ¥å— YAML\n" +"\t\tå’Œ JSON æ ¼å¼ã€‚\n" +"\n" +"\t\t这个命令以文件å, 目录, 或者 URL 作为输入,并通过 —output-version å‚æ•°\n" +"\t\t 转æ¢åˆ°æŒ‡å®šç‰ˆæœ¬çš„æ ¼å¼ã€‚如果没有指定目标版本或者所指定版本\n" +"\t\tä¸æ”¯æŒ, 则转æ¢ä¸ºæœ€æ–°ç‰ˆæœ¬ã€‚\n" +"\n" +"\t\t默认以 YAML æ ¼å¼è¾“出到标准输出。å¯ä»¥ä½¿ç”¨ -o option\n" +"\t\t修改目标输出的格å¼ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\t用给定å称创建一个命å空间。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\t创建一个具有å•ä¸€è§„则的角色。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\t用指定的å称创建一个æœåŠ¡è´¦æˆ·ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\t标记节点为å¯è°ƒåº¦ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\t标记节点为ä¸å¯è°ƒåº¦ã€‚" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\t设置最新的 last-applied-configuration 注解,使之匹é…æŸæ–‡ä»¶çš„内容。\n" +"\t\t这会导致 last-applied-configuration 被更新,就åƒæ‰§è¡Œäº† kubectl apply -f " +" 一样,\n" +"\t\tåªæ˜¯ä¸ä¼šæ›´æ–°å¯¹è±¡çš„其他部分。" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # 创建一个å为 my-namespace 的新命å空间\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # 创建一个å为 my-service-account çš„æ–°æœåŠ¡å¸æˆ·\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." 
+msgstr "" +"\n" +"\t创建具有指定å称的 ExternalName æœåŠ¡ã€‚\n" +"\n" +"\tExternalName æœåŠ¡å¼•ç”¨å¤–部 DNS 地å€è€Œä¸æ˜¯ Pod 地å€ï¼Œ\n" +"\t这将å…许应用程åºä½œè€…引用存在于平å°å¤–ã€å…¶ä»–集群上或本地的æœåŠ¡ã€‚" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp 为应用程åºä¸­çš„任何命令æ供帮助。\n" +"\tåªéœ€é”®å…¥ kubectl help [命令路径] å³å¯èŽ·å¾—完整的详细信æ¯ã€‚" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # 创建一个å称为 my-lbs çš„æ–°è´Ÿè½½å‡è¡¡æœåŠ¡\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # 导出当å‰çš„集群状æ€ä¿¡æ¯åˆ°æ ‡å‡†è¾“出\n" +" kubectl cluster-info dump\n" +"\n" +" # 导出当å‰çš„集群状æ€åˆ° /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # 导出所有命å空间到标准输出\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # 导出一组命å空间到 /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" 使用一个指定的å称创建一个 LoadBalancer æœåŠ¡ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L61 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object tracked " +"by the quota." +msgstr "一组以逗å·åˆ†éš”çš„é…é¢èŒƒå›´ï¼Œå¿…须全部匹é…é…é¢æ‰€è·Ÿè¸ªçš„æ¯ä¸ªå¯¹è±¡ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L60 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "一组以逗å·åˆ†éš”的资æº=æ•°é‡å¯¹ï¼Œç”¨äºŽå®šä¹‰ç¡¬æ€§é™åˆ¶ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L63 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "一个用于该预算的标签选择器。åªæ”¯æŒåŸºäºŽç­‰å€¼æ¯”较的选择器è¦æ±‚。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L106 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. 
If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"用于此æœåŠ¡çš„标签选择器。仅支æŒåŸºäºŽç­‰å€¼æ¯”较的选择器è¦æ±‚。如果为空(默认),则从" +"副本控制器或副本集中推断选择器。)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L111 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"为æœåŠ¡æ‰€æŽ¥å—的其他外部 IP 地å€ï¼ˆä¸ç”± Kubernetes 管ç†ï¼‰ã€‚如果这个 IP 被路由到一" +"个节点,除了其生æˆçš„æœåŠ¡ IP 外,还å¯ä»¥é€šè¿‡è¿™ä¸ª IP 访问æœåŠ¡ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L119 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it is " +"used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"针对所生æˆå¯¹è±¡çš„å†…è” JSON 覆盖。如果这一对象是éžç©ºçš„,将用于覆盖所生æˆçš„对象。" +"è¦æ±‚对象æ供有效的 apiVersion 字段。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "批准一个è¯ä¹¦ç­¾ç½²è¯·æ±‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "为“无头â€æœåŠ¡ï¼ˆæ— è´Ÿè½½å¹³è¡¡ï¼‰åˆ†é…你自己的 ClusterIP 或设置为“无。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/attach.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "挂接到一个è¿è¡Œä¸­çš„容器" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L115 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or set " +"to 'None' to create a headless service." +msgstr "" +"è¦åˆ†é…ç»™æœåŠ¡çš„ ClusterIP。留空表示自动分é…,或设置为 “None†以创建无头æœåŠ¡ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRoleBinding 应该指定 ClusterRole" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "RoleBinding 应该指定 ClusterRole" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/convert.go#L67 +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "在ä¸åŒçš„ API 版本之间转æ¢é…置文件" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cp.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." 
+msgstr "将文件和目录å¤åˆ¶åˆ°å®¹å™¨ä¸­æˆ–从容器中å¤åˆ¶å‡ºæ¥ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L214 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "创建一个 TLS secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "用指定的å称创建一个命å空间" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L143 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "创建一个给 Docker registry 使用的 Secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L34 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "使用指定的å­å‘½ä»¤åˆ›å»ºä¸€ä¸ª Secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "创建一个指定å称的æœåŠ¡è´¦æˆ·" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "从 kubeconfig 中删除指定的集群" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "从 kubeconfig 中删除指定的上下文" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L121 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "æ‹’ç»ä¸€ä¸ªè¯ä¹¦ç­¾å请求" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "æ述一个或多个上下文" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "显示在 kubeconfig 中定义的集群" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "显示åˆå¹¶çš„ kubeconfig é…置或一个指定的 kubeconfig 文件" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/get.go#L107 +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "显示一个或多个资æº" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L176 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "清空节点以准备维护" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "编辑æœåŠ¡å™¨ä¸Šçš„资æº" + +# 
https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L159 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "用于 Docker é•œåƒåº“的邮件地å€" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/exec.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "在æŸä¸ªå®¹å™¨ä¸­æ‰§è¡Œä¸€ä¸ªå‘½ä»¤" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/portforward.go#L75 +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "将一个或多个本地端å£è½¬å‘到æŸä¸ª Pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/help.go#L36 +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "关于任何命令的帮助" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal values: " +"'None', 'ClientIP'" +msgstr "如果éžç©ºï¼Œåˆ™å°†æœåŠ¡çš„会è¯äº²å’Œæ€§è®¾ç½®ä¸ºæ­¤å€¼ï¼›åˆæ³•å€¼ï¼š'None'ã€'ClientIP'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/annotate.go#L135 +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single resource." +msgstr "" +"如果éžç©ºï¼Œåˆ™åªæœ‰å½“所给值是对象的当å‰èµ„æºç‰ˆæœ¬æ—¶ï¼Œæ³¨è§£æ›´æ–°æ‰ä¼šæˆåŠŸã€‚ 仅在指定å•" +"个资æºæ—¶æœ‰æ•ˆã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L132 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single resource." +msgstr "" +"如果éžç©ºï¼Œåˆ™æ ‡ç­¾æ›´æ–°åªæœ‰åœ¨æ‰€ç»™å€¼æ˜¯å¯¹è±¡çš„当å‰èµ„æºç‰ˆæœ¬æ—¶æ‰ä¼šæˆåŠŸã€‚仅在指定å•ä¸ªèµ„" +"æºæ—¶æœ‰æ•ˆã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "标记节点为å¯è°ƒåº¦" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "标记节点为ä¸å¯è°ƒåº¦" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_pause.go#L73 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "将所指定的资æºæ ‡è®°ä¸ºå·²æš‚åœ" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L35 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." 
+msgstr "修改è¯ä¹¦èµ„æºã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "修改 kubeconfig 文件" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L110 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"此为端å£çš„å称或端å£å·ï¼ŒæœåŠ¡åº”å°†æµé‡å®šå‘到容器上的这一端å£ã€‚此属性为å¯é€‰ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"仅返回在指定日期 (RFC3339) 之åŽçš„日志。默认为所有日志。åªèƒ½ä½¿ç”¨ since-time / " +"since 之一。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/completion.go#L97 +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "为指定的 Shell(Bash 或 zsh) 输出 Shell 补全代ç ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L157 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "用于 Docker é•œåƒåº“身份验è¯çš„密ç " + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L226 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "PEM ç¼–ç çš„公钥è¯ä¹¦çš„路径。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L227 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "与给定è¯ä¹¦å…³è”çš„ç§é’¥çš„路径。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L82 +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource version " +"match this value in order to scale." 
+msgstr "资æºç‰ˆæœ¬çš„å‰ææ¡ä»¶ã€‚è¦æ±‚当å‰èµ„æºç‰ˆæœ¬ä¸Žæ­¤å€¼åŒ¹é…æ‰èƒ½è¿›è¡Œæ‰©ç¼©æ“作。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "输出客户端和æœåŠ¡ç«¯çš„版本信æ¯" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "输出所有命令的层级关系" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L86 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "æ‰“å° Pod 中容器的日志" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_resume.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "æ¢å¤æš‚åœçš„资æº" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L56 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "RoleBinding 应该引用的 Role" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L94 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "在集群上è¿è¡Œç‰¹å®šé•œåƒ" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/proxy.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "è¿è¡Œä¸€ä¸ªæŒ‡å‘ Kubernetes API æœåŠ¡å™¨çš„代ç†" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L161 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Docker é•œåƒåº“çš„æœåŠ¡å™¨ä½ç½®" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "为对象设置指定特性" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_selector.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "为资æºè®¾ç½®é€‰æ‹©å™¨" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/describe.go#L80 +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "显示特定资æºæˆ–资æºç»„的详细信æ¯" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_status.go#L57 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "显示上线的状æ€" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "--target-port çš„åŒä¹‰è¯" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "指定容器è¦è¿è¡Œçš„é•œåƒ." 
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L116
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176
+msgid ""
+"The image pull policy for the container. If left empty, this value will not "
+"be specified by the client and defaulted by the server"
+msgstr "容器的镜像拉取策略。如果留空,该值将不由客户端指定,由服务器默认设置"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L62
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111
+msgid "The minimum number or percentage of available pods this budget requires."
+msgstr "此预算要求的可用 Pod 的最小数量或百分比。"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L113
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159
+msgid "The name for the newly created object."
+msgstr "新创建的对象的名称。"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L71
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125
+msgid ""
+"The name for the newly created object. If not specified, the name of the "
+"input resource will be used."
+msgstr "新创建的对象的名称。如果未指定,将使用输入资源的名称。"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L98
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147
+msgid ""
+"The name of the API generator to use. There are 2 generators: 'service/v1' "
+"and 'service/v2'. The only difference between them is that service port in v1 "
+"is named 'default', while it is left unnamed in v2. Default is 'service/v2'."
+msgstr ""
+"要使用的 API 生成器的名称。有两个生成器。'service/v1' 和 'service/v2'。它们之"
+"间唯一的区别是,v1 中的服务端口被命名为 'default',如果在 v2 中没有指定名称。"
+"默认是 'service/v2'。"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L99
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148
+msgid "The network protocol for the service to be created. Default is 'TCP'."
+msgstr "要创建的服务的网络协议。默认为 “TCP”。"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L100
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149
+msgid ""
+"The port that the service should serve on. Copied from the resource being "
+"exposed, if unspecified"
+msgstr "服务要使用的端口。如果没有指定,则从被暴露的资源复制"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L131
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194
+msgid ""
+"The resource requirement limits for this container. For example, 'cpu=200m,"
+"memory=512Mi'. Note that server side components may assign limits depending "
+"on the server configuration, such as limit ranges."
+msgstr ""
+"这个容器的资源需求限制。例如,\"cpu=200m,内存=512Mi\"。请注意,服务器端的组件"
+"可能会根据服务器的配置来分配限制,例如限制范围。"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L130
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192
+msgid ""
+"The resource requirement requests for this container. For example, 'cpu=100m,"
+"memory=256Mi'. Note that server side components may assign requests "
+"depending on the server configuration, such as limit ranges."
+msgstr "" +"这个容器的资æºéœ€æ±‚请求。例如,\"cpu=200m,内存=512Mi\"。请注æ„,æœåŠ¡å™¨ç«¯çš„组件" +"å¯èƒ½ä¼šæ ¹æ®æœåŠ¡å™¨çš„é…ç½®æ¥åˆ†é…é™åˆ¶ï¼Œä¾‹å¦‚é™åˆ¶èŒƒå›´ã€‚" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L87 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "è¦åˆ›å»ºçš„ Secret 类型" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_undo.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "撤销上一次的上线" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_resources.go#L101 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "使用 Pod 模æ¿æ›´æ–°å¯¹è±¡çš„资æºè¯·æ±‚/é™åˆ¶" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "更新一个资æºçš„注解" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L109 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "æ›´æ–°æŸèµ„æºä¸Šçš„标签" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/taint.go#L88 +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "更新一个或者多个节点上的污点" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L155 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "用于 Docker é•œåƒåº“身份验è¯çš„用户å" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_history.go#L51 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "显示上线历å²" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L45 +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. 
If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"在哪里输出文件。如果为空或 “-†则使用标准输出,å¦åˆ™åœ¨è¯¥ç›®å½•ä¸­åˆ›å»ºç›®å½•å±‚次结构" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "å‡çš„é‡å¯æ ‡å¿—)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cmd.go#L217 +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl 控制 Kubernetes 集群管ç†å™¨" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using the " +#~ "cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # 使用 cluster-admin ClusterRole 为 user1, user2, and group1 创建一" +#~ "个 ClusterRoleBinding\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # 使用 admin ClusterRole 为 user1, user2, and group1 创建一个 " +#~ "RoleBinding\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys instead " +#~ "of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" +#~ msgstr "" +#~ "\n" +#~ "\t\t # 通过文件夹 bar 创建一个å称为 my-config çš„ configmap\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # 创建一个å称为 my-config çš„ configmap 并指定 keys 而ä¸æ˜¯ä½¿ç”¨ç£ç›˜ä¸Š" +#~ "所在的文件å\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # 创建一个å称为 my-config çš„ configmap 且 key1=config1 å’Œ " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" + +#~ msgid "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +#~ msgstr "" +#~ "\n" +#~ "\t\t # 如果你还没有 .dockercfg 文件, ä½ å¯ä»¥ç›´æŽ¥ä½¿ç”¨ä¸‹é¢çš„命令创建一个 " +#~ "dockercfg 类型的 Secret:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" 
+#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +#~ "ConfigMap" +#~ msgstr "" +#~ "\n" +#~ "\t\t# å°† pod.json 上的é…置应用于 pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# 将传入 stdin çš„ JSON 应用到一个 pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune ä»ç„¶åœ¨ Alpha\n" +#~ "\t\t# 应用在 manifest.yaml 中匹é…标签 app=nginx 的资æºé…置并删除所有ä¸åœ¨è¿™" +#~ "个文件中并匹é…标签app=nginx 的资æº\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# 应用 manifest.yaml çš„é…置并删除所有ä¸åœ¨è¿™ä¸ªæ–‡ä»¶ä¸­çš„ ConfigMaps。\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +#~ "ConfigMap" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, target CPU utilization specified so a default autoscaling policy " +#~ "will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of pods " +#~ "between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 自动弹性伸缩 deployment \"foo\", pods çš„æ•°é‡åœ¨ 2 å’Œ 10 之间, 目标 " +#~ "CPU 指定为默认的弹性伸缩策略:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# 自动弹性伸缩 replication controller \"foo\", pods çš„æ•°é‡åœ¨ 1 å’Œ 5 之" +#~ "é—´, 目标 CPU 利用率为 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 将’pod.yaml' 转æ¢ä¸ºæœ€æ–°ç‰ˆæœ¬å¹¶æ‰“å°åˆ° stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# å°† ‘pod.yaml' 指定的资æºçš„实时状æ€è½¬æ¢ä¸ºæœ€æ–°ç‰ˆæœ¬\n" +#~ "\t\t# 并以 json æ ¼å¼æ‰“å°åˆ° stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# 将当å‰ç›®å½•ä¸‹çš„所以文件转æ¢ä¸ºæœ€æ–°ç‰ˆæœ¬å¹¶åˆ›å»ºå®ƒä»¬.\n" +#~ "\t\tkubectl convert -f . 
| kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 创建一个å为 \"pod-reader\" çš„ ClusterRole, å…许用户在 pods 上执行 " +#~ "“get\", \"watch\" å’Œ \"list\"\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# 创建一个å为 \"pod-reader\" ClusterRole, 其中指定了 ResourceName\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +#~ "replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 创建一个å为 my-quota çš„ resourcequota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +#~ "replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# 创建一个å为 best-effort çš„ resourcequota\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 创建一个å称为 my-pdb çš„ pod disruption budget 并将会选择所有 " +#~ "app=rails 标签的 pods\n" +#~ "\t\t# 并è¦æ±‚他们在åŒä¸€æ—¶é—´ä¸­æœ€å°‘有一个å¯ç”¨. 
\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# 创建一个å称为 my-pdb çš„ pod disruption budget 并将会选择所有 " +#~ "app=rails 标签的 pods\n" +#~ "\t\t# 并è¦æ±‚他们在åŒä¸€æ—¶é—´ä¸­æœ€å°‘有一åŠå¯ç”¨.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 使用在 pod.json çš„ æ•°æ®åˆ›å»ºä¸€ä¸ª pod.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# æ ¹æ®ä¼ å…¥ stdin çš„ JSON 创建一个 pod.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# 使用 v1 API æ ¼å¼åœ¨ JSON 中编辑在 docker-registry.yaml 中的数æ®ç„¶åŽä½¿" +#~ "用被编辑åŽçš„æ•°æ®åˆ›å»ºèµ„æº.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 and " +#~ "connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type and " +#~ "name specified in \"nginx-controller.yaml\", which serves on port 80 and " +#~ "connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port 4100 " +#~ "balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 为一个 replicated nginx 创建一个 service, æœåŠ¡åœ¨ç«¯å£ 80 并连接到 " +#~ "containers çš„8000端å£.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# 使用在 \"nginx-controller.yaml\\ 中指定的 type å’Œ name 为一个" +#~ "replication controller 创建一个 service, æœåŠ¡åœ¨ç«¯å£ 80 并连接到 containers " +#~ "çš„8000端å£.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# 为å为 valid-pod çš„ pod 创建一个 service, æœåŠ¡åœ¨ç«¯å£ 444 并命å为 " +#~ "\"frontend\" \n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# 基于上é¢çš„ service 创建第二个 service, 
æš´éœ²å®¹å™¨ç«¯å£ 8443 并命å为 " +#~ "\"nginx-https\" 端å£ä¸º 443 \n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# 为一个å称为 streaming 的应用创建一个 service æš´éœ²ç«¯å£ 4100, å议为 " +#~ "UDP å称为 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# 为一个å称为 nginx çš„ replica set 创建一个 service, æœåŠ¡åœ¨ ç«¯å£ 80 且" +#~ "è¿žæŽ¥åˆ°å®¹å™¨ç«¯å£ 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# 为一个å称为 nginx çš„ deployment 创建一个 service, æœåŠ¡åœ¨ç«¯å£ 80 且 " +#~ "连接到 containers çš„ 8000 端å£.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 使用 pod.json 中的类型和å称删除一个 pod.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# 基于é‡å®šå‘到 stdin 中的 JSON 的类型和å称删除一个 pod.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# 删除å为 \"baz\" å’Œ \"foo\" çš„ pod å’Œ service\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# 删除标签为 name=myLabel çš„ pods å’Œ services.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# 删除最å°å»¶è¿Ÿçš„ pod\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# 强制删除å为 foo çš„ pod\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# 删除所有 pods\n" +#~ "\t\tkubectl delete pods --all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ "\t\t# æ述一个 node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# æ述一个 pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# æ述一个被 \"pod.json\" 中的类型和å称标识的 pod\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# æ述所有 pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# æ述标签为 name=myLabel çš„ pods\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# æ述所有被å称为 'frontend' çš„ replication controller 
管ç†çš„ pods(rc-" +#~ "创建 pods\n" +#~ "\t\t# 并使用 rc çš„å称作为 pod çš„å‰ç¼€).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet 或者 StatefulSet, and " +#~ "use a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 驱é€èŠ‚点 \"foo\", å³ä½¿å¾ˆå¤š pods 没有被一个在 node 上的 " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet 或者 StatefulSet 管ç†.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# åŒä¸Š, 如果存在 pods 没有被一个 ReplicationController, ReplicaSet, " +#~ "Job, DaemonSet 或者 StatefulSet 管ç†è¶…过 15 分钟则退出.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 编辑å为 'docker-registry' çš„ service:\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# 使用一个å¯é€‰æ‹©çš„编辑器\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# 使用 v1 API æ ¼å¼çš„ JSON 编辑å为 'myjob' çš„ job:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# 在 YAML 中编辑å为 'mydeployment' çš„ deployment 并在它的注解中ä¿å­˜ä¿®" +#~ "改åŽçš„é…ç½®:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the first " +#~ "container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container " +#~ "from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 从è¿è¡Œä¸­pod 123456-7890 获å–执行 'date' 的输出, 默认使用第一个容器\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# 从 pod 123456-7890 的容器 ruby-container 获å–执行 'date' 的输出\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# 切æ¢åˆ° terminal 模å¼, å‘é€ stdin 到è¿è¡Œåœ¨ pod 123456-7890 的容器 " +#~ "ruby-container 'bash' \n" +#~ "\t\t# 并从 'bash' å‘é€ stdout/stderr 返回到 client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container " +#~ "from pod 
123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 从è¿è¡Œä¸­pod 123456-7890 获å–执行 'date' 的输出, 默认使用第一个容器\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# 从 pod 123456-7890 的容器 ruby-container 获å–输出\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# 切æ¢åˆ° terminal 模å¼, å‘é€ stdin 到è¿è¡Œåœ¨ pod 123456-7890 的容器 " +#~ "ruby-container 'bash' \n" +#~ "\t\t# 并从 'bash' å‘é€ stdout/stderr 返回到 client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# 从å称为 nginx çš„ ReplicaSet 获å–第一个 pod 的输出\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac using homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source if from ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 在一个 Mac 中使用 homebrew 安装 bash 补全\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash 补全支æŒ\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# 导入 kubectl 补全代ç åˆ°å½“å‰ shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# 写入 bash 补全代ç åˆ°ä¸€ä¸ªæ–‡ä»¶å¹¶ source 如果它是 .bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell 补全\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# 为 zsh[1] 导入 kubectl 补全代ç åˆ°å½“å‰ shell\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 以 ps 输出格å¼åˆ—出所有 pod.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# 以 ps 输出格å¼åˆ—出所有 pod(如节点å称).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# 获å–å称为 web çš„ replicationcontroller.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# 使用 JSON æ ¼å¼åŒ–输出显示一个å•ç‹¬çš„ pod.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# 显示一个被 \"pod.yaml\" 中的 type å’Œ name 标识的 pod 并使用 JSON æ ¼å¼" +#~ "化输出.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# åªè¿”回被指定 pod 中 phase 的值.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# 显示所有的 replication controllers å’Œ services 并格å¼åŒ–输出.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# 显示一个或者更多 resources 通过它们的 type å’Œ names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# 使用ä¸åŒçš„ types 显示所有 resources.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +#~ "5000 and 6000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 在本地监å¬ç«¯å£ 5000 å’Œ 6000 , forwarding æ•°æ® to/from 在 pod 5000 å’Œ " +#~ "6000 端å£\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# 在本地监å¬ç«¯å£ 8888 , forwarding 到 pod çš„ 5000端å£\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# 在本地éšæœºç›‘å¬ä¸€ä¸ªç«¯å£ , forwarding 到 pod çš„ 5000端å£\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# 在本地éšæœºç›‘å¬ä¸€ä¸ªç«¯å£ , forwarding 到 pod çš„ 5000端å£\n" +#~ "\t\tkubectl port-forward mypod 0:5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 标记 node \"foo\" 为 schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 标记 node \"foo\" 为 unschedulable.\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified in " +#~ "\"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod 
valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 使用 strategic merge patch 部分更新一个 node\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# 使用 strategic merge patch 部分更新一个被 \"node.json\" çš„ type å’Œ " +#~ "name 标示 çš„ node.\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# 更新一个 container çš„ image; spec.containers[*].name 是必须的 因为它" +#~ "是一个 merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# 使用一个 json patch 更新一个指定å标的 container çš„ image \n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 输出 master å’Œ cluster services 的地å€\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 使用在 pod.json 中的数æ®æ›¿æ¢ä¸€ä¸ª pod.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# 基于被é‡å®šå‘到 stdin 中的 JSON 替æ¢ä¸€ä¸ª pod.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# 更新一个å•ç‹¬å®¹å™¨çš„ pod çš„ image 版本 (tag) 到 v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# 强制替æ¢, 删除然åŽé‡æ–°åˆ›å»ºè¿™ä¸ª resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" 
+#~ "\n" +#~ "\t\t# 返回仅有一个容器 pod å称为 nginx çš„ snapshot 日志\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# 返回 label 为 app=nginx çš„ pods çš„ snapshot 日志\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# è¿è¡Œ proxy 到 kubernetes apiserver çš„ 8011 端å£ä¸Š, æœåŠ¡é™æ€å†…容路径" +#~ "为 ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# 在任æ„的本地端å£ä¸Šè¿è¡Œä¸€ä¸ª proxy 到 kubernetes apiserver.\n" +#~ "\t\t# 为这个 server 挑选的端å£å°†ä¼šè¢«è¾“出到 stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# è¿è¡Œä¸€ä¸ª proxy 到 kubernetes apiserver, 修改 api prefix 为 k8s-api\n" +#~ "\t\t# 这会使 e.g. 这个 pods 的有效 api 为 localhost:8001/k8s-api/v1/pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo." 
+#~ "yaml\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Scale 一个å称为 ‘foo’ çš„ replicaset æœæœ¬æ•°ä¸º 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale 指定的 \"foo.yaml\" çš„ type å’Œ name 标识的 resource 副本数é‡ä¸º " +#~ "3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# 如果å称为 mysql çš„ deployment 当å‰å‰¯æœ¬æ•°é‡ä¸º 2, scale mysql 到 3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale 多个 replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale å称为 ’cron’ çš„ job 副本数é‡ä¸º 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 设置一个资æºçš„ last-applied-configuration 去匹é…一个文件的内容.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# 设置一个资æºçš„ last-applied-configuration 去匹é…一个文件的内容, 如果" +#~ "ä¸å­˜åœ¨å°†ä¼šåˆ›å»ºä¸€ä¸ª annotation.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast 
and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --" +#~ "env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +#~ "\"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... \n" +#~ "\n" +#~ "\t\t# Start the perl container to compute Ï€ to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute Ï€ to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --" +#~ "env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +#~ "\"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Start the perl container to compute Ï€ to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute Ï€ to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the 
--overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." 
+#~ msgstr "" +#~ "\n" +#~ "\t\t通过文件å或标准输入æµ(stdin)对资æºè¿›è¡Œé…ç½®.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L68 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\t创建一个 ClusterRole." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L43 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\t 为指定的 ClusterRole 创建一个 ClusterRoleBinding." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L43 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\t为指定的 Role 或者 ClusterRole 创建一个 RoleBinding." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\t为指定的 public/private key pair 创建一个 TLS secret.\n" +#~ "\n" +#~ "\t\tpublic/private key pair 必须在传递å‰å­˜åœ¨. public key certificate å¿…é¡»" +#~ "以 .PEM 被编ç ä¸”匹é…指定的 private key." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L49 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create.go#L56 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\t通过文件å或者标准输入æµ(stdin)创建一个资æº.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. 
If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments may " +#~ "be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. 
To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +#~ "pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of the " +#~ "same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments may " +#~ "be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +#~ "pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. 
Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of the " +#~ "same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." + +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered by " +#~ "delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered by " +#~ "delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\t显示 node 的资源(CPU/Memory/Storage)使用.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\t显示 pods 资源(CPU/Memory/Storage)使用.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\t显示资源(CPU/Memory/Storage)使用.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " + +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). 
Otherwise, it will use " +#~ "normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\t清理节点为节点维护做准备.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use " +#~ "normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +#~ "Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. 
The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\t使用默认的编辑器修改资源.\n" +#~ "\n" +#~ "\t\tedit 命令允许你通过命令行直接修改 API 资源.\n" +#~ "\t\t它会打开你在 KUBE_EDITOR 或者EDITOR 环境变量中定义的编辑器\n" +#~ "\t\t或者回滚到 Linux vi 编辑器或者 Windows notepad.\n" +#~ "\t\t你可以修改多个对象, 虽然每次只能修改一次. 这个命令\n" +#~ "\t\t同时也接受文件名作为命令行参数, 尽管这些文件你指出必须是\n" +#~ "\t\t你之前保存的资源版本.\n" +#~ "\n" +#~ "\t\tEditing 是通过用于获取资源的API版本完成的.\n" +#~ "\t\t为了能通过指定的 API 版本修改, 请完全限定 resource, version 和 group.\n" +#~ "\n" +#~ "\t\t默认是 YAML 格式. 想在 JSON 中修改, 指定 \"-o json\".\n" +#~ "\n" +#~ "\t\t--windows-line-endings 命令行参数可以用来强制使用 Windows line " +#~ "endings,\n" +#~ "\t\t否则会使用你操作系统的默认值.\n" +#~ "\n" +#~ "\t\t如果更新时发生错误,将在磁盘上创建一个临时文件\n" +#~ "\t\t里面包含您未应用的更改. 更新资源时最常见的错误\n" +#~ "\t\t是另一个编辑器也在服务器中修改这个资源. 当发生这种情况时, 你将\n" +#~ "\t\t需要应用你的修改到资源的最新版本, 或者更新你被保存的临时文件\n" +#~ "\t\t复制它并使用最新的版本." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in versions " +#~ "of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. 
This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in versions " +#~ "of zsh >= 5.2" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\t完成指定的 ReplicationController 的滚动升级.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +#~ "the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ msgstr "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +#~ "the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" + +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." +#~ msgstr "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain letters, " +#~ "numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." +#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain letters, " +#~ "numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain letters, " +#~ "numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." +#~ msgstr "" +#~ "\n" +#~ "\t\t更新一个或者多个 node 上的 taints.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. 
As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain letters, " +#~ "numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." + +#~ msgid "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name or " +#~ "file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." +#~ msgstr "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name or " +#~ "file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." + +#~ msgid "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" +#~ msgstr "" +#~ "\n" +#~ "\t # !!!注意!!!\n" +#~ "\t # 要求容器中有 'tar' 命令\n" +#~ "\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # 复制本地目录 /tmp/foo_dir 到 default namespace 下的远程 pod 的 /" +#~ "tmp/bar_dir 路径 \n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # 复制 /tmp/foo local 本地文件到指定远程 pod 的指定容器的 /tmp/bar " +#~ "路径\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# 复制 /tmp/foo 本地文件到在 namespace 下的某个 pod " +#~ "的 /tmp/bar 路径\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# 从一个远程的 pod 的 /tmp/foo 路径复制到本地 /tmp/bar 路径\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # 使用提供的 key pair 名称为tls-secret 的 secret:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # 创建一个å称为 my-cs çš„ clusterIP service\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # 创建一个å称为 my-cs çš„ clusterIP service (在 headless 模å¼)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # 创建一个å称为 my-dep çš„ deployment 并è¿è¡Œ busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # 创建一个å称为 my-ns çš„ nodeport service\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value 'my " +#~ "frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value 'my " +#~ "frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend running " +#~ "nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if it " +#~ "exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " 
kubectl annotate pods foo description-" +#~ msgstr "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value 'my " +#~ "frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value 'my " +#~ "frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend running " +#~ "nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # 更新名称为 'foo' 的 pod, 删除一个名称为 'description' 的 annotation " +#~ "如果它存在. \n" +#~ " # 不要求使用 --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " 使用一个指定的名称创建一个 clusterIP service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " 使用一个指定的名称创建一个 deployment." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " 使用一个指定的名称创建一个 nodeport service." + +#~ msgid "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or specify " +#~ "--all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." +#~ msgstr "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or specify " +#~ "--all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." 
+ +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L136 +#~ msgid "A schedule in the Cron format the job should be run with." +#~ msgstr "A schedule in the Cron format the job should be run with." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L134 +#~ msgid "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." +#~ msgstr "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "通过文件å或标准输入æµ(stdin)对资æºè¿›è¡Œé…ç½®" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L55 +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "" +#~ "自动调整一个 Deployment, ReplicaSet, 或者 ReplicationController 的副本数é‡" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L101 +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L43 +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "为一个指定的 ClusterRole 创建一个 ClusterRoleBinding" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L181 +#~ msgid "Create a LoadBalancer service." +#~ msgstr "创建一个 LoadBalancer service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L124 +#~ msgid "Create a NodePort service." +#~ msgstr "创建一个 NodePort service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L43 +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "为一个指定的 Role 或者 ClusterRole创建一个 RoleBinding" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L68 +#~ msgid "Create a clusterIP service." +#~ msgstr "创建一个 clusterIP service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_configmap.go#L59 +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "从本地 file, directory 或者 literal value 创建一个 configmap" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "Create a deployment with the specified name." +#~ msgstr "创建一个指定å称的 deployment." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L49 +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "创建一个指定名称的 pod disruption budget." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#~ msgid "Create a quota with the specified name." +#~ msgstr "创建一个指定名称的 quota." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create.go#L56 +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "通过文件名或者标准输入流(stdin)创建一个资源" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L73 +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "从本地 file, directory 或者 literal value 创建一个 secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L36 +#~ msgid "Create a service using specified subcommand." +#~ msgstr "使用指定的 subcommand 创建一个 service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L240 +#~ msgid "Create an ExternalName service." +#~ msgstr "Create an ExternalName service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/delete.go#L130 +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by resources " +#~ "and label selector" +#~ msgstr "" +#~ "Delete resources by filenames, stdin, resources and names, or by resources " +#~ "and label selector" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/stop.go#L58 +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "Deprecated: Gracefully shut down a resource by name or filename" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_node.go#L77 +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "显示 nodes 的 Resource (CPU/Memory) 使用" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_pod.go#L79 +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "显示 pods 的 Resource (CPU/Memory) 使用" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top.go#L43 +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "显示 Resource (CPU/Memory) 使用." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo.go#L49 +#~ msgid "Display cluster info" +#~ msgstr "显示集群信息" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48 +#~ msgid "Displays the current-context" +#~ msgstr "显示当前的 context" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/explain.go#L50 +#~ msgid "Documentation of resources" +#~ msgstr "查看资源的文档" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L37 +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "Dump lots of relevant info for debugging and diagnosis" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L102 +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L105 +#~ msgid "" +#~ "IP to assign to the Load Balancer. 
If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L98 +#~ msgid "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" +#~ msgstr "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout.go#L46 +#~ msgid "Manage a deployment rollout" +#~ msgstr "管理一个 deployment 的 rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L115 +#~ msgid "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" +#~ msgstr "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L84 +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "完成指定的 ReplicationController 的滚动升级" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/replace.go#L70 +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "通过 filename 或者 stdin替换一个资源" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L71 +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job" +#~ msgstr "" +#~ "为 Deployment, ReplicaSet, Replication Controller 或者 Job 设置一个新的副本" +#~ "数量" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67 +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "设置 kubeconfig 文件中的一个集群条目" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57 +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "设置 kubeconfig 文件中的一个 context 条目" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103 +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "设置 kubeconfig 文件中的一个用户条目" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59 +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "设置 kubeconfig 文件中的一个单个值" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48 +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "设置 kubeconfig 文件中的当前上下文" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L87 +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it as " +#~ "a new Kubernetes Service" +#~ msgstr "" +#~ "使用 replication controller, service, deployment 或者 pod 并暴露它作为一个 " +#~ "新的 Kubernetes Service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L100 +#~ msgid "" +#~ "The key to use to differentiate between two different controllers, default " +#~ "'deployment'. Only relevant when --image is specified, ignored otherwise" +#~ msgstr "" +#~ "这个 key 使用有区别在两个不同的 controllers, 默认 'deployment'. 只有当 --" +#~ "image 指定值, 否则忽略" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L113 +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "使用 API generator 的名字, 在 http://kubernetes.io/docs/user-guide/kubectl-" +#~ "conventions/#generators 查看列表." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L66 +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "使用 API generator 的名字. 目前只有 1 个 generator." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L133 +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "使用 gnerator 的名称创建一个 service. 只有在 --expose 为 true 的时候使用" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L121 +#~ msgid "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." +#~ msgstr "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L128 +#~ msgid "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to 'OnFailure' " +#~ "a job is created, if set to 'Never', a regular pod is created. For the " +#~ "latter two --replicas must be 1. Default 'Always', for CronJobs `Never`." +#~ msgstr "" +#~ "这个 Pod 的 restart policy. Legal values [Always, OnFailure, Never]. 
如果" +#~ "设置为 'Always' 一个 deployment 被创建, 如果设置为 ’OnFailure' 一个 job 被" +#~ "创建, 如果设置为 'Never', 一个普通的 pod 被创建. 对于åŽé¢ä¸¤ä¸ª --replicas å¿…" +#~ "须为 1. 默认 'Always', 为 CronJobs 设置为 `Never`." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L101 +#~ msgid "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." +#~ msgstr "" +#~ "对于æœåŠ¡çš„类型: ClusterIP, NodePort, 或者 LoadBalancer. 默认是 'ClusterIP’." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47 +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "å–消设置 kubeconfig 文件中的一个å•ä¸ªå€¼" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/patch.go#L91 +#~ msgid "Update field(s) of a resource using strategic merge patch" +#~ msgstr "使用 strategic merge patch 更新一个资æºçš„ field(s)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_image.go#L94 +#~ msgid "Update image of a pod template" +#~ msgstr "更新一个 pod template çš„é•œåƒ" + +#~ msgid "" +#~ "View latest last-applied-configuration annotations of a resource/object" +#~ msgstr "显示最åŽçš„ resource/object çš„ last-applied-configuration annotations" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L253 +#~ msgid "external name of service" +#~ msgstr "æœåŠ¡çš„外部å称" + +#~ msgid "" +#~ "watch is only supported on individual resources and resource collections - " +#~ "%d resources were found" +#~ msgid_plural "" +#~ "watch is only supported on individual resources and resource collections - " +#~ "%d resources were found" +#~ msgstr[0] "" +#~ "watch 仅支æŒå•ç‹¬çš„资æºæˆ–者资æºé›†åˆ - 找到了 %d 个资æºwatch is only " +#~ "supported on individual resources and resource collections - %d resource " +#~ "was found" +#~ msgstr[1] "" +#~ "watch 仅支æŒå•ç‹¬çš„资æºæˆ–者资æºé›†åˆ - 找到了 %d 个资æºwatch is only " +#~ "supported on individual resources and resource collections - %d resources " +#~ "were found" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..9be9a30d32495cd8128cf281bd0703873e9a9b91 GIT binary patch literal 1187 zcma)4&rcIU6do0Sq~5$}q9$|jP{gU*k|-`hL@1^SjSUv{;APoPwg!;@M*{YHM}5?#nqr-!SQV(vvW#Z1 z?G}}$5@)5z*IgsUVQLGWWzIeV!8LuT_Yo*tvSEq^2S}W^_!x#{zCfWqrV6eFhDJ$+ zGc?OM^uYK(m4nbcl@0{#_JkGHnsG<@>8+-FG=*fDOHoo}Saz&g>6;ZADrBr+2o~o; zTbcrA!<%qL+zri5M|#Trs2VZAOP-ga$07Nm^Q5xt12v#CMqM zGAoaJa~5~-)lp04ZGU@B6tyeFNW>QTZjf=OXgQRmsg*bJ&e_D{xll5ZjPG_FjToU` zfA|~23t)fMZ+jcr$$<8B;n?F^wx zX}R`tsrkjcN<&DqaZWh-bKd)}AK$)Q4-y1Z?H13DW ln=9VT*F7BG{3mZVXn1>d(p#T~BHrrT?KiK(O`o?F{Q*85ssjK3 literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.po new file mode 100644 index 000000000..58fd63a9f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.po @@ -0,0 +1,81 @@ +# Test translations for unit tests. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR warmchang@outlook.com, 2017. 
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: hello-world\n"
+"Report-Msgid-Bugs-To: EMAIL\n"
+"POT-Creation-Date: 2021-07-07 20:15+0200\n"
+"PO-Revision-Date: 2017-06-02 09:13+0800\n"
+"Last-Translator: William Chang \n"
+"Language-Team: \n"
+"Language: zh\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: Poedit 2.0.2\n"
+"X-Poedit-SourceCharset: UTF-8\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42
+msgid "Delete the specified cluster from the kubeconfig"
+msgstr "刪除 kubeconfig 檔案中指定的叢集(cluster)"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42
+msgid "Delete the specified context from the kubeconfig"
+msgstr "刪除 kubeconfig 檔案中指定的 context"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72
+msgid "Describe one or many contexts"
+msgstr "描述一個或多個 context"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41
+msgid "Display clusters defined in the kubeconfig"
+msgstr "顯示 kubeconfig 檔案中定義的叢集(cluster)"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81
+msgid "Display merged kubeconfig settings or a specified kubeconfig file"
+msgstr "顯示合併的 kubeconfig 配置或一個指定的 kubeconfig 檔案"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42
+msgid "Modify kubeconfig files"
+msgstr "修改 kubeconfig 檔案"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135
+msgid "Update the annotations on a resource"
+msgstr "更新一個資源的注解(annotations)"
+
+#~ msgid "Apply a configuration to a resource by filename or stdin"
+#~ msgstr "通過檔案名或標準輸入流(stdin)對資源進行配置"
+
+#~ msgid "Displays the current-context"
+#~ msgstr "顯示目前的 context"
+
+#~ msgid "Sets a cluster entry in kubeconfig"
+#~ msgstr "設置 kubeconfig 檔案中的一個叢集(cluster)條目"
+
+#~ msgid "Sets a context entry in kubeconfig"
+#~ msgstr "設置 kubeconfig 檔案中的一個 context 條目"
+
+#~ msgid "Sets a user entry in kubeconfig"
+#~ msgstr "設置 kubeconfig 檔案中的一個使用者條目"
+
+#~ msgid "Sets an individual value in a kubeconfig file"
+#~ msgstr "設置 kubeconfig 檔案中的一個值"
+
+#~ msgid "Sets the current-context in a kubeconfig file"
+#~ msgstr "設置 kubeconfig 檔案中的目前 context"
+
+#~ msgid "Unsets an individual value in a kubeconfig file"
+#~ msgstr "取消設置 kubeconfig 檔案中的一個值"
+
+#~ msgid ""
+#~ "watch is only supported on individual resources and resource collections "
+#~ "- %d resources were found"
+#~ msgid_plural ""
+#~ "watch is only supported on individual resources and resource collections "
+#~ "- %d resources were found"
+#~ msgstr[0] "一次只能 watch 一個資源或資料集合 - 找到了 %d 個資源"
+#~ msgstr[1] "一次只能 watch 一個資源或資料集合 - 找到了 %d 個資源"
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.mo
new file mode 100644
index 0000000000000000000000000000000000000000..5487fb8ccf9de73999585748dd9b9ed6ce09d34c
GIT binary patch literal 563 zcmZXQ&2H2%5Xa5efpFu(g~M>D1cynJEmSKni_n%9RoWG+vLJ5YO{Q6+#E$F&LJz*(~_C!vfd%`4x%%2wyA)0+v)yvR4Vdr1LLUFB=}yN2o3a`p+|lB19YT6?N3Lzy4vWg%DT jPLyrDmqDZHKf8z literal 0 HcmV?d00001
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.po
new file mode 100644
index 000000000..obsoleted
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.po
@@ -0,0 +1,28 @@
+# Test translations for unit tests.
+# Copyright (C) 2016
+# This file is distributed under the same license as the Kubernetes package.
+# FIRST AUTHOR brendan.d.burns@gmail.com, 2016.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: gettext-go-examples-hello\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2013-12-12 20:03+0000\n"
+"PO-Revision-Date: 2016-12-13 22:12-0800\n"
+"Last-Translator: Brendan Burns \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: Poedit 1.6.10\n"
+"X-Poedit-SourceCharset: UTF-8\n"
+"Language-Team: \n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Language: en\n"
+
+msgid "test_plural"
+msgid_plural "test_plural"
+msgstr[0] "there was
%d item" +msgstr[1] "there were %d items" + +msgid "test_string" +msgstr "foo" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000000000000000000000000000000000..44fe7b5d964a89516db4c6379aa5fb87a941e120 GIT binary patch literal 563 zcmZXQ&2H2%5XW6w4x~3OTzVJ|mEbUG5>U1BvIuQyQKemVRkjs3>P?2MQDR5718uLU zkHG8oSvpx$^}t9!&G>K6hv)v~&R@Z{&FnGnndi)m8SILA#VBT%xncfn3311MpLy{t z<{q$Tc~gho##j;}kPqLxrgy6OKlZ_CQ;UVOKdB6OTHz%*udR_#QwVe&u(n9os_h!` zxTdDD$vJh_1)Td@S2*o!k4q~dS)AcH{nA4=epZ1b6j^?Vd4c&mC<@6zHp;SOrhLGq zQ^q$cSSMlX$W+R}w0FkChgCeLRk|94Pj#zwla{vqn9NV-$4?XHX_g#W6UcDAn~oa} z10Qsa(q;_hS~*W)agT}el=u7p^4rcB+dns7;~9k#I9G@e-3dI=^j-cO1##BlP)!0QR*Z^yidm@0ux aBEp&+!EfbZzXBboeN+bdpX9|#-HHc9@}Y(R literal 0 HcmV?d00001 diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.po new file mode 100644 index 000000000..9944046ae --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.po @@ -0,0 +1,28 @@ +# Test translations for unit tests. +# Copyright (C) 2016 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR brendan.d.burns@gmail.com, 2016. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-12-12 20:03+0000\n" +"PO-Revision-Date: 2016-12-13 22:12-0800\n" +"Last-Translator: Brendan Burns \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.6.10\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Language-Team: \n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"Language: en\n" + +msgid "test_plural" +msgid_plural "test_plural" +msgstr[0] "there was %d item" +msgstr[1] "there were %d items" + +msgid "test_string" +msgstr "baz" diff --git a/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go b/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go new file mode 100644 index 000000000..0265b9fb1 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go @@ -0,0 +1,104 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interrupt + +import ( + "os" + "os/signal" + "sync" + "syscall" +) + +// terminationSignals are signals that cause the program to exit in the +// supported platforms (linux, darwin, windows). +var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT} + +// Handler guarantees execution of notifications after a critical section (the function passed +// to a Run method), even in the presence of process termination. It guarantees exactly once +// invocation of the provided notify functions. 
+type Handler struct { + notify []func() + final func(os.Signal) + once sync.Once +} + +// Chain creates a new handler that invokes all notify functions when the critical section exits +// and then invokes the optional handler's notifications. This allows critical sections to be +// nested without losing exactly once invocations. Notify functions can invoke any cleanup needed +// but should not exit (which is the responsibility of the parent handler). +func Chain(handler *Handler, notify ...func()) *Handler { + if handler == nil { + return New(nil, notify...) + } + return New(handler.Signal, append(notify, handler.Close)...) +} + +// New creates a new handler that guarantees all notify functions are run after the critical +// section exits (or is interrupted by the OS), then invokes the final handler. If no final +// handler is specified, the default final is `os.Exit(1)`. A handler can only be used for +// one critical section. +func New(final func(os.Signal), notify ...func()) *Handler { + return &Handler{ + final: final, + notify: notify, + } +} + +// Close executes all the notification handlers if they have not yet been executed. +func (h *Handler) Close() { + h.once.Do(func() { + for _, fn := range h.notify { + fn() + } + }) +} + +// Signal is called when an os.Signal is received, and guarantees that all notifications +// are executed, then the final handler is executed. This function should only be called once +// per Handler instance. +func (h *Handler) Signal(s os.Signal) { + h.once.Do(func() { + for _, fn := range h.notify { + fn() + } + if h.final == nil { + os.Exit(1) + } + h.final(s) + }) +} + +// Run ensures that any notifications are invoked after the provided fn exits (even if the +// process is interrupted by an OS termination signal). Notifications are only invoked once +// per Handler instance, so calling Run more than once will not behave as the user expects. +func (h *Handler) Run(fn func() error) error { + ch := make(chan os.Signal, 1) + signal.Notify(ch, terminationSignals...) + defer func() { + signal.Stop(ch) + close(ch) + }() + go func() { + sig, ok := <-ch + if !ok { + return + } + h.Signal(sig) + }() + defer h.Close() + return fn() +} diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS new file mode 100644 index 000000000..cb873612b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse +reviewers: + - apelisse diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go new file mode 100644 index 000000000..08194d580 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package openapi is a collection of libraries for fetching the openapi spec +// from a Kubernetes server and then indexing the type definitions. 
+// The openapi spec contains the object model definitions and extensions metadata +// such as the patchStrategy and patchMergeKey for creating patches. +package openapi // k8s.io/kubectl/pkg/util/openapi diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go new file mode 100644 index 000000000..74955da36 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go @@ -0,0 +1,177 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + openapi_v2 "github.com/google/gnostic-models/openapiv2" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/yaml" +) + +// OpenAPIResourcesGetter represents a function to return +// OpenAPI V2 resource specifications. Used for lazy-loading +// these resource specifications. +type OpenAPIResourcesGetter interface { + OpenAPISchema() (Resources, error) +} + +// Resources interface describe a resources provider, that can give you +// resource based on group-version-kind. +type Resources interface { + LookupResource(gvk schema.GroupVersionKind) proto.Schema + GetConsumes(gvk schema.GroupVersionKind, operation string) []string +} + +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" + +// document is an implementation of `Resources`. It looks for +// resources in an openapi Schema. 
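
As a consumer-side sketch of the Resources interface above (the document implementation follows), assuming an *openapi_v2.Document has already been fetched, for example through a discovery client; the function name is illustrative:

    import (
        openapi_v2 "github.com/google/gnostic-models/openapiv2"

        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/kube-openapi/pkg/util/proto"
        "k8s.io/kubectl/pkg/util/openapi"
    )

    // lookupDeployment resolves the proto.Schema for apps/v1 Deployment.
    // LookupResource returns nil when the spec has no model for the GVK.
    func lookupDeployment(doc *openapi_v2.Document) (proto.Schema, error) {
        resources, err := openapi.NewOpenAPIData(doc)
        if err != nil {
            return nil, err
        }
        gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
        return resources.LookupResource(gvk), nil
    }
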
+type document struct { + // Maps gvk to model name + resources map[schema.GroupVersionKind]string + models proto.Models + doc *openapi_v2.Document +} + +var _ Resources = &document{} + +// NewOpenAPIData creates a new `Resources` out of the openapi document +func NewOpenAPIData(doc *openapi_v2.Document) (Resources, error) { + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, err + } + + resources := map[schema.GroupVersionKind]string{} + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + panic("ListModels returns a model that can't be looked-up.") + } + gvkList := parseGroupVersionKind(model) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + resources[gvk] = modelName + } + } + } + + return &document{ + resources: resources, + models: models, + doc: doc, + }, nil +} + +func (d *document) LookupResource(gvk schema.GroupVersionKind) proto.Schema { + modelName, found := d.resources[gvk] + if !found { + return nil + } + return d.models.LookupModel(modelName) +} + +func (d *document) GetConsumes(gvk schema.GroupVersionKind, operation string) []string { + for _, path := range d.doc.GetPaths().GetPath() { + for _, ex := range path.GetValue().GetPatch().GetVendorExtension() { + if ex.GetValue().GetYaml() == "" || + ex.GetName() != "x-kubernetes-group-version-kind" { + continue + } + + var value map[string]string + err := yaml.Unmarshal([]byte(ex.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + if value["group"] == gvk.Group && value["kind"] == gvk.Kind && value["version"] == gvk.Version { + switch operation { + case "GET": + return path.GetValue().GetGet().GetConsumes() + case "PATCH": + return path.GetValue().GetPatch().GetConsumes() + case "HEAD": + return path.GetValue().GetHead().GetConsumes() + case "PUT": + return path.GetValue().GetPut().GetConsumes() + case "POST": + return path.GetValue().GetPost().GetConsumes() + case "OPTIONS": + return path.GetValue().GetOptions().GetConsumes() + case "DELETE": + return path.GetValue().GetDelete().GetConsumes() + } + } + } + } + + return nil +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go new file mode 100644 index 000000000..3179161ee --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "sync" + + openapi_v2 "github.com/google/gnostic-models/openapiv2" + "k8s.io/client-go/discovery" +) + +// CachedOpenAPIGetter fetches the openapi schema once and then caches it in memory +type CachedOpenAPIGetter struct { + openAPIClient discovery.OpenAPISchemaInterface + + // Cached results + sync.Once + openAPISchema *openapi_v2.Document + err error +} + +var _ discovery.OpenAPISchemaInterface = &CachedOpenAPIGetter{} + +// NewOpenAPIGetter returns an object to return OpenAPIDatas which reads +// from a server, and then stores in memory for subsequent invocations +func NewOpenAPIGetter(openAPIClient discovery.OpenAPISchemaInterface) *CachedOpenAPIGetter { + return &CachedOpenAPIGetter{ + openAPIClient: openAPIClient, + } +} + +// OpenAPISchema implements OpenAPISchemaInterface. +func (g *CachedOpenAPIGetter) OpenAPISchema() (*openapi_v2.Document, error) { + g.Do(func() { + g.openAPISchema, g.err = g.openAPIClient.OpenAPISchema() + }) + + // Return the saved result. + return g.openAPISchema, g.err +} + +type CachedOpenAPIParser struct { + openAPIClient discovery.OpenAPISchemaInterface + + // Cached results + sync.Once + openAPIResources Resources + err error +} + +func NewOpenAPIParser(openAPIClient discovery.OpenAPISchemaInterface) *CachedOpenAPIParser { + return &CachedOpenAPIParser{ + openAPIClient: openAPIClient, + } +} + +func (p *CachedOpenAPIParser) Parse() (Resources, error) { + p.Do(func() { + oapi, err := p.openAPIClient.OpenAPISchema() + if err != nil { + p.err = err + return + } + p.openAPIResources, p.err = NewOpenAPIData(oapi) + }) + + return p.openAPIResources, p.err +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go b/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go new file mode 100644 index 000000000..447a39621 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go @@ -0,0 +1,59 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "github.com/spf13/cobra" +) + +type CommandGroup struct { + Message string + Commands []*cobra.Command +} + +type CommandGroups []CommandGroup + +func (g CommandGroups) Add(c *cobra.Command) { + for _, group := range g { + c.AddCommand(group.Commands...) 
+ } +} + +func (g CommandGroups) Has(c *cobra.Command) bool { + for _, group := range g { + for _, command := range group.Commands { + if command == c { + return true + } + } + } + return false +} + +func AddAdditionalCommands(g CommandGroups, message string, cmds []*cobra.Command) CommandGroups { + group := CommandGroup{Message: message} + for _, c := range cmds { + // Don't show commands that have no short description + if !g.Has(c) && len(c.Short) != 0 { + group.Commands = append(group.Commands, c) + } + } + if len(group.Commands) == 0 { + return g + } + return append(g, group) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go b/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go new file mode 100644 index 000000000..fdfdf08ee --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go @@ -0,0 +1,76 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "bytes" + "fmt" + "io" + "strings" + + "github.com/mitchellh/go-wordwrap" + flag "github.com/spf13/pflag" +) + +const offset = 10 + +// HelpFlagPrinter is a printer that +// processes the help flag and print +// it to i/o writer +type HelpFlagPrinter struct { + wrapLimit uint + out io.Writer +} + +// NewHelpFlagPrinter will initialize a HelpFlagPrinter given the +// i/o writer +func NewHelpFlagPrinter(out io.Writer, wrapLimit uint) *HelpFlagPrinter { + return &HelpFlagPrinter{ + wrapLimit: wrapLimit, + out: out, + } +} + +// PrintHelpFlag will beautify the help flags and print it out to p.out +func (p *HelpFlagPrinter) PrintHelpFlag(flag *flag.Flag) { + formatBuf := new(bytes.Buffer) + writeFlag(formatBuf, flag) + + wrappedStr := formatBuf.String() + flagAndUsage := strings.Split(formatBuf.String(), "\n") + flagStr := flagAndUsage[0] + + // if the flag usage is longer than one line, wrap it again + if len(flagAndUsage) > 1 { + nextLines := strings.Join(flagAndUsage[1:], " ") + wrappedUsages := wordwrap.WrapString(nextLines, p.wrapLimit-offset) + wrappedStr = flagStr + "\n" + wrappedUsages + } + appendTabStr := strings.ReplaceAll(wrappedStr, "\n", "\n\t") + + fmt.Fprintf(p.out, appendTabStr+"\n\n") +} + +// writeFlag will output the help flag based +// on the format provided by getFlagFormat to i/o writer +func writeFlag(out io.Writer, f *flag.Flag) { + deprecated := "" + if f.Deprecated != "" { + deprecated = fmt.Sprintf(" (DEPRECATED: %s)", f.Deprecated) + } + fmt.Fprintf(out, getFlagFormat(f), f.Shorthand, f.Name, f.DefValue, f.Usage, deprecated) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go b/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go new file mode 100644 index 000000000..962cd9eec --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "fmt" + "io" + "strings" + + "github.com/russross/blackfriday/v2" +) + +const linebreak = "\n" + +// ASCIIRenderer implements blackfriday.Renderer +var _ blackfriday.Renderer = &ASCIIRenderer{} + +// ASCIIRenderer is a blackfriday.Renderer intended for rendering markdown +// documents as plain text, well suited for human reading on terminals. +type ASCIIRenderer struct { + Indentation string + + listItemCount uint + listLevel uint +} + +// render markdown to text +func (r *ASCIIRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + switch node.Type { + case blackfriday.Text: + raw := string(node.Literal) + lines := strings.Split(raw, linebreak) + for _, line := range lines { + trimmed := strings.Trim(line, " \n\t") + if len(trimmed) > 0 && trimmed[0] != '_' { + w.Write([]byte(" ")) + } + w.Write([]byte(trimmed)) + } + case blackfriday.HorizontalRule, blackfriday.Hardbreak: + w.Write([]byte(linebreak + "----------" + linebreak)) + case blackfriday.Code, blackfriday.CodeBlock: + w.Write([]byte(linebreak)) + lines := []string{} + for _, line := range strings.Split(string(node.Literal), linebreak) { + trimmed := strings.Trim(line, " \t") + // Adding 4 times of indentation will let blackfriday to accept + // this literal as Code or CodeBlock again in next invocation + indented := strings.Repeat(r.Indentation, 4) + trimmed + lines = append(lines, indented) + } + w.Write([]byte(strings.Join(lines, linebreak))) + case blackfriday.Image: + w.Write(node.LinkData.Destination) + case blackfriday.Link: + w.Write([]byte(" ")) + w.Write(node.LinkData.Destination) + case blackfriday.Paragraph: + if r.listLevel == 0 { + w.Write([]byte(linebreak)) + } + case blackfriday.List: + if entering { + w.Write([]byte(linebreak)) + r.listLevel++ + } else { + r.listLevel-- + r.listItemCount = 0 + } + case blackfriday.Item: + if entering { + r.listItemCount++ + for i := 0; uint(i) < r.listLevel; i++ { + w.Write([]byte(r.Indentation)) + } + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + w.Write([]byte(fmt.Sprintf("%d. ", r.listItemCount))) + } else { + w.Write([]byte("* ")) + } + } else { + w.Write([]byte(linebreak)) + } + default: + normalText(w, node.Literal) + } + return blackfriday.GoToNext +} + +func normalText(w io.Writer, text []byte) { + w.Write([]byte(strings.Trim(string(text), " \n\t"))) +} + +// RenderHeader writes document preamble and TOC if requested. +func (r *ASCIIRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + +} + +// RenderFooter writes document footer. +func (r *ASCIIRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { + io.WriteString(w, "\n") +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go b/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go new file mode 100644 index 000000000..09094ffdf --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go @@ -0,0 +1,97 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "strings" + + "github.com/MakeNowJust/heredoc" + "github.com/russross/blackfriday/v2" + "github.com/spf13/cobra" +) + +const Indentation = ` ` + +// LongDesc normalizes a command's long description to follow the conventions. +func LongDesc(s string) string { + if len(s) == 0 { + return s + } + return normalizer{s}.heredoc().markdown().trim().string +} + +// Examples normalizes a command's examples to follow the conventions. +func Examples(s string) string { + if len(s) == 0 { + return s + } + return normalizer{s}.trim().indent().string +} + +// Normalize perform all required normalizations on a given command. +func Normalize(cmd *cobra.Command) *cobra.Command { + if len(cmd.Long) > 0 { + cmd.Long = LongDesc(cmd.Long) + } + if len(cmd.Example) > 0 { + cmd.Example = Examples(cmd.Example) + } + return cmd +} + +// NormalizeAll perform all required normalizations in the entire command tree. +func NormalizeAll(cmd *cobra.Command) *cobra.Command { + if cmd.HasSubCommands() { + for _, subCmd := range cmd.Commands() { + NormalizeAll(subCmd) + } + } + Normalize(cmd) + return cmd +} + +type normalizer struct { + string +} + +func (s normalizer) markdown() normalizer { + bytes := []byte(s.string) + formatted := blackfriday.Run(bytes, blackfriday.WithExtensions(blackfriday.NoIntraEmphasis), blackfriday.WithRenderer(&ASCIIRenderer{Indentation: Indentation})) + s.string = string(formatted) + return s +} + +func (s normalizer) heredoc() normalizer { + s.string = heredoc.Doc(s.string) + return s +} + +func (s normalizer) trim() normalizer { + s.string = strings.TrimSpace(s.string) + return s +} + +func (s normalizer) indent() normalizer { + indentedLines := []string{} + for _, line := range strings.Split(s.string, "\n") { + trimmed := strings.TrimSpace(line) + indented := Indentation + trimmed + indentedLines = append(indentedLines, indented) + } + s.string = strings.Join(indentedLines, "\n") + return s +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/templater.go b/vendor/k8s.io/kubectl/pkg/util/templates/templater.go new file mode 100644 index 000000000..8fe181a05 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/templater.go @@ -0,0 +1,319 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package templates + +import ( + "bytes" + "fmt" + "strings" + "text/template" + "unicode" + + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" + + "k8s.io/kubectl/pkg/util/term" +) + +type FlagExposer interface { + ExposeFlags(cmd *cobra.Command, flags ...string) FlagExposer +} + +func ActsAsRootCommand(cmd *cobra.Command, filters []string, groups ...CommandGroup) FlagExposer { + if cmd == nil { + panic("nil root command") + } + templater := &templater{ + RootCmd: cmd, + UsageTemplate: MainUsageTemplate(), + HelpTemplate: MainHelpTemplate(), + CommandGroups: groups, + Filtered: filters, + } + cmd.SetFlagErrorFunc(templater.FlagErrorFunc()) + cmd.SilenceUsage = true + cmd.SetUsageFunc(templater.UsageFunc()) + cmd.SetHelpFunc(templater.HelpFunc()) + return templater +} + +func UseOptionsTemplates(cmd *cobra.Command) { + templater := &templater{ + UsageTemplate: OptionsUsageTemplate(), + HelpTemplate: OptionsHelpTemplate(), + } + cmd.SetUsageFunc(templater.UsageFunc()) + cmd.SetHelpFunc(templater.HelpFunc()) +} + +type templater struct { + UsageTemplate string + HelpTemplate string + RootCmd *cobra.Command + CommandGroups + Filtered []string +} + +func (templater *templater) FlagErrorFunc(exposedFlags ...string) func(*cobra.Command, error) error { + return func(c *cobra.Command, err error) error { + c.SilenceUsage = true + switch c.CalledAs() { + case "options": + return fmt.Errorf("%s\nRun '%s' without flags.", err, c.CommandPath()) + default: + return fmt.Errorf("%s\nSee '%s --help' for usage.", err, c.CommandPath()) + } + } +} + +func (templater *templater) ExposeFlags(cmd *cobra.Command, flags ...string) FlagExposer { + cmd.SetUsageFunc(templater.UsageFunc(flags...)) + return templater +} + +func (templater *templater) HelpFunc() func(*cobra.Command, []string) { + return func(c *cobra.Command, s []string) { + t := template.New("help") + t.Funcs(templater.templateFuncs()) + template.Must(t.Parse(templater.HelpTemplate)) + out := term.NewResponsiveWriter(c.OutOrStdout()) + err := t.Execute(out, c) + if err != nil { + c.Println(err) + } + } +} + +func (templater *templater) UsageFunc(exposedFlags ...string) func(*cobra.Command) error { + return func(c *cobra.Command) error { + t := template.New("usage") + t.Funcs(templater.templateFuncs(exposedFlags...)) + template.Must(t.Parse(templater.UsageTemplate)) + out := term.NewResponsiveWriter(c.OutOrStderr()) + return t.Execute(out, c) + } +} + +func (templater *templater) templateFuncs(exposedFlags ...string) template.FuncMap { + return template.FuncMap{ + "trim": strings.TrimSpace, + "trimRight": func(s string) string { return strings.TrimRightFunc(s, unicode.IsSpace) }, + "trimLeft": func(s string) string { return strings.TrimLeftFunc(s, unicode.IsSpace) }, + "gt": cobra.Gt, + "eq": cobra.Eq, + "rpad": rpad, + "appendIfNotPresent": appendIfNotPresent, + "flagsNotIntersected": flagsNotIntersected, + "visibleFlags": visibleFlags, + "flagsUsages": flagsUsages, + "cmdGroups": templater.cmdGroups, + "cmdGroupsString": templater.cmdGroupsString, + "rootCmd": templater.rootCmdName, + "isRootCmd": templater.isRootCmd, + "optionsCmdFor": templater.optionsCmdFor, + "usageLine": templater.usageLine, + "reverseParentsNames": templater.reverseParentsNames, + "exposed": func(c *cobra.Command) *flag.FlagSet { + exposed := flag.NewFlagSet("exposed", flag.ContinueOnError) + if len(exposedFlags) > 0 { + for _, name := range exposedFlags { + if flag := c.Flags().Lookup(name); flag != nil { + exposed.AddFlag(flag) + } + } + } + return exposed + }, + 
} +} + +func (templater *templater) cmdGroups(c *cobra.Command, all []*cobra.Command) []CommandGroup { + if len(templater.CommandGroups) > 0 && c == templater.RootCmd { + all = filter(all, templater.Filtered...) + return AddAdditionalCommands(templater.CommandGroups, "Other Commands:", all) + } + all = filter(all, "options") + return []CommandGroup{ + { + Message: "Available Commands:", + Commands: all, + }, + } +} + +func (t *templater) cmdGroupsString(c *cobra.Command) string { + groups := []string{} + for _, cmdGroup := range t.cmdGroups(c, c.Commands()) { + cmds := []string{cmdGroup.Message} + for _, cmd := range cmdGroup.Commands { + if cmd.IsAvailableCommand() { + cmds = append(cmds, " "+rpad(cmd.Name(), cmd.NamePadding())+" "+cmd.Short) + } + } + groups = append(groups, strings.Join(cmds, "\n")) + } + return strings.Join(groups, "\n\n") +} + +func (t *templater) rootCmdName(c *cobra.Command) string { + return t.rootCmd(c).CommandPath() +} + +func (t *templater) reverseParentsNames(c *cobra.Command) []string { + reverseParentsNames := []string{} + parents := t.parents(c) + for i := len(parents) - 1; i >= 0; i-- { + reverseParentsNames = append(reverseParentsNames, parents[i].Name()) + } + return reverseParentsNames +} + +func (t *templater) isRootCmd(c *cobra.Command) bool { + return t.rootCmd(c) == c +} + +func (t *templater) parents(c *cobra.Command) []*cobra.Command { + parents := []*cobra.Command{c} + for current := c; !t.isRootCmd(current) && current.HasParent(); { + current = current.Parent() + parents = append(parents, current) + } + return parents +} + +func (t *templater) rootCmd(c *cobra.Command) *cobra.Command { + if c != nil && !c.HasParent() { + return c + } + if t.RootCmd == nil { + panic("nil root cmd") + } + return t.RootCmd +} + +func (t *templater) optionsCmdFor(c *cobra.Command) string { + if !c.Runnable() { + return "" + } + rootCmdStructure := t.parents(c) + for i := len(rootCmdStructure) - 1; i >= 0; i-- { + cmd := rootCmdStructure[i] + if _, _, err := cmd.Find([]string{"options"}); err == nil { + return cmd.CommandPath() + " options" + } + } + return "" +} + +func (t *templater) usageLine(c *cobra.Command) string { + usage := c.UseLine() + suffix := "[options]" + if c.HasFlags() && !strings.Contains(usage, suffix) { + usage += " " + suffix + } + return usage +} + +// flagsUsages will print out the kubectl help flags +func flagsUsages(f *flag.FlagSet) (string, error) { + flagBuf := new(bytes.Buffer) + wrapLimit, err := term.GetWordWrapperLimit() + if err != nil { + wrapLimit = 0 + } + printer := NewHelpFlagPrinter(flagBuf, wrapLimit) + + f.VisitAll(func(flag *flag.Flag) { + if flag.Hidden { + return + } + printer.PrintHelpFlag(flag) + }) + + return flagBuf.String(), nil +} + +// getFlagFormat will output the flag format +func getFlagFormat(f *flag.Flag) string { + var format string + format = "--%s=%s:\n%s%s" + if f.Value.Type() == "string" { + format = "--%s='%s':\n%s%s" + } + + if len(f.Shorthand) > 0 { + format = " -%s, " + format + } else { + format = " %s" + format + } + + return format +} + +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +func flagsNotIntersected(l *flag.FlagSet, r *flag.FlagSet) *flag.FlagSet { + f := flag.NewFlagSet("notIntersected", flag.ContinueOnError) + l.VisitAll(func(flag *flag.Flag) { + if 
r.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) + return f +} + +func visibleFlags(l *flag.FlagSet) *flag.FlagSet { + hidden := "help" + f := flag.NewFlagSet("visible", flag.ContinueOnError) + l.VisitAll(func(flag *flag.Flag) { + if flag.Name != hidden { + f.AddFlag(flag) + } + }) + return f +} + +func filter(cmds []*cobra.Command, names ...string) []*cobra.Command { + out := []*cobra.Command{} + for _, c := range cmds { + if c.Hidden { + continue + } + skip := false + for _, name := range names { + if name == c.Name() { + skip = true + break + } + } + if skip { + continue + } + out = append(out, c) + } + return out +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/templates.go b/vendor/k8s.io/kubectl/pkg/util/templates/templates.go new file mode 100644 index 000000000..454695c0b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/templates.go @@ -0,0 +1,104 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "strings" + "unicode" +) + +const ( + // SectionVars is the help template section that declares variables to be used in the template. + SectionVars = `{{$isRootCmd := isRootCmd .}}` + + `{{$rootCmd := rootCmd .}}` + + `{{$visibleFlags := visibleFlags (flagsNotIntersected .LocalFlags .PersistentFlags)}}` + + `{{$explicitlyExposedFlags := exposed .}}` + + `{{$optionsCmdFor := optionsCmdFor .}}` + + `{{$usageLine := usageLine .}}` + + `{{$reverseParentsNames := reverseParentsNames .}}` + + // SectionAliases is the help template section that displays command aliases. + SectionAliases = `{{if gt .Aliases 0}}Aliases: +{{.NameAndAliases}} + +{{end}}` + + // SectionExamples is the help template section that displays command examples. + SectionExamples = `{{if .HasExample}}Examples: +{{trimRight .Example}} + +{{end}}` + + // SectionSubcommands is the help template section that displays the command's subcommands. + SectionSubcommands = `{{if .HasAvailableSubCommands}}{{cmdGroupsString .}} + +{{end}}` + + // SectionFlags is the help template section that displays the command's flags. + SectionFlags = `{{ if or $visibleFlags.HasFlags $explicitlyExposedFlags.HasFlags}}Options: +{{ if $visibleFlags.HasFlags}}{{trimRight (flagsUsages $visibleFlags)}}{{end}}{{ if $explicitlyExposedFlags.HasFlags}}{{ if $visibleFlags.HasFlags}} +{{end}}{{trimRight (flagsUsages $explicitlyExposedFlags)}}{{end}} + +{{end}}` + + // SectionUsage is the help template section that displays the command's usage. + SectionUsage = `{{if and .Runnable (ne .UseLine "") (ne .UseLine $rootCmd)}}Usage: + {{$usageLine}} + +{{end}}` + + // SectionTipsHelp is the help template section that displays the '--help' hint. + SectionTipsHelp = `{{if .HasSubCommands}}Use "{{range $reverseParentsNames}}{{.}} {{end}} --help" for more information about a given command. +{{end}}` + + // SectionTipsGlobalOptions is the help template section that displays the 'options' hint for displaying global flags. 
+ SectionTipsGlobalOptions = `{{if $optionsCmdFor}}Use "{{$optionsCmdFor}}" for a list of global command-line options (applies to all commands). +{{end}}` +) + +// MainHelpTemplate if the template for 'help' used by most commands. +func MainHelpTemplate() string { + return `{{with or .Long .Short }}{{. | trim}}{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// MainUsageTemplate if the template for 'usage' used by most commands. +func MainUsageTemplate() string { + sections := []string{ + "\n\n", + SectionVars, + SectionAliases, + SectionExamples, + SectionSubcommands, + SectionFlags, + SectionUsage, + SectionTipsHelp, + SectionTipsGlobalOptions, + } + return strings.TrimRightFunc(strings.Join(sections, ""), unicode.IsSpace) +} + +// OptionsHelpTemplate if the template for 'help' used by the 'options' command. +func OptionsHelpTemplate() string { + return "" +} + +// OptionsUsageTemplate if the template for 'usage' used by the 'options' command. +func OptionsUsageTemplate() string { + return `{{ if .HasInheritedFlags}}The following options can be passed to any command: + +{{flagsUsages .InheritedFlags}}{{end}}` +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resize.go b/vendor/k8s.io/kubectl/pkg/util/term/resize.go new file mode 100644 index 000000000..636b8bef4 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/resize.go @@ -0,0 +1,132 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "fmt" + + "github.com/moby/term" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// GetSize returns the current size of the user's terminal. If it isn't a terminal, +// nil is returned. +func (t TTY) GetSize() *remotecommand.TerminalSize { + outFd, isTerminal := term.GetFdInfo(t.Out) + if !isTerminal { + return nil + } + return GetSize(outFd) +} + +// GetSize returns the current size of the terminal associated with fd. +func GetSize(fd uintptr) *remotecommand.TerminalSize { + winsize, err := term.GetWinsize(fd) + if err != nil { + runtime.HandleError(fmt.Errorf("unable to get terminal size: %v", err)) + return nil + } + + return &remotecommand.TerminalSize{Width: winsize.Width, Height: winsize.Height} +} + +// MonitorSize monitors the terminal's size. It returns a TerminalSizeQueue primed with +// initialSizes, or nil if there's no TTY present. +func (t *TTY) MonitorSize(initialSizes ...*remotecommand.TerminalSize) remotecommand.TerminalSizeQueue { + outFd, isTerminal := term.GetFdInfo(t.Out) + if !isTerminal { + return nil + } + + t.sizeQueue = &sizeQueue{ + t: *t, + // make it buffered so we can send the initial terminal sizes without blocking, prior to starting + // the streaming below + resizeChan: make(chan remotecommand.TerminalSize, len(initialSizes)), + stopResizing: make(chan struct{}), + } + + t.sizeQueue.monitorSize(outFd, initialSizes...) 
+ + return t.sizeQueue +} + +// sizeQueue implements remotecommand.TerminalSizeQueue +type sizeQueue struct { + t TTY + // resizeChan receives a Size each time the user's terminal is resized. + resizeChan chan remotecommand.TerminalSize + stopResizing chan struct{} +} + +// make sure sizeQueue implements the resize.TerminalSizeQueue interface +var _ remotecommand.TerminalSizeQueue = &sizeQueue{} + +// monitorSize primes resizeChan with initialSizes and then monitors for resize events. With each +// new event, it sends the current terminal size to resizeChan. +func (s *sizeQueue) monitorSize(outFd uintptr, initialSizes ...*remotecommand.TerminalSize) { + // send the initial sizes + for i := range initialSizes { + if initialSizes[i] != nil { + s.resizeChan <- *initialSizes[i] + } + } + + resizeEvents := make(chan remotecommand.TerminalSize, 1) + + monitorResizeEvents(outFd, resizeEvents, s.stopResizing) + + // listen for resize events in the background + go func() { + defer runtime.HandleCrash() + + for { + select { + case size, ok := <-resizeEvents: + if !ok { + return + } + + select { + // try to send the size to resizeChan, but don't block + case s.resizeChan <- size: + // send successful + default: + // unable to send / no-op + } + case <-s.stopResizing: + return + } + } + }() +} + +// Next returns the new terminal size after the terminal has been resized. It returns nil when +// monitoring has been stopped. +func (s *sizeQueue) Next() *remotecommand.TerminalSize { + size, ok := <-s.resizeChan + if !ok { + return nil + } + return &size +} + +// stop stops the background goroutine that is monitoring for terminal resizes. +func (s *sizeQueue) stop() { + close(s.stopResizing) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go new file mode 100644 index 000000000..e361b1adb --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go @@ -0,0 +1,62 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "os" + "os/signal" + + "golang.org/x/sys/unix" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// monitorResizeEvents spawns a goroutine that waits for SIGWINCH signals (these indicate the +// terminal has resized). After receiving a SIGWINCH, this gets the terminal size and tries to send +// it to the resizeEvents channel. The goroutine stops when the stop channel is closed. 
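
To show where this resize plumbing ends up, a rough sketch of wiring terminal resizes into a remote session; exec is assumed to be an already configured remotecommand.Executor, and the Unix implementation of monitorResizeEvents continues below:

    import (
        "os"

        "k8s.io/client-go/tools/remotecommand"
        "k8s.io/kubectl/pkg/util/term"
    )

    // streamWithResize primes the size queue with the current terminal size
    // and forwards later resizes to the server for the session's lifetime.
    // With Tty set, stderr is merged into the stdout stream.
    func streamWithResize(exec remotecommand.Executor) error {
        tty := term.TTY{In: os.Stdin, Out: os.Stdout, Raw: true}
        sizeQueue := tty.MonitorSize(tty.GetSize())
        return tty.Safe(func() error {
            return exec.Stream(remotecommand.StreamOptions{
                Stdin:             os.Stdin,
                Stdout:            os.Stdout,
                Tty:               true,
                TerminalSizeQueue: sizeQueue,
            })
        })
    }
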
+func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) { + go func() { + defer runtime.HandleCrash() + + winch := make(chan os.Signal, 1) + signal.Notify(winch, unix.SIGWINCH) + defer signal.Stop(winch) + + for { + select { + case <-winch: + size := GetSize(fd) + if size == nil { + return + } + + // try to send size + select { + case resizeEvents <- *size: + // success + default: + // not sent + } + case <-stop: + return + } + } + }() +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go new file mode 100644 index 000000000..adccf8734 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go @@ -0,0 +1,62 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// monitorResizeEvents spawns a goroutine that periodically gets the terminal size and tries to send +// it to the resizeEvents channel if the size has changed. The goroutine stops when the stop channel +// is closed. +func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) { + go func() { + defer runtime.HandleCrash() + + size := GetSize(fd) + if size == nil { + return + } + lastSize := *size + + for { + // see if we need to stop running + select { + case <-stop: + return + default: + } + + size := GetSize(fd) + if size == nil { + return + } + + if size.Height != lastSize.Height || size.Width != lastSize.Width { + lastSize.Height = size.Height + lastSize.Width = size.Width + resizeEvents <- *size + } + + // sleep to avoid hot looping + time.Sleep(250 * time.Millisecond) + } + }() +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/term.go b/vendor/k8s.io/kubectl/pkg/util/term/term.go new file mode 100644 index 000000000..93a992fe3 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/term.go @@ -0,0 +1,115 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "io" + "os" + + "k8s.io/cli-runtime/pkg/printers" + + "github.com/moby/term" + + "k8s.io/kubectl/pkg/util/interrupt" +) + +// SafeFunc is a function to be invoked by TTY. +type SafeFunc func() error + +// TTY helps invoke a function and preserve the state of the terminal, even if the process is +// terminated during execution. 
It also provides support for terminal resizing for remote command +// execution/attachment. +type TTY struct { + // In is a reader representing stdin. It is a required field. + In io.Reader + // Out is a writer representing stdout. It must be set to support terminal resizing. It is an + // optional field. + Out io.Writer + // Raw is true if the terminal should be set raw. + Raw bool + // TryDev indicates the TTY should try to open /dev/tty if the provided input + // is not a file descriptor. + TryDev bool + // Parent is an optional interrupt handler provided to this function - if provided + // it will be invoked after the terminal state is restored. If it is not provided, + // a signal received during the TTY will result in os.Exit(0) being invoked. + Parent *interrupt.Handler + + // sizeQueue is set after a call to MonitorSize() and is used to monitor SIGWINCH signals when the + // user's terminal resizes. + sizeQueue *sizeQueue +} + +// IsTerminalIn returns true if t.In is a terminal. Does not check /dev/tty +// even if TryDev is set. +func (t TTY) IsTerminalIn() bool { + return printers.IsTerminal(t.In) +} + +// IsTerminalOut returns true if t.Out is a terminal. Does not check /dev/tty +// even if TryDev is set. +func (t TTY) IsTerminalOut() bool { + return printers.IsTerminal(t.Out) +} + +// IsTerminal returns whether the passed object is a terminal or not. +// Deprecated: use printers.IsTerminal instead. +var IsTerminal = printers.IsTerminal + +// AllowsColorOutput returns true if the specified writer is a terminal and +// the process environment indicates color output is supported and desired. +// Deprecated: use printers.AllowsColorOutput instead. +var AllowsColorOutput = printers.AllowsColorOutput + +// Safe invokes the provided function and will attempt to ensure that when the +// function returns (or a termination signal is sent) that the terminal state +// is reset to the condition it was in prior to the function being invoked. If +// t.Raw is true the terminal will be put into raw mode prior to calling the function. +// If the input file descriptor is not a TTY and TryDev is true, the /dev/tty file +// will be opened (if available). +func (t TTY) Safe(fn SafeFunc) error { + inFd, isTerminal := term.GetFdInfo(t.In) + + if !isTerminal && t.TryDev { + if f, err := os.Open("/dev/tty"); err == nil { + defer f.Close() + inFd = f.Fd() + isTerminal = term.IsTerminal(inFd) + } + } + if !isTerminal { + return fn() + } + + var state *term.State + var err error + if t.Raw { + state, err = term.MakeRaw(inFd) + } else { + state, err = term.SaveState(inFd) + } + if err != nil { + return err + } + return interrupt.Chain(t.Parent, func() { + if t.sizeQueue != nil { + t.sizeQueue.stop() + } + + term.RestoreTerminal(inFd, state) + }).Run(fn) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go b/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go new file mode 100644 index 000000000..e3f600880 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go @@ -0,0 +1,146 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "errors" + "io" + "os" + + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/moby/term" + + "k8s.io/client-go/tools/remotecommand" +) + +type wordWrapWriter struct { + limit uint + writer io.Writer +} + +// NewResponsiveWriter creates a Writer that detects the column width of the +// terminal we are in, and adjusts every line width to fit and use recommended +// terminal sizes for better readability. Does proper word wrapping automatically. +// +// if terminal width >= 120 columns use 120 columns +// if terminal width >= 100 columns use 100 columns +// if terminal width >= 80 columns use 80 columns +// +// In case we're not in a terminal or if it's smaller than 80 columns width, +// doesn't do any wrapping. +func NewResponsiveWriter(w io.Writer) io.Writer { + file, ok := w.(*os.File) + if !ok { + return w + } + fd := file.Fd() + if !term.IsTerminal(fd) { + return w + } + + terminalSize := GetSize(fd) + if terminalSize == nil { + return w + } + limit := getTerminalLimitWidth(terminalSize) + + return NewWordWrapWriter(w, limit) +} + +// NewWordWrapWriter is a Writer that supports a limit of characters on every line +// and does auto word wrapping that respects that limit. +func NewWordWrapWriter(w io.Writer, limit uint) io.Writer { + return &wordWrapWriter{ + limit: limit, + writer: w, + } +} + +func getTerminalLimitWidth(terminalSize *remotecommand.TerminalSize) uint { + var limit uint + switch { + case terminalSize.Width >= 120: + limit = 120 + case terminalSize.Width >= 100: + limit = 100 + case terminalSize.Width >= 80: + limit = 80 + } + return limit +} + +func GetWordWrapperLimit() (uint, error) { + stdout := os.Stdout + fd := stdout.Fd() + if !term.IsTerminal(fd) { + return 0, errors.New("file descriptor is not a terminal") + } + terminalSize := GetSize(fd) + if terminalSize == nil { + return 0, errors.New("terminal size is nil") + } + return getTerminalLimitWidth(terminalSize), nil +} + +func (w wordWrapWriter) Write(p []byte) (nn int, err error) { + if w.limit == 0 { + return w.writer.Write(p) + } + original := string(p) + wrapped := wordwrap.WrapString(original, w.limit) + return w.writer.Write([]byte(wrapped)) +} + +// NewPunchCardWriter is a NewWordWrapWriter that limits the line width to 80 columns. +func NewPunchCardWriter(w io.Writer) io.Writer { + return NewWordWrapWriter(w, 80) +} + +type maxWidthWriter struct { + maxWidth uint + currentWidth uint + written uint + writer io.Writer +} + +// NewMaxWidthWriter is a Writer that supports a limit of characters on every +// line, but doesn't do any word wrapping automatically. +func NewMaxWidthWriter(w io.Writer, maxWidth uint) io.Writer { + return &maxWidthWriter{ + maxWidth: maxWidth, + writer: w, + } +} + +func (m maxWidthWriter) Write(p []byte) (nn int, err error) { + for _, b := range p { + if m.currentWidth == m.maxWidth { + m.writer.Write([]byte{'\n'}) + m.currentWidth = 0 + } + if b == '\n' { + m.currentWidth = 0 + } + _, err := m.writer.Write([]byte{b}) + if err != nil { + return int(m.written), err + } + m.written++ + m.currentWidth++ + } + return len(p), nil +} diff --git a/vendor/k8s.io/kubectl/pkg/validation/schema.go b/vendor/k8s.io/kubectl/pkg/validation/schema.go new file mode 100644 index 000000000..fb8841b9c --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/validation/schema.go @@ -0,0 +1,153 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "bytes" + "encoding/json" + "fmt" + + ejson "github.com/exponent-io/jsonpath" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/klog/v2" +) + +// Schema is an interface that knows how to validate an API object serialized to a byte array. +type Schema interface { + ValidateBytes(data []byte) error +} + +// NullSchema always validates bytes. +type NullSchema struct{} + +// ValidateBytes never fails for NullSchema. +func (NullSchema) ValidateBytes(data []byte) error { return nil } + +// NoDoubleKeySchema is a schema that disallows double keys. +type NoDoubleKeySchema struct{} + +// ValidateBytes validates bytes. +func (NoDoubleKeySchema) ValidateBytes(data []byte) error { + var list []error + if err := validateNoDuplicateKeys(data, "metadata", "labels"); err != nil { + list = append(list, err) + } + if err := validateNoDuplicateKeys(data, "metadata", "annotations"); err != nil { + list = append(list, err) + } + return utilerrors.NewAggregate(list) +} + +func validateNoDuplicateKeys(data []byte, path ...string) error { + r := ejson.NewDecoder(bytes.NewReader(data)) + // This is Go being unfriendly. The 'path ...string' comes in as a + // []string, and SeekTo takes ...interface{}, so we can't just pass + // the path straight in, we have to copy it. *sigh* + ifacePath := []interface{}{} + for ix := range path { + ifacePath = append(ifacePath, path[ix]) + } + found, err := r.SeekTo(ifacePath...) + if err != nil { + return err + } + if !found { + return nil + } + seen := map[string]bool{} + for { + tok, err := r.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case json.Delim: + if t.String() == "}" { + return nil + } + case ejson.KeyString: + if seen[string(t)] { + return fmt.Errorf("duplicate key: %s", string(t)) + } + seen[string(t)] = true + } + } +} + +// ConjunctiveSchema encapsulates a schema list. +type ConjunctiveSchema []Schema + +// ValidateBytes validates bytes per a ConjunctiveSchema. +func (c ConjunctiveSchema) ValidateBytes(data []byte) error { + var list []error + schemas := []Schema(c) + for ix := range schemas { + if err := schemas[ix].ValidateBytes(data); err != nil { + list = append(list, err) + } + } + return utilerrors.NewAggregate(list) +} + +func NewParamVerifyingSchema(s Schema, verifier resource.Verifier, directive string) Schema { + return ¶mVerifyingSchema{ + schema: s, + verifier: verifier, + directive: directive, + } +} + +// paramVerifyingSchema only performs validation +// based on the fieldValidation query param +// being unsupported by the apiserver, because +// server-side validation will be performed instead +// of client-side validation. 
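+// An illustrative usage sketch (editor's note, not upstream kubectl code;
+// `verifier` stands for a resource.Verifier obtained elsewhere):
+//
+//	s := NewParamVerifyingSchema(NullSchema{}, verifier, metav1.FieldValidationStrict)
+//	err := s.ValidateBytes(manifestBytes)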
+type paramVerifyingSchema struct {
+	schema    Schema
+	verifier  resource.Verifier
+	directive string
+}
+
+// ValidateBytes validates bytes per a paramVerifyingSchema.
+func (c *paramVerifyingSchema) ValidateBytes(data []byte) error {
+	obj, err := parse(data)
+	if err != nil {
+		return err
+	}
+
+	gvk, errs := getObjectKind(obj)
+	if errs != nil {
+		return utilerrors.NewAggregate(errs)
+	}
+
+	err = c.verifier.HasSupport(gvk)
+	if resource.IsParamUnsupportedError(err) {
+		switch c.directive {
+		case metav1.FieldValidationStrict:
+			return c.schema.ValidateBytes(data)
+		case metav1.FieldValidationWarn:
+			klog.Warningf("cannot perform warn validation if server-side field validation is unsupported, skipping validation")
+		default:
+			// can't be reached
+			klog.Warningf("unexpected field validation directive: %s, skipping validation", c.directive)
+		}
+		return nil
+	}
+	return err
+}
diff --git a/vendor/k8s.io/kubectl/pkg/validation/validation.go b/vendor/k8s.io/kubectl/pkg/validation/validation.go
new file mode 100644
index 000000000..47c74e5b8
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/validation/validation.go
@@ -0,0 +1,144 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"errors"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/kube-openapi/pkg/util/proto/validation"
+	"k8s.io/kubectl/pkg/util/openapi"
+)
+
+// schemaValidation validates the object against an OpenAPI schema.
+type schemaValidation struct {
+	resourcesGetter openapi.OpenAPIResourcesGetter
+}
+
+// NewSchemaValidation creates a new Schema that can be used
+// to validate objects.
+func NewSchemaValidation(resourcesGetter openapi.OpenAPIResourcesGetter) Schema {
+	return &schemaValidation{
+		resourcesGetter: resourcesGetter,
+	}
+}
+
+// ValidateBytes validates the object using the OpenAPI Resources
+// object.
+func (v *schemaValidation) ValidateBytes(data []byte) error {
+	obj, err := parse(data)
+	if err != nil {
+		return err
+	}
+
+	gvk, errs := getObjectKind(obj)
+	if errs != nil {
+		return utilerrors.NewAggregate(errs)
+	}
+
+	if (gvk == schema.GroupVersionKind{Version: "v1", Kind: "List"}) {
+		return utilerrors.NewAggregate(v.validateList(obj))
+	}
+	return utilerrors.NewAggregate(v.validateResource(obj, gvk))
+}
+
+func (v *schemaValidation) validateList(object interface{}) []error {
+	fields, ok := object.(map[string]interface{})
+	if !ok || fields == nil {
+		return []error{errors.New("invalid object to validate")}
+	}
+
+	allErrors := []error{}
+	if _, ok := fields["items"].([]interface{}); !ok {
+		return []error{errors.New("invalid object to validate")}
+	}
+	for _, item := range fields["items"].([]interface{}) {
+		if gvk, errs := getObjectKind(item); errs != nil {
+			allErrors = append(allErrors, errs...)
+		} else {
+			allErrors = append(allErrors, v.validateResource(item, gvk)...)
+ } + } + return allErrors +} + +func (v *schemaValidation) validateResource(obj interface{}, gvk schema.GroupVersionKind) []error { + // This lazy-loads the OpenAPI V2 specifications, caching the specs. + resources, err := v.resourcesGetter.OpenAPISchema() + if err != nil { + return []error{err} + } + resource := resources.LookupResource(gvk) + if resource == nil { + // resource is not present, let's just skip validation. + return nil + } + + return validation.ValidateModel(obj, resource, gvk.Kind) +} + +func parse(data []byte) (interface{}, error) { + var obj interface{} + out, err := yaml.ToJSON(data) + if err != nil { + return nil, err + } + if err := json.Unmarshal(out, &obj); err != nil { + return nil, err + } + return obj, nil +} + +func getObjectKind(object interface{}) (schema.GroupVersionKind, []error) { + var listErrors []error + fields, ok := object.(map[string]interface{}) + if !ok || fields == nil { + listErrors = append(listErrors, errors.New("invalid object to validate")) + return schema.GroupVersionKind{}, listErrors + } + + var group string + var version string + apiVersion := fields["apiVersion"] + if apiVersion == nil { + listErrors = append(listErrors, errors.New("apiVersion not set")) + } else if _, ok := apiVersion.(string); !ok { + listErrors = append(listErrors, errors.New("apiVersion isn't string type")) + } else { + gv, err := schema.ParseGroupVersion(apiVersion.(string)) + if err != nil { + listErrors = append(listErrors, err) + } else { + group = gv.Group + version = gv.Version + } + } + kind := fields["kind"] + if kind == nil { + listErrors = append(listErrors, errors.New("kind not set")) + } else if _, ok := kind.(string); !ok { + listErrors = append(listErrors, errors.New("kind isn't string type")) + } + if listErrors != nil { + return schema.GroupVersionKind{}, listErrors + } + + return schema.GroupVersionKind{Group: group, Version: version, Kind: kind.(string)}, nil +} diff --git a/vendor/k8s.io/utils/buffer/ring_growing.go b/vendor/k8s.io/utils/buffer/ring_growing.go new file mode 100644 index 000000000..86965a513 --- /dev/null +++ b/vendor/k8s.io/utils/buffer/ring_growing.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffer + +// RingGrowing is a growing ring buffer. +// Not thread safe. +type RingGrowing struct { + data []interface{} + n int // Size of Data + beg int // First available element + readable int // Number of data items available +} + +// NewRingGrowing constructs a new RingGrowing instance with provided parameters. +func NewRingGrowing(initialSize int) *RingGrowing { + return &RingGrowing{ + data: make([]interface{}, initialSize), + n: initialSize, + } +} + +// ReadOne reads (consumes) first item from the buffer if it is available, otherwise returns false. 
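+// A minimal usage sketch (editor's illustration, not part of upstream):
+//
+//	r := NewRingGrowing(2)
+//	r.WriteOne("a")
+//	v, ok := r.ReadOne() // v == "a", ok == true
+//	_, ok = r.ReadOne()  // ok == false: buffer is empty again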
+func (r *RingGrowing) ReadOne() (data interface{}, ok bool) { + if r.readable == 0 { + return nil, false + } + r.readable-- + element := r.data[r.beg] + r.data[r.beg] = nil // Remove reference to the object to help GC + if r.beg == r.n-1 { + // Was the last element + r.beg = 0 + } else { + r.beg++ + } + return element, true +} + +// WriteOne adds an item to the end of the buffer, growing it if it is full. +func (r *RingGrowing) WriteOne(data interface{}) { + if r.readable == r.n { + // Time to grow + newN := r.n * 2 + newData := make([]interface{}, newN) + to := r.beg + r.readable + if to <= r.n { + copy(newData, r.data[r.beg:to]) + } else { + copied := copy(newData, r.data[r.beg:]) + copy(newData[copied:], r.data[:(to%r.n)]) + } + r.beg = 0 + r.data = newData + r.n = newN + } + r.data[(r.readable+r.beg)%r.n] = data + r.readable++ +} diff --git a/vendor/k8s.io/utils/exec/README.md b/vendor/k8s.io/utils/exec/README.md new file mode 100644 index 000000000..7944e8dd3 --- /dev/null +++ b/vendor/k8s.io/utils/exec/README.md @@ -0,0 +1,5 @@ +# Exec + +This package provides an interface for `os/exec`. It makes it easier to mock +and replace in tests, especially with the [FakeExec](testing/fake_exec.go) +struct. diff --git a/vendor/k8s.io/utils/exec/doc.go b/vendor/k8s.io/utils/exec/doc.go new file mode 100644 index 000000000..cbb44bdb5 --- /dev/null +++ b/vendor/k8s.io/utils/exec/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package exec provides an injectable interface and implementations for running commands. +package exec // import "k8s.io/utils/exec" diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go new file mode 100644 index 000000000..d9c91e3ca --- /dev/null +++ b/vendor/k8s.io/utils/exec/exec.go @@ -0,0 +1,256 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "context" + "io" + "io/fs" + osexec "os/exec" + "syscall" + "time" +) + +// ErrExecutableNotFound is returned if the executable is not found. +var ErrExecutableNotFound = osexec.ErrNotFound + +// Interface is an interface that presents a subset of the os/exec API. Use this +// when you want to inject fakeable/mockable exec behavior. +type Interface interface { + // Command returns a Cmd instance which can be used to run a single command. + // This follows the pattern of package os/exec. 
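+	// For example (editor's sketch of typical use, mirroring os/exec):
+	//
+	//	out, err := New().Command("echo", "hello").CombinedOutput()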
+	Command(cmd string, args ...string) Cmd
+
+	// CommandContext returns a Cmd instance which can be used to run a single command.
+	//
+	// The provided context is used to kill the process if the context becomes done
+	// before the command completes on its own. For example, a timeout can be set in
+	// the context.
+	CommandContext(ctx context.Context, cmd string, args ...string) Cmd
+
+	// LookPath wraps os/exec.LookPath
+	LookPath(file string) (string, error)
+}
+
+// Cmd is an interface that presents an API that is very similar to Cmd from os/exec.
+// As more functionality is needed, this can grow. Since Cmd is a struct, we will have
+// to replace fields with get/set method pairs.
+type Cmd interface {
+	// Run runs the command to completion.
+	Run() error
+	// CombinedOutput runs the command and returns its combined standard output
+	// and standard error. This follows the pattern of package os/exec.
+	CombinedOutput() ([]byte, error)
+	// Output runs the command and returns standard output, but not standard error.
+	Output() ([]byte, error)
+	SetDir(dir string)
+	SetStdin(in io.Reader)
+	SetStdout(out io.Writer)
+	SetStderr(out io.Writer)
+	SetEnv(env []string)
+
+	// StdoutPipe and StderrPipe for getting the process' Stdout and Stderr as
+	// Readers
+	StdoutPipe() (io.ReadCloser, error)
+	StderrPipe() (io.ReadCloser, error)
+
+	// Start and Wait are for running a process non-blocking
+	Start() error
+	Wait() error
+
+	// Stop stops the command by sending SIGTERM. It is not guaranteed the
+	// process will stop before this function returns. If the process is not
+	// responding, an internal timer function will send a SIGKILL to force
+	// terminate after 10 seconds.
+	Stop()
+}
+
+// ExitError is an interface that presents an API similar to os.ProcessState, which is
+// what ExitError from os/exec is. This is designed to make testing a bit easier and
+// probably loses some of the cross-platform properties of the underlying library.
+type ExitError interface {
+	String() string
+	Error() string
+	Exited() bool
+	ExitStatus() int
+}
+
+// executor implements Interface in terms of really exec()ing.
+type executor struct{}
+
+// New returns a new Interface which will use os/exec to run commands.
+func New() Interface {
+	return &executor{}
+}
+
+// Command is part of the Interface interface.
+func (executor *executor) Command(cmd string, args ...string) Cmd {
+	return (*cmdWrapper)(maskErrDotCmd(osexec.Command(cmd, args...)))
+}
+
+// CommandContext is part of the Interface interface.
+func (executor *executor) CommandContext(ctx context.Context, cmd string, args ...string) Cmd {
+	return (*cmdWrapper)(maskErrDotCmd(osexec.CommandContext(ctx, cmd, args...)))
+}
+
+// LookPath is part of the Interface interface.
+func (executor *executor) LookPath(file string) (string, error) {
+	path, err := osexec.LookPath(file)
+	return path, handleError(maskErrDot(err))
+}
+
+// cmdWrapper wraps exec.Cmd so we can capture errors.
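+// Errors from these wrappers pass through handleError, so exit codes can be
+// inspected via the ExitError interface; for example (editor's illustrative sketch):
+//
+//	err := New().Command("false").Run()
+//	if ee, ok := err.(ExitError); ok {
+//		_ = ee.ExitStatus() // non-zero exit code
+//	}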
+type cmdWrapper osexec.Cmd + +var _ Cmd = &cmdWrapper{} + +func (cmd *cmdWrapper) SetDir(dir string) { + cmd.Dir = dir +} + +func (cmd *cmdWrapper) SetStdin(in io.Reader) { + cmd.Stdin = in +} + +func (cmd *cmdWrapper) SetStdout(out io.Writer) { + cmd.Stdout = out +} + +func (cmd *cmdWrapper) SetStderr(out io.Writer) { + cmd.Stderr = out +} + +func (cmd *cmdWrapper) SetEnv(env []string) { + cmd.Env = env +} + +func (cmd *cmdWrapper) StdoutPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StdoutPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) StderrPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StderrPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) Start() error { + err := (*osexec.Cmd)(cmd).Start() + return handleError(err) +} + +func (cmd *cmdWrapper) Wait() error { + err := (*osexec.Cmd)(cmd).Wait() + return handleError(err) +} + +// Run is part of the Cmd interface. +func (cmd *cmdWrapper) Run() error { + err := (*osexec.Cmd)(cmd).Run() + return handleError(err) +} + +// CombinedOutput is part of the Cmd interface. +func (cmd *cmdWrapper) CombinedOutput() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).CombinedOutput() + return out, handleError(err) +} + +func (cmd *cmdWrapper) Output() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).Output() + return out, handleError(err) +} + +// Stop is part of the Cmd interface. +func (cmd *cmdWrapper) Stop() { + c := (*osexec.Cmd)(cmd) + + if c.Process == nil { + return + } + + c.Process.Signal(syscall.SIGTERM) + + time.AfterFunc(10*time.Second, func() { + if !c.ProcessState.Exited() { + c.Process.Signal(syscall.SIGKILL) + } + }) +} + +func handleError(err error) error { + if err == nil { + return nil + } + + switch e := err.(type) { + case *osexec.ExitError: + return &ExitErrorWrapper{e} + case *fs.PathError: + return ErrExecutableNotFound + case *osexec.Error: + if e.Err == osexec.ErrNotFound { + return ErrExecutableNotFound + } + } + + return err +} + +// ExitErrorWrapper is an implementation of ExitError in terms of os/exec ExitError. +// Note: standard exec.ExitError is type *os.ProcessState, which already implements Exited(). +type ExitErrorWrapper struct { + *osexec.ExitError +} + +var _ ExitError = &ExitErrorWrapper{} + +// ExitStatus is part of the ExitError interface. +func (eew ExitErrorWrapper) ExitStatus() int { + ws, ok := eew.Sys().(syscall.WaitStatus) + if !ok { + panic("can't call ExitStatus() on a non-WaitStatus exitErrorWrapper") + } + return ws.ExitStatus() +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). +type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +// Exited is to check if the process has finished +func (e CodeExitError) Exited() bool { + return true +} + +// ExitStatus is for checking the error code +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/k8s.io/utils/exec/fixup_go118.go b/vendor/k8s.io/utils/exec/fixup_go118.go new file mode 100644 index 000000000..acf45f1cd --- /dev/null +++ b/vendor/k8s.io/utils/exec/fixup_go118.go @@ -0,0 +1,32 @@ +//go:build !go1.19 +// +build !go1.19 + +/* +Copyright 2022 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	osexec "os/exec"
+)
+
+func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd {
+	return cmd
+}
+
+func maskErrDot(err error) error {
+	return err
+}
diff --git a/vendor/k8s.io/utils/exec/fixup_go119.go b/vendor/k8s.io/utils/exec/fixup_go119.go
new file mode 100644
index 000000000..55874c929
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/fixup_go119.go
@@ -0,0 +1,40 @@
+//go:build go1.19
+// +build go1.19
+
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"errors"
+	osexec "os/exec"
+)
+
+// maskErrDotCmd reverts the behavior of osexec.Cmd to what it was before go1.19,
+// specifically setting the Err field to nil (LookPath returns a new error when the file
+// is resolved to the current directory).
+func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd {
+	cmd.Err = maskErrDot(cmd.Err)
+	return cmd
+}
+
+func maskErrDot(err error) error {
+	if err != nil && errors.Is(err, osexec.ErrDot) {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS
new file mode 100644
index 000000000..0d6392752
--- /dev/null
+++ b/vendor/k8s.io/utils/pointer/OWNERS
@@ -0,0 +1,10 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- apelisse
+- stewart-yu
+- thockin
+reviewers:
+- apelisse
+- stewart-yu
+- thockin
diff --git a/vendor/k8s.io/utils/pointer/README.md b/vendor/k8s.io/utils/pointer/README.md
new file mode 100644
index 000000000..2ca8073dc
--- /dev/null
+++ b/vendor/k8s.io/utils/pointer/README.md
@@ -0,0 +1,3 @@
+# Pointer
+
+This package provides some functions for pointer-based operations.
diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go
new file mode 100644
index 000000000..b673a6425
--- /dev/null
+++ b/vendor/k8s.io/utils/pointer/pointer.go
@@ -0,0 +1,249 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +// Deprecated: Use functions in k8s.io/utils/ptr instead: ptr.To to obtain +// a pointer, ptr.Deref to dereference a pointer, ptr.Equal to compare +// dereferenced pointers. +package pointer + +import ( + "time" + + "k8s.io/utils/ptr" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +// +// Deprecated: Use ptr.AllPtrFieldsNil instead. +var AllPtrFieldsNil = ptr.AllPtrFieldsNil + +// Int returns a pointer to an int. +var Int = ptr.To[int] + +// IntPtr is a function variable referring to Int. +// +// Deprecated: Use ptr.To instead. +var IntPtr = Int // for back-compat + +// IntDeref dereferences the int ptr and returns it if not nil, or else +// returns def. +var IntDeref = ptr.Deref[int] + +// IntPtrDerefOr is a function variable referring to IntDeref. +// +// Deprecated: Use ptr.Deref instead. +var IntPtrDerefOr = IntDeref // for back-compat + +// Int32 returns a pointer to an int32. +var Int32 = ptr.To[int32] + +// Int32Ptr is a function variable referring to Int32. +// +// Deprecated: Use ptr.To instead. +var Int32Ptr = Int32 // for back-compat + +// Int32Deref dereferences the int32 ptr and returns it if not nil, or else +// returns def. +var Int32Deref = ptr.Deref[int32] + +// Int32PtrDerefOr is a function variable referring to Int32Deref. +// +// Deprecated: Use ptr.Deref instead. +var Int32PtrDerefOr = Int32Deref // for back-compat + +// Int32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Int32Equal = ptr.Equal[int32] + +// Uint returns a pointer to an uint +var Uint = ptr.To[uint] + +// UintPtr is a function variable referring to Uint. +// +// Deprecated: Use ptr.To instead. +var UintPtr = Uint // for back-compat + +// UintDeref dereferences the uint ptr and returns it if not nil, or else +// returns def. +var UintDeref = ptr.Deref[uint] + +// UintPtrDerefOr is a function variable referring to UintDeref. +// +// Deprecated: Use ptr.Deref instead. +var UintPtrDerefOr = UintDeref // for back-compat + +// Uint32 returns a pointer to an uint32. +var Uint32 = ptr.To[uint32] + +// Uint32Ptr is a function variable referring to Uint32. +// +// Deprecated: Use ptr.To instead. +var Uint32Ptr = Uint32 // for back-compat + +// Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else +// returns def. +var Uint32Deref = ptr.Deref[uint32] + +// Uint32PtrDerefOr is a function variable referring to Uint32Deref. +// +// Deprecated: Use ptr.Deref instead. +var Uint32PtrDerefOr = Uint32Deref // for back-compat + +// Uint32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Uint32Equal = ptr.Equal[uint32] + +// Int64 returns a pointer to an int64. +var Int64 = ptr.To[int64] + +// Int64Ptr is a function variable referring to Int64. +// +// Deprecated: Use ptr.To instead. +var Int64Ptr = Int64 // for back-compat + +// Int64Deref dereferences the int64 ptr and returns it if not nil, or else +// returns def. +var Int64Deref = ptr.Deref[int64] + +// Int64PtrDerefOr is a function variable referring to Int64Deref. +// +// Deprecated: Use ptr.Deref instead. 
+var Int64PtrDerefOr = Int64Deref // for back-compat + +// Int64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Int64Equal = ptr.Equal[int64] + +// Uint64 returns a pointer to an uint64. +var Uint64 = ptr.To[uint64] + +// Uint64Ptr is a function variable referring to Uint64. +// +// Deprecated: Use ptr.To instead. +var Uint64Ptr = Uint64 // for back-compat + +// Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else +// returns def. +var Uint64Deref = ptr.Deref[uint64] + +// Uint64PtrDerefOr is a function variable referring to Uint64Deref. +// +// Deprecated: Use ptr.Deref instead. +var Uint64PtrDerefOr = Uint64Deref // for back-compat + +// Uint64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Uint64Equal = ptr.Equal[uint64] + +// Bool returns a pointer to a bool. +var Bool = ptr.To[bool] + +// BoolPtr is a function variable referring to Bool. +// +// Deprecated: Use ptr.To instead. +var BoolPtr = Bool // for back-compat + +// BoolDeref dereferences the bool ptr and returns it if not nil, or else +// returns def. +var BoolDeref = ptr.Deref[bool] + +// BoolPtrDerefOr is a function variable referring to BoolDeref. +// +// Deprecated: Use ptr.Deref instead. +var BoolPtrDerefOr = BoolDeref // for back-compat + +// BoolEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +var BoolEqual = ptr.Equal[bool] + +// String returns a pointer to a string. +var String = ptr.To[string] + +// StringPtr is a function variable referring to String. +// +// Deprecated: Use ptr.To instead. +var StringPtr = String // for back-compat + +// StringDeref dereferences the string ptr and returns it if not nil, or else +// returns def. +var StringDeref = ptr.Deref[string] + +// StringPtrDerefOr is a function variable referring to StringDeref. +// +// Deprecated: Use ptr.Deref instead. +var StringPtrDerefOr = StringDeref // for back-compat + +// StringEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +var StringEqual = ptr.Equal[string] + +// Float32 returns a pointer to a float32. +var Float32 = ptr.To[float32] + +// Float32Ptr is a function variable referring to Float32. +// +// Deprecated: Use ptr.To instead. +var Float32Ptr = Float32 + +// Float32Deref dereferences the float32 ptr and returns it if not nil, or else +// returns def. +var Float32Deref = ptr.Deref[float32] + +// Float32PtrDerefOr is a function variable referring to Float32Deref. +// +// Deprecated: Use ptr.Deref instead. +var Float32PtrDerefOr = Float32Deref // for back-compat + +// Float32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Float32Equal = ptr.Equal[float32] + +// Float64 returns a pointer to a float64. +var Float64 = ptr.To[float64] + +// Float64Ptr is a function variable referring to Float64. +// +// Deprecated: Use ptr.To instead. +var Float64Ptr = Float64 + +// Float64Deref dereferences the float64 ptr and returns it if not nil, or else +// returns def. +var Float64Deref = ptr.Deref[float64] + +// Float64PtrDerefOr is a function variable referring to Float64Deref. +// +// Deprecated: Use ptr.Deref instead. +var Float64PtrDerefOr = Float64Deref // for back-compat + +// Float64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Float64Equal = ptr.Equal[float64] + +// Duration returns a pointer to a time.Duration. 
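+// For example (editor's note), Duration(5*time.Second) is equivalent to
+// ptr.To(5 * time.Second) from k8s.io/utils/ptr, which this package now wraps:
+//
+//	d := Duration(5 * time.Second) // d is a *time.Duration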
+var Duration = ptr.To[time.Duration]
+
+// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else
+// returns def.
+var DurationDeref = ptr.Deref[time.Duration]
+
+// DurationEqual returns true if both arguments are nil or both arguments
+// dereference to the same value.
+var DurationEqual = ptr.Equal[time.Duration]
diff --git a/vendor/k8s.io/utils/trace/README.md b/vendor/k8s.io/utils/trace/README.md
new file mode 100644
index 000000000..1e9c69389
--- /dev/null
+++ b/vendor/k8s.io/utils/trace/README.md
@@ -0,0 +1,67 @@
+# Trace
+
+This package provides an interface for recording the latency of operations and logging details
+about all operations where the latency exceeds a limit.
+
+## Usage
+
+To create a trace:
+
+```go
+func doSomething() {
+    opTrace := trace.New("operation", Field{Key: "fieldKey1", Value: "fieldValue1"})
+    defer opTrace.LogIfLong(100 * time.Millisecond)
+    // do something
+}
+```
+
+To split a trace into multiple steps:
+
+```go
+func doSomething() {
+    opTrace := trace.New("operation")
+    defer opTrace.LogIfLong(100 * time.Millisecond)
+    // do step 1
+    opTrace.Step("step1", Field{Key: "stepFieldKey1", Value: "stepFieldValue1"})
+    // do step 2
+    opTrace.Step("step2")
+}
+```
+
+To nest traces:
+
+```go
+func doSomething() {
+    rootTrace := trace.New("rootOperation")
+    defer rootTrace.LogIfLong(100 * time.Millisecond)
+
+    func() {
+        nestedTrace := rootTrace.Nest("nested", Field{Key: "nestedFieldKey1", Value: "nestedFieldValue1"})
+        defer nestedTrace.LogIfLong(50 * time.Millisecond)
+        // do nested operation
+    }()
+}
+```
+
+Traces can also be logged unconditionally or introspected:
+
+```go
+opTrace.TotalTime() // Duration since the Trace was created
+opTrace.Log() // unconditionally log the trace
+```
+
+### Using context.Context to nest traces
+
+`context.Context` can be used to manage nested traces. Create traces by calling `trace.FromContext(ctx).Nest`.
+This is safe even if there is no parent trace already in the context because `((*Trace)(nil)).Nest()` returns
+a top level trace.
+
+```go
+func doSomething(ctx context.Context) {
+    opTrace := trace.FromContext(ctx).Nest("operation") // create a trace, possibly nested
+    ctx = trace.ContextWithTrace(ctx, opTrace) // make this trace the parent trace of the context
+    defer opTrace.LogIfLong(50 * time.Millisecond)
+
+    doSomethingElse(ctx)
+}
+```
\ No newline at end of file
diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go
new file mode 100644
index 000000000..559aebb59
--- /dev/null
+++ b/vendor/k8s.io/utils/trace/trace.go
@@ -0,0 +1,319 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trace
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"math/rand"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+var klogV = func(lvl klog.Level) bool {
+	return klog.V(lvl).Enabled()
+}
+
+// Field is a key value pair that provides additional details about the trace.
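+// For example (editor's sketch):
+//
+//	New("deletePods", Field{Key: "namespace", Value: "kube-system"})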
+type Field struct {
+	Key   string
+	Value interface{}
+}
+
+func (f Field) format() string {
+	return fmt.Sprintf("%s:%v", f.Key, f.Value)
+}
+
+func writeFields(b *bytes.Buffer, l []Field) {
+	for i, f := range l {
+		b.WriteString(f.format())
+		if i < len(l)-1 {
+			b.WriteString(",")
+		}
+	}
+}
+
+func writeTraceItemSummary(b *bytes.Buffer, msg string, totalTime time.Duration, startTime time.Time, fields []Field) {
+	b.WriteString(fmt.Sprintf("%q ", msg))
+	if len(fields) > 0 {
+		writeFields(b, fields)
+		b.WriteString(" ")
+	}
+
+	b.WriteString(fmt.Sprintf("%vms (%v)", durationToMilliseconds(totalTime), startTime.Format("15:04:05.000")))
+}
+
+func durationToMilliseconds(timeDuration time.Duration) int64 {
+	return timeDuration.Nanoseconds() / 1e6
+}
+
+type traceItem interface {
+	// rLock must be called before invoking time or writeItem.
+	rLock()
+	// rUnlock must be called after processing the item is complete.
+	rUnlock()
+
+	// time returns when the trace was recorded as completed.
+	time() time.Time
+	// writeItem outputs the traceItem to the buffer. If stepThreshold is non-nil, only output the
+	// traceItem if its duration exceeds the stepThreshold.
+	// Each line of output is prefixed by formatter to visually indent nested items.
+	writeItem(b *bytes.Buffer, formatter string, startTime time.Time, stepThreshold *time.Duration)
+}
+
+type traceStep struct {
+	stepTime time.Time
+	msg      string
+	fields   []Field
+}
+
+// rLock doesn't need to do anything because traceStep instances are immutable.
+func (s traceStep) rLock()   {}
+func (s traceStep) rUnlock() {}
+
+func (s traceStep) time() time.Time {
+	return s.stepTime
+}
+
+func (s traceStep) writeItem(b *bytes.Buffer, formatter string, startTime time.Time, stepThreshold *time.Duration) {
+	stepDuration := s.stepTime.Sub(startTime)
+	if stepThreshold == nil || *stepThreshold == 0 || stepDuration >= *stepThreshold || klogV(4) {
+		b.WriteString(fmt.Sprintf("%s---", formatter))
+		writeTraceItemSummary(b, s.msg, stepDuration, s.stepTime, s.fields)
+	}
+}
+
+// Trace keeps track of a set of "steps" and allows us to log a specific
+// step if it took longer than its share of the total allowed time.
+type Trace struct {
+	// constant fields
+	name        string
+	fields      []Field
+	startTime   time.Time
+	parentTrace *Trace
+	// fields guarded by a lock
+	lock       sync.RWMutex
+	threshold  *time.Duration
+	endTime    *time.Time
+	traceItems []traceItem
+}
+
+func (t *Trace) rLock() {
+	t.lock.RLock()
+}
+
+func (t *Trace) rUnlock() {
+	t.lock.RUnlock()
+}
+
+func (t *Trace) time() time.Time {
+	if t.endTime != nil {
+		return *t.endTime
+	}
+	return t.startTime // if the trace is incomplete, don't assume an end time
+}
+
+func (t *Trace) writeItem(b *bytes.Buffer, formatter string, startTime time.Time, stepThreshold *time.Duration) {
+	if t.durationIsWithinThreshold() || klogV(4) {
+		b.WriteString(fmt.Sprintf("%v[", formatter))
+		writeTraceItemSummary(b, t.name, t.TotalTime(), t.startTime, t.fields)
+		if st := t.calculateStepThreshold(); st != nil {
+			stepThreshold = st
+		}
+		t.writeTraceSteps(b, formatter+" ", stepThreshold)
+		b.WriteString("]")
+		return
+	}
+	// If the trace should not be written, still check for nested traces that should be written
+	for _, s := range t.traceItems {
+		if nestedTrace, ok := s.(*Trace); ok {
+			nestedTrace.writeItem(b, formatter, startTime, stepThreshold)
+		}
+	}
+}
+
+// New creates a Trace with the specified name. The name identifies the operation to be traced. The
+// Fields add key value pairs to provide additional details about the trace, such as operation inputs.
+func New(name string, fields ...Field) *Trace {
+	return &Trace{name: name, startTime: time.Now(), fields: fields}
+}
+
+// Step adds a new step with a specific message. Call this at the end of an execution step to record
+// how long it took. The Fields add key value pairs to provide additional details about the trace
+// step.
+func (t *Trace) Step(msg string, fields ...Field) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+	if t.traceItems == nil {
+		// traces almost always have less than 6 steps, do this to avoid more than a single allocation
+		t.traceItems = make([]traceItem, 0, 6)
+	}
+	t.traceItems = append(t.traceItems, traceStep{stepTime: time.Now(), msg: msg, fields: fields})
+}
+
+// Nest adds a nested trace with the given message and fields and returns it.
+// As a convenience, if the receiver is nil, returns a top level trace. This allows
+// one to call FromContext(ctx).Nest without having to check if the trace
+// in the context is nil.
+func (t *Trace) Nest(msg string, fields ...Field) *Trace {
+	newTrace := New(msg, fields...)
+	if t != nil {
+		newTrace.parentTrace = t
+		t.lock.Lock()
+		t.traceItems = append(t.traceItems, newTrace)
+		t.lock.Unlock()
+	}
+	return newTrace
+}
+
+// Log is used to dump all the steps in the Trace. It also logs the nested trace messages using indentation.
+// If the Trace is nested it is not immediately logged. Instead, it is logged when the trace it is nested within
+// is logged.
+func (t *Trace) Log() {
+	endTime := time.Now()
+	t.lock.Lock()
+	t.endTime = &endTime
+	t.lock.Unlock()
+	// an explicit logging request should dump all the steps out at the higher level
+	if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace
+		t.logTrace()
+	}
+}
+
+// LogIfLong only logs the trace if the duration of the trace exceeds the threshold.
+// Only steps that took longer than their share or the given threshold are logged.
+// If klog is at verbosity level 4 or higher and the trace took longer than the threshold,
+// all substeps and subtraces are logged. Otherwise, only those which took longer than
+// their own threshold.
+// If the Trace is nested it is not immediately logged. Instead, it is logged when the trace it
+// is nested within is logged.
+func (t *Trace) LogIfLong(threshold time.Duration) {
+	t.lock.Lock()
+	t.threshold = &threshold
+	t.lock.Unlock()
+	t.Log()
+}
+
+// logTrace finds all traces in a hierarchy of nested traces that should be logged but do not have any
+// parents that will be logged, due to threshold limits, and logs them as top level traces.
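+// Typical pairing (editor's illustrative sketch): create the root trace, then
+// let LogIfLong decide at the end whether anything is worth emitting:
+//
+//	t := New("syncLoop")
+//	defer t.LogIfLong(100 * time.Millisecond)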
+func (t *Trace) logTrace() {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+	if t.durationIsWithinThreshold() {
+		var buffer bytes.Buffer
+		traceNum := rand.Int31()
+
+		totalTime := t.endTime.Sub(t.startTime)
+		buffer.WriteString(fmt.Sprintf("Trace[%d]: %q ", traceNum, t.name))
+		if len(t.fields) > 0 {
+			writeFields(&buffer, t.fields)
+			buffer.WriteString(" ")
+		}
+
+		// if any step took more than its share of the total allowed time, it deserves a higher log level
+		buffer.WriteString(fmt.Sprintf("(%v) (total time: %vms):", t.startTime.Format("02-Jan-2006 15:04:05.000"), totalTime.Milliseconds()))
+		stepThreshold := t.calculateStepThreshold()
+		t.writeTraceSteps(&buffer, fmt.Sprintf("\nTrace[%d]: ", traceNum), stepThreshold)
+		buffer.WriteString(fmt.Sprintf("\nTrace[%d]: [%v] [%v] END\n", traceNum, t.endTime.Sub(t.startTime), totalTime))
+
+		klog.Info(buffer.String())
+		return
+	}
+
+	// If the trace should not be logged, still check if nested traces should be logged
+	for _, s := range t.traceItems {
+		if nestedTrace, ok := s.(*Trace); ok {
+			nestedTrace.logTrace()
+		}
+	}
+}
+
+func (t *Trace) writeTraceSteps(b *bytes.Buffer, formatter string, stepThreshold *time.Duration) {
+	lastStepTime := t.startTime
+	for _, stepOrTrace := range t.traceItems {
+		stepOrTrace.rLock()
+		stepOrTrace.writeItem(b, formatter, lastStepTime, stepThreshold)
+		lastStepTime = stepOrTrace.time()
+		stepOrTrace.rUnlock()
+	}
+}
+
+func (t *Trace) durationIsWithinThreshold() bool {
+	if t.endTime == nil { // we don't assume incomplete traces meet the threshold
+		return false
+	}
+	return t.threshold == nil || *t.threshold == 0 || t.endTime.Sub(t.startTime) >= *t.threshold
+}
+
+// TotalTime can be used to figure out how long it took since the Trace was created.
+func (t *Trace) TotalTime() time.Duration {
+	return time.Since(t.startTime)
+}
+
+// calculateStepThreshold returns a threshold for the individual steps of a trace, or nil if there is no threshold and
+// all steps should be written.
+func (t *Trace) calculateStepThreshold() *time.Duration {
+	if t.threshold == nil {
+		return nil
+	}
+	lenTrace := len(t.traceItems) + 1
+	traceThreshold := *t.threshold
+	for _, s := range t.traceItems {
+		nestedTrace, ok := s.(*Trace)
+		if ok {
+			nestedTrace.lock.RLock()
+			if nestedTrace.threshold != nil {
+				traceThreshold = traceThreshold - *nestedTrace.threshold
+				lenTrace--
+			}
+			nestedTrace.lock.RUnlock()
+		}
+	}
+
+	// the limit threshold is used when the threshold (remaining after subtracting
+	// that of the child trace) is getting very close to zero, to prevent unnecessary logging
+	limitThreshold := *t.threshold / 4
+	if traceThreshold < limitThreshold {
+		traceThreshold = limitThreshold
+		lenTrace = len(t.traceItems) + 1
+	}
+
+	stepThreshold := traceThreshold / time.Duration(lenTrace)
+	return &stepThreshold
+}
+
+// ContextTraceKey provides a common key for traces in context.Context values.
+type ContextTraceKey struct{}
+
+// FromContext returns the trace keyed by ContextTraceKey in the context values, if one
+// is present, or nil if there is no trace in the Context.
+// It is safe to call Nest() on the returned value even if it is nil because ((*Trace)(nil)).Nest returns a top level
+// trace.
+func FromContext(ctx context.Context) *Trace {
+	if v, ok := ctx.Value(ContextTraceKey{}).(*Trace); ok {
+		return v
+	}
+	return nil
+}
+
+// ContextWithTrace returns a context with trace included in the context values, keyed by ContextTraceKey.
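+// For example (editor's sketch), pairing ContextWithTrace with FromContext:
+//
+//	ctx = ContextWithTrace(ctx, opTrace)
+//	child := FromContext(ctx).Nest("step")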
+func ContextWithTrace(ctx context.Context, trace *Trace) context.Context { + return context.WithValue(ctx, ContextTraceKey{}, trace) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 3b2714e14..4f890f03f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,6 +17,9 @@ cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/stubs +# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 +## explicit; go 1.20 +github.com/AdaLogics/go-fuzz-headers # github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore @@ -58,21 +61,38 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas -# github.com/BurntSushi/toml v0.3.1 -## explicit +# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 +## explicit; go 1.16 +github.com/Azure/go-ansiterm +github.com/Azure/go-ansiterm/winterm +# github.com/BurntSushi/toml v1.3.2 +## explicit; go 1.16 github.com/BurntSushi/toml +github.com/BurntSushi/toml/internal +# github.com/MakeNowJust/heredoc v1.0.0 +## explicit; go 1.12 +github.com/MakeNowJust/heredoc # github.com/Masterminds/goutils v1.1.1 ## explicit github.com/Masterminds/goutils -# github.com/Masterminds/semver v1.5.0 -## explicit -github.com/Masterminds/semver -# github.com/Masterminds/sprig v2.22.0+incompatible -## explicit -github.com/Masterminds/sprig +# github.com/Masterminds/semver/v3 v3.2.1 +## explicit; go 1.18 +github.com/Masterminds/semver/v3 +# github.com/Masterminds/sprig/v3 v3.2.3 +## explicit; go 1.13 +github.com/Masterminds/sprig/v3 +# github.com/Masterminds/squirrel v1.5.4 +## explicit; go 1.14 +github.com/Masterminds/squirrel +# github.com/Microsoft/hcsshim v0.11.4 +## explicit; go 1.18 +github.com/Microsoft/hcsshim/osversion # github.com/aliyun/aliyun-oss-go-sdk v2.1.8+incompatible ## explicit github.com/aliyun/aliyun-oss-go-sdk/oss +# github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 +## explicit; go 1.12 +github.com/asaskevich/govalidator # github.com/aws/aws-sdk-go v1.55.6 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws @@ -135,6 +155,36 @@ github.com/beorn7/perks/quantile # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/chai2010/gettext-go v1.0.2 +## explicit; go 1.14 +github.com/chai2010/gettext-go +github.com/chai2010/gettext-go/mo +github.com/chai2010/gettext-go/plural +github.com/chai2010/gettext-go/po +# github.com/containerd/containerd v1.7.11 +## explicit; go 1.19 +github.com/containerd/containerd/archive/compression +github.com/containerd/containerd/content +github.com/containerd/containerd/content/local +github.com/containerd/containerd/errdefs +github.com/containerd/containerd/filters +github.com/containerd/containerd/images +github.com/containerd/containerd/labels +github.com/containerd/containerd/log +github.com/containerd/containerd/pkg/randutil +github.com/containerd/containerd/platforms +github.com/containerd/containerd/reference +github.com/containerd/containerd/reference/docker +github.com/containerd/containerd/remotes +github.com/containerd/containerd/remotes/docker +github.com/containerd/containerd/remotes/docker/auth +github.com/containerd/containerd/remotes/docker/schema1 
+github.com/containerd/containerd/remotes/errors +github.com/containerd/containerd/tracing +github.com/containerd/containerd/version +# github.com/containerd/log v0.1.0 +## explicit; go 1.20 +github.com/containerd/log # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver @@ -150,6 +200,60 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/docker/cli v24.0.6+incompatible +## explicit +github.com/docker/cli/cli/config +github.com/docker/cli/cli/config/configfile +github.com/docker/cli/cli/config/credentials +github.com/docker/cli/cli/config/types +# github.com/docker/distribution v2.8.2+incompatible +## explicit +github.com/docker/distribution +github.com/docker/distribution/digestset +github.com/docker/distribution/metrics +github.com/docker/distribution/reference +github.com/docker/distribution/registry/api/errcode +github.com/docker/distribution/registry/api/v2 +github.com/docker/distribution/registry/client +github.com/docker/distribution/registry/client/auth +github.com/docker/distribution/registry/client/auth/challenge +github.com/docker/distribution/registry/client/transport +github.com/docker/distribution/registry/storage/cache +github.com/docker/distribution/registry/storage/cache/memory +# github.com/docker/docker v24.0.7+incompatible +## explicit +github.com/docker/docker/api/types +github.com/docker/docker/api/types/blkiodev +github.com/docker/docker/api/types/container +github.com/docker/docker/api/types/filters +github.com/docker/docker/api/types/mount +github.com/docker/docker/api/types/network +github.com/docker/docker/api/types/registry +github.com/docker/docker/api/types/strslice +github.com/docker/docker/api/types/swarm +github.com/docker/docker/api/types/swarm/runtime +github.com/docker/docker/api/types/versions +github.com/docker/docker/api/types/volume +github.com/docker/docker/errdefs +github.com/docker/docker/pkg/homedir +github.com/docker/docker/pkg/ioutils +github.com/docker/docker/pkg/jsonmessage +github.com/docker/docker/pkg/longpath +github.com/docker/docker/registry +# github.com/docker/docker-credential-helpers v0.7.0 +## explicit; go 1.18 +github.com/docker/docker-credential-helpers/client +github.com/docker/docker-credential-helpers/credentials +# github.com/docker/go-connections v0.4.0 +## explicit +github.com/docker/go-connections/nat +github.com/docker/go-connections/tlsconfig +# github.com/docker/go-metrics v0.0.1 +## explicit; go 1.11 +github.com/docker/go-metrics +# github.com/docker/go-units v0.5.0 +## explicit +github.com/docker/go-units # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize @@ -157,21 +261,36 @@ github.com/dustin/go-humanize ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log -# github.com/evanphx/json-patch v4.12.0+incompatible +# github.com/evanphx/json-patch v5.7.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.8.0 ## explicit; go 1.18 github.com/evanphx/json-patch/v5 github.com/evanphx/json-patch/v5/internal/json -# github.com/ghodss/yaml v1.0.0 +# github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d ## explicit -github.com/ghodss/yaml +github.com/exponent-io/jsonpath +# github.com/fatih/color v1.13.0 +## explicit; go 1.13 +github.com/fatih/color +# github.com/felixge/httpsnoop v1.0.3 +## explicit; go 1.13 +github.com/felixge/httpsnoop +# github.com/go-errors/errors v1.4.2 +## 
explicit; go 1.14 +github.com/go-errors/errors +# github.com/go-gorp/gorp/v3 v3.1.0 +## explicit; go 1.18 +github.com/go-gorp/gorp/v3 # github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr github.com/go-logr/logr/slogr +# github.com/go-logr/stdr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/stdr # github.com/go-openapi/jsonpointer v0.19.6 ## explicit; go 1.13 github.com/go-openapi/jsonpointer @@ -264,6 +383,9 @@ github.com/google/s2a-go/internal/v2/certverifier github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore github.com/google/s2a-go/stream +# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 +## explicit; go 1.13 +github.com/google/shlex # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid @@ -297,9 +419,20 @@ github.com/gophercloud/gophercloud/testhelper/client ## explicit; go 1.13 github.com/gophercloud/utils/env github.com/gophercloud/utils/openstack/clientconfig +# github.com/gorilla/mux v1.8.0 +## explicit; go 1.12 +github.com/gorilla/mux # github.com/gorilla/websocket v1.5.0 ## explicit; go 1.12 github.com/gorilla/websocket +# github.com/gosuri/uitable v0.0.4 +## explicit +github.com/gosuri/uitable +github.com/gosuri/uitable/util/strutil +github.com/gosuri/uitable/util/wordwrap +# github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 +## explicit +github.com/gregjones/httpcache # github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware @@ -311,10 +444,16 @@ github.com/grpc-ecosystem/go-grpc-prometheus github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities -# github.com/huandu/xstrings v1.5.0 +# github.com/hashicorp/errwrap v1.1.0 +## explicit +github.com/hashicorp/errwrap +# github.com/hashicorp/go-multierror v1.1.1 +## explicit; go 1.13 +github.com/hashicorp/go-multierror +# github.com/huandu/xstrings v1.4.0 ## explicit; go 1.12 github.com/huandu/xstrings -# github.com/imdario/mergo v0.3.11 +# github.com/imdario/mergo v0.3.13 ## explicit; go 1.13 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 @@ -323,6 +462,10 @@ github.com/inconshreveable/mousetrap # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath +# github.com/jmoiron/sqlx v1.3.5 +## explicit; go 1.10 +github.com/jmoiron/sqlx +github.com/jmoiron/sqlx/reflectx # github.com/jonboulle/clockwork v0.2.2 ## explicit; go 1.13 github.com/jonboulle/clockwork @@ -341,27 +484,66 @@ github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash +# github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 +## explicit +github.com/lann/builder +# github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 +## explicit +github.com/lann/ps +# github.com/lib/pq v1.10.9 +## explicit; go 1.13 +github.com/lib/pq +github.com/lib/pq/oid +github.com/lib/pq/scram +# github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de +## explicit +github.com/liggitt/tabwriter # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/mitchellh/copystructure v1.0.0 -## explicit +# github.com/mattn/go-colorable v0.1.13 +## explicit; go 1.15 +github.com/mattn/go-colorable +# 
github.com/mattn/go-isatty v0.0.17 +## explicit; go 1.15 +github.com/mattn/go-isatty +# github.com/mattn/go-runewidth v0.0.9 +## explicit; go 1.9 +github.com/mattn/go-runewidth +# github.com/mitchellh/copystructure v1.2.0 +## explicit; go 1.15 github.com/mitchellh/copystructure -# github.com/mitchellh/reflectwalk v1.0.0 +# github.com/mitchellh/go-wordwrap v1.0.1 +## explicit; go 1.14 +github.com/mitchellh/go-wordwrap +# github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk +# github.com/moby/locker v1.0.1 +## explicit; go 1.13 +github.com/moby/locker # github.com/moby/spdystream v0.2.0 ## explicit; go 1.13 github.com/moby/spdystream github.com/moby/spdystream/spdy +# github.com/moby/term v0.5.0 +## explicit; go 1.18 +github.com/moby/term +github.com/moby/term/windows # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 github.com/modern-go/reflect2 +# github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 +## explicit +github.com/monochromegane/go-gitignore +# github.com/morikuni/aec v1.0.0 +## explicit +github.com/morikuni/aec # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg @@ -405,6 +587,16 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types +# github.com/opencontainers/go-digest v1.0.0 +## explicit; go 1.13 +github.com/opencontainers/go-digest +# github.com/opencontainers/image-spec v1.1.0-rc5 +## explicit; go 1.18 +github.com/opencontainers/image-spec/specs-go +github.com/opencontainers/image-spec/specs-go/v1 +# github.com/peterbourgon/diskv v2.0.1+incompatible +## explicit +github.com/peterbourgon/diskv # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -430,14 +622,27 @@ github.com/prometheus/procfs/internal/util # github.com/robfig/cron/v3 v3.0.1 ## explicit; go 1.12 github.com/robfig/cron/v3 +# github.com/rubenv/sql-migrate v1.5.2 +## explicit; go 1.17 +github.com/rubenv/sql-migrate +github.com/rubenv/sql-migrate/sqlparse +# github.com/russross/blackfriday/v2 v2.1.0 +## explicit +github.com/russross/blackfriday/v2 # github.com/satori/go.uuid v1.2.0 ## explicit +# github.com/shopspring/decimal v1.3.1 +## explicit; go 1.13 +github.com/shopspring/decimal # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus # github.com/soheilhy/cmux v0.1.5 ## explicit; go 1.11 github.com/soheilhy/cmux +# github.com/spf13/cast v1.5.0 +## explicit; go 1.18 +github.com/spf13/cast # github.com/spf13/cobra v1.8.1 ## explicit; go 1.15 github.com/spf13/cobra @@ -447,9 +652,21 @@ github.com/spf13/pflag # github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 ## explicit; go 1.15 github.com/tmc/grpc-websocket-proxy/wsproxy +# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb +## explicit +github.com/xeipuuv/gojsonpointer +# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 +## explicit +github.com/xeipuuv/gojsonreference +# github.com/xeipuuv/gojsonschema v1.2.0 +## explicit +github.com/xeipuuv/gojsonschema # github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 ## explicit github.com/xiang90/probing +# github.com/xlab/treeprint v1.2.0 +## explicit; go 1.13 +github.com/xlab/treeprint # go.etcd.io/bbolt v1.3.11 ## explicit; go 1.22 
go.etcd.io/bbolt @@ -551,6 +768,38 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 +## explicit; go 1.19 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil +# go.opentelemetry.io/otel v1.19.0 +## explicit; go 1.20 +go.opentelemetry.io/otel +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage +go.opentelemetry.io/otel/codes +go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute +go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/global +go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/v1.17.0 +go.opentelemetry.io/otel/semconv/v1.21.0 +# go.opentelemetry.io/otel/metric v1.19.0 +## explicit; go 1.20 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/embedded +# go.opentelemetry.io/otel/trace v1.19.0 +## explicit; go 1.20 +go.opentelemetry.io/otel/trace +# go.starlark.net v0.0.0-20230525235612-a134d8f9ddca +## explicit; go 1.16 +go.starlark.net/internal/compile +go.starlark.net/internal/spell +go.starlark.net/resolve +go.starlark.net/starlark +go.starlark.net/starlarkstruct +go.starlark.net/syntax # go.uber.org/mock v0.5.0 ## explicit; go 1.22 go.uber.org/mock/gomock @@ -573,6 +822,7 @@ go.uber.org/zap/zapcore ## explicit; go 1.20 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish +golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/cryptobyte @@ -580,6 +830,13 @@ golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 +golang.org/x/crypto/openpgp +golang.org/x/crypto/openpgp/armor +golang.org/x/crypto/openpgp/clearsign +golang.org/x/crypto/openpgp/elgamal +golang.org/x/crypto/openpgp/errors +golang.org/x/crypto/openpgp/packet +golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/scrypt # golang.org/x/net v0.34.0 @@ -610,10 +867,12 @@ golang.org/x/oauth2/jws golang.org/x/oauth2/jwt # golang.org/x/sync v0.10.0 ## explicit; go 1.18 +golang.org/x/sync/errgroup golang.org/x/sync/semaphore # golang.org/x/sys v0.29.0 ## explicit; go 1.18 golang.org/x/sys/cpu +golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows @@ -818,8 +1077,48 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 +# helm.sh/helm/v3 v3.14.0 +## explicit; go 1.21 +helm.sh/helm/v3/internal/fileutil +helm.sh/helm/v3/internal/resolver +helm.sh/helm/v3/internal/sympath +helm.sh/helm/v3/internal/third_party/dep/fs +helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util +helm.sh/helm/v3/internal/tlsutil +helm.sh/helm/v3/internal/urlutil +helm.sh/helm/v3/internal/version +helm.sh/helm/v3/pkg/action +helm.sh/helm/v3/pkg/chart +helm.sh/helm/v3/pkg/chart/loader +helm.sh/helm/v3/pkg/chartutil +helm.sh/helm/v3/pkg/cli +helm.sh/helm/v3/pkg/downloader +helm.sh/helm/v3/pkg/engine +helm.sh/helm/v3/pkg/getter +helm.sh/helm/v3/pkg/helmpath +helm.sh/helm/v3/pkg/helmpath/xdg +helm.sh/helm/v3/pkg/ignore +helm.sh/helm/v3/pkg/kube +helm.sh/helm/v3/pkg/kube/fake +helm.sh/helm/v3/pkg/lint +helm.sh/helm/v3/pkg/lint/rules +helm.sh/helm/v3/pkg/lint/support +helm.sh/helm/v3/pkg/plugin +helm.sh/helm/v3/pkg/postrender +helm.sh/helm/v3/pkg/provenance 
+helm.sh/helm/v3/pkg/pusher +helm.sh/helm/v3/pkg/registry +helm.sh/helm/v3/pkg/release +helm.sh/helm/v3/pkg/releaseutil +helm.sh/helm/v3/pkg/repo +helm.sh/helm/v3/pkg/storage +helm.sh/helm/v3/pkg/storage/driver +helm.sh/helm/v3/pkg/time +helm.sh/helm/v3/pkg/uploader # k8s.io/api v0.29.2 ## explicit; go 1.21 +k8s.io/api/admission/v1 +k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1alpha1 k8s.io/api/admissionregistration/v1beta1 @@ -854,6 +1153,7 @@ k8s.io/api/flowcontrol/v1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 k8s.io/api/flowcontrol/v1beta3 +k8s.io/api/imagepolicy/v1alpha1 k8s.io/api/networking/v1 k8s.io/api/networking/v1alpha1 k8s.io/api/networking/v1beta1 @@ -872,6 +1172,11 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 +# k8s.io/apiextensions-apiserver v0.29.2 +## explicit; go 1.21 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 # k8s.io/apimachinery v0.29.2 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality @@ -879,10 +1184,12 @@ k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/api/validation +k8s.io/apimachinery/pkg/api/validation/path k8s.io/apimachinery/pkg/apis/meta/internalversion k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion @@ -899,7 +1206,10 @@ k8s.io/apimachinery/pkg/runtime/serializer/streaming k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/util/dump +k8s.io/apimachinery/pkg/util/duration k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/httpstream @@ -919,6 +1229,7 @@ k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/util/version k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version @@ -926,6 +1237,15 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect +# k8s.io/apiserver v0.29.2 +## explicit; go 1.21 +k8s.io/apiserver/pkg/endpoints/deprecation +# k8s.io/cli-runtime v0.29.0 +## explicit; go 1.21 +k8s.io/cli-runtime/pkg/genericclioptions +k8s.io/cli-runtime/pkg/genericiooptions +k8s.io/cli-runtime/pkg/printers +k8s.io/cli-runtime/pkg/resource # k8s.io/client-go v0.29.2 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -977,6 +1297,8 @@ k8s.io/client-go/applyconfigurations/storage/v1 k8s.io/client-go/applyconfigurations/storage/v1alpha1 k8s.io/client-go/applyconfigurations/storage/v1beta1 k8s.io/client-go/discovery +k8s.io/client-go/discovery/cached/disk +k8s.io/client-go/discovery/cached/memory k8s.io/client-go/dynamic k8s.io/client-go/kubernetes 
k8s.io/client-go/kubernetes/scheme @@ -1033,6 +1355,8 @@ k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/metadata k8s.io/client-go/openapi +k8s.io/client-go/openapi/cached +k8s.io/client-go/openapi3 k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install k8s.io/client-go/pkg/apis/clientauthentication/v1 @@ -1042,15 +1366,28 @@ k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/watch k8s.io/client-go/restmapper +k8s.io/client-go/scale +k8s.io/client-go/scale/scheme +k8s.io/client-go/scale/scheme/appsint +k8s.io/client-go/scale/scheme/appsv1beta1 +k8s.io/client-go/scale/scheme/appsv1beta2 +k8s.io/client-go/scale/scheme/autoscalingv1 +k8s.io/client-go/scale/scheme/extensionsint +k8s.io/client-go/scale/scheme/extensionsv1beta1 k8s.io/client-go/testing +k8s.io/client-go/third_party/forked/golang/template k8s.io/client-go/tools/auth +k8s.io/client-go/tools/cache +k8s.io/client-go/tools/cache/synctrack k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/metrics +k8s.io/client-go/tools/pager k8s.io/client-go/tools/reference k8s.io/client-go/tools/remotecommand +k8s.io/client-go/tools/watch k8s.io/client-go/transport k8s.io/client-go/transport/spdy k8s.io/client-go/transport/websocket @@ -1059,25 +1396,13 @@ k8s.io/client-go/util/connrotation k8s.io/client-go/util/exec k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir +k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/helm v2.17.0+incompatible -## explicit -k8s.io/helm/pkg/chartutil -k8s.io/helm/pkg/engine -k8s.io/helm/pkg/helm -k8s.io/helm/pkg/ignore -k8s.io/helm/pkg/manifest -k8s.io/helm/pkg/proto/hapi/chart -k8s.io/helm/pkg/proto/hapi/release -k8s.io/helm/pkg/proto/hapi/services -k8s.io/helm/pkg/proto/hapi/version -k8s.io/helm/pkg/releaseutil -k8s.io/helm/pkg/renderutil -k8s.io/helm/pkg/storage/errors -k8s.io/helm/pkg/sympath -k8s.io/helm/pkg/version +# k8s.io/component-base v0.29.2 +## explicit; go 1.21 +k8s.io/component-base/version # k8s.io/klog/v2 v2.110.1 ## explicit; go 1.13 k8s.io/klog/v2 @@ -1097,16 +1422,45 @@ k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto +k8s.io/kube-openapi/pkg/util/proto/validation k8s.io/kube-openapi/pkg/validation/spec +# k8s.io/kubectl v0.29.0 +## explicit; go 1.21 +k8s.io/kubectl/pkg/cmd/util +k8s.io/kubectl/pkg/scheme +k8s.io/kubectl/pkg/util/i18n +k8s.io/kubectl/pkg/util/interrupt +k8s.io/kubectl/pkg/util/openapi +k8s.io/kubectl/pkg/util/templates +k8s.io/kubectl/pkg/util/term +k8s.io/kubectl/pkg/validation # k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 ## explicit; go 1.18 +k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing +k8s.io/utils/exec k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net +k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices +k8s.io/utils/trace +# oras.land/oras-go v1.2.4 +## explicit; go 1.18 +oras.land/oras-go/pkg/artifact +oras.land/oras-go/pkg/auth +oras.land/oras-go/pkg/auth/docker +oras.land/oras-go/pkg/content +oras.land/oras-go/pkg/context +oras.land/oras-go/pkg/oras +oras.land/oras-go/pkg/registry 
+oras.land/oras-go/pkg/registry/remote +oras.land/oras-go/pkg/registry/remote/auth +oras.land/oras-go/pkg/registry/remote/internal/errutil +oras.land/oras-go/pkg/registry/remote/internal/syncutil +oras.land/oras-go/pkg/target # sigs.k8s.io/controller-runtime v0.17.5 ## explicit; go 1.21 sigs.k8s.io/controller-runtime/pkg/client @@ -1122,6 +1476,89 @@ sigs.k8s.io/controller-runtime/pkg/log ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json +# sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 +## explicit; go 1.19 +sigs.k8s.io/kustomize/api/filters/annotations +sigs.k8s.io/kustomize/api/filters/fieldspec +sigs.k8s.io/kustomize/api/filters/filtersutil +sigs.k8s.io/kustomize/api/filters/fsslice +sigs.k8s.io/kustomize/api/filters/iampolicygenerator +sigs.k8s.io/kustomize/api/filters/imagetag +sigs.k8s.io/kustomize/api/filters/labels +sigs.k8s.io/kustomize/api/filters/nameref +sigs.k8s.io/kustomize/api/filters/namespace +sigs.k8s.io/kustomize/api/filters/patchjson6902 +sigs.k8s.io/kustomize/api/filters/patchstrategicmerge +sigs.k8s.io/kustomize/api/filters/prefix +sigs.k8s.io/kustomize/api/filters/refvar +sigs.k8s.io/kustomize/api/filters/replacement +sigs.k8s.io/kustomize/api/filters/replicacount +sigs.k8s.io/kustomize/api/filters/suffix +sigs.k8s.io/kustomize/api/filters/valueadd +sigs.k8s.io/kustomize/api/hasher +sigs.k8s.io/kustomize/api/ifc +sigs.k8s.io/kustomize/api/image +sigs.k8s.io/kustomize/api/internal/accumulator +sigs.k8s.io/kustomize/api/internal/builtins +sigs.k8s.io/kustomize/api/internal/generators +sigs.k8s.io/kustomize/api/internal/git +sigs.k8s.io/kustomize/api/internal/kusterr +sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig +sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers +sigs.k8s.io/kustomize/api/internal/plugins/execplugin +sigs.k8s.io/kustomize/api/internal/plugins/fnplugin +sigs.k8s.io/kustomize/api/internal/plugins/loader +sigs.k8s.io/kustomize/api/internal/plugins/utils +sigs.k8s.io/kustomize/api/internal/target +sigs.k8s.io/kustomize/api/internal/utils +sigs.k8s.io/kustomize/api/internal/validate +sigs.k8s.io/kustomize/api/konfig +sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts +sigs.k8s.io/kustomize/api/krusty +sigs.k8s.io/kustomize/api/kv +sigs.k8s.io/kustomize/api/loader +sigs.k8s.io/kustomize/api/provenance +sigs.k8s.io/kustomize/api/provider +sigs.k8s.io/kustomize/api/resmap +sigs.k8s.io/kustomize/api/resource +sigs.k8s.io/kustomize/api/types +# sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 +## explicit; go 1.19 +sigs.k8s.io/kustomize/kyaml/comments +sigs.k8s.io/kustomize/kyaml/errors +sigs.k8s.io/kustomize/kyaml/ext +sigs.k8s.io/kustomize/kyaml/fieldmeta +sigs.k8s.io/kustomize/kyaml/filesys +sigs.k8s.io/kustomize/kyaml/fn/runtime/container +sigs.k8s.io/kustomize/kyaml/fn/runtime/exec +sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil +sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark +sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml +sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util +sigs.k8s.io/kustomize/kyaml/kio +sigs.k8s.io/kustomize/kyaml/kio/filters +sigs.k8s.io/kustomize/kyaml/kio/kioutil +sigs.k8s.io/kustomize/kyaml/openapi +sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi +sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2 +sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi +sigs.k8s.io/kustomize/kyaml/order +sigs.k8s.io/kustomize/kyaml/resid +sigs.k8s.io/kustomize/kyaml/runfn 
+sigs.k8s.io/kustomize/kyaml/sets +sigs.k8s.io/kustomize/kyaml/sliceutil +sigs.k8s.io/kustomize/kyaml/utils +sigs.k8s.io/kustomize/kyaml/yaml +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation +sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field +sigs.k8s.io/kustomize/kyaml/yaml/merge2 +sigs.k8s.io/kustomize/kyaml/yaml/merge3 +sigs.k8s.io/kustomize/kyaml/yaml/schema +sigs.k8s.io/kustomize/kyaml/yaml/walk # sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath diff --git a/vendor/oras.land/oras-go/LICENSE b/vendor/oras.land/oras-go/LICENSE new file mode 100644 index 000000000..a67d16938 --- /dev/null +++ b/vendor/oras.land/oras-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 ORAS Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/oras.land/oras-go/pkg/artifact/consts.go b/vendor/oras.land/oras-go/pkg/artifact/consts.go new file mode 100644 index 000000000..929348723 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/artifact/consts.go @@ -0,0 +1,22 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package artifact + +const ( + // UnknownConfigMediaType is the default mediaType used when no + // config media type is specified. + UnknownConfigMediaType = "application/vnd.unknown.config.v1+json" +) diff --git a/vendor/oras.land/oras-go/pkg/auth/client.go b/vendor/oras.land/oras-go/pkg/auth/client.go new file mode 100644 index 000000000..2c3be4a61 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/auth/client.go @@ -0,0 +1,45 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + "context" + "errors" + "net/http" + + "github.com/containerd/containerd/remotes" +) + +// Common errors +var ( + ErrNotLoggedIn = errors.New("not logged in") +) + +// Client provides authentication operations for remotes. +type Client interface { + // Login logs in to a remote server identified by the hostname. 
+ // Deprecated: use LoginWithOpts + Login(ctx context.Context, hostname, username, secret string, insecure bool) error + // LoginWithOpts logs in to a remote server identified by the hostname with custom options + LoginWithOpts(options ...LoginOption) error + // Logout logs out from a remote server identified by the hostname. + Logout(ctx context.Context, hostname string) error + // Resolver returns a new authenticated resolver. + // Deprecated: use ResolverWithOpts + Resolver(ctx context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error) + // ResolverWithOpts returns a new authenticated resolver with custom options. + ResolverWithOpts(options ...ResolverOption) (remotes.Resolver, error) +} diff --git a/vendor/oras.land/oras-go/pkg/auth/client_opts.go b/vendor/oras.land/oras-go/pkg/auth/client_opts.go new file mode 100644 index 000000000..f385b2408 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/auth/client_opts.go @@ -0,0 +1,123 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + "context" + "net/http" +) + +type ( + // LoginOption allows specifying various settings on login. + LoginOption func(*LoginSettings) + + // LoginSettings represent all the various settings on login. + LoginSettings struct { + Context context.Context + Hostname string + Username string + Secret string + CertFile string + KeyFile string + CAFile string + Insecure bool + UserAgent string + } +) + +// WithLoginContext returns a function that sets the Context setting on login. +func WithLoginContext(context context.Context) LoginOption { + return func(settings *LoginSettings) { + settings.Context = context + } +} + +// WithLoginHostname returns a function that sets the Hostname setting on login. +func WithLoginHostname(hostname string) LoginOption { + return func(settings *LoginSettings) { + settings.Hostname = hostname + } +} + +// WithLoginUsername returns a function that sets the Username setting on login. +func WithLoginUsername(username string) LoginOption { + return func(settings *LoginSettings) { + settings.Username = username + } +} + +// WithLoginSecret returns a function that sets the Secret setting on login. +func WithLoginSecret(secret string) LoginOption { + return func(settings *LoginSettings) { + settings.Secret = secret + } +} + +// WithLoginInsecure returns a function that sets the Insecure setting to true on login. +func WithLoginInsecure() LoginOption { + return func(settings *LoginSettings) { + settings.Insecure = true + } +} + +// WithLoginTLS returns a function that sets the tls settings on login. +func WithLoginTLS(certFile, keyFile, caFile string) LoginOption { + return func(settings *LoginSettings) { + settings.CertFile = certFile + settings.KeyFile = keyFile + settings.CAFile = caFile + } +} + +// WithLoginUserAgent returns a function that sets the UserAgent setting on login. 
+func WithLoginUserAgent(userAgent string) LoginOption { + return func(settings *LoginSettings) { + settings.UserAgent = userAgent + } +} + +type ( + // ResolverOption allows specifying various settings on the resolver. + ResolverOption func(*ResolverSettings) + + // ResolverSettings represent all the various settings on a resolver. + ResolverSettings struct { + Client *http.Client + PlainHTTP bool + Headers http.Header + } +) + +// WithResolverClient returns a function that sets the Client setting on resolver. +func WithResolverClient(client *http.Client) ResolverOption { + return func(settings *ResolverSettings) { + settings.Client = client + } +} + +// WithResolverPlainHTTP returns a function that sets the PlainHTTP setting to true on resolver. +func WithResolverPlainHTTP() ResolverOption { + return func(settings *ResolverSettings) { + settings.PlainHTTP = true + } +} + +// WithResolverHeaders returns a function that sets the Headers setting on resolver. +func WithResolverHeaders(headers http.Header) ResolverOption { + return func(settings *ResolverSettings) { + settings.Headers = headers + } +} diff --git a/vendor/oras.land/oras-go/pkg/auth/docker/client.go b/vendor/oras.land/oras-go/pkg/auth/docker/client.go new file mode 100644 index 000000000..b225c89c0 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/auth/docker/client.go @@ -0,0 +1,123 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "os" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + "github.com/pkg/errors" + + "oras.land/oras-go/pkg/auth" +) + +// Client provides authentication operations for docker registries. +type Client struct { + configs []*configfile.ConfigFile +} + +// NewClient creates a new auth client based on provided config paths. +// If no config path is provided, the default path is used. +// Credentials are read from the first config and fall back to the next. +// All changes will only be written to the first config file. +func NewClient(configPaths ...string) (auth.Client, error) { + if len(configPaths) == 0 { + cfg, err := config.Load(config.Dir()) + if err != nil { + return nil, err + } + if !cfg.ContainsAuth() { + cfg.CredentialsStore = credentials.DetectDefaultStore(cfg.CredentialsStore) + } + + return &Client{ + configs: []*configfile.ConfigFile{cfg}, + }, nil + } + + var configs []*configfile.ConfigFile + for _, path := range configPaths { + cfg, err := loadConfigFile(path) + if err != nil { + return nil, errors.Wrap(err, path) + } + configs = append(configs, cfg) + } + + return &Client{ + configs: configs, + }, nil +} + +// NewClientWithDockerFallback creates a new auth client +// which falls back on Docker's default config path. +// This allows support for ~/.docker/config.json as a fallback, +// as well as support for the DOCKER_CONFIG environment variable.
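+//
+// A minimal usage sketch (illustrative only, not part of the upstream docs;
+// the extra config path below is a placeholder):
+//
+//	cli, err := docker.NewClientWithDockerFallback("/etc/myapp/docker-config.json")
+//	if err != nil { /* handle error */ }
+//	// cli reads credentials from the custom config first, then falls back
+//	// to Docker's default config.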
+func NewClientWithDockerFallback(configPaths ...string) (auth.Client, error) { + if len(configPaths) == 0 { + return NewClient() + } + + var configs []*configfile.ConfigFile + for _, path := range configPaths { + cfg, err := loadConfigFile(path) + if err != nil { + return nil, errors.Wrap(err, path) + } + configs = append(configs, cfg) + } + + // Add the Docker default config last + dockerFallbackCfg, err := config.Load(config.Dir()) + if err != nil { + return nil, err + } + if !dockerFallbackCfg.ContainsAuth() { + dockerFallbackCfg.CredentialsStore = credentials.DetectDefaultStore(dockerFallbackCfg.CredentialsStore) + } + configs = append(configs, dockerFallbackCfg) + + return &Client{ + configs: configs, + }, nil +} + +func (c *Client) primaryCredentialsStore(hostname string) credentials.Store { + return c.configs[0].GetCredentialsStore(hostname) +} + +// loadConfigFile reads the configuration file from the given path. +func loadConfigFile(path string) (*configfile.ConfigFile, error) { + cfg := configfile.New(path) + if _, err := os.Stat(path); err == nil { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + if err := cfg.LoadFromReader(file); err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + return nil, err + } + if !cfg.ContainsAuth() { + cfg.CredentialsStore = credentials.DetectDefaultStore(cfg.CredentialsStore) + } + return cfg, nil +} diff --git a/vendor/oras.land/oras-go/pkg/auth/docker/login.go b/vendor/oras.land/oras-go/pkg/auth/docker/login.go new file mode 100644 index 000000000..bd155e530 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/auth/docker/login.go @@ -0,0 +1,103 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "context" + + ctypes "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/api/types" + "github.com/docker/docker/registry" + + iface "oras.land/oras-go/pkg/auth" +) + +const IndexHostname = "index.docker.io" + +// Login logs in to a docker registry identified by the hostname. +// Deprecated: use LoginWithOpts +func (c *Client) Login(ctx context.Context, hostname, username, secret string, insecure bool) error { + settings := &iface.LoginSettings{ + Context: ctx, + Hostname: hostname, + Username: username, + Secret: secret, + Insecure: insecure, + } + return c.login(settings) +} + +// LoginWithOpts logs in to a docker registry identified by the hostname with custom options.
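+//
+// A minimal sketch of an options-based login (illustrative; the hostname and
+// credentials are placeholders, and auth refers to oras.land/oras-go/pkg/auth,
+// imported as iface in this file):
+//
+//	cli, err := docker.NewClient()
+//	if err != nil { /* handle error */ }
+//	err = cli.LoginWithOpts(
+//		auth.WithLoginContext(ctx),
+//		auth.WithLoginHostname("registry.example.com"),
+//		auth.WithLoginUsername("user"),
+//		auth.WithLoginSecret("secret"),
+//	)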
+func (c *Client) LoginWithOpts(options ...iface.LoginOption) error { + settings := &iface.LoginSettings{} + for _, option := range options { + option(settings) + } + return c.login(settings) +} + +func (c *Client) login(settings *iface.LoginSettings) error { + hostname := resolveHostname(settings.Hostname) + cred := types.AuthConfig{ + Username: settings.Username, + ServerAddress: hostname, + } + if settings.Username == "" { + cred.IdentityToken = settings.Secret + } else { + cred.Password = settings.Secret + } + + opts := registry.ServiceOptions{} + + if settings.Insecure { + opts.InsecureRegistries = []string{hostname} + } + + // Login to ensure valid credential + remote, err := registry.NewService(opts) + if err != nil { + return err + } + ctx := settings.Context + if ctx == nil { + ctx = context.Background() + } + userAgent := settings.UserAgent + if userAgent == "" { + userAgent = "oras" + } + + var token string + if (settings.CertFile != "" && settings.KeyFile != "") || settings.CAFile != "" { + _, token, err = c.loginWithTLS(ctx, remote, settings.CertFile, settings.KeyFile, settings.CAFile, &cred, userAgent) + } else { + _, token, err = remote.Auth(ctx, &cred, userAgent) + } + + if err != nil { + return err + } + + if token != "" { + cred.Username = "" + cred.Password = "" + cred.IdentityToken = token + } + + // Store credential + return c.primaryCredentialsStore(hostname).Store(ctypes.AuthConfig(cred)) +} diff --git a/vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go b/vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go new file mode 100644 index 000000000..7b5c2f8ba --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go @@ -0,0 +1,220 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// The following functions are adapted from github.com/docker/docker/registry +// We need these to support passing in a transport that has custom TLS configuration +// They are not exposed in the docker/registry package that's why they are copied here + +type loginCredentialStore struct { + authConfig *types.AuthConfig +} + +func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { + return lcs.authConfig.Username, lcs.authConfig.Password +} + +func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { + return lcs.authConfig.IdentityToken +} + +func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { + lcs.authConfig.IdentityToken = token +} + +// loginWithTLS tries to login to the v2 registry server. 
+// A custom tls.Config is used to override the default TLS configuration of the different registry endpoints. +// The tls.Config is created using the provided certificate, certificate key and certificate authority. +func (c *Client) loginWithTLS(ctx context.Context, service *registry.Service, certFile, keyFile, caFile string, authConfig *types.AuthConfig, userAgent string) (string, string, error) { + tlsConfig, err := tlsconfig.Client(tlsconfig.Options{CAFile: caFile, CertFile: certFile, KeyFile: keyFile}) + if err != nil { + return "", "", err + } + + endpoints, err := c.getEndpoints(authConfig.ServerAddress, service) + if err != nil { + return "", "", err + } + + var status, token string + for _, endpoint := range endpoints { + endpoint.TLSConfig = tlsConfig + status, token, err = loginV2(authConfig, endpoint, userAgent) + + if err != nil { + if isNotAuthorizedError(err) { + return "", "", err + } + + logrus.WithError(err).Infof("Error logging in to endpoint, trying next endpoint") + continue + } + + return status, token, nil + } + + return "", "", err +} + +// getEndpoints returns the endpoints for the given hostname. +func (c *Client) getEndpoints(address string, service *registry.Service) ([]registry.APIEndpoint, error) { + var registryHostName = IndexHostname + + if address != "" { + if !strings.HasPrefix(address, "https://") && !strings.HasPrefix(address, "http://") { + address = fmt.Sprintf("https://%s", address) + } + u, err := url.Parse(address) + if err != nil { + return nil, errdefs.InvalidParameter(errors.Wrapf(err, "unable to parse server address")) + } + registryHostName = u.Host + } + + // Lookup endpoints for authentication using "LookupPushEndpoints", which + // excludes mirrors to prevent sending credentials of the upstream registry + // to a mirror. + endpoints, err := service.LookupPushEndpoints(registryHostName) + if err != nil { + return nil, errdefs.InvalidParameter(err) + } + + return endpoints, nil +} + +// loginV2 tries to login to the v2 registry server. The given registry +// endpoint will be pinged to get authorization challenges. These challenges +// will be used to authenticate against the registry to validate credentials. +func loginV2(authConfig *types.AuthConfig, endpoint registry.APIEndpoint, userAgent string) (string, string, error) { + var ( + endpointStr = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" + modifiers = registry.Headers(userAgent, nil) + authTransport = transport.NewTransport(newTransport(endpoint.TLSConfig), modifiers...) + credentialAuthConfig = *authConfig + creds = loginCredentialStore{authConfig: &credentialAuthConfig} + ) + + logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr) + + loginClient, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) + if err != nil { + return "", "", err + } + + req, err := http.NewRequest(http.MethodGet, endpointStr, nil) + if err != nil { + return "", "", err + } + + resp, err := loginClient.Do(req) + if err != nil { + err = translateV2AuthError(err) + return "", "", err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + return "Login Succeeded", credentialAuthConfig.IdentityToken, nil + } + + // TODO(dmcgowan): Attempt to further interpret result, status code and error code string + return "", "", errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) +} + +// newTransport returns a new HTTP transport. 
If tlsConfig is nil, it uses the +// default TLS configuration. +func newTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + tlsConfig = tlsconfig.ServerDefault() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + } + + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: direct.DialContext, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } +} + +func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) { + challengeManager, err := registry.PingV2Registry(endpoint, authTransport) + if err != nil { + return nil, err + } + + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + OfflineAccess: true, + ClientID: registry.AuthClientID, + Scopes: scopes, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + + return &http.Client{ + Transport: transport.NewTransport(authTransport, modifiers...), + Timeout: 15 * time.Second, + }, nil +} + +func translateV2AuthError(err error) error { + switch e := err.(type) { + case *url.Error: + switch e2 := e.Err.(type) { + case errcode.Error: + switch e2.Code { + case errcode.ErrorCodeUnauthorized: + return errdefs.Unauthorized(err) + } + } + } + + return err +} + +func isNotAuthorizedError(err error) bool { + return strings.Contains(err.Error(), "401 Unauthorized") +} diff --git a/vendor/oras.land/oras-go/pkg/auth/docker/logout.go b/vendor/oras.land/oras-go/pkg/auth/docker/logout.go new file mode 100644 index 000000000..ada1f2687 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/auth/docker/logout.go @@ -0,0 +1,42 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "context" + + "github.com/docker/cli/cli/config/configfile" + + "oras.land/oras-go/pkg/auth" +) + +// Logout logs out from a docker registry identified by the hostname. +func (c *Client) Logout(_ context.Context, hostname string) error { + hostname = resolveHostname(hostname) + + var configs []*configfile.ConfigFile + for _, config := range c.configs { + if _, ok := config.AuthConfigs[hostname]; ok { + configs = append(configs, config) + } + } + if len(configs) == 0 { + return auth.ErrNotLoggedIn + } + + // Log out from the primary config only as backups are read-only.
+ return c.primaryCredentialsStore(hostname).Erase(hostname) +} diff --git a/vendor/oras.land/oras-go/pkg/auth/docker/resolver.go b/vendor/oras.land/oras-go/pkg/auth/docker/resolver.go new file mode 100644 index 000000000..e749c2d2c --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/auth/docker/resolver.go @@ -0,0 +1,86 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "context" + "net/http" + + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + ctypes "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/registry" + + iface "oras.land/oras-go/pkg/auth" +) + +// Resolver returns a new authenticated resolver. +// Deprecated: use ResolverWithOpts +func (c *Client) Resolver(_ context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error) { + return docker.NewResolver(docker.ResolverOptions{ + Credentials: c.Credential, + Client: client, + PlainHTTP: plainHTTP, + }), nil +} + +// ResolverWithOpts returns a new authenticated resolver with custom options. +func (c *Client) ResolverWithOpts(options ...iface.ResolverOption) (remotes.Resolver, error) { + settings := &iface.ResolverSettings{} + for _, option := range options { + option(settings) + } + return docker.NewResolver(docker.ResolverOptions{ + Credentials: c.Credential, + Client: settings.Client, + PlainHTTP: settings.PlainHTTP, + Headers: settings.Headers, + }), nil +} + +// Credential returns the login credential of the request host. +func (c *Client) Credential(hostname string) (string, string, error) { + hostname = resolveHostname(hostname) + var ( + auth ctypes.AuthConfig + err error + ) + for _, cfg := range c.configs { + auth, err = cfg.GetAuthConfig(hostname) + if err != nil { + // fall back to next config + continue + } + if auth.IdentityToken != "" { + return "", auth.IdentityToken, nil + } + if auth.Username == "" && auth.Password == "" { + // fall back to next config + continue + } + return auth.Username, auth.Password, nil + } + return "", "", err +} + +// resolveHostname resolves Docker specific hostnames +func resolveHostname(hostname string) string { + switch hostname { + case registry.IndexHostname, registry.IndexName, registry.DefaultV2Registry.Host: + return registry.IndexServer + } + return hostname +} diff --git a/vendor/oras.land/oras-go/pkg/content/consts.go b/vendor/oras.land/oras-go/pkg/content/consts.go new file mode 100644 index 000000000..ae59fdaa4 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/consts.go @@ -0,0 +1,57 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +const ( + // DefaultBlobMediaType specifies the default blob media type + DefaultBlobMediaType = ocispec.MediaTypeImageLayer + // DefaultBlobDirMediaType specifies the default blob directory media type + DefaultBlobDirMediaType = ocispec.MediaTypeImageLayerGzip +) + +const ( + // TempFilePattern specifies the pattern to create temporary files + TempFilePattern = "oras" +) + +const ( + // AnnotationDigest is the annotation key for the digest of the uncompressed content + AnnotationDigest = "io.deis.oras.content.digest" + // AnnotationUnpack is the annotation key for indication of unpacking + AnnotationUnpack = "io.deis.oras.content.unpack" +) + +const ( + // OCIImageIndexFile is the file name of the index from the OCI Image Layout Specification + // Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md#indexjson-file + OCIImageIndexFile = "index.json" +) + +const ( + // DefaultBlocksize is the default size of each slice of bytes read in each write through in gunzip and untar. + // Simply uses the same size as io.Copy() + DefaultBlocksize = 32768 +) + +const ( + // what you get for a blank digest + BlankHash = digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") +) diff --git a/vendor/oras.land/oras-go/pkg/content/decompress.go b/vendor/oras.land/oras-go/pkg/content/decompress.go new file mode 100644 index 000000000..ecde9c9a5 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/decompress.go @@ -0,0 +1,151 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "context" + "errors" + "strings" + + ctrcontent "github.com/containerd/containerd/content" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Decompress is a store that decompresses content and extracts from tar, if needed, wrapping +// another store. By default, a FileStore will simply take each artifact and write it to +// a file, as a MemoryStore will do into memory. If the artifact is gzipped or tarred, +// you might want to store the actual object inside tar or gzip. Wrap your Store +// with Decompress, and it will check the media-type and, if relevant, +// gunzip and/or untar. +// +// For example: +// +// fileStore := NewFileStore(rootPath) +// Decompress := store.NewDecompress(fileStore, WithBlocksize(blocksize)) +// +// The above example works if there is no tar, i.e. each artifact is just a single file, perhaps gzipped, +// or if there is only one file in each tar archive. In other words, when each content.Writer has only one target output stream. +// However, if you have multiple files in each tar archive, each archive of which is an artifact layer, then +// you need a way to select how to handle each file in the tar archive.
In other words, when each content.Writer has more than one +// target output stream. In that case, use the following example: +// +// multiStore := NewMultiStore(rootPath) // some store that can handle different filenames +// Decompress := store.NewDecompress(multiStore, WithBlocksize(blocksize), WithMultiWriterIngester()) +// +type Decompress struct { + pusher remotes.Pusher + blocksize int + multiWriterIngester bool +} + +func NewDecompress(pusher remotes.Pusher, opts ...WriterOpt) Decompress { + // we have to reprocess the opts to find the blocksize + var wOpts WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + // TODO: we probably should handle errors here + continue + } + } + + return Decompress{pusher, wOpts.Blocksize, wOpts.MultiWriterIngester} +} + +// Push gets a content.Writer +func (d Decompress) Push(ctx context.Context, desc ocispec.Descriptor) (ctrcontent.Writer, error) { + // the logic is straightforward: + // - if there is a desc in the opts, and the mediatype is tar or tar+gzip, then pass the correct decompress writer + // - else, pass the regular writer + var ( + writer ctrcontent.Writer + err error + multiIngester MultiWriterPusher + ok bool + ) + + // check to see if we are supposed to use a MultiWriterIngester + if d.multiWriterIngester { + multiIngester, ok = d.pusher.(MultiWriterPusher) + if !ok { + return nil, errors.New("configured to use multiwriter ingester, but ingester does not implement multiwriter") + } + } + + // figure out if compression and/or archive exists + // before we pass it down, we need to strip anything we are removing here + // and possibly update the digest, since the store indexes things by digest + hasGzip, hasTar, modifiedMediaType := checkCompression(desc.MediaType) + desc.MediaType = modifiedMediaType + // determine if we pass it blocksize, only if positive + writerOpts := []WriterOpt{} + if d.blocksize > 0 { + writerOpts = append(writerOpts, WithBlocksize(d.blocksize)) + } + + writer, err = d.pusher.Push(ctx, desc) + if err != nil { + return nil, err + } + + // do we need to wrap with an untar writer? + if hasTar { + // if not multiingester, get a regular writer + if multiIngester == nil { + writer = NewUntarWriter(writer, writerOpts...) + } else { + writers, err := multiIngester.Pushers(ctx, desc) + if err != nil { + return nil, err + } + writer = NewUntarWriterByName(writers, writerOpts...) + } + } + if hasGzip { + if writer == nil { + writer, err = d.pusher.Push(ctx, desc) + if err != nil { + return nil, err + } + } + writer = NewGunzipWriter(writer, writerOpts...) + } + return writer, nil +} + +// checkCompression checks if the mediatype uses gzip compression or tar. +// Returns if it has gzip and/or tar, as well as the base media type without +// those suffixes. +func checkCompression(mediaType string) (gzip, tar bool, mt string) { + mt = mediaType + gzipSuffix := "+gzip" + gzipAltSuffix := ".gzip" + tarSuffix := ".tar" + switch { + case strings.HasSuffix(mt, gzipSuffix): + mt = mt[:len(mt)-len(gzipSuffix)] + gzip = true + case strings.HasSuffix(mt, gzipAltSuffix): + mt = mt[:len(mt)-len(gzipAltSuffix)] + gzip = true + } + + if strings.HasSuffix(mt, tarSuffix) { + mt = mt[:len(mt)-len(tarSuffix)] + tar = true + } + return +} diff --git a/vendor/oras.land/oras-go/pkg/content/errors.go b/vendor/oras.land/oras-go/pkg/content/errors.go new file mode 100644 index 000000000..72b2e727a --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/errors.go @@ -0,0 +1,33 @@ +/* +Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import "errors"
+
+// Common errors
+var (
+	ErrNotFound           = errors.New("not_found")
+	ErrNoName             = errors.New("no_name")
+	ErrUnsupportedSize    = errors.New("unsupported_size")
+	ErrUnsupportedVersion = errors.New("unsupported_version")
+	ErrInvalidReference   = errors.New("invalid_reference")
+)
+
+// FileStore errors
+var (
+	ErrPathTraversalDisallowed = errors.New("path_traversal_disallowed")
+	ErrOverwriteDisallowed     = errors.New("overwrite_disallowed")
+)
diff --git a/vendor/oras.land/oras-go/pkg/content/file.go b/vendor/oras.land/oras-go/pkg/content/file.go
new file mode 100644
index 000000000..70f46d1ae
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/content/file.go
@@ -0,0 +1,534 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	_ "crypto/sha256"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/remotes"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// File provides content via files from the file system
+type File struct {
+	DisableOverwrite          bool
+	AllowPathTraversalOnWrite bool
+
+	// Reproducible enables stripping times from added files
+	Reproducible bool
+
+	root         string
+	descriptor   *sync.Map // map[digest.Digest]ocispec.Descriptor
+	pathMap      *sync.Map // map[name string](file string)
+	memoryMap    *sync.Map // map[digest.Digest]([]byte)
+	refMap       *sync.Map // map[string]ocispec.Descriptor
+	tmpFiles     *sync.Map
+	ignoreNoName bool
+}
+
+// NewFile creates a new file target. It represents a single root reference and all of its components.
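+//
+// A minimal illustrative sketch (the root and file paths here are hypothetical):
+//
+//	store := NewFile("/tmp/oras-root")
+//	defer store.Close()
+//	desc, err := store.Add("hello.txt", "", "/tmp/hello.txt")
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = desc // the returned descriptor can be referenced from a manifest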
+func NewFile(rootPath string, opts ...WriterOpt) *File { + // we have to process the opts to find if they told us to change defaults + wOpts := DefaultWriterOpts() + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + continue + } + } + return &File{ + root: rootPath, + descriptor: &sync.Map{}, + pathMap: &sync.Map{}, + memoryMap: &sync.Map{}, + refMap: &sync.Map{}, + tmpFiles: &sync.Map{}, + ignoreNoName: wOpts.IgnoreNoName, + } +} + +func (s *File) Resolver() remotes.Resolver { + return s +} + +func (s *File) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { + desc, ok := s.getRef(ref) + if !ok { + return "", ocispec.Descriptor{}, fmt.Errorf("unknown reference: %s", ref) + } + return ref, desc, nil +} + +func (s *File) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + if _, ok := s.refMap.Load(ref); !ok { + return nil, fmt.Errorf("unknown reference: %s", ref) + } + return s, nil +} + +// Fetch get an io.ReadCloser for the specific content +func (s *File) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + // first see if it is in the in-memory manifest map + manifest, ok := s.getMemory(desc) + if ok { + return ioutil.NopCloser(bytes.NewReader(manifest)), nil + } + desc, ok = s.get(desc) + if !ok { + return nil, ErrNotFound + } + name, ok := ResolveName(desc) + if !ok { + return nil, ErrNoName + } + path := s.ResolvePath(name) + return os.Open(path) +} + +func (s *File) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { + var tag, hash string + parts := strings.SplitN(ref, "@", 2) + if len(parts) > 0 { + tag = parts[0] + } + if len(parts) > 1 { + hash = parts[1] + } + return &filePusher{ + store: s, + ref: tag, + hash: hash, + }, nil +} + +type filePusher struct { + store *File + ref string + hash string +} + +func (s *filePusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + name, ok := ResolveName(desc) + now := time.Now() + if !ok { + // if we were not told to ignore NoName, then return an error + if !s.store.ignoreNoName { + return nil, ErrNoName + } + + // just return a nil writer - we do not want to calculate the hash, so just use + // whatever was passed in the descriptor + return NewIoContentWriter(ioutil.Discard, WithOutputHash(desc.Digest)), nil + } + path, err := s.store.resolveWritePath(name) + if err != nil { + return nil, err + } + file, afterCommit, err := s.store.createWritePath(path, desc, name) + if err != nil { + return nil, err + } + + return &fileWriter{ + store: s.store, + file: file, + desc: desc, + digester: digest.Canonical.Digester(), + status: content.Status{ + Ref: name, + Total: desc.Size, + StartedAt: now, + UpdatedAt: now, + }, + afterCommit: afterCommit, + }, nil +} + +// Add adds a file reference from a path, either directory or single file, +// and returns the reference descriptor. 
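+//
+// Illustrative sketch (paths are hypothetical): an empty mediaType falls back to
+// DefaultBlobMediaType for files and DefaultBlobDirMediaType for directories, and
+// the returned descriptor carries the name in its AnnotationTitle annotation:
+//
+//	desc, err := store.Add("config", "", "/tmp/config-dir")
+//	if err == nil {
+//		fmt.Println(desc.MediaType, desc.Annotations[ocispec.AnnotationTitle])
+//	}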
+func (s *File) Add(name, mediaType, path string) (ocispec.Descriptor, error) { + if path == "" { + path = name + } + path = s.MapPath(name, path) + + fileInfo, err := os.Stat(path) + if err != nil { + return ocispec.Descriptor{}, err + } + + var desc ocispec.Descriptor + if fileInfo.IsDir() { + desc, err = s.descFromDir(name, mediaType, path) + } else { + desc, err = s.descFromFile(fileInfo, mediaType, path) + } + if err != nil { + return ocispec.Descriptor{}, err + } + if desc.Annotations == nil { + desc.Annotations = make(map[string]string) + } + desc.Annotations[ocispec.AnnotationTitle] = name + + s.set(desc) + return desc, nil +} + +// Load is a lower-level memory-only version of Add. Rather than taking a path, +// generating a descriptor and creating a reference, it takes raw data and a descriptor +// that describes that data and stores it in memory. It will disappear at process +// termination. +// +// It is especially useful for adding ephemeral data, such as config, that must +// exist in order to walk a manifest. +func (s *File) Load(desc ocispec.Descriptor, data []byte) error { + s.memoryMap.Store(desc.Digest, data) + return nil +} + +// Ref gets a reference's descriptor and content +func (s *File) Ref(ref string) (ocispec.Descriptor, []byte, error) { + desc, ok := s.getRef(ref) + if !ok { + return ocispec.Descriptor{}, nil, ErrNotFound + } + // first see if it is in the in-memory manifest map + manifest, ok := s.getMemory(desc) + if !ok { + return ocispec.Descriptor{}, nil, ErrNotFound + } + return desc, manifest, nil +} + +func (s *File) descFromFile(info os.FileInfo, mediaType, path string) (ocispec.Descriptor, error) { + file, err := os.Open(path) + if err != nil { + return ocispec.Descriptor{}, err + } + defer file.Close() + digest, err := digest.FromReader(file) + if err != nil { + return ocispec.Descriptor{}, err + } + + if mediaType == "" { + mediaType = DefaultBlobMediaType + } + return ocispec.Descriptor{ + MediaType: mediaType, + Digest: digest, + Size: info.Size(), + }, nil +} + +func (s *File) descFromDir(name, mediaType, root string) (ocispec.Descriptor, error) { + // generate temp file + file, err := s.tempFile() + if err != nil { + return ocispec.Descriptor{}, err + } + defer file.Close() + s.MapPath(name, file.Name()) + + // compress directory + digester := digest.Canonical.Digester() + zw := gzip.NewWriter(io.MultiWriter(file, digester.Hash())) + defer zw.Close() + tarDigester := digest.Canonical.Digester() + if err := tarDirectory(root, name, io.MultiWriter(zw, tarDigester.Hash()), s.Reproducible); err != nil { + return ocispec.Descriptor{}, err + } + + // flush all + if err := zw.Close(); err != nil { + return ocispec.Descriptor{}, err + } + if err := file.Sync(); err != nil { + return ocispec.Descriptor{}, err + } + + // generate descriptor + if mediaType == "" { + mediaType = DefaultBlobDirMediaType + } + info, err := file.Stat() + if err != nil { + return ocispec.Descriptor{}, err + } + return ocispec.Descriptor{ + MediaType: mediaType, + Digest: digester.Digest(), + Size: info.Size(), + Annotations: map[string]string{ + AnnotationDigest: tarDigester.Digest().String(), + AnnotationUnpack: "true", + }, + }, nil +} + +func (s *File) tempFile() (*os.File, error) { + file, err := ioutil.TempFile("", TempFilePattern) + if err != nil { + return nil, err + } + s.tmpFiles.Store(file.Name(), file) + return file, nil +} + +// Close frees up resources used by the file store +func (s *File) Close() error { + var errs []string + s.tmpFiles.Range(func(name, _ 
interface{}) bool { + if err := os.Remove(name.(string)); err != nil { + errs = append(errs, err.Error()) + } + return true + }) + if len(errs) > 0 { + return errors.New(strings.Join(errs, "; ")) + } + return nil +} + +func (s *File) resolveWritePath(name string) (string, error) { + path := s.ResolvePath(name) + if !s.AllowPathTraversalOnWrite { + base, err := filepath.Abs(s.root) + if err != nil { + return "", err + } + target, err := filepath.Abs(path) + if err != nil { + return "", err + } + rel, err := filepath.Rel(base, target) + if err != nil { + return "", ErrPathTraversalDisallowed + } + rel = filepath.ToSlash(rel) + if strings.HasPrefix(rel, "../") || rel == ".." { + return "", ErrPathTraversalDisallowed + } + } + if s.DisableOverwrite { + if _, err := os.Stat(path); err == nil { + return "", ErrOverwriteDisallowed + } else if !os.IsNotExist(err) { + return "", err + } + } + return path, nil +} + +func (s *File) createWritePath(path string, desc ocispec.Descriptor, prefix string) (*os.File, func() error, error) { + if value, ok := desc.Annotations[AnnotationUnpack]; !ok || value != "true" { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return nil, nil, err + } + file, err := os.Create(path) + return file, nil, err + } + + if err := os.MkdirAll(path, 0755); err != nil { + return nil, nil, err + } + file, err := s.tempFile() + checksum := desc.Annotations[AnnotationDigest] + afterCommit := func() error { + return extractTarGzip(path, prefix, file.Name(), checksum) + } + return file, afterCommit, err +} + +// MapPath maps name to path +func (s *File) MapPath(name, path string) string { + path = s.resolvePath(path) + s.pathMap.Store(name, path) + return path +} + +// ResolvePath returns the path by name +func (s *File) ResolvePath(name string) string { + if value, ok := s.pathMap.Load(name); ok { + if path, ok := value.(string); ok { + return path + } + } + + // using the name as a fallback solution + return s.resolvePath(name) +} + +func (s *File) resolvePath(path string) string { + if filepath.IsAbs(path) { + return path + } + return filepath.Join(s.root, path) +} + +func (s *File) set(desc ocispec.Descriptor) { + s.descriptor.Store(desc.Digest, desc) +} + +func (s *File) get(desc ocispec.Descriptor) (ocispec.Descriptor, bool) { + value, ok := s.descriptor.Load(desc.Digest) + if !ok { + return ocispec.Descriptor{}, false + } + desc, ok = value.(ocispec.Descriptor) + return desc, ok +} + +func (s *File) getMemory(desc ocispec.Descriptor) ([]byte, bool) { + value, ok := s.memoryMap.Load(desc.Digest) + if !ok { + return nil, false + } + content, ok := value.([]byte) + return content, ok +} + +func (s *File) getRef(ref string) (ocispec.Descriptor, bool) { + value, ok := s.refMap.Load(ref) + if !ok { + return ocispec.Descriptor{}, false + } + desc, ok := value.(ocispec.Descriptor) + return desc, ok +} + +// StoreManifest stores a manifest linked to by the provided ref. The children of the +// manifest, such as layers and config, should already exist in the file store, either +// as files linked via Add(), or via Load(). If they do not exist, then a typical +// Fetcher that walks the manifest will hit an unresolved hash. +// +// StoreManifest does *not* validate their presence. 
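+//
+// A hedged usage sketch (the reference name and the desc variable are illustrative):
+//
+//	manifestBytes, manifestDesc, _ := GenerateManifest(nil, nil, desc)
+//	_ = store.StoreManifest("registry.example.com/repo:latest", manifestDesc, manifestBytes)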
+func (s *File) StoreManifest(ref string, desc ocispec.Descriptor, manifest []byte) error { + s.refMap.Store(ref, desc) + s.memoryMap.Store(desc.Digest, manifest) + return nil +} + +type fileWriter struct { + store *File + file *os.File + desc ocispec.Descriptor + digester digest.Digester + status content.Status + afterCommit func() error +} + +func (w *fileWriter) Status() (content.Status, error) { + return w.status, nil +} + +// Digest returns the current digest of the content, up to the current write. +// +// Cannot be called concurrently with `Write`. +func (w *fileWriter) Digest() digest.Digest { + return w.digester.Digest() +} + +// Write p to the transaction. +func (w *fileWriter) Write(p []byte) (n int, err error) { + n, err = w.file.Write(p) + w.digester.Hash().Write(p[:n]) + w.status.Offset += int64(len(p)) + w.status.UpdatedAt = time.Now() + return n, err +} + +func (w *fileWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + + if w.file == nil { + return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") + } + file := w.file + w.file = nil + + if err := file.Sync(); err != nil { + file.Close() + return errors.Wrap(err, "sync failed") + } + + fileInfo, err := file.Stat() + if err != nil { + file.Close() + return errors.Wrap(err, "stat failed") + } + if err := file.Close(); err != nil { + return errors.Wrap(err, "failed to close file") + } + + if size > 0 && size != fileInfo.Size() { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fileInfo.Size(), size) + } + if dgst := w.digester.Digest(); expected != "" && expected != dgst { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) + } + + w.store.set(w.desc) + if w.afterCommit != nil { + return w.afterCommit() + } + return nil +} + +// Close the writer, flushing any unwritten data and leaving the progress in +// tact. +func (w *fileWriter) Close() error { + if w.file == nil { + return nil + } + + w.file.Sync() + err := w.file.Close() + w.file = nil + return err +} + +func (w *fileWriter) Truncate(size int64) error { + if size != 0 { + return ErrUnsupportedSize + } + w.status.Offset = 0 + w.digester.Hash().Reset() + if _, err := w.file.Seek(0, io.SeekStart); err != nil { + return err + } + return w.file.Truncate(0) +} diff --git a/vendor/oras.land/oras-go/pkg/content/gunzip.go b/vendor/oras.land/oras-go/pkg/content/gunzip.go new file mode 100644 index 000000000..65c4646f2 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/gunzip.go @@ -0,0 +1,72 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package content
+
+import (
+	"compress/gzip"
+	"fmt"
+	"io"
+
+	"github.com/containerd/containerd/content"
+)
+
+// NewGunzipWriter wraps a writer with a gunzip, so that the stream is gunzipped
+//
+// By default, it calculates the hash when writing. If an expected hash is provided
+// via WithInputHash or WithOutputHash, it will skip doing the hash. Skipping the hash
+// is intended to be used only if you are confident about the validity of the data
+// being passed to the writer, and wish to save on the hashing time.
+func NewGunzipWriter(writer content.Writer, opts ...WriterOpt) content.Writer {
+	// process opts for default
+	wOpts := DefaultWriterOpts()
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil
+		}
+	}
+	return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) {
+		gr, err := gzip.NewReader(r)
+		if err != nil {
+			done <- fmt.Errorf("error creating gzip reader: %v", err)
+			return
+		}
+		// write out the uncompressed data
+		b := make([]byte, wOpts.Blocksize)
+		for {
+			var n int
+			n, err = gr.Read(b)
+			if err != nil && err != io.EOF {
+				err = fmt.Errorf("GunzipWriter data read error: %v", err)
+				break
+			}
+			// a Reader never returns n > len(b), so b[:n] is always valid
+			if _, err2 := w.Write(b[:n]); err2 != nil {
+				err = fmt.Errorf("GunzipWriter: error writing to underlying writer: %v", err2)
+				break
+			}
+			if err == io.EOF {
+				// clear the error
+				err = nil
+				break
+			}
+		}
+		gr.Close()
+		done <- err
+	}, opts...)
+}
diff --git a/vendor/oras.land/oras-go/pkg/content/interface.go b/vendor/oras.land/oras-go/pkg/content/interface.go
new file mode 100644
index 000000000..f5e312cca
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/content/interface.go
@@ -0,0 +1,26 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"github.com/containerd/containerd/remotes"
+)
+
+// Store is the interface that groups a remote Pusher and Fetcher.
+type Store interface {
+	remotes.Pusher
+	remotes.Fetcher
+}
diff --git a/vendor/oras.land/oras-go/pkg/content/iowriter.go b/vendor/oras.land/oras-go/pkg/content/iowriter.go
new file mode 100644
index 000000000..e9e2ed8bf
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/content/iowriter.go
@@ -0,0 +1,112 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package content + +import ( + "context" + "io" + "io/ioutil" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" +) + +// IoContentWriter writer that wraps an io.Writer, so the results can be streamed to +// an open io.Writer. For example, can be used to pull a layer and write it to a file, or device. +type IoContentWriter struct { + writer io.Writer + digester digest.Digester + size int64 + hash *digest.Digest +} + +// NewIoContentWriter create a new IoContentWriter. +// +// By default, it calculates the hash when writing. If the option `skipHash` is true, +// it will skip doing the hash. Skipping the hash is intended to be used only +// if you are confident about the validity of the data being passed to the writer, +// and wish to save on the hashing time. +func NewIoContentWriter(writer io.Writer, opts ...WriterOpt) content.Writer { + w := writer + if w == nil { + w = ioutil.Discard + } + // process opts for default + wOpts := DefaultWriterOpts() + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil + } + } + ioc := &IoContentWriter{ + writer: w, + digester: digest.Canonical.Digester(), + // we take the OutputHash, since the InputHash goes to the passthrough writer, + // which then passes the processed output to us + hash: wOpts.OutputHash, + } + return NewPassthroughWriter(ioc, func(r io.Reader, w io.Writer, done chan<- error) { + // write out the data to the io writer + var ( + err error + ) + // we could use io.Copy, but calling it with the default blocksize is identical to + // io.CopyBuffer. Otherwise, we would need some way to let the user flag "I want to use + // io.Copy", when it should not matter to them + b := make([]byte, wOpts.Blocksize, wOpts.Blocksize) + _, err = io.CopyBuffer(w, r, b) + done <- err + }, opts...) +} + +func (w *IoContentWriter) Write(p []byte) (n int, err error) { + n, err = w.writer.Write(p) + if err != nil { + return 0, err + } + w.size += int64(n) + if w.hash == nil { + w.digester.Hash().Write(p[:n]) + } + return +} + +func (w *IoContentWriter) Close() error { + return nil +} + +// Digest may return empty digest or panics until committed. +func (w *IoContentWriter) Digest() digest.Digest { + return w.digester.Digest() +} + +// Commit commits the blob (but no roll-back is guaranteed on an error). +// size and expected can be zero-value when unknown. +// Commit always closes the writer, even on error. +// ErrAlreadyExists aborts the writer. +func (w *IoContentWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + return nil +} + +// Status returns the current state of write +func (w *IoContentWriter) Status() (content.Status, error) { + return content.Status{}, nil +} + +// Truncate updates the size of the target blob +func (w *IoContentWriter) Truncate(size int64) error { + return nil +} diff --git a/vendor/oras.land/oras-go/pkg/content/manifest.go b/vendor/oras.land/oras-go/pkg/content/manifest.go new file mode 100644 index 000000000..9830920b4 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/manifest.go @@ -0,0 +1,95 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package content + +import ( + "encoding/json" + "sort" + + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + artifact "oras.land/oras-go/pkg/artifact" +) + +// GenerateManifest generates a manifest. The manifest will include the provided config, +// and descs as layers. Raw bytes will be returned. +func GenerateManifest(config *ocispec.Descriptor, annotations map[string]string, descs ...ocispec.Descriptor) ([]byte, ocispec.Descriptor, error) { + // Config - either it was set, or we have to set it + if config == nil { + _, configGen, err := GenerateConfig(nil) + if err != nil { + return nil, ocispec.Descriptor{}, err + } + config = &configGen + } + return pack(*config, annotations, descs) +} + +// GenerateConfig generates a blank config with optional annotations. +func GenerateConfig(annotations map[string]string) ([]byte, ocispec.Descriptor, error) { + configBytes := []byte("{}") + dig := digest.FromBytes(configBytes) + config := ocispec.Descriptor{ + MediaType: artifact.UnknownConfigMediaType, + Digest: dig, + Size: int64(len(configBytes)), + Annotations: annotations, + } + return configBytes, config, nil +} + +// GenerateManifestAndConfig generates a config and then a manifest. Raw bytes will be returned. +func GenerateManifestAndConfig(manifestAnnotations map[string]string, configAnnotations map[string]string, descs ...ocispec.Descriptor) (manifest []byte, manifestDesc ocispec.Descriptor, config []byte, configDesc ocispec.Descriptor, err error) { + config, configDesc, err = GenerateConfig(configAnnotations) + if err != nil { + return nil, ocispec.Descriptor{}, nil, ocispec.Descriptor{}, err + } + manifest, manifestDesc, err = GenerateManifest(&configDesc, manifestAnnotations, descs...) + if err != nil { + return nil, ocispec.Descriptor{}, nil, ocispec.Descriptor{}, err + } + return +} + +// pack given a bunch of descriptors, create a manifest that references all of them +func pack(config ocispec.Descriptor, annotations map[string]string, descriptors []ocispec.Descriptor) ([]byte, ocispec.Descriptor, error) { + if descriptors == nil { + descriptors = []ocispec.Descriptor{} // make it an empty array to prevent potential server-side bugs + } + // sort descriptors alphanumerically by sha hash so it always is consistent + sort.Slice(descriptors, func(i, j int) bool { + return descriptors[i].Digest < descriptors[j].Digest + }) + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, // historical value. 
does not pertain to OCI or docker version + }, + Config: config, + Layers: descriptors, + Annotations: annotations, + } + manifestBytes, err := json.Marshal(manifest) + if err != nil { + return nil, ocispec.Descriptor{}, err + } + manifestDescriptor := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageManifest, + Digest: digest.FromBytes(manifestBytes), + Size: int64(len(manifestBytes)), + } + + return manifestBytes, manifestDescriptor, nil +} diff --git a/vendor/oras.land/oras-go/pkg/content/memory.go b/vendor/oras.land/oras-go/pkg/content/memory.go new file mode 100644 index 000000000..5c194b15a --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/memory.go @@ -0,0 +1,284 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Memory provides content from the memory +type Memory struct { + descriptor map[digest.Digest]ocispec.Descriptor + content map[digest.Digest][]byte + nameMap map[string]ocispec.Descriptor + refMap map[string]ocispec.Descriptor + lock *sync.Mutex +} + +// NewMemory creats a new memory store +func NewMemory() *Memory { + return &Memory{ + descriptor: make(map[digest.Digest]ocispec.Descriptor), + content: make(map[digest.Digest][]byte), + nameMap: make(map[string]ocispec.Descriptor), + refMap: make(map[string]ocispec.Descriptor), + lock: &sync.Mutex{}, + } +} + +func (s *Memory) Resolver() remotes.Resolver { + return s +} + +func (s *Memory) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { + desc, ok := s.refMap[ref] + if !ok { + return "", ocispec.Descriptor{}, fmt.Errorf("unknown reference: %s", ref) + } + return ref, desc, nil +} + +func (s *Memory) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + if _, ok := s.refMap[ref]; !ok { + return nil, fmt.Errorf("unknown reference: %s", ref) + } + return s, nil +} + +// Fetch get an io.ReadCloser for the specific content +func (s *Memory) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + _, content, ok := s.Get(desc) + if !ok { + return nil, ErrNotFound + } + return ioutil.NopCloser(bytes.NewReader(content)), nil +} + +func (s *Memory) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { + var tag, hash string + parts := strings.SplitN(ref, "@", 2) + if len(parts) > 0 { + tag = parts[0] + } + if len(parts) > 1 { + hash = parts[1] + } + return &memoryPusher{ + store: s, + ref: tag, + hash: hash, + }, nil +} + +type memoryPusher struct { + store *Memory + ref string + hash string +} + +func (s *memoryPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + name, _ := ResolveName(desc) + now := 
time.Now() + // is this the root? + if desc.Digest.String() == s.hash { + s.store.refMap[s.ref] = desc + } + return &memoryWriter{ + store: s.store, + buffer: bytes.NewBuffer(nil), + desc: desc, + digester: digest.Canonical.Digester(), + status: content.Status{ + Ref: name, + Total: desc.Size, + StartedAt: now, + UpdatedAt: now, + }, + }, nil +} + +// Add adds content, generating a descriptor and returning it. +func (s *Memory) Add(name, mediaType string, content []byte) (ocispec.Descriptor, error) { + var annotations map[string]string + if name != "" { + annotations = map[string]string{ + ocispec.AnnotationTitle: name, + } + } + + if mediaType == "" { + mediaType = DefaultBlobMediaType + } + + desc := ocispec.Descriptor{ + MediaType: mediaType, + Digest: digest.FromBytes(content), + Size: int64(len(content)), + Annotations: annotations, + } + + s.Set(desc, content) + return desc, nil +} + +// Set adds the content to the store +func (s *Memory) Set(desc ocispec.Descriptor, content []byte) { + s.lock.Lock() + defer s.lock.Unlock() + + s.descriptor[desc.Digest] = desc + s.content[desc.Digest] = content + + if name, ok := ResolveName(desc); ok && name != "" { + s.nameMap[name] = desc + } +} + +// Get finds the content from the store +func (s *Memory) Get(desc ocispec.Descriptor) (ocispec.Descriptor, []byte, bool) { + s.lock.Lock() + defer s.lock.Unlock() + + desc, ok := s.descriptor[desc.Digest] + if !ok { + return ocispec.Descriptor{}, nil, false + } + content, ok := s.content[desc.Digest] + return desc, content, ok +} + +// GetByName finds the content from the store by name (i.e. AnnotationTitle) +func (s *Memory) GetByName(name string) (ocispec.Descriptor, []byte, bool) { + s.lock.Lock() + defer s.lock.Unlock() + + desc, ok := s.nameMap[name] + if !ok { + return ocispec.Descriptor{}, nil, false + } + content, ok := s.content[desc.Digest] + return desc, content, ok +} + +// StoreManifest stores a manifest linked to by the provided ref. The children of the +// manifest, such as layers and config, should already exist in the file store, either +// as files linked via Add(), or via Set(). If they do not exist, then a typical +// Fetcher that walks the manifest will hit an unresolved hash. +// +// StoreManifest does *not* validate their presence. +func (s *Memory) StoreManifest(ref string, desc ocispec.Descriptor, manifest []byte) error { + s.refMap[ref] = desc + s.Add("", desc.MediaType, manifest) + return nil +} + +func descFromBytes(b []byte, mediaType string) (ocispec.Descriptor, error) { + digest, err := digest.FromReader(bytes.NewReader(b)) + if err != nil { + return ocispec.Descriptor{}, err + } + + if mediaType == "" { + mediaType = DefaultBlobMediaType + } + return ocispec.Descriptor{ + MediaType: mediaType, + Digest: digest, + Size: int64(len(b)), + }, nil +} + +type memoryWriter struct { + store *Memory + buffer *bytes.Buffer + desc ocispec.Descriptor + digester digest.Digester + status content.Status +} + +func (w *memoryWriter) Status() (content.Status, error) { + return w.status, nil +} + +// Digest returns the current digest of the content, up to the current write. +// +// Cannot be called concurrently with `Write`. +func (w *memoryWriter) Digest() digest.Digest { + return w.digester.Digest() +} + +// Write p to the transaction. 
+func (w *memoryWriter) Write(p []byte) (n int, err error) { + n, err = w.buffer.Write(p) + w.digester.Hash().Write(p[:n]) + w.status.Offset += int64(len(p)) + w.status.UpdatedAt = time.Now() + return n, err +} + +func (w *memoryWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + + if w.buffer == nil { + return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") + } + content := w.buffer.Bytes() + w.buffer = nil + + if size > 0 && size != int64(len(content)) { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", len(content), size) + } + if dgst := w.digester.Digest(); expected != "" && expected != dgst { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) + } + + w.store.Set(w.desc, content) + return nil +} + +func (w *memoryWriter) Close() error { + w.buffer = nil + return nil +} + +func (w *memoryWriter) Truncate(size int64) error { + if size != 0 { + return ErrUnsupportedSize + } + w.status.Offset = 0 + w.digester.Hash().Reset() + w.buffer.Truncate(0) + return nil +} diff --git a/vendor/oras.land/oras-go/pkg/content/multireader.go b/vendor/oras.land/oras-go/pkg/content/multireader.go new file mode 100644 index 000000000..8dba773da --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/multireader.go @@ -0,0 +1,56 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "context" + "fmt" + "io" + + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// MultiReader store to read content from multiple stores. It finds the content by asking each underlying +// store to find the content, which it does based on the hash. +// +// Example: +// fileStore := NewFileStore(rootPath) +// memoryStore := NewMemoryStore() +// // load up content in fileStore and memoryStore +// multiStore := MultiReader([]content.Provider{fileStore, memoryStore}) +// +// You now can use multiStore anywhere that content.Provider is accepted +type MultiReader struct { + stores []remotes.Fetcher +} + +// AddStore add a store to read from +func (m *MultiReader) AddStore(store ...remotes.Fetcher) { + m.stores = append(m.stores, store...) +} + +// ReaderAt get a reader +func (m MultiReader) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + for _, store := range m.stores { + r, err := store.Fetch(ctx, desc) + if r != nil && err == nil { + return r, nil + } + } + // we did not find any + return nil, fmt.Errorf("not found") +} diff --git a/vendor/oras.land/oras-go/pkg/content/multiwriter.go b/vendor/oras.land/oras-go/pkg/content/multiwriter.go new file mode 100644 index 000000000..1e69bcdf3 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/multiwriter.go @@ -0,0 +1,42 @@ +/* +Copyright The ORAS Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "context" + + ctrcontent "github.com/containerd/containerd/content" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// MultiWriterIngester an ingester that can provide a single writer or multiple writers for a single +// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer +// that is a tar file with multiple files, each of which should go to a different stream, some of which +// should not be handled at all. +type MultiWriterIngester interface { + ctrcontent.Ingester + Writers(ctx context.Context, opts ...ctrcontent.WriterOpt) (func(string) (ctrcontent.Writer, error), error) +} + +// MultiWriterPusher a pusher that can provide a single writer or multiple writers for a single +// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer +// that is a tar file with multiple files, each of which should go to a different stream, some of which +// should not be handled at all. +type MultiWriterPusher interface { + remotes.Pusher + Pushers(ctx context.Context, desc ocispec.Descriptor) (func(string) (ctrcontent.Writer, error), error) +} diff --git a/vendor/oras.land/oras-go/pkg/content/oci.go b/vendor/oras.land/oras-go/pkg/content/oci.go new file mode 100644 index 000000000..c5849ad76 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/oci.go @@ -0,0 +1,335 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package content + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/local" + "github.com/containerd/containerd/remotes" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// OCI provides content from the file system with the OCI-Image layout. 
+// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md +type OCI struct { + content.Store + + root string + index *ocispec.Index + nameMap map[string]ocispec.Descriptor +} + +// NewOCI creates a new OCI store +func NewOCI(rootPath string) (*OCI, error) { + fileStore, err := local.NewStore(rootPath) + if err != nil { + return nil, err + } + + store := &OCI{ + Store: fileStore, + root: rootPath, + } + if err := store.validateOCILayoutFile(); err != nil { + return nil, err + } + if err := store.LoadIndex(); err != nil { + return nil, err + } + + return store, nil +} + +// LoadIndex reads the index.json from the file system +func (s *OCI) LoadIndex() error { + path := filepath.Join(s.root, OCIImageIndexFile) + indexFile, err := os.Open(path) + if err != nil { + if !os.IsNotExist(err) { + return err + } + s.index = &ocispec.Index{ + Versioned: specs.Versioned{ + SchemaVersion: 2, // historical value + }, + } + s.nameMap = make(map[string]ocispec.Descriptor) + + return nil + } + defer indexFile.Close() + + if err := json.NewDecoder(indexFile).Decode(&s.index); err != nil { + return err + } + + s.nameMap = make(map[string]ocispec.Descriptor) + for _, desc := range s.index.Manifests { + if name := desc.Annotations[ocispec.AnnotationRefName]; name != "" { + s.nameMap[name] = desc + } + } + + return nil +} + +// SaveIndex writes the index.json to the file system +func (s *OCI) SaveIndex() error { + // first need to update the index + var descs []ocispec.Descriptor + for name, desc := range s.nameMap { + if desc.Annotations == nil { + desc.Annotations = map[string]string{} + } + desc.Annotations[ocispec.AnnotationRefName] = name + descs = append(descs, desc) + } + s.index.Manifests = descs + indexJSON, err := json.Marshal(s.index) + if err != nil { + return err + } + + path := filepath.Join(s.root, OCIImageIndexFile) + return ioutil.WriteFile(path, indexJSON, 0644) +} + +func (s *OCI) Resolver() remotes.Resolver { + return s +} + +func (s *OCI) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { + if err := s.LoadIndex(); err != nil { + return "", ocispec.Descriptor{}, err + } + desc, ok := s.nameMap[ref] + if !ok { + return "", ocispec.Descriptor{}, fmt.Errorf("reference %s not in store", ref) + } + return ref, desc, nil +} + +func (s *OCI) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + if err := s.LoadIndex(); err != nil { + return nil, err + } + if _, ok := s.nameMap[ref]; !ok { + return nil, fmt.Errorf("reference %s not in store", ref) + } + return s, nil +} + +// Fetch get an io.ReadCloser for the specific content +func (s *OCI) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + readerAt, err := s.Store.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + // just wrap the ReaderAt with a Reader + return ioutil.NopCloser(&ReaderAtWrapper{readerAt: readerAt}), nil +} + +// Pusher get a remotes.Pusher for the given ref +func (s *OCI) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { + // separate the tag based ref from the hash + var ( + baseRef, hash string + ) + parts := strings.SplitN(ref, "@", 2) + baseRef = parts[0] + if len(parts) > 1 { + hash = parts[1] + } + return &ociPusher{oci: s, ref: baseRef, digest: hash}, nil +} + +// AddReference adds or updates an reference to index. 
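+//
+// Illustrative sketch (the tag and descriptor are assumed to come from a
+// previously pushed manifest):
+//
+//	store, _ := NewOCI("/tmp/oci-layout")
+//	store.AddReference("v0.1.0", manifestDesc)
+//	_ = store.SaveIndex() // persist the updated index.json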
+func (s *OCI) AddReference(name string, desc ocispec.Descriptor) {
+	if desc.Annotations == nil {
+		desc.Annotations = map[string]string{
+			ocispec.AnnotationRefName: name,
+		}
+	} else {
+		desc.Annotations[ocispec.AnnotationRefName] = name
+	}
+
+	if _, ok := s.nameMap[name]; ok {
+		s.nameMap[name] = desc
+
+		for i, ref := range s.index.Manifests {
+			if name == ref.Annotations[ocispec.AnnotationRefName] {
+				s.index.Manifests[i] = desc
+				return
+			}
+		}
+
+		// Process should not reach here.
+		// Fallthrough to `Add` scenario and recover.
+		s.index.Manifests = append(s.index.Manifests, desc)
+		return
+	}
+
+	s.index.Manifests = append(s.index.Manifests, desc)
+	s.nameMap[name] = desc
+}
+
+// DeleteReference deletes a reference from the index.
+func (s *OCI) DeleteReference(name string) {
+	if _, ok := s.nameMap[name]; !ok {
+		return
+	}
+
+	delete(s.nameMap, name)
+	for i, desc := range s.index.Manifests {
+		if name == desc.Annotations[ocispec.AnnotationRefName] {
+			s.index.Manifests[i] = s.index.Manifests[len(s.index.Manifests)-1]
+			s.index.Manifests = s.index.Manifests[:len(s.index.Manifests)-1]
+			return
+		}
+	}
+}
+
+// ListReferences lists all references in the index.
+func (s *OCI) ListReferences() map[string]ocispec.Descriptor {
+	return s.nameMap
+}
+
+// validateOCILayoutFile ensures the `oci-layout` file exists and is valid,
+// creating it if it does not yet exist.
+func (s *OCI) validateOCILayoutFile() error {
+	layoutFilePath := filepath.Join(s.root, ocispec.ImageLayoutFile)
+	layoutFile, err := os.Open(layoutFilePath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+
+		layout := ocispec.ImageLayout{
+			Version: ocispec.ImageLayoutVersion,
+		}
+		layoutJSON, err := json.Marshal(layout)
+		if err != nil {
+			return err
+		}
+
+		return ioutil.WriteFile(layoutFilePath, layoutJSON, 0644)
+	}
+	defer layoutFile.Close()
+
+	var layout *ocispec.ImageLayout
+	err = json.NewDecoder(layoutFile).Decode(&layout)
+	if err != nil {
+		return err
+	}
+	if layout.Version != ocispec.ImageLayoutVersion {
+		return ErrUnsupportedVersion
+	}
+
+	return nil
+}
+
+// Info returns metadata about content available in the content store.
+func (s *OCI) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+	return s.Store.Info(ctx, dgst)
+}
+
+// TODO: implement (needed to create a content.Store)
+// Update updates mutable information related to content.
+// If one or more fieldpaths are provided, only those
+// fields will be updated.
+// Mutable fields:
+// labels.*
+func (s *OCI) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	return content.Info{}, errors.New("not yet implemented: Update (content.Store interface)")
+}
+
+// TODO: implement (needed to create a content.Store)
+// Walk will call fn for each item in the content store that
+// matches the provided filters. If no filters are given all
+// items will be walked.
+func (s *OCI) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
+	return errors.New("not yet implemented: Walk (content.Store interface)")
+}
+
+// Delete removes the content from the store.
+func (s *OCI) Delete(ctx context.Context, dgst digest.Digest) error {
+	return s.Store.Delete(ctx, dgst)
+}
+
+// TODO: implement (needed to create a content.Store)
+func (s *OCI) Status(ctx context.Context, ref string) (content.Status, error) {
+	// Status returns the status of the provided ref.
+ return content.Status{}, errors.New("not yet implemented: Status (content.Store interface)") +} + +// TODO: implement (needed to create a content.Store) +// ListStatuses returns the status of any active ingestions whose ref match the +// provided regular expression. If empty, all active ingestions will be +// returned. +func (s *OCI) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { + return []content.Status{}, errors.New("not yet implemented: ListStatuses (content.Store interface)") +} + +// TODO: implement (needed to create a content.Store) +// Abort completely cancels the ingest operation targeted by ref. +func (s *OCI) Abort(ctx context.Context, ref string) error { + return errors.New("not yet implemented: Abort (content.Store interface)") +} + +// ReaderAt provides contents +func (s *OCI) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + return s.Store.ReaderAt(ctx, desc) +} + +// ociPusher to push content for a single referencem can handle multiple descriptors. +// Needs to be able to recognize when a root manifest is being pushed and to create the tag +// for it. +type ociPusher struct { + oci *OCI + ref string + digest string +} + +// Push get a writer for a single Descriptor +func (p *ociPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + // do we need to create a tag? + switch desc.MediaType { + case ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + // if the hash of the content matches that which was provided as the hash for the root, mark it + if p.digest != "" && p.digest == desc.Digest.String() { + if err := p.oci.LoadIndex(); err != nil { + return nil, err + } + p.oci.nameMap[p.ref] = desc + if err := p.oci.SaveIndex(); err != nil { + return nil, err + } + } + } + + return p.oci.Store.Writer(ctx, content.WithDescriptor(desc), content.WithRef(p.ref)) +} diff --git a/vendor/oras.land/oras-go/pkg/content/opts.go b/vendor/oras.land/oras-go/pkg/content/opts.go new file mode 100644 index 000000000..f6eaac692 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/opts.go @@ -0,0 +1,112 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "errors" + + "github.com/opencontainers/go-digest" +) + +type WriterOpts struct { + InputHash *digest.Digest + OutputHash *digest.Digest + Blocksize int + MultiWriterIngester bool + IgnoreNoName bool +} + +type WriterOpt func(*WriterOpts) error + +func DefaultWriterOpts() WriterOpts { + return WriterOpts{ + InputHash: nil, + OutputHash: nil, + Blocksize: DefaultBlocksize, + IgnoreNoName: true, + } +} + +// WithInputHash provide the expected input hash to a writer. Writers +// may suppress their own calculation of a hash on the stream, taking this +// hash instead. If the Writer processes the data before passing it on to another +// Writer layer, this is the hash of the *input* stream. +// +// To have a blank hash, use WithInputHash(BlankHash). 
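+//
+// For instance (a sketch; ioutil.Discard stands in for a real destination):
+//
+//	w := NewIoContentWriter(ioutil.Discard, WithInputHash(BlankHash))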
+func WithInputHash(hash digest.Digest) WriterOpt {
+	return func(w *WriterOpts) error {
+		w.InputHash = &hash
+		return nil
+	}
+}
+
+// WithOutputHash provides the expected output hash to a writer. Writers
+// may suppress their own calculation of a hash on the stream, taking this
+// hash instead. If the Writer processes the data before passing it on to another
+// Writer layer, this is the hash of the *output* stream.
+//
+// To have a blank hash, use WithOutputHash(BlankHash).
+func WithOutputHash(hash digest.Digest) WriterOpt {
+	return func(w *WriterOpts) error {
+		w.OutputHash = &hash
+		return nil
+	}
+}
+
+// WithBlocksize sets the blocksize used by the processor of data.
+// The default is DefaultBlocksize, which is the same as that used by io.Copy.
+// Includes a safety check to ensure the caller doesn't actively set it to <= 0.
+func WithBlocksize(blocksize int) WriterOpt {
+	return func(w *WriterOpts) error {
+		if blocksize <= 0 {
+			return errors.New("blocksize must be greater than 0")
+		}
+		w.Blocksize = blocksize
+		return nil
+	}
+}
+
+// WithMultiWriterIngester indicates that the passed ingester also implements MultiWriter
+// and should be used as such. If this is set to true, but the ingester does not
+// implement MultiWriter, calling Writer should return an error.
+func WithMultiWriterIngester() WriterOpt {
+	return func(w *WriterOpts) error {
+		w.MultiWriterIngester = true
+		return nil
+	}
+}
+
+// WithErrorOnNoName overrides the default behavior of some ingesters which, when creating
+// a Writer, do not return an error if the descriptor does not have a valid name.
+// Passing WithErrorOnNoName tells the writer to return an error instead of passing
+// the data to a nil writer.
+func WithErrorOnNoName() WriterOpt {
+	return func(w *WriterOpts) error {
+		w.IgnoreNoName = false
+		return nil
+	}
+}
+
+// WithIgnoreNoName tells the writer not to return an error if the descriptor
+// does not have a valid name, but rather to pass the data to a nil writer.
+//
+// Deprecated: Use WithErrorOnNoName
+func WithIgnoreNoName() WriterOpt {
+	return func(w *WriterOpts) error {
+		w.IgnoreNoName = true
+		return nil
+	}
+}
diff --git a/vendor/oras.land/oras-go/pkg/content/passthrough.go b/vendor/oras.land/oras-go/pkg/content/passthrough.go
new file mode 100644
index 000000000..d54c4256c
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/content/passthrough.go
@@ -0,0 +1,286 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package content + +import ( + "context" + "errors" + "io" + "time" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" +) + +// PassthroughWriter takes an input stream and passes it through to an underlying writer, +// while providing the ability to manipulate the stream before it gets passed through +type PassthroughWriter struct { + writer content.Writer + pipew *io.PipeWriter + digester digest.Digester + size int64 + underlyingWriter *underlyingWriter + reader *io.PipeReader + hash *digest.Digest + done chan error +} + +// NewPassthroughWriter creates a pass-through writer that allows for processing +// the content via an arbitrary function. The function should do whatever processing it +// wants, reading from the Reader to the Writer. When done, it must indicate via +// sending an error or nil to the Done +func NewPassthroughWriter(writer content.Writer, f func(r io.Reader, w io.Writer, done chan<- error), opts ...WriterOpt) content.Writer { + // process opts for default + wOpts := DefaultWriterOpts() + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil + } + } + + r, w := io.Pipe() + pw := &PassthroughWriter{ + writer: writer, + pipew: w, + digester: digest.Canonical.Digester(), + underlyingWriter: &underlyingWriter{ + writer: writer, + digester: digest.Canonical.Digester(), + hash: wOpts.OutputHash, + }, + reader: r, + hash: wOpts.InputHash, + done: make(chan error, 1), + } + go f(r, pw.underlyingWriter, pw.done) + return pw +} + +func (pw *PassthroughWriter) Write(p []byte) (n int, err error) { + n, err = pw.pipew.Write(p) + if pw.hash == nil { + pw.digester.Hash().Write(p[:n]) + } + pw.size += int64(n) + return +} + +func (pw *PassthroughWriter) Close() error { + if pw.pipew != nil { + pw.pipew.Close() + } + pw.writer.Close() + return nil +} + +// Digest may return empty digest or panics until committed. +func (pw *PassthroughWriter) Digest() digest.Digest { + if pw.hash != nil { + return *pw.hash + } + return pw.digester.Digest() +} + +// Commit commits the blob (but no roll-back is guaranteed on an error). +// size and expected can be zero-value when unknown. +// Commit always closes the writer, even on error. +// ErrAlreadyExists aborts the writer. +func (pw *PassthroughWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + if pw.pipew != nil { + pw.pipew.Close() + } + err := <-pw.done + if pw.reader != nil { + pw.reader.Close() + } + if err != nil && err != io.EOF { + return err + } + + // Some underlying writers will validate an expected digest, so we need the option to pass it + // that digest. That is why we caluclate the digest of the underlying writer throughout the write process. + return pw.writer.Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...) 
+} + +// Status returns the current state of write +func (pw *PassthroughWriter) Status() (content.Status, error) { + return pw.writer.Status() +} + +// Truncate updates the size of the target blob +func (pw *PassthroughWriter) Truncate(size int64) error { + return pw.writer.Truncate(size) +} + +// underlyingWriter implementation of io.Writer to write to the underlying +// io.Writer +type underlyingWriter struct { + writer content.Writer + digester digest.Digester + size int64 + hash *digest.Digest +} + +// Write write to the underlying writer +func (u *underlyingWriter) Write(p []byte) (int, error) { + n, err := u.writer.Write(p) + if err != nil { + return 0, err + } + + if u.hash == nil { + u.digester.Hash().Write(p) + } + u.size += int64(len(p)) + return n, nil +} + +// Size get total size written +func (u *underlyingWriter) Size() int64 { + return u.size +} + +// Digest may return empty digest or panics until committed. +func (u *underlyingWriter) Digest() digest.Digest { + if u.hash != nil { + return *u.hash + } + return u.digester.Digest() +} + +// PassthroughMultiWriter single writer that passes through to multiple writers, allowing the passthrough +// function to select which writer to use. +type PassthroughMultiWriter struct { + writers []*PassthroughWriter + pipew *io.PipeWriter + digester digest.Digester + size int64 + reader *io.PipeReader + hash *digest.Digest + done chan error + startedAt time.Time + updatedAt time.Time +} + +func NewPassthroughMultiWriter(writers func(name string) (content.Writer, error), f func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error), opts ...WriterOpt) content.Writer { + // process opts for default + wOpts := DefaultWriterOpts() + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil + } + } + + r, w := io.Pipe() + + pmw := &PassthroughMultiWriter{ + startedAt: time.Now(), + updatedAt: time.Now(), + done: make(chan error, 1), + digester: digest.Canonical.Digester(), + hash: wOpts.InputHash, + pipew: w, + reader: r, + } + + // get our output writers + getwriter := func(name string) io.Writer { + writer, err := writers(name) + if err != nil || writer == nil { + return nil + } + pw := &PassthroughWriter{ + writer: writer, + digester: digest.Canonical.Digester(), + underlyingWriter: &underlyingWriter{ + writer: writer, + digester: digest.Canonical.Digester(), + hash: wOpts.OutputHash, + }, + done: make(chan error, 1), + } + pmw.writers = append(pmw.writers, pw) + return pw.underlyingWriter + } + go f(r, getwriter, pmw.done) + return pmw +} + +func (pmw *PassthroughMultiWriter) Write(p []byte) (n int, err error) { + n, err = pmw.pipew.Write(p) + if pmw.hash == nil { + pmw.digester.Hash().Write(p[:n]) + } + pmw.size += int64(n) + pmw.updatedAt = time.Now() + return +} + +func (pmw *PassthroughMultiWriter) Close() error { + pmw.pipew.Close() + for _, w := range pmw.writers { + w.Close() + } + return nil +} + +// Digest may return empty digest or panics until committed. +func (pmw *PassthroughMultiWriter) Digest() digest.Digest { + if pmw.hash != nil { + return *pmw.hash + } + return pmw.digester.Digest() +} + +// Commit commits the blob (but no roll-back is guaranteed on an error). +// size and expected can be zero-value when unknown. +// Commit always closes the writer, even on error. +// ErrAlreadyExists aborts the writer. 
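+//
+// Sketch of the overall flow (writers, process, data, and ctx are placeholders):
+//
+//	pmw := NewPassthroughMultiWriter(writers, process)
+//	_, _ = pmw.Write(data)
+//	err := pmw.Commit(ctx, 0, "")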
+func (pmw *PassthroughMultiWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	pmw.pipew.Close()
+	err := <-pmw.done
+	if pmw.reader != nil {
+		pmw.reader.Close()
+	}
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// Some underlying writers will validate an expected digest, so we need the option to pass it
+	// that digest. That is why we calculate the digest of the underlying writer throughout the write process.
+	for _, w := range pmw.writers {
+		// maybe this should be Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...)
+		w.done <- err
+		if err := w.Commit(ctx, size, expected, opts...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Status returns the current state of write
+func (pmw *PassthroughMultiWriter) Status() (content.Status, error) {
+	return content.Status{
+		StartedAt: pmw.startedAt,
+		UpdatedAt: pmw.updatedAt,
+		Total:     pmw.size,
+	}, nil
+}
+
+// Truncate updates the size of the target blob, but cannot do anything with a multiwriter
+func (pmw *PassthroughMultiWriter) Truncate(size int64) error {
+	return errors.New("truncate unavailable on multiwriter")
+}
diff --git a/vendor/oras.land/oras-go/pkg/content/readerat.go b/vendor/oras.land/oras-go/pkg/content/readerat.go
new file mode 100644
index 000000000..7e2cb9b80
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/content/readerat.go
@@ -0,0 +1,68 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package content
+
+import (
+	"io"
+
+	"github.com/containerd/containerd/content"
+)
+
+// ensure interface
+var (
+	_ content.ReaderAt = sizeReaderAt{}
+)
+
+type readAtCloser interface {
+	io.ReaderAt
+	io.Closer
+}
+
+type sizeReaderAt struct {
+	readAtCloser
+	size int64
+}
+
+func (ra sizeReaderAt) Size() int64 {
+	return ra.size
+}
+
+// NopCloserAt wraps an io.ReaderAt with a no-op Close method
+func NopCloserAt(r io.ReaderAt) nopCloserAt {
+	return nopCloserAt{r}
+}
+
+type nopCloserAt struct {
+	io.ReaderAt
+}
+
+func (n nopCloserAt) Close() error {
+	return nil
+}
+
+// ReaderAtWrapper wraps an io.ReaderAt to give an io.Reader
+type ReaderAtWrapper struct {
+	offset   int64
+	readerAt io.ReaderAt
+}
+
+func (r *ReaderAtWrapper) Read(p []byte) (n int, err error) {
+	n, err = r.readerAt.ReadAt(p, r.offset)
+	r.offset += int64(n)
+	return
+}
+
+func NewReaderAtWrapper(readerAt io.ReaderAt) *ReaderAtWrapper {
+	return &ReaderAtWrapper{readerAt: readerAt}
+}
diff --git a/vendor/oras.land/oras-go/pkg/content/registry.go b/vendor/oras.land/oras-go/pkg/content/registry.go
new file mode 100644
index 000000000..aa22567c6
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/content/registry.go
@@ -0,0 +1,84 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package content
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"os"
+
+	auth "oras.land/oras-go/pkg/auth/docker"
+
+	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/remotes/docker"
+)
+
+// RegistryOptions provides configuration options to a Registry
+type RegistryOptions struct {
+	Configs   []string
+	Username  string
+	Password  string
+	Insecure  bool
+	PlainHTTP bool
+}
+
+// Registry provides content from a spec-compliant registry. Create and use a new one for each
+// registry with unique configuration of RegistryOptions.
+type Registry struct {
+	remotes.Resolver
+}
+
+// NewRegistry creates a new Registry store
+func NewRegistry(opts RegistryOptions) (*Registry, error) {
+	return &Registry{
+		Resolver: newResolver(opts.Username, opts.Password, opts.Insecure, opts.PlainHTTP, opts.Configs...),
+	}, nil
+}
+
+func newResolver(username, password string, insecure bool, plainHTTP bool, configs ...string) remotes.Resolver {
+	opts := docker.ResolverOptions{
+		PlainHTTP: plainHTTP,
+	}
+
+	client := http.DefaultClient
+	if insecure {
+		// use a dedicated client so that the shared http.DefaultClient is not mutated
+		client = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: &tls.Config{
+					InsecureSkipVerify: true,
+				},
+			},
+		}
+	}
+	opts.Client = client
+
+	if username != "" || password != "" {
+		opts.Credentials = func(hostName string) (string, string, error) {
+			return username, password, nil
+		}
+		return docker.NewResolver(opts)
+	}
+	cli, err := auth.NewClient(configs...)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "WARNING: Error loading auth file: %v\n", err)
+	}
+	resolver, err := cli.Resolver(context.Background(), client, plainHTTP)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "WARNING: Error loading resolver: %v\n", err)
+		resolver = docker.NewResolver(opts)
+	}
+	return resolver
+}
diff --git a/vendor/oras.land/oras-go/pkg/content/untar.go b/vendor/oras.land/oras-go/pkg/content/untar.go
new file mode 100644
index 000000000..fe95a81a4
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/content/untar.go
@@ -0,0 +1,157 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+
+	"github.com/containerd/containerd/content"
+)
+
+// NewUntarWriter wraps a writer with an untar, so that the stream is untarred
+//
+// By default, it calculates the hash when writing. If the option `skipHash` is true,
+// it will skip doing the hash. Skipping the hash is intended to be used only
+// if you are confident about the validity of the data being passed to the writer,
+// and wish to save on the hashing time.
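+//
+// A hedged usage sketch (dst is an assumed destination content.Writer and
+// tarStream an assumed io.Reader over a tar archive; both are illustrative):
+//
+//	uw := NewUntarWriter(dst)
+//	if _, err := io.Copy(uw, tarStream); err != nil {
+//		return err
+//	}
+//	return uw.Commit(ctx, 0, "") // zero values: size and digest unknown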
+func NewUntarWriter(writer content.Writer, opts ...WriterOpt) content.Writer { + // process opts for default + wOpts := DefaultWriterOpts() + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil + } + } + + return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) { + tr := tar.NewReader(r) + var err error + for { + _, err := tr.Next() + if err == io.EOF { + // clear the error, since we do not pass an io.EOF + err = nil + break // End of archive + } + if err != nil { + // pass the error on + err = fmt.Errorf("UntarWriter tar file header read error: %v", err) + break + } + // write out the untarred data + // we can handle io.EOF, just go to the next file + // any other errors should stop and get reported + b := make([]byte, wOpts.Blocksize, wOpts.Blocksize) + for { + var n int + n, err = tr.Read(b) + if err != nil && err != io.EOF { + err = fmt.Errorf("UntarWriter file data read error: %v\n", err) + break + } + l := n + if n > len(b) { + l = len(b) + } + if _, err2 := w.Write(b[:l]); err2 != nil { + err = fmt.Errorf("UntarWriter error writing to underlying writer: %v", err2) + break + } + if err == io.EOF { + // go to the next file + break + } + } + // did we break with a non-nil and non-EOF error? + if err != nil && err != io.EOF { + break + } + } + done <- err + }, opts...) +} + +// NewUntarWriterByName wrap multiple writers with an untar, so that the stream is untarred and passed +// to the appropriate writer, based on the filename. If a filename is not found, it is up to the called func +// to determine how to process it. +func NewUntarWriterByName(writers func(string) (content.Writer, error), opts ...WriterOpt) content.Writer { + // process opts for default + wOpts := DefaultWriterOpts() + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil + } + } + + // need a PassthroughMultiWriter here + return NewPassthroughMultiWriter(writers, func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error) { + tr := tar.NewReader(r) + var err error + for { + header, err := tr.Next() + if err == io.EOF { + // clear the error, since we do not pass an io.EOF + err = nil + break // End of archive + } + if err != nil { + // pass the error on + err = fmt.Errorf("UntarWriter tar file header read error: %v", err) + break + } + // get the filename + filename := header.Name + + // get the writer for this filename + w := getwriter(filename) + if w == nil { + continue + } + + // write out the untarred data + // we can handle io.EOF, just go to the next file + // any other errors should stop and get reported + b := make([]byte, wOpts.Blocksize, wOpts.Blocksize) + for { + var n int + n, err = tr.Read(b) + if err != nil && err != io.EOF { + err = fmt.Errorf("UntarWriter file data read error: %v\n", err) + break + } + l := n + if n > len(b) { + l = len(b) + } + if _, err2 := w.Write(b[:l]); err2 != nil { + err = fmt.Errorf("UntarWriter error writing to underlying writer at for name '%s': %v", filename, err2) + break + } + if err == io.EOF { + // go to the next file + break + } + } + // did we break with a non-nil and non-EOF error? + if err != nil && err != io.EOF { + break + } + } + done <- err + }, opts...) +} diff --git a/vendor/oras.land/oras-go/pkg/content/utils.go b/vendor/oras.land/oras-go/pkg/content/utils.go new file mode 100644 index 000000000..c31c54aae --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/content/utils.go @@ -0,0 +1,223 @@ +/* +Copyright The ORAS Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package content + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// ResolveName resolves name from descriptor +func ResolveName(desc ocispec.Descriptor) (string, bool) { + name, ok := desc.Annotations[ocispec.AnnotationTitle] + return name, ok +} + +// tarDirectory walks the directory specified by path, and tar those files with a new +// path prefix. +func tarDirectory(root, prefix string, w io.Writer, stripTimes bool) error { + tw := tar.NewWriter(w) + defer tw.Close() + if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rename path + name, err := filepath.Rel(root, path) + if err != nil { + return err + } + name = filepath.Join(prefix, name) + name = filepath.ToSlash(name) + + // Generate header + var link string + mode := info.Mode() + if mode&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + header, err := tar.FileInfoHeader(info, link) + if err != nil { + return errors.Wrap(err, path) + } + header.Name = name + header.Uid = 0 + header.Gid = 0 + header.Uname = "" + header.Gname = "" + + if stripTimes { + header.ModTime = time.Time{} + header.AccessTime = time.Time{} + header.ChangeTime = time.Time{} + } + + // Write file + if err := tw.WriteHeader(header); err != nil { + return errors.Wrap(err, "tar") + } + if mode.IsRegular() { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + if _, err := io.Copy(tw, file); err != nil { + return errors.Wrap(err, path) + } + } + + return nil + }); err != nil { + return err + } + return nil +} + +// extractTarDirectory extracts tar file to a directory specified by the `root` +// parameter. The file name prefix is ensured to be the string specified by the +// `prefix` parameter and is trimmed. 
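+//
+// For example (illustrative values), with root "/tmp/out" and prefix "bundle",
+// the tar entry "bundle/etc/config" is written to "/tmp/out/etc/config".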
+func extractTarDirectory(root, prefix string, r io.Reader) error { + tr := tar.NewReader(r) + for { + header, err := tr.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + // Name check + name := header.Name + path, err := ensureBasePath(root, prefix, name) + if err != nil { + return err + } + path = filepath.Join(root, path) + + // Link check + switch header.Typeflag { + case tar.TypeLink, tar.TypeSymlink: + link := header.Linkname + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(name), link) + } + if _, err := ensureBasePath(root, prefix, link); err != nil { + return err + } + } + + // Create content + switch header.Typeflag { + case tar.TypeReg: + err = writeFile(path, tr, header.FileInfo().Mode()) + case tar.TypeDir: + err = os.MkdirAll(path, header.FileInfo().Mode()) + case tar.TypeLink: + err = os.Link(header.Linkname, path) + case tar.TypeSymlink: + err = os.Symlink(header.Linkname, path) + default: + continue // Non-regular files are skipped + } + if err != nil { + return err + } + + // Change access time and modification time if possible (error ignored) + os.Chtimes(path, header.AccessTime, header.ModTime) + } +} + +// ensureBasePath ensures the target path is in the base path, +// returning its relative path to the base path. +func ensureBasePath(root, base, target string) (string, error) { + path, err := filepath.Rel(base, target) + if err != nil { + return "", err + } + cleanPath := filepath.ToSlash(filepath.Clean(path)) + if cleanPath == ".." || strings.HasPrefix(cleanPath, "../") { + return "", fmt.Errorf("%q is outside of %q", target, base) + } + + // No symbolic link allowed in the relative path + dir := filepath.Dir(path) + for dir != "." { + if info, err := os.Lstat(filepath.Join(root, dir)); err != nil { + if !os.IsNotExist(err) { + return "", err + } + } else if info.Mode()&os.ModeSymlink != 0 { + return "", fmt.Errorf("no symbolic link allowed between %q and %q", base, target) + } + dir = filepath.Dir(dir) + } + + return path, nil +} + +func writeFile(path string, r io.Reader, perm os.FileMode) error { + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, r) + return err +} + +func extractTarGzip(root, prefix, filename, checksum string) error { + file, err := os.Open(filename) + if err != nil { + return err + } + defer file.Close() + zr, err := gzip.NewReader(file) + if err != nil { + return err + } + defer zr.Close() + var r io.Reader = zr + var verifier digest.Verifier + if checksum != "" { + if digest, err := digest.Parse(checksum); err == nil { + verifier = digest.Verifier() + r = io.TeeReader(r, verifier) + } + } + if err := extractTarDirectory(root, prefix, r); err != nil { + return err + } + if verifier != nil && !verifier.Verified() { + return errors.New("content digest mismatch") + } + return nil +} diff --git a/vendor/oras.land/oras-go/pkg/context/context.go b/vendor/oras.land/oras-go/pkg/context/context.go new file mode 100644 index 000000000..af0ac8469 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/context/context.go @@ -0,0 +1,24 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package context
+
+import "context"

+// Background returns a default context with logger discarded.
+func Background() context.Context {
+	ctx := context.Background()
+	return WithLoggerDiscarded(ctx)
+}
diff --git a/vendor/oras.land/oras-go/pkg/context/logger.go b/vendor/oras.land/oras-go/pkg/context/logger.go
new file mode 100644
index 000000000..2fb2e5271
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/context/logger.go
@@ -0,0 +1,50 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package context
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+
+	"github.com/containerd/containerd/log"
+	"github.com/sirupsen/logrus"
+)
+
+// WithLogger returns a new context with the provided logger.
+// This method wraps github.com/containerd/containerd/log.WithLogger()
+func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
+	return log.WithLogger(ctx, logger)
+}
+
+// WithLoggerFromWriter returns a new context with the logger, writing to the provided writer.
+func WithLoggerFromWriter(ctx context.Context, writer io.Writer) context.Context {
+	logger := logrus.New()
+	logger.Out = writer
+	entry := logrus.NewEntry(logger)
+	return WithLogger(ctx, entry)
+}
+
+// WithLoggerDiscarded returns a new context with the logger, writing to nothing.
+func WithLoggerDiscarded(ctx context.Context) context.Context {
+	return WithLoggerFromWriter(ctx, ioutil.Discard)
+}
+
+// GetLogger retrieves the current logger from the context.
+// This method wraps github.com/containerd/containerd/log.GetLogger()
+func GetLogger(ctx context.Context) *logrus.Entry {
+	return log.GetLogger(ctx)
+}
diff --git a/vendor/oras.land/oras-go/pkg/oras/copy.go b/vendor/oras.land/oras-go/pkg/oras/copy.go
new file mode 100644
index 000000000..ebf63276c
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/oras/copy.go
@@ -0,0 +1,213 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package oras
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sync"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/remotes"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"oras.land/oras-go/pkg/target"
+)
+
+// Copy copies a ref from one target.Target to a ref in another target.Target. If toRef is blank, reuses fromRef.
+// Returns the root descriptor of the copied item, which can be used to retrieve
+// child elements from the target.Target.
+func Copy(ctx context.Context, from target.Target, fromRef string, to target.Target, toRef string, opts ...CopyOpt) (ocispec.Descriptor, error) {
+	if from == nil {
+		return ocispec.Descriptor{}, ErrFromTargetUndefined
+	}
+	if to == nil {
+		return ocispec.Descriptor{}, ErrToTargetUndefined
+	}
+	// blank toRef
+	if toRef == "" {
+		toRef = fromRef
+	}
+	opt := copyOptsDefaults()
+	for _, o := range opts {
+		if err := o(opt); err != nil {
+			return ocispec.Descriptor{}, err
+		}
+	}
+
+	if from == nil {
+		return ocispec.Descriptor{}, ErrFromResolverUndefined
+	}
+	if to == nil {
+		return ocispec.Descriptor{}, ErrToResolverUndefined
+	}
+
+	// for the "from", we resolve the ref, then use resolver.Fetcher to fetch the various content blobs
+	// for the "to", we simply use resolver.Pusher to push the various content blobs
+
+	_, desc, err := from.Resolve(ctx, fromRef)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	fetcher, err := from.Fetcher(ctx, fromRef)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	// construct the reference we send to the pusher using the digest, so it knows what the root is
+	pushRef := fmt.Sprintf("%s@%s", toRef, desc.Digest.String())
+	pusher, err := to.Pusher(ctx, pushRef)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	if err := transferContent(ctx, desc, fetcher, pusher, opt); err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	return desc, nil
+}
+
+func transferContent(ctx context.Context, desc ocispec.Descriptor, fetcher remotes.Fetcher, pusher remotes.Pusher, opts *copyOpts) error {
+	var descriptors, manifests []ocispec.Descriptor
+	lock := &sync.Mutex{}
+	picker := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if isAllowedMediaType(desc.MediaType, opts.allowedMediaTypes...) {
+			if opts.filterName(desc) {
+				lock.Lock()
+				defer lock.Unlock()
+				descriptors = append(descriptors, desc)
+			}
+			return nil, nil
+		}
+		return nil, nil
+	})
+
+	// we use a hybrid store - a cache wrapping the underlying pusher - for two reasons:
+	// 1. so that we can cache the manifests as we push them, then retrieve them later to push in reverse order after the blobs
+	// 2. 
so that we can retrieve them to analyze and find children in the Dispatch routine + store := opts.contentProvideIngesterPusherFetcher + if store == nil { + store = newHybridStoreFromPusher(pusher, opts.cachedMediaTypes, true) + } + + // fetchHandler pushes to the *store*, which may or may not cache it + baseFetchHandler := func(p remotes.Pusher, f remotes.Fetcher) images.HandlerFunc { + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + cw, err := p.Push(ctx, desc) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return nil, err + } + + return nil, nil + } + defer cw.Close() + + rc, err := f.Fetch(ctx, desc) + if err != nil { + return nil, err + } + defer rc.Close() + return nil, content.Copy(ctx, cw, rc, desc.Size, desc.Digest) + }) + } + + // track all of our manifests that will be cached + fetchHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if isAllowedMediaType(desc.MediaType, opts.cachedMediaTypes...) { + lock.Lock() + defer lock.Unlock() + manifests = append(manifests, desc) + } + return baseFetchHandler(store, fetcher)(ctx, desc) + }) + + handlers := []images.Handler{ + filterHandler(opts, opts.allowedMediaTypes...), + } + handlers = append(handlers, opts.baseHandlers...) + handlers = append(handlers, + fetchHandler, + picker, + images.ChildrenHandler(&ProviderWrapper{Fetcher: store}), + ) + handlers = append(handlers, opts.callbackHandlers...) + + if err := opts.dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { + return err + } + + // we cached all of the manifests, so push those out + // Iterate in reverse order as seen, parent always uploaded after child + for i := len(manifests) - 1; i >= 0; i-- { + _, err := baseFetchHandler(pusher, store)(ctx, manifests[i]) + if err != nil { + return err + } + } + + // if the option to request the root manifest was passed, accommodate it + if opts.saveManifest != nil && len(manifests) > 0 { + rc, err := store.Fetch(ctx, manifests[0]) + if err != nil { + return fmt.Errorf("could not get root manifest to save based on CopyOpt: %v", err) + } + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(rc); err != nil { + return fmt.Errorf("unable to read data for root manifest to save based on CopyOpt: %v", err) + } + // get the root manifest from the store + opts.saveManifest(buf.Bytes()) + } + + // if the option to request the layers was passed, accommodate it + if opts.saveLayers != nil && len(descriptors) > 0 { + opts.saveLayers(descriptors) + } + return nil +} + +func filterHandler(opts *copyOpts, allowedMediaTypes ...string) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch { + case isAllowedMediaType(desc.MediaType, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex): + return nil, nil + case isAllowedMediaType(desc.MediaType, allowedMediaTypes...): + if opts.filterName(desc) { + return nil, nil + } + log.G(ctx).Warnf("blob no name: %v", desc.Digest) + default: + log.G(ctx).Warnf("unknown type: %v", desc.MediaType) + } + return nil, images.ErrStopHandler + } +} + +func isAllowedMediaType(mediaType string, allowedMediaTypes ...string) bool { + if len(allowedMediaTypes) == 0 { + return true + } + for _, allowedMediaType := range allowedMediaTypes { + if mediaType == allowedMediaType { + return true + } + } + return false +} diff --git a/vendor/oras.land/oras-go/pkg/oras/errors.go 
b/vendor/oras.land/oras-go/pkg/oras/errors.go
new file mode 100644
index 000000000..6c5f9f240
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/oras/errors.go
@@ -0,0 +1,42 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package oras
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Common errors
+var (
+	ErrResolverUndefined     = errors.New("resolver undefined")
+	ErrFromResolverUndefined = errors.New("from target resolver undefined")
+	ErrToResolverUndefined   = errors.New("to target resolver undefined")
+	ErrFromTargetUndefined   = errors.New("from target undefined")
+	ErrToTargetUndefined     = errors.New("to target undefined")
+)
+
+// Path validation related errors
+var (
+	ErrDirtyPath               = errors.New("dirty path")
+	ErrPathNotSlashSeparated   = errors.New("path not slash separated")
+	ErrAbsolutePathDisallowed  = errors.New("absolute path disallowed")
+	ErrPathTraversalDisallowed = errors.New("path traversal disallowed")
+)
+
+// ErrStopProcessing is used to stop processing an oras operation.
+// This error only makes sense in sequential pulling operation.
+var ErrStopProcessing = fmt.Errorf("stop processing")
diff --git a/vendor/oras.land/oras-go/pkg/oras/opts.go b/vendor/oras.land/oras-go/pkg/oras/opts.go
new file mode 100644
index 000000000..042a775b0
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/oras/opts.go
@@ -0,0 +1,254 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package oras
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/containerd/containerd/images"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/semaphore"
+	orascontent "oras.land/oras-go/pkg/content"
+)
+
+func copyOptsDefaults() *copyOpts {
+	return &copyOpts{
+		dispatch:         images.Dispatch,
+		filterName:       filterName,
+		cachedMediaTypes: []string{ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex},
+		validateName:     ValidateNameAsPath,
+	}
+}
+
+type CopyOpt func(o *copyOpts) error
+
+type copyOpts struct {
+	allowedMediaTypes                   []string
+	dispatch                            func(context.Context, images.Handler, *semaphore.Weighted, ...ocispec.Descriptor) error
+	baseHandlers                        []images.Handler
+	callbackHandlers                    []images.Handler
+	contentProvideIngesterPusherFetcher orascontent.Store
+	filterName                          func(ocispec.Descriptor) bool
+	cachedMediaTypes                    []string
+
+	saveManifest func([]byte)
+	saveLayers   func([]ocispec.Descriptor)
+	validateName func(desc ocispec.Descriptor) error
+
+	userAgent string
+}
+
+// ValidateNameAsPath validates name in the descriptor as file path in order
+// to generate good packages intended to be pulled using the FileStore or
+// the oras cli.
+// For cross-platform considerations, only unix paths are accepted.
+func ValidateNameAsPath(desc ocispec.Descriptor) error {
+	// no empty name
+	path, ok := orascontent.ResolveName(desc)
+	if !ok || path == "" {
+		return orascontent.ErrNoName
+	}
+
+	// path should be clean
+	if target := filepath.ToSlash(filepath.Clean(path)); target != path {
+		return errors.Wrap(ErrDirtyPath, path)
+	}
+
+	// path should be slash-separated
+	if strings.Contains(path, "\\") {
+		return errors.Wrap(ErrPathNotSlashSeparated, path)
+	}
+
+	// disallow absolute path: covers unix and windows format
+	if strings.HasPrefix(path, "/") {
+		return errors.Wrap(ErrAbsolutePathDisallowed, path)
+	}
+	if len(path) > 2 {
+		c := path[0]
+		if path[1] == ':' && path[2] == '/' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+			return errors.Wrap(ErrAbsolutePathDisallowed, path)
+		}
+	}
+
+	// disallow path traversal
+	if strings.HasPrefix(path, "../") || path == ".." {
+		return errors.Wrap(ErrPathTraversalDisallowed, path)
+	}
+
+	return nil
+}
+
+// dispatchBFS behaves the same as images.Dispatch() but in sequence with breadth-first search.
+func dispatchBFS(ctx context.Context, handler images.Handler, weighted *semaphore.Weighted, descs ...ocispec.Descriptor) error {
+	for i := 0; i < len(descs); i++ {
+		desc := descs[i]
+		children, err := handler.Handle(ctx, desc)
+		if err != nil {
+			switch err := errors.Cause(err); err {
+			case images.ErrSkipDesc:
+				continue // don't traverse the children.
+			case ErrStopProcessing:
+				return nil
+			}
+			return err
+		}
+		descs = append(descs, children...)
+	}
+	return nil
+}
+
+func filterName(desc ocispec.Descriptor) bool {
+	// needs to be filled in
+	return true
+}
+
+// WithAdditionalCachedMediaTypes adds media types normally cached in memory when pulling.
+// This does not replace the default media types, but appends to them
+func WithAdditionalCachedMediaTypes(cachedMediaTypes ...string) CopyOpt {
+	return func(o *copyOpts) error {
+		o.cachedMediaTypes = append(o.cachedMediaTypes, cachedMediaTypes...)
+		return nil
+	}
+}
+
+// WithAllowedMediaType adds to the allowed media types
+func WithAllowedMediaType(allowedMediaTypes ...string) CopyOpt {
+	return func(o *copyOpts) error {
+		o.allowedMediaTypes = append(o.allowedMediaTypes, allowedMediaTypes...)
+		return nil
+	}
+}
+
+// WithAllowedMediaTypes adds to the allowed media types
+func WithAllowedMediaTypes(allowedMediaTypes []string) CopyOpt {
+	return func(o *copyOpts) error {
+		o.allowedMediaTypes = append(o.allowedMediaTypes, allowedMediaTypes...)
+		return nil
+	}
+}
+
+// WithPullByBFS opts to pull in sequence with breadth-first search
+func WithPullByBFS(o *copyOpts) error {
+	o.dispatch = dispatchBFS
+	return nil
+}
+
+// WithPullBaseHandler provides base handlers, which will be called before
+// any pull specific handlers.
+func WithPullBaseHandler(handlers ...images.Handler) CopyOpt {
+	return func(o *copyOpts) error {
+		o.baseHandlers = append(o.baseHandlers, handlers...)
+		return nil
+	}
+}
+
+// WithPullCallbackHandler provides callback handlers, which will be called after
+// any pull specific handlers.
+func WithPullCallbackHandler(handlers ...images.Handler) CopyOpt {
+	return func(o *copyOpts) error {
+		o.callbackHandlers = append(o.callbackHandlers, handlers...)
+		return nil
+	}
+}
+
+// WithContentStore opts to use the provided Store as the content provider and
+// ingester for file system I/O, including caches.
+func WithContentStore(store orascontent.Store) CopyOpt {
+	return func(o *copyOpts) error {
+		o.contentProvideIngesterPusherFetcher = store
+		return nil
+	}
+}
+
+// WithPullEmptyNameAllowed allows pulling blobs with empty name.
+func WithPullEmptyNameAllowed() CopyOpt {
+	return func(o *copyOpts) error {
+		o.filterName = func(ocispec.Descriptor) bool {
+			return true
+		}
+		return nil
+	}
+}
+
+// WithPullStatusTrack reports pull results to the provided writer
+func WithPullStatusTrack(writer io.Writer) CopyOpt {
+	return WithPullCallbackHandler(pullStatusTrack(writer))
+}
+
+func pullStatusTrack(writer io.Writer) images.Handler {
+	var printLock sync.Mutex
+	return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if name, ok := orascontent.ResolveName(desc); ok {
+			digestString := desc.Digest.String()
+			if err := desc.Digest.Validate(); err == nil {
+				if algo := desc.Digest.Algorithm(); algo == digest.SHA256 {
+					digestString = desc.Digest.Encoded()[:12]
+				}
+			}
+			printLock.Lock()
+			defer printLock.Unlock()
+			fmt.Fprintln(writer, "Downloaded", digestString, name)
+		}
+		return nil, nil
+	})
+}
+
+// WithNameValidation validates the image title in the descriptor.
+// Pass nil to disable name validation.
+func WithNameValidation(validate func(desc ocispec.Descriptor) error) CopyOpt {
+	return func(o *copyOpts) error {
+		o.validateName = validate
+		return nil
+	}
+}
+
+// WithUserAgent sets the user agent string in http communications
+func WithUserAgent(agent string) CopyOpt {
+	return func(o *copyOpts) error {
+		o.userAgent = agent
+		return nil
+	}
+}
+
+// WithLayerDescriptors passes the slice of Descriptors for layers to the
+// provided func. If the passed parameter is nil, returns an error.
+func WithLayerDescriptors(save func([]ocispec.Descriptor)) CopyOpt {
+	return func(o *copyOpts) error {
+		if save == nil {
+			return errors.New("layers save func must be non-nil")
+		}
+		o.saveLayers = save
+		return nil
+	}
+}
+
+// WithRootManifest passes the root manifest for the artifacts to the provided
+// func. If the passed parameter is nil, returns an error.
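+//
+// Illustrative sketch (ctx, from, to, and ref are assumed to exist; they are
+// not prescribed here):
+//
+//	var manifest []byte
+//	_, err := Copy(ctx, from, ref, to, "",
+//		WithRootManifest(func(b []byte) { manifest = b }))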
+func WithRootManifest(save func(b []byte)) CopyOpt { + return func(o *copyOpts) error { + if save == nil { + return errors.New("manifest save func must be non-nil") + } + o.saveManifest = save + return nil + } +} diff --git a/vendor/oras.land/oras-go/pkg/oras/provider.go b/vendor/oras.land/oras-go/pkg/oras/provider.go new file mode 100644 index 000000000..97736ad3c --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/oras/provider.go @@ -0,0 +1,79 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package oras + +import ( + "context" + "errors" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ProviderWrapper wraps a remote.Fetcher to make a content.Provider, which is useful for things +type ProviderWrapper struct { + Fetcher remotes.Fetcher +} + +func (p *ProviderWrapper) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + if p.Fetcher == nil { + return nil, errors.New("no Fetcher provided") + } + return &fetcherReaderAt{ + ctx: ctx, + fetcher: p.Fetcher, + desc: desc, + offset: 0, + }, nil +} + +type fetcherReaderAt struct { + ctx context.Context + fetcher remotes.Fetcher + desc ocispec.Descriptor + rc io.ReadCloser + offset int64 +} + +func (f *fetcherReaderAt) Close() error { + if f.rc == nil { + return nil + } + return f.rc.Close() +} + +func (f *fetcherReaderAt) Size() int64 { + return f.desc.Size +} + +func (f *fetcherReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + // if we do not have a readcloser, get it + if f.rc == nil || f.offset != off { + rc, err := f.fetcher.Fetch(f.ctx, f.desc) + if err != nil { + return 0, err + } + f.rc = rc + } + + n, err = io.ReadFull(f.rc, p) + if err != nil { + return n, err + } + f.offset += int64(n) + return n, err +} diff --git a/vendor/oras.land/oras-go/pkg/oras/store.go b/vendor/oras.land/oras-go/pkg/oras/store.go new file mode 100644 index 000000000..edb835625 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/oras/store.go @@ -0,0 +1,213 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package oras + +import ( + "context" + "io" + "io/ioutil" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/remotes" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/errgroup" + + orascontent "oras.land/oras-go/pkg/content" +) + +type hybridStore struct { + cache *orascontent.Memory + cachedMediaTypes []string + cacheOnly bool + provider content.Provider + ingester content.Ingester +} + +func newHybridStoreFromPusher(pusher remotes.Pusher, cachedMediaTypes []string, cacheOnly bool) *hybridStore { + // construct an ingester from a pusher + ingester := pusherIngester{ + pusher: pusher, + } + return &hybridStore{ + cache: orascontent.NewMemory(), + cachedMediaTypes: cachedMediaTypes, + ingester: ingester, + cacheOnly: cacheOnly, + } +} + +func (s *hybridStore) Set(desc ocispec.Descriptor, content []byte) { + s.cache.Set(desc, content) +} + +func (s *hybridStore) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + reader, err := s.cache.Fetch(ctx, desc) + if err == nil { + return reader, err + } + if s.provider != nil { + rat, err := s.provider.ReaderAt(ctx, desc) + return ioutil.NopCloser(orascontent.NewReaderAtWrapper(rat)), err + } + return nil, err +} + +func (s *hybridStore) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return s.Writer(ctx, content.WithDescriptor(desc)) +} + +// Writer begins or resumes the active writer identified by desc +func (s *hybridStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + + if isAllowedMediaType(wOpts.Desc.MediaType, s.cachedMediaTypes...) || s.ingester == nil { + pusher, err := s.cache.Pusher(ctx, "") + if err != nil { + return nil, err + } + cacheWriter, err := pusher.Push(ctx, wOpts.Desc) + if err != nil { + return nil, err + } + // if we cache it only, do not pass it through + if s.cacheOnly { + return cacheWriter, nil + } + ingesterWriter, err := s.ingester.Writer(ctx, opts...) + switch { + case err == nil: + return newTeeWriter(wOpts.Desc, cacheWriter, ingesterWriter), nil + case errdefs.IsAlreadyExists(err): + return cacheWriter, nil + } + return nil, err + } + return s.ingester.Writer(ctx, opts...) 
+} + +// teeWriter tees the content to one or more content.Writer +type teeWriter struct { + writers []content.Writer + digester digest.Digester + status content.Status +} + +func newTeeWriter(desc ocispec.Descriptor, writers ...content.Writer) *teeWriter { + now := time.Now() + return &teeWriter{ + writers: writers, + digester: digest.Canonical.Digester(), + status: content.Status{ + Total: desc.Size, + StartedAt: now, + UpdatedAt: now, + }, + } +} + +func (t *teeWriter) Close() error { + g := new(errgroup.Group) + for _, w := range t.writers { + w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + return w.Close() + }) + } + return g.Wait() +} + +func (t *teeWriter) Write(p []byte) (n int, err error) { + g := new(errgroup.Group) + for _, w := range t.writers { + w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + n, err := w.Write(p[:]) + if err != nil { + return err + } + if n != len(p) { + return io.ErrShortWrite + } + return nil + }) + } + err = g.Wait() + n = len(p) + if err != nil { + return n, err + } + _, _ = t.digester.Hash().Write(p[:n]) + t.status.Offset += int64(len(p)) + t.status.UpdatedAt = time.Now() + + return n, nil +} + +// Digest may return empty digest or panics until committed. +func (t *teeWriter) Digest() digest.Digest { + return t.digester.Digest() +} + +func (t *teeWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + g := new(errgroup.Group) + for _, w := range t.writers { + w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + return w.Commit(ctx, size, expected, opts...) + }) + } + return g.Wait() +} + +// Status returns the current state of write +func (t *teeWriter) Status() (content.Status, error) { + return t.status, nil +} + +// Truncate updates the size of the target blob +func (t *teeWriter) Truncate(size int64) error { + g := new(errgroup.Group) + for _, w := range t.writers { + w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + return w.Truncate(size) + }) + } + return g.Wait() +} + +// pusherIngester simple wrapper to get an ingester from a pusher +type pusherIngester struct { + pusher remotes.Pusher +} + +func (p pusherIngester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + return p.pusher.Push(ctx, wOpts.Desc) +} diff --git a/vendor/oras.land/oras-go/pkg/registry/reference.go b/vendor/oras.land/oras-go/pkg/registry/reference.go new file mode 100644 index 000000000..299651cd1 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/registry/reference.go @@ -0,0 +1,177 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+package registry
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+	errdef "oras.land/oras-go/pkg/content"
+)
+
+// regular expressions for components.
+var (
+	// repositoryRegexp is adapted from the distribution implementation.
+	// The repository name set under OCI distribution spec is a subset of the
+	// docker spec. For maximum compatibility, the docker spec is verified at
+	// the client side. Further check is left to the server side.
+	// References:
+	// - https://github.com/distribution/distribution/blob/v2.7.1/reference/regexp.go#L53
+	// - https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests
+	repositoryRegexp = regexp.MustCompile(`^[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*(?:/[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*)*$`)
+
+	// tagRegexp checks the tag name.
+	// The docker and OCI spec have the same regular expression.
+	// Reference: https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests
+	tagRegexp = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)
+)
+
+// Reference references a descriptor in the registry.
+type Reference struct {
+	// Registry is the name of the registry.
+	// It is usually the domain name of the registry optionally with a port.
+	Registry string
+
+	// Repository is the name of the repository.
+	Repository string
+
+	// Reference is the reference of the object in the repository.
+	// A reference can be a tag or a digest.
+	Reference string
+}
+
+// ParseReference parses a string into an artifact reference.
+// If the reference contains both the tag and the digest, the tag will be
+// dropped.
+// Digest is recognized only if the corresponding algorithm is available.
+func ParseReference(raw string) (Reference, error) {
+	parts := strings.SplitN(raw, "/", 2)
+	if len(parts) == 1 {
+		return Reference{}, fmt.Errorf("%w: missing repository", errdef.ErrInvalidReference)
+	}
+	registry, path := parts[0], parts[1]
+	var repository string
+	var reference string
+	if index := strings.Index(path, "@"); index != -1 {
+		// digest found
+		repository = path[:index]
+		reference = path[index+1:]
+
+		// drop tag since the digest is present.
+		if index := strings.Index(repository, ":"); index != -1 {
+			repository = repository[:index]
+		}
+	} else if index := strings.Index(path, ":"); index != -1 {
+		// tag found
+		repository = path[:index]
+		reference = path[index+1:]
+	} else {
+		// empty reference
+		repository = path
+	}
+	res := Reference{
+		Registry:   registry,
+		Repository: repository,
+		Reference:  reference,
+	}
+	if err := res.Validate(); err != nil {
+		return Reference{}, err
+	}
+	return res, nil
+}
+
+// Validate validates the entire reference.
+func (r Reference) Validate() error {
+	err := r.ValidateRegistry()
+	if err != nil {
+		return err
+	}
+	err = r.ValidateRepository()
+	if err != nil {
+		return err
+	}
+	return r.ValidateReference()
+}
+
+// ValidateRegistry validates the registry.
+func (r Reference) ValidateRegistry() error {
+	uri, err := url.ParseRequestURI("dummy://" + r.Registry)
+	if err != nil || uri.Host != r.Registry {
+		return fmt.Errorf("%w: invalid registry", errdef.ErrInvalidReference)
+	}
+	return nil
+}
+
+// ValidateRepository validates the repository.
+func (r Reference) ValidateRepository() error {
+	if !repositoryRegexp.MatchString(r.Repository) {
+		return fmt.Errorf("%w: invalid repository", errdef.ErrInvalidReference)
+	}
+	return nil
+}
+
+// ValidateReference validates the reference.
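+// For example (illustrative), "v1.0" is accepted as a tag, while "-v1" fails
+// the tag pattern and is rejected unless it parses as a digest:
+//
+//	Reference{Reference: "v1.0"}.ValidateReference() // nil
+//	Reference{Reference: "-v1"}.ValidateReference()  // error: invalid tag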
+func (r Reference) ValidateReference() error {
+	if r.Reference == "" {
+		return nil
+	}
+	if _, err := r.Digest(); err == nil {
+		return nil
+	}
+	if !tagRegexp.MatchString(r.Reference) {
+		return fmt.Errorf("%w: invalid tag", errdef.ErrInvalidReference)
+	}
+	return nil
+}
+
+// Host returns the host name of the registry.
+func (r Reference) Host() string {
+	if r.Registry == "docker.io" {
+		return "registry-1.docker.io"
+	}
+	return r.Registry
+}
+
+// ReferenceOrDefault returns the reference or the default reference if empty.
+func (r Reference) ReferenceOrDefault() string {
+	if r.Reference == "" {
+		return "latest"
+	}
+	return r.Reference
+}
+
+// Digest returns the reference as a digest.
+func (r Reference) Digest() (digest.Digest, error) {
+	return digest.Parse(r.Reference)
+}
+
+// String implements `fmt.Stringer` and returns the reference string.
+// The resulting string is meaningful only if the reference is valid.
+func (r Reference) String() string {
+	if r.Repository == "" {
+		return r.Registry
+	}
+	ref := r.Registry + "/" + r.Repository
+	if r.Reference == "" {
+		return ref
+	}
+	if d, err := r.Digest(); err == nil {
+		return ref + "@" + d.String()
+	}
+	return ref + ":" + r.Reference
+}
diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/auth/cache.go b/vendor/oras.land/oras-go/pkg/registry/remote/auth/cache.go
new file mode 100644
index 000000000..070588373
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/registry/remote/auth/cache.go
@@ -0,0 +1,158 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package auth
+
+import (
+	"context"
+	"strings"
+	"sync"
+
+	errdef "oras.land/oras-go/pkg/content"
+	"oras.land/oras-go/pkg/registry/remote/internal/syncutil"
+)
+
+// DefaultCache is the sharable cache used by DefaultClient.
+var DefaultCache Cache = NewCache()
+
+// Cache caches the auth-scheme and auth-token for the "Authorization" header in
+// accessing the remote registry.
+// Precisely, the header is `Authorization: auth-scheme auth-token`.
+// The `auth-token` is a generic term as `token68` in RFC 7235 section 2.1.
+type Cache interface {
+	// GetScheme returns the auth-scheme part cached for the given registry.
+	// A single registry is assumed to have a consistent scheme.
+	// If a registry has different schemes per path, the auth client is still
+	// workable. However, the cache may not be effective as the cache cannot
+	// correctly guess the scheme.
+	GetScheme(ctx context.Context, registry string) (Scheme, error)
+
+	// GetToken returns the auth-token part cached for the given registry of a
+	// given scheme.
+	// The underlying implementation MAY cache the token for all schemes for the
+	// given registry.
+	GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error)
+
+	// Set fetches the token using the given fetch function and caches the token
+	// for the given scheme with the given key for the given registry.
+	// The return values of the fetch function are returned by this function.
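+	// A minimal illustrative sketch (the fetchToken closure is an assumption,
+	// not a prescribed API):
+	//
+	//	token, err := cache.Set(ctx, "registry.example.com", SchemeBearer, scope,
+	//		func(ctx context.Context) (string, error) { return fetchToken(ctx) })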
+ // The underlying implementation MAY combine the fetch operation if the Set + // function is invoked multiple times at the same time. + Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) +} + +// cacheEntry is a cache entry for a single registry. +type cacheEntry struct { + scheme Scheme + tokens sync.Map // map[string]string +} + +// concurrentCache is a cache suitable for concurrent invocation. +type concurrentCache struct { + status sync.Map // map[string]*syncutil.Once + cache sync.Map // map[string]*cacheEntry +} + +// NewCache creates a new go-routine safe cache instance. +func NewCache() Cache { + return &concurrentCache{} +} + +// GetScheme returns the auth-scheme part cached for the given registry. +func (cc *concurrentCache) GetScheme(ctx context.Context, registry string) (Scheme, error) { + entry, ok := cc.cache.Load(registry) + if !ok { + return SchemeUnknown, errdef.ErrNotFound + } + return entry.(*cacheEntry).scheme, nil +} + +// GetToken returns the auth-token part cached for the given registry of a given +// scheme. +func (cc *concurrentCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) { + entryValue, ok := cc.cache.Load(registry) + if !ok { + return "", errdef.ErrNotFound + } + entry := entryValue.(*cacheEntry) + if entry.scheme != scheme { + return "", errdef.ErrNotFound + } + if token, ok := entry.tokens.Load(key); ok { + return token.(string), nil + } + return "", errdef.ErrNotFound +} + +// Set fetches the token using the given fetch function and caches the token +// for the given scheme with the given key for the given registry. +// Set combines the fetch operation if the Set is invoked multiple times at the +// same time. +func (cc *concurrentCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) { + // fetch token + statusKey := strings.Join([]string{ + registry, + scheme.String(), + key, + }, " ") + statusValue, _ := cc.status.LoadOrStore(statusKey, syncutil.NewOnce()) + fetchOnce := statusValue.(*syncutil.Once) + fetchedFirst, result, err := fetchOnce.Do(ctx, func() (interface{}, error) { + return fetch(ctx) + }) + if fetchedFirst { + cc.status.Delete(statusKey) + } + if err != nil { + return "", err + } + token := result.(string) + if !fetchedFirst { + return token, nil + } + + // cache token + newEntry := &cacheEntry{ + scheme: scheme, + } + entryValue, exists := cc.cache.LoadOrStore(registry, newEntry) + entry := entryValue.(*cacheEntry) + if exists && entry.scheme != scheme { + // there is a scheme change, which is not expected in most scenarios. + // force invalidating all previous cache. + entry = newEntry + cc.cache.Store(registry, entry) + } + entry.tokens.Store(key, token) + + return token, nil +} + +// noCache is a cache implementation that does not do cache at all. +type noCache struct{} + +// GetScheme always returns not found error as it has no cache. +func (noCache) GetScheme(ctx context.Context, registry string) (Scheme, error) { + return SchemeUnknown, errdef.ErrNotFound +} + +// GetToken always returns not found error as it has no cache. +func (noCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) { + return "", errdef.ErrNotFound +} + +// Set calls fetch directly without caching. 
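+// Every call therefore performs a fresh fetch; nothing is stored between calls.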
+func (noCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) {
+	return fetch(ctx)
+}
diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/auth/challenge.go b/vendor/oras.land/oras-go/pkg/registry/remote/auth/challenge.go
new file mode 100644
index 000000000..e2d28a11e
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/registry/remote/auth/challenge.go
@@ -0,0 +1,166 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package auth
+
+import (
+	"strconv"
+	"strings"
+)
+
+// Scheme defines the authentication method.
+type Scheme byte
+
+const (
+	// SchemeUnknown represents unknown or unsupported schemes
+	SchemeUnknown Scheme = iota
+
+	// SchemeBasic represents the "Basic" HTTP authentication scheme.
+	// Reference: https://tools.ietf.org/html/rfc7617
+	SchemeBasic
+
+	// SchemeBearer represents the Bearer token in OAuth 2.0.
+	// Reference: https://tools.ietf.org/html/rfc6750
+	SchemeBearer
+)
+
+// parseScheme parses the authentication scheme from the given string
+// case-insensitively.
+func parseScheme(scheme string) Scheme {
+	switch {
+	case strings.EqualFold(scheme, "basic"):
+		return SchemeBasic
+	case strings.EqualFold(scheme, "bearer"):
+		return SchemeBearer
+	}
+	return SchemeUnknown
+}
+
+// String returns the string for the scheme.
+func (s Scheme) String() string {
+	switch s {
+	case SchemeBasic:
+		return "Basic"
+	case SchemeBearer:
+		return "Bearer"
+	}
+	return "Unknown"
+}
+
+// parseChallenge parses the "WWW-Authenticate" header returned by the remote
+// registry, and extracts parameters if scheme is Bearer.
+// References:
+// - https://docs.docker.com/registry/spec/auth/token/#how-to-authenticate
+// - https://tools.ietf.org/html/rfc7235#section-2.1
+func parseChallenge(header string) (scheme Scheme, params map[string]string) {
+	// as defined in RFC 7235 section 2.1, we have
+	//     challenge   = auth-scheme [ 1*SP ( token68 / #auth-param ) ]
+	//     auth-scheme = token
+	//     auth-param  = token BWS "=" BWS ( token / quoted-string )
+	//
+	// since we focus parameters only on Bearer, we have
+	//     challenge   = auth-scheme [ 1*SP #auth-param ]
+	schemeString, rest := parseToken(header)
+	scheme = parseScheme(schemeString)
+
+	// fast path for non-bearer challenge
+	if scheme != SchemeBearer {
+		return
+	}
+
+	// parse params for bearer auth.
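+	// For example (illustrative), the header
+	//   Bearer realm="https://auth.example.io/token",service="registry.example.io"
+	// yields SchemeBearer with params
+	//   {"realm": "https://auth.example.io/token", "service": "registry.example.io"}.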
+ // combining RFC 7235 section 2.1 with RFC 7230 section 7, we have + // #auth-param => auth-param *( OWS "," OWS auth-param ) + var key, value string + for { + key, rest = parseToken(skipSpace(rest)) + if key == "" { + return + } + + rest = skipSpace(rest) + if rest == "" || rest[0] != '=' { + return + } + rest = skipSpace(rest[1:]) + if rest == "" { + return + } + + if rest[0] == '"' { + prefix, err := strconv.QuotedPrefix(rest) + if err != nil { + return + } + value, err = strconv.Unquote(prefix) + if err != nil { + return + } + rest = rest[len(prefix):] + } else { + value, rest = parseToken(rest) + if value == "" { + return + } + } + if params == nil { + params = map[string]string{ + key: value, + } + } else { + params[key] = value + } + + rest = skipSpace(rest) + if rest == "" || rest[0] != ',' { + return + } + rest = rest[1:] + } +} + +// isNotTokenChar reports whether rune is not a `tchar` defined in RFC 7230 +// section 3.2.6. +func isNotTokenChar(r rune) bool { + // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" + // / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" + // / DIGIT / ALPHA + // ; any VCHAR, except delimiters + return (r < 'A' || r > 'Z') && (r < 'a' || r > 'z') && + (r < '0' || r > '9') && !strings.ContainsRune("!#$%&'*+-.^_`|~", r) +} + +// parseToken finds the next token from the given string. If no token found, +// an empty token is returned and the whole of the input is returned in rest. +// Note: Since token = 1*tchar, empty string is not a valid token. +func parseToken(s string) (token, rest string) { + if i := strings.IndexFunc(s, isNotTokenChar); i != -1 { + return s[:i], s[i:] + } + return s, "" +} + +// skipSpace skips "bad" whitespace (BWS) defined in RFC 7230 section 3.2.3. +func skipSpace(s string) string { + // OWS = *( SP / HTAB ) + // ; optional whitespace + // BWS = OWS + // ; "bad" whitespace + if i := strings.IndexFunc(s, func(r rune) bool { + return r != ' ' && r != '\t' + }); i != -1 { + return s[i:] + } + return s +} diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/auth/client.go b/vendor/oras.land/oras-go/pkg/registry/remote/auth/client.go new file mode 100644 index 000000000..ef1c9479d --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/registry/remote/auth/client.go @@ -0,0 +1,367 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package auth + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "oras.land/oras-go/pkg/registry/remote/internal/errutil" +) + +// DefaultClient is the default auth-decorated client. +var DefaultClient = &Client{ + Header: http.Header{ + "User-Agent": {"oras-go"}, + }, + Cache: DefaultCache, +} + +// maxResponseBytes specifies the default limit on how many response bytes are +// allowed in the server's response from authorization service servers. +// A typical response message from authorization service servers is around 1 to +// 4 KiB. 
Since the size of a token must be smaller than the HTTP header size
+// limit, which is usually 16 KiB, and the response may contain 2 identical
+// tokens as specified by the distribution spec (that is, 16 x 2 = 32 KiB),
+// 128 KiB should be sufficient.
+// References: https://docs.docker.com/registry/spec/auth/token/
+var maxResponseBytes int64 = 128 * 1024 // 128 KiB
+
+// defaultClientID specifies the default client ID used in OAuth2.
+// See also ClientID.
+var defaultClientID = "oras-go"
+
+// Client is an auth-decorated HTTP client.
+// Its zero value is a usable client that uses http.DefaultClient with no cache.
+type Client struct {
+	// Client is the underlying HTTP client used to access the remote
+	// server.
+	// If nil, http.DefaultClient is used.
+	Client *http.Client
+
+	// Header contains the custom headers to be added to each request.
+	Header http.Header
+
+	// Credential specifies the function for resolving the credential for the
+	// given registry (i.e. host:port).
+	// `EmptyCredential` is a valid return value and should not be considered
+	// an error.
+	// If nil, the credential is always resolved to `EmptyCredential`.
+	Credential func(context.Context, string) (Credential, error)
+
+	// Cache caches credentials for directly accessing the remote registry.
+	// If nil, no cache is used.
+	Cache Cache
+
+	// ClientID is used in fetching the OAuth2 token as a required field.
+	// If empty, a default client ID is used.
+	// Reference: https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token
+	ClientID string
+
+	// ForceAttemptOAuth2 controls whether to follow OAuth2 with password grant
+	// instead of the distribution spec when authenticating using username and
+	// password.
+	// References:
+	// - https://docs.docker.com/registry/spec/auth/jwt/
+	// - https://docs.docker.com/registry/spec/auth/oauth/
+	ForceAttemptOAuth2 bool
+}
+
+// client returns an HTTP client used to access the remote registry.
+// http.DefaultClient is returned if the client is not configured.
+func (c *Client) client() *http.Client {
+	if c.Client == nil {
+		return http.DefaultClient
+	}
+	return c.Client
+}
+
+// send adds headers to the request and sends the request to the remote server.
+func (c *Client) send(req *http.Request) (*http.Response, error) {
+	for key, values := range c.Header {
+		req.Header[key] = append(req.Header[key], values...)
+	}
+	return c.client().Do(req)
+}
+
+// credential resolves the credential for the given registry.
+func (c *Client) credential(ctx context.Context, reg string) (Credential, error) {
+	if c.Credential == nil {
+		return EmptyCredential, nil
+	}
+	return c.Credential(ctx, reg)
+}
+
+// cache resolves the cache.
+// noCache is returned if the cache is not configured.
+func (c *Client) cache() Cache {
+	if c.Cache == nil {
+		return noCache{}
+	}
+	return c.Cache
+}
+
+// SetUserAgent sets the user agent for all outgoing requests.
+func (c *Client) SetUserAgent(userAgent string) {
+	if c.Header == nil {
+		c.Header = http.Header{}
+	}
+	c.Header.Set("User-Agent", userAgent)
+}
+
+// Do sends the request to the remote server and attempts to resolve
+// authentication.
+// On authentication failure due to bad credential,
+// - Do returns an error if it fails to fetch a token for bearer auth.
+// - Do returns the registry response without error for basic auth.
+func (c *Client) Do(originalReq *http.Request) (*http.Response, error) { + ctx := originalReq.Context() + req := originalReq.Clone(ctx) + + // attempt cached auth token + var attemptedKey string + cache := c.cache() + registry := originalReq.Host + scheme, err := cache.GetScheme(ctx, registry) + if err == nil { + switch scheme { + case SchemeBasic: + token, err := cache.GetToken(ctx, registry, SchemeBasic, "") + if err == nil { + req.Header.Set("Authorization", "Basic "+token) + } + case SchemeBearer: + scopes := GetScopes(ctx) + attemptedKey = strings.Join(scopes, " ") + token, err := cache.GetToken(ctx, registry, SchemeBearer, attemptedKey) + if err == nil { + req.Header.Set("Authorization", "Bearer "+token) + } + } + } + + resp, err := c.send(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusUnauthorized { + return resp, nil + } + + // attempt again with credentials for recognized schemes + challenge := resp.Header.Get("Www-Authenticate") + scheme, params := parseChallenge(challenge) + switch scheme { + case SchemeBasic: + resp.Body.Close() + + token, err := cache.Set(ctx, registry, SchemeBasic, "", func(ctx context.Context) (string, error) { + return c.fetchBasicAuth(ctx, registry) + }) + if err != nil { + return nil, fmt.Errorf("%s %q: %w", resp.Request.Method, resp.Request.URL, err) + } + + req = originalReq.Clone(ctx) + req.Header.Set("Authorization", "Basic "+token) + case SchemeBearer: + resp.Body.Close() + + // merge hinted scopes with challenged scopes + scopes := GetScopes(ctx) + if scope := params["scope"]; scope != "" { + scopes = append(scopes, strings.Split(scope, " ")...) + scopes = CleanScopes(scopes) + } + key := strings.Join(scopes, " ") + + // attempt the cache again if there is a scope change + if key != attemptedKey { + if token, err := cache.GetToken(ctx, registry, SchemeBearer, key); err == nil { + req = originalReq.Clone(ctx) + req.Header.Set("Authorization", "Bearer "+token) + + resp, err := c.send(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusUnauthorized { + return resp, nil + } + resp.Body.Close() + } + } + + // attempt with credentials + realm := params["realm"] + service := params["service"] + token, err := cache.Set(ctx, registry, SchemeBearer, key, func(ctx context.Context) (string, error) { + return c.fetchBearerToken(ctx, registry, realm, service, scopes) + }) + if err != nil { + return nil, fmt.Errorf("%s %q: %w", resp.Request.Method, resp.Request.URL, err) + } + + req = originalReq.Clone(ctx) + req.Header.Set("Authorization", "Bearer "+token) + default: + return resp, nil + } + + return c.send(req) +} + +// fetchBasicAuth fetches a basic auth token for the basic challenge. +func (c *Client) fetchBasicAuth(ctx context.Context, registry string) (string, error) { + cred, err := c.credential(ctx, registry) + if err != nil { + return "", fmt.Errorf("failed to resolve credential: %w", err) + } + if cred == EmptyCredential { + return "", errors.New("credential required for basic auth") + } + if cred.Username == "" || cred.Password == "" { + return "", errors.New("missing username or password for basic auth") + } + auth := cred.Username + ":" + cred.Password + return base64.StdEncoding.EncodeToString([]byte(auth)), nil +} + +// fetchBearerToken fetches an access token for the bearer challenge. 
+func (c *Client) fetchBearerToken(ctx context.Context, registry, realm, service string, scopes []string) (string, error) { + cred, err := c.credential(ctx, registry) + if err != nil { + return "", err + } + if cred.AccessToken != "" { + return cred.AccessToken, nil + } + if cred == EmptyCredential || (cred.RefreshToken == "" && !c.ForceAttemptOAuth2) { + return c.fetchDistributionToken(ctx, realm, service, scopes, cred.Username, cred.Password) + } + return c.fetchOAuth2Token(ctx, realm, service, scopes, cred) +} + +// fetchDistributionToken fetches an access token as defined by the distribution +// specification. +// It fetches anonymous tokens if no credential is provided. +// References: +// - https://docs.docker.com/registry/spec/auth/jwt/ +// - https://docs.docker.com/registry/spec/auth/token/ +func (c *Client) fetchDistributionToken(ctx context.Context, realm, service string, scopes []string, username, password string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil) + if err != nil { + return "", err + } + if username != "" || password != "" { + req.SetBasicAuth(username, password) + } + q := req.URL.Query() + if service != "" { + q.Set("service", service) + } + for _, scope := range scopes { + q.Add("scope", scope) + } + req.URL.RawQuery = q.Encode() + + resp, err := c.send(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", errutil.ParseErrorResponse(resp) + } + + // As specified in https://docs.docker.com/registry/spec/auth/token/ section + // "Token Response Fields", the token is either in `token` or + // `access_token`. If both present, they are identical. + var result struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + } + lr := io.LimitReader(resp.Body, maxResponseBytes) + if err := json.NewDecoder(lr).Decode(&result); err != nil { + return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) + } + if result.AccessToken != "" { + return result.AccessToken, nil + } + if result.Token != "" { + return result.Token, nil + } + return "", fmt.Errorf("%s %q: empty token returned", resp.Request.Method, resp.Request.URL) +} + +// fetchOAuth2Token fetches an OAuth2 access token. 
+// Reference: https://docs.docker.com/registry/spec/auth/oauth/
+func (c *Client) fetchOAuth2Token(ctx context.Context, realm, service string, scopes []string, cred Credential) (string, error) {
+	form := url.Values{}
+	if cred.RefreshToken != "" {
+		form.Set("grant_type", "refresh_token")
+		form.Set("refresh_token", cred.RefreshToken)
+	} else if cred.Username != "" && cred.Password != "" {
+		form.Set("grant_type", "password")
+		form.Set("username", cred.Username)
+		form.Set("password", cred.Password)
+	} else {
+		return "", errors.New("missing username or password for bearer auth")
+	}
+	form.Set("service", service)
+	clientID := c.ClientID
+	if clientID == "" {
+		clientID = defaultClientID
+	}
+	form.Set("client_id", clientID)
+	if len(scopes) != 0 {
+		form.Set("scope", strings.Join(scopes, " "))
+	}
+	body := strings.NewReader(form.Encode())
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, body)
+	if err != nil {
+		return "", err
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	resp, err := c.send(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return "", errutil.ParseErrorResponse(resp)
+	}
+
+	var result struct {
+		AccessToken string `json:"access_token"`
+	}
+	lr := io.LimitReader(resp.Body, maxResponseBytes)
+	if err := json.NewDecoder(lr).Decode(&result); err != nil {
+		return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err)
+	}
+	if result.AccessToken != "" {
+		return result.AccessToken, nil
+	}
+	return "", fmt.Errorf("%s %q: empty token returned", resp.Request.Method, resp.Request.URL)
+}
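[Editor's note: a minimal sketch, not part of the vendored code, of how this auth-decorated client is typically wired up. The registry host, user agent, and credentials are placeholders; `EmptyCredential` would instead enable anonymous pulls.]

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"oras.land/oras-go/pkg/registry/remote/auth"
)

func main() {
	client := &auth.Client{
		Credential: func(ctx context.Context, registry string) (auth.Credential, error) {
			// Placeholder credentials; a real resolver would look them up
			// per registry host.
			return auth.Credential{Username: "user", Password: "secret"}, nil
		},
		Cache: auth.NewCache(), // reuse fetched tokens across requests
	}
	client.SetUserAgent("example-agent")

	req, err := http.NewRequest(http.MethodGet, "https://registry.example.com/v2/", nil)
	if err != nil {
		panic(err)
	}
	// Do handles the WWW-Authenticate challenge and retries with a token.
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```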
diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/auth/credential.go b/vendor/oras.land/oras-go/pkg/registry/remote/auth/credential.go
new file mode 100644
index 000000000..2cd9f774b
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/registry/remote/auth/credential.go
@@ -0,0 +1,39 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package auth
+
+// EmptyCredential represents an empty credential.
+var EmptyCredential Credential
+
+// Credential contains authentication credentials used to access remote
+// registries.
+type Credential struct {
+	// Username is the name of the user for the remote registry.
+	Username string
+
+	// Password is the secret associated with the username.
+	Password string
+
+	// RefreshToken is a bearer token to be sent to the authorization service
+	// for fetching access tokens.
+	// A refresh token is often referred to as an identity token.
+	// Reference: https://docs.docker.com/registry/spec/auth/oauth/
+	RefreshToken string
+
+	// AccessToken is a bearer token to be sent to the registry.
+	// An access token is often referred to as a registry token.
+	// Reference: https://docs.docker.com/registry/spec/auth/token/
+	AccessToken string
+}
diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/auth/scope.go b/vendor/oras.land/oras-go/pkg/registry/remote/auth/scope.go
new file mode 100644
index 000000000..3b865dd60
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/registry/remote/auth/scope.go
@@ -0,0 +1,231 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package auth
+
+import (
+	"context"
+	"sort"
+	"strings"
+)
+
+// Actions used in scopes.
+// Reference: https://docs.docker.com/registry/spec/auth/scope/
+const (
+	// ActionPull represents generic read access for resources of the repository
+	// type.
+	ActionPull = "pull"
+
+	// ActionPush represents generic write access for resources of the
+	// repository type.
+	ActionPush = "push"
+
+	// ActionDelete represents the delete permission for resources of the
+	// repository type.
+	ActionDelete = "delete"
+)
+
+// ScopeRegistryCatalog is the scope for registry catalog access.
+const ScopeRegistryCatalog = "registry:catalog:*"
+
+// ScopeRepository returns a repository scope with given actions.
+// Reference: https://docs.docker.com/registry/spec/auth/scope/
+func ScopeRepository(repository string, actions ...string) string {
+	actions = cleanActions(actions)
+	if repository == "" || len(actions) == 0 {
+		return ""
+	}
+	return strings.Join([]string{
+		"repository",
+		repository,
+		strings.Join(actions, ","),
+	}, ":")
+}
+
+// scopesContextKey is the context key for scopes.
+type scopesContextKey struct{}
+
+// WithScopes returns a context with scopes added. Scopes are de-duplicated.
+// Scopes are used as hints for the auth client to fetch bearer tokens with
+// larger scopes.
+// For example, uploading a blob to the repository "hello-world" issues a HEAD
+// request first, then POST and PUT. The HEAD request will return a challenge
+// for scope `repository:hello-world:pull`, and the auth client will fetch a
+// token for that challenge. Later, the POST request will return a challenge
+// for scope `repository:hello-world:push`, and the auth client will fetch a
+// token for that challenge again. By invoking `WithScopes()` with the scope
+// `repository:hello-world:pull,push`, the auth client with cache is hinted to
+// fetch a token via a single token fetch request for all the HEAD, POST, PUT
+// requests.
+// Passing an empty list of scopes will virtually remove the scope hints in the
+// context.
+// Reference: https://docs.docker.com/registry/spec/auth/scope/
+func WithScopes(ctx context.Context, scopes ...string) context.Context {
+	scopes = CleanScopes(scopes)
+	return context.WithValue(ctx, scopesContextKey{}, scopes)
+}
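[Editor's note: a minimal sketch, not part of the vendored file, of the scope-hinting pattern described in the `WithScopes` doc comment. The repository name is a placeholder.]

```go
package main

import (
	"context"
	"fmt"

	"oras.land/oras-go/pkg/registry/remote/auth"
)

func main() {
	// Hint both actions up front so one token fetch covers the whole
	// HEAD/POST/PUT upload flow.
	ctx := auth.WithScopes(context.Background(),
		auth.ScopeRepository("hello-world", auth.ActionPull, auth.ActionPush))
	fmt.Println(auth.GetScopes(ctx))
	// Output: [repository:hello-world:pull,push]
}
```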
+
+// AppendScopes appends additional scopes to the existing scopes in the context
+// and returns a new context. The resulting scopes are de-duplicated.
+// The append operation does not modify the existing scopes in the context
+// passed in.
+func AppendScopes(ctx context.Context, scopes ...string) context.Context {
+	if len(scopes) == 0 {
+		return ctx
+	}
+	return WithScopes(ctx, append(GetScopes(ctx), scopes...)...)
+}
+
+// GetScopes returns the scopes in the context.
+func GetScopes(ctx context.Context) []string {
+	if scopes, ok := ctx.Value(scopesContextKey{}).([]string); ok {
+		return append([]string(nil), scopes...)
+	}
+	return nil
+}
+
+// CleanScopes merges and sorts the actions in ascending order if the scopes
+// have the same resource type and name. The final scopes are sorted in
+// ascending order. In other words, the scopes passed in are de-duplicated and
+// sorted. Therefore, the output of this function is deterministic.
+// If there is a wildcard `*` in the actions, other actions in the same
+// resource type and name are ignored.
+func CleanScopes(scopes []string) []string {
+	// fast paths
+	switch len(scopes) {
+	case 0:
+		return nil
+	case 1:
+		scope := scopes[0]
+		i := strings.LastIndex(scope, ":")
+		if i == -1 {
+			return []string{scope}
+		}
+		actionList := strings.Split(scope[i+1:], ",")
+		actionList = cleanActions(actionList)
+		if len(actionList) == 0 {
+			return nil
+		}
+		actions := strings.Join(actionList, ",")
+		scope = scope[:i+1] + actions
+		return []string{scope}
+	}
+
+	// slow path
+	var result []string
+
+	// merge recognizable scopes
+	resourceTypes := make(map[string]map[string]map[string]struct{})
+	for _, scope := range scopes {
+		// extract resource type
+		i := strings.Index(scope, ":")
+		if i == -1 {
+			result = append(result, scope)
+			continue
+		}
+		resourceType := scope[:i]
+
+		// extract resource name and actions
+		rest := scope[i+1:]
+		i = strings.LastIndex(rest, ":")
+		if i == -1 {
+			result = append(result, scope)
+			continue
+		}
+		resourceName := rest[:i]
+		actions := rest[i+1:]
+		if actions == "" {
+			// drop scope since no action found
+			continue
+		}
+
+		// add to the intermediate map for de-duplication
+		namedActions := resourceTypes[resourceType]
+		if namedActions == nil {
+			namedActions = make(map[string]map[string]struct{})
+			resourceTypes[resourceType] = namedActions
+		}
+		actionSet := namedActions[resourceName]
+		if actionSet == nil {
+			actionSet = make(map[string]struct{})
+			namedActions[resourceName] = actionSet
+		}
+		for _, action := range strings.Split(actions, ",") {
+			if action != "" {
+				actionSet[action] = struct{}{}
+			}
+		}
+	}
+
+	// reconstruct scopes
+	for resourceType, namedActions := range resourceTypes {
+		for resourceName, actionSet := range namedActions {
+			if len(actionSet) == 0 {
+				continue
+			}
+			var actions []string
+			for action := range actionSet {
+				if action == "*" {
+					actions = []string{"*"}
+					break
+				}
+				actions = append(actions, action)
+			}
+			sort.Strings(actions)
+			scope := resourceType + ":" + resourceName + ":" + strings.Join(actions, ",")
+			result = append(result, scope)
+		}
+	}
+
+	// sort and return
+	sort.Strings(result)
+	return result
+}
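[Editor's note: a short illustration, not part of the vendored file, of the deterministic merging that `CleanScopes` performs; the repository names are made up.]

```go
package main

import (
	"fmt"

	"oras.land/oras-go/pkg/registry/remote/auth"
)

func main() {
	// Overlapping scopes for the same repository collapse into one entry
	// with sorted actions, and the final list itself is sorted.
	fmt.Println(auth.CleanScopes([]string{
		"repository:hello-world:push",
		"repository:hello-world:pull",
		"repository:alpine:delete,pull",
	}))
	// Output: [repository:alpine:delete,pull repository:hello-world:pull,push]
}
```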
+
+// cleanActions removes the duplicated actions and sorts them in ascending
+// order. If there is a wildcard `*` in the actions, other actions are ignored.
+func cleanActions(actions []string) []string {
+	// fast paths
+	switch len(actions) {
+	case 0:
+		return nil
+	case 1:
+		if actions[0] == "" {
+			return nil
+		}
+		return actions
+	}
+
+	// slow path
+	sort.Strings(actions)
+	n := 0
+	for i := 0; i < len(actions); i++ {
+		if actions[i] == "*" {
+			return []string{"*"}
+		}
+		if actions[i] != actions[n] {
+			n++
+			if n != i {
+				actions[n] = actions[i]
+			}
+		}
+	}
+	n++
+	if actions[0] == "" {
+		if n == 1 {
+			return nil
+		}
+		return actions[1:n]
+	}
+	return actions[:n]
+}
diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go b/vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go
new file mode 100644
index 000000000..201ecce89
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go
@@ -0,0 +1,83 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package errutil
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"unicode"
+)
+
+// maxErrorBytes specifies the default limit on how many response bytes are
+// allowed in the server's error response.
+// A typical error message is around 200 bytes. Hence, 8 KiB should be
+// sufficient.
+var maxErrorBytes int64 = 8 * 1024 // 8 KiB
+
+// requestError contains a single error.
+type requestError struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+// Error returns an error string describing the error.
+func (e requestError) Error() string {
+	code := strings.Map(func(r rune) rune {
+		if r == '_' {
+			return ' '
+		}
+		return unicode.ToLower(r)
+	}, e.Code)
+	if e.Message == "" {
+		return code
+	}
+	return fmt.Sprintf("%s: %s", code, e.Message)
+}
+
+// requestErrors is a bundle of requestError.
+type requestErrors []requestError
+
+// Error returns an error string describing the error.
+func (errs requestErrors) Error() string {
+	switch len(errs) {
+	case 0:
+		return ""
+	case 1:
+		return errs[0].Error()
+	}
+	var errmsgs []string
+	for _, err := range errs {
+		errmsgs = append(errmsgs, err.Error())
+	}
+	return strings.Join(errmsgs, "; ")
+}
+
+// ParseErrorResponse parses the error returned by the remote registry.
+func ParseErrorResponse(resp *http.Response) error {
+	var errmsg string
+	var body struct {
+		Errors requestErrors `json:"errors"`
+	}
+	lr := io.LimitReader(resp.Body, maxErrorBytes)
+	if err := json.NewDecoder(lr).Decode(&body); err == nil && len(body.Errors) > 0 {
+		errmsg = body.Errors.Error()
+	} else {
+		errmsg = http.StatusText(resp.StatusCode)
+	}
+	return fmt.Errorf("%s %q: unexpected status code %d: %s", resp.Request.Method, resp.Request.URL, resp.StatusCode, errmsg)
+}
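[Editor's note: a minimal sketch, not part of the vendored file, of what `ParseErrorResponse` produces for a distribution-spec error payload. As an example file it would have to live inside the `errutil` package (the package is `internal`); the URL and payload are made up.]

```go
package errutil

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func ExampleParseErrorResponse() {
	// A typical distribution-spec error body with a single error entry.
	body := `{"errors":[{"code":"NAME_UNKNOWN","message":"repository name not known to registry"}]}`
	req, _ := http.NewRequest(http.MethodGet, "https://registry.example.com/v2/missing/tags/list", nil)
	resp := &http.Response{
		Request:    req,
		StatusCode: http.StatusNotFound,
		Body:       io.NopCloser(strings.NewReader(body)),
	}
	// The code is lower-cased and underscores become spaces.
	fmt.Println(ParseErrorResponse(resp))
	// Output: GET "https://registry.example.com/v2/missing/tags/list": unexpected status code 404: name unknown: repository name not known to registry
}
```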
diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go b/vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go
new file mode 100644
index 000000000..a685360b6
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go
@@ -0,0 +1,69 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package syncutil
+
+import "context"
+
+// Once is an object that will perform exactly one action.
+// Unlike sync.Once, this Once allows the action to have return values.
+type Once struct {
+	result interface{}
+	err    error
+	status chan bool
+}
+
+// NewOnce creates a new Once instance.
+func NewOnce() *Once {
+	status := make(chan bool, 1)
+	status <- true
+	return &Once{
+		status: status,
+	}
+}
+
+// Do calls the function f if and only if Do is being called for the first
+// time, or if all previous function calls were cancelled, exceeded their
+// deadlines, or panicked.
+// When `once.Do(ctx, f)` is called multiple times, the return value of the
+// first call of the function f is stored, and is directly returned for other
+// calls.
+// Besides the return value of the function f, including the error, Do returns
+// true if the function f passed in is called first and was not cancelled, did
+// not exceed its deadline, and did not panic. Otherwise, Do returns false.
+func (o *Once) Do(ctx context.Context, f func() (interface{}, error)) (bool, interface{}, error) {
+	defer func() {
+		if r := recover(); r != nil {
+			o.status <- true
+			panic(r)
+		}
+	}()
+	for {
+		select {
+		case inProgress := <-o.status:
+			if !inProgress {
+				return false, o.result, o.err
+			}
+			result, err := f()
+			if err == context.Canceled || err == context.DeadlineExceeded {
+				o.status <- true
+				return false, nil, err
+			}
+			o.result, o.err = result, err
+			close(o.status)
+			return true, result, err
+		case <-ctx.Done():
+			return false, nil, ctx.Err()
+		}
+	}
+}
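[Editor's note: a minimal sketch, not part of the vendored file, of the semantics documented on `Do`. As an example it would have to live inside the `syncutil` package; the string results are placeholders.]

```go
package syncutil

import (
	"context"
	"fmt"
)

func ExampleOnce() {
	once := NewOnce()
	// The first call runs the function and reports fetchedFirst == true.
	first, v, _ := once.Do(context.Background(), func() (interface{}, error) {
		return "fetched", nil
	})
	// Later calls skip the function and return the stored result.
	second, v2, _ := once.Do(context.Background(), func() (interface{}, error) {
		return "never runs", nil
	})
	fmt.Println(first, v, second, v2)
	// Output: true fetched false fetched
}
```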
diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/repository.go b/vendor/oras.land/oras-go/pkg/registry/remote/repository.go
new file mode 100644
index 000000000..0fbd37b66
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/registry/remote/repository.go
@@ -0,0 +1,171 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package remote
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	errdef "oras.land/oras-go/pkg/content"
+	"oras.land/oras-go/pkg/registry"
+	"oras.land/oras-go/pkg/registry/remote/auth"
+	"oras.land/oras-go/pkg/registry/remote/internal/errutil"
+)
+
+// Client is an interface for an HTTP client.
+type Client interface {
+	// Do sends an HTTP request and returns an HTTP response.
+	//
+	// Unlike http.RoundTripper, Client can attempt to interpret the response
+	// and handle higher-level protocol details such as redirects and
+	// authentication.
+	//
+	// Like http.RoundTripper, Client should not modify the request, and must
+	// always close the request body.
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Repository is an HTTP client to a remote repository.
+type Repository struct {
+	// Client is the underlying HTTP client used to access the remote registry.
+	// If nil, auth.DefaultClient is used.
+	Client Client
+
+	// Reference references the remote repository.
+	Reference registry.Reference
+
+	// PlainHTTP signals the transport to access the remote repository via HTTP
+	// instead of HTTPS.
+	PlainHTTP bool
+
+	// ManifestMediaTypes is used in the `Accept` header for resolving manifests
+	// from references. It is also used in identifying manifests and blobs from
+	// descriptors.
+	// If the list is empty, default manifest media types are used.
+	ManifestMediaTypes []string
+
+	// TagListPageSize specifies the page size when invoking the tag list API.
+	// If zero, the page size is determined by the remote registry.
+	// Reference: https://docs.docker.com/registry/spec/api/#tags
+	TagListPageSize int
+
+	// ReferrerListPageSize specifies the page size when invoking the Referrers
+	// API.
+	// If zero, the page size is determined by the remote registry.
+	// Reference: https://github.com/oras-project/artifacts-spec/blob/main/manifest-referrers-api.md
+	ReferrerListPageSize int
+
+	// MaxMetadataBytes specifies a limit on how many response bytes are allowed
+	// in the server's response to the metadata APIs, such as catalog list, tag
+	// list, and referrers list.
+	// If zero, a default (currently 4 MiB) is used.
+	MaxMetadataBytes int64
+}
+
+// NewRepository creates a client to the remote repository identified by a
+// reference.
+// Example: localhost:5000/hello-world
+func NewRepository(reference string) (*Repository, error) {
+	ref, err := registry.ParseReference(reference)
+	if err != nil {
+		return nil, err
+	}
+	return &Repository{
+		Reference: ref,
+	}, nil
+}
+
+// client returns an HTTP client used to access the remote repository.
+// A default HTTP client is returned if the client is not configured.
+func (r *Repository) client() Client {
+	if r.Client == nil {
+		return auth.DefaultClient
+	}
+	return r.Client
+}
+
+// parseReference validates the reference.
+// Both simplified and fully qualified references are accepted as input.
+// A fully qualified reference is returned on success.
+func (r *Repository) parseReference(reference string) (registry.Reference, error) {
+	ref, err := registry.ParseReference(reference)
+	if err != nil {
+		ref = registry.Reference{
+			Registry:   r.Reference.Registry,
+			Repository: r.Reference.Repository,
+			Reference:  reference,
+		}
+		if err = ref.ValidateReference(); err != nil {
+			return registry.Reference{}, err
+		}
+		return ref, nil
+	}
+	if ref.Registry == r.Reference.Registry && ref.Repository == r.Reference.Repository {
+		return ref, nil
+	}
+	return registry.Reference{}, fmt.Errorf("%w %q: expect %q", errdef.ErrInvalidReference, ref, r.Reference)
+}
+
+// Tags lists the tags available in the repository.
+func (r *Repository) Tags(ctx context.Context, fn func(tags []string) error) error {
+	ctx = withScopeHint(ctx, r.Reference, auth.ActionPull)
+	url := buildRepositoryTagListURL(r.PlainHTTP, r.Reference)
+	var err error
+	for err == nil {
+		url, err = r.tags(ctx, fn, url)
+	}
+	if err != errNoLink {
+		return err
+	}
+	return nil
+}
+
+// tags returns a single page of tag list with the next link.
+func (r *Repository) tags(ctx context.Context, fn func(tags []string) error, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", err + } + if r.TagListPageSize > 0 { + q := req.URL.Query() + q.Set("n", strconv.Itoa(r.TagListPageSize)) + req.URL.RawQuery = q.Encode() + } + + resp, err := r.client().Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", errutil.ParseErrorResponse(resp) + } + var page struct { + Tags []string `json:"tags"` + } + lr := limitReader(resp.Body, r.MaxMetadataBytes) + if err := json.NewDecoder(lr).Decode(&page); err != nil { + return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) + } + if err := fn(page.Tags); err != nil { + return "", err + } + + return parseLink(resp) +} diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/url.go b/vendor/oras.land/oras-go/pkg/registry/remote/url.go new file mode 100644 index 000000000..f01752020 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/registry/remote/url.go @@ -0,0 +1,42 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package remote + +import ( + "fmt" + + "oras.land/oras-go/pkg/registry" +) + +// buildScheme returns HTTP scheme used to access the remote registry. +func buildScheme(plainHTTP bool) string { + if plainHTTP { + return "http" + } + return "https" +} + +// buildRepositoryBaseURL builds the base endpoint of the remote repository. +// Format: :///v2/ +func buildRepositoryBaseURL(plainHTTP bool, ref registry.Reference) string { + return fmt.Sprintf("%s://%s/v2/%s", buildScheme(plainHTTP), ref.Host(), ref.Repository) +} + +// buildRepositoryTagListURL builds the URL for accessing the tag list API. +// Format: :///v2//tags/list +// Reference: https://docs.docker.com/registry/spec/api/#tags +func buildRepositoryTagListURL(plainHTTP bool, ref registry.Reference) string { + return buildRepositoryBaseURL(plainHTTP, ref) + "/tags/list" +} diff --git a/vendor/oras.land/oras-go/pkg/registry/remote/utils.go b/vendor/oras.land/oras-go/pkg/registry/remote/utils.go new file mode 100644 index 000000000..eaa84f0f4 --- /dev/null +++ b/vendor/oras.land/oras-go/pkg/registry/remote/utils.go @@ -0,0 +1,72 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+package remote
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"oras.land/oras-go/pkg/registry"
+	"oras.land/oras-go/pkg/registry/remote/auth"
+)
+
+// defaultMaxMetadataBytes specifies the default limit on how many response
+// bytes are allowed in the server's response to the metadata APIs.
+// See also: Repository.MaxMetadataBytes
+var defaultMaxMetadataBytes int64 = 4 * 1024 * 1024 // 4 MiB
+
+// errNoLink is returned by parseLink() when no Link header is present.
+var errNoLink = errors.New("no Link header in response")
+
+// parseLink returns the URL of the response's "Link" header, if present.
+func parseLink(resp *http.Response) (string, error) {
+	link := resp.Header.Get("Link")
+	if link == "" {
+		return "", errNoLink
+	}
+	if link[0] != '<' {
+		return "", fmt.Errorf("invalid next link %q: missing '<'", link)
+	}
+	if i := strings.IndexByte(link, '>'); i == -1 {
+		return "", fmt.Errorf("invalid next link %q: missing '>'", link)
+	} else {
+		link = link[1:i]
+	}
+
+	linkURL, err := resp.Request.URL.Parse(link)
+	if err != nil {
+		return "", err
+	}
+	return linkURL.String(), nil
+}
+
+// limitReader returns a Reader that reads from r but stops with EOF after n
+// bytes. If n is zero, defaultMaxMetadataBytes is used.
+func limitReader(r io.Reader, n int64) io.Reader {
+	if n == 0 {
+		n = defaultMaxMetadataBytes
+	}
+	return io.LimitReader(r, n)
+}
+
+// withScopeHint adds a hinted scope to the context.
+func withScopeHint(ctx context.Context, ref registry.Reference, actions ...string) context.Context {
+	scope := auth.ScopeRepository(ref.Repository, actions...)
+	return auth.AppendScopes(ctx, scope)
+}
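[Editor's note: a minimal sketch, not part of the vendored code, of listing tags through the paginated API above, using the `registry.Tags` helper defined in the next file. The registry host and repository are placeholders.]

```go
package main

import (
	"context"
	"fmt"

	"oras.land/oras-go/pkg/registry"
	"oras.land/oras-go/pkg/registry/remote"
)

func main() {
	repo, err := remote.NewRepository("registry.example.com/hello-world")
	if err != nil {
		panic(err)
	}
	repo.TagListPageSize = 100 // optional page-size hint for the tag list API

	// registry.Tags drains every page by following the Link headers.
	tags, err := registry.Tags(context.Background(), repo)
	if err != nil {
		panic(err)
	}
	fmt.Println(tags)
}
```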
diff --git a/vendor/oras.land/oras-go/pkg/registry/repository.go b/vendor/oras.land/oras-go/pkg/registry/repository.go
new file mode 100644
index 000000000..bcffbe8d2
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/registry/repository.go
@@ -0,0 +1,57 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+	"context"
+)
+
+// Repository is an ORAS target and a union of the blob and the manifest CASs.
+// As specified by https://docs.docker.com/registry/spec/api/, it is natural to
+// assume that the content.Resolver interface only works for manifests. Tagging
+// a blob may result in an `ErrUnsupported` error. However, this interface
+// does not restrict tagging blobs.
+// Since a repository is a union of the blob and the manifest CASs, all
+// operations defined in the `BlobStore` are executed depending on the media
+// type of the given descriptor accordingly.
+// Furthermore, this interface also provides the ability to enforce the
+// separation of the blob and the manifest CASs.
+type Repository interface {
+
+	// Tags lists the tags available in the repository.
+	// Since the returned tag list may be paginated by the underlying
+	// implementation, a function should be passed in to process the paginated
+	// tag list.
+	// Note: When implemented by a remote registry, the tags API is called.
+	// However, not all registries support pagination or conform to the
+	// specification.
+	// References:
+	// - https://github.com/opencontainers/distribution-spec/blob/main/spec.md#content-discovery
+	// - https://docs.docker.com/registry/spec/api/#tags
+	// See also `Tags()` in this package.
+	Tags(ctx context.Context, fn func(tags []string) error) error
+}
+
+// Tags lists the tags available in the repository.
+func Tags(ctx context.Context, repo Repository) ([]string, error) {
+	var res []string
+	if err := repo.Tags(ctx, func(tags []string) error {
+		res = append(res, tags...)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
diff --git a/vendor/oras.land/oras-go/pkg/target/target.go b/vendor/oras.land/oras-go/pkg/target/target.go
new file mode 100644
index 000000000..4398f14eb
--- /dev/null
+++ b/vendor/oras.land/oras-go/pkg/target/target.go
@@ -0,0 +1,26 @@
+/*
+Copyright The ORAS Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package target
+
+import (
+	"github.com/containerd/containerd/remotes"
+)
+
+// Target represents a place to which one can send/push or retrieve/pull artifacts.
+// Anything that implements the Target interface can be used as a place to send or
+// retrieve artifacts.
+type Target interface {
+	remotes.Resolver
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/LICENSE b/vendor/sigs.k8s.io/kustomize/api/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/annotations/annotations.go b/vendor/sigs.k8s.io/kustomize/api/filters/annotations/annotations.go new file mode 100644 index 000000000..4998f5a3e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/annotations/annotations.go @@ -0,0 +1,52 @@ +// Copyright 2020 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+package annotations
+
+import (
+	"sigs.k8s.io/kustomize/api/filters/filtersutil"
+	"sigs.k8s.io/kustomize/api/filters/fsslice"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+type annoMap map[string]string
+
+type Filter struct {
+	// Annotations is the set of annotations to apply to the inputs
+	Annotations annoMap `yaml:"annotations,omitempty"`
+
+	// FsSlice contains the FieldSpecs to locate the annotation fields
+	FsSlice types.FsSlice
+
+	trackableSetter filtersutil.TrackableSetter
+}
+
+var _ kio.Filter = Filter{}
+var _ kio.TrackableFilter = &Filter{}
+
+// WithMutationTracker registers a callback which will be invoked each time a field is mutated
+func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) {
+	f.trackableSetter.WithMutationTracker(callback)
+}
+
+func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	keys := yaml.SortedMapKeys(f.Annotations)
+	_, err := kio.FilterAll(yaml.FilterFunc(
+		func(node *yaml.RNode) (*yaml.RNode, error) {
+			for _, k := range keys {
+				if err := node.PipeE(fsslice.Filter{
+					FsSlice: f.FsSlice,
+					SetValue: f.trackableSetter.SetEntry(
+						k, f.Annotations[k], yaml.NodeTagString),
+					CreateKind: yaml.MappingNode, // Annotations are MappingNodes.
+					CreateTag:  yaml.NodeTagMap,
+				}); err != nil {
+					return nil, err
+				}
+			}
+			return node, nil
+		})).Filter(nodes)
+	return nodes, err
+}
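[Editor's note: a minimal sketch, not part of the vendored file, of applying this annotations filter to a parsed resource. The FieldSpec targeting `metadata/annotations` and the annotation key/value are assumptions for illustration.]

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/api/filters/annotations"
	"sigs.k8s.io/kustomize/api/types"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	node := yaml.MustParse(`apiVersion: v1
kind: Service
metadata:
  name: example
`)
	filter := annotations.Filter{
		Annotations: map[string]string{"owner": "e2e-test"},
		FsSlice: types.FsSlice{
			{Path: "metadata/annotations", CreateIfNotPresent: true},
		},
	}
	// The filter mutates the node in place, creating the annotations map.
	if _, err := filter.Filter([]*yaml.RNode{node}); err != nil {
		panic(err)
	}
	fmt.Println(node.MustString()) // metadata now carries owner: e2e-test
}
```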
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/annotations/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/annotations/doc.go
new file mode 100644
index 000000000..b1f6a0b66
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/annotations/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package annotations contains a kio.Filter implementation of the kustomize
+// annotations transformer.
+package annotations
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/doc.go
new file mode 100644
index 000000000..6f643630a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package fieldspec contains a yaml.Filter to modify a resource
+// that matches the FieldSpec.
+package fieldspec
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/fieldspec.go b/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/fieldspec.go
new file mode 100644
index 000000000..8e4e78ca6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/fieldspec.go
@@ -0,0 +1,182 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package fieldspec
+
+import (
+	"fmt"
+	"strings"
+
+	"sigs.k8s.io/kustomize/api/filters/filtersutil"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/resid"
+	"sigs.k8s.io/kustomize/kyaml/utils"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+var _ yaml.Filter = Filter{}
+
+// Filter possibly mutates its object argument using a FieldSpec.
+// If the object matches the FieldSpec, and the node found
+// by following the fieldSpec's path is non-null, this filter calls
+// the setValue function on the node at the end of the path.
+// If any part of the path doesn't exist, the filter returns
+// without doing anything and without error, unless it was set
+// to create the path. If set to create, it creates a tree of maps
+// along the path, and the leaf node gets the setValue called on it.
+// Error on GVK mismatch, empty or poorly formed path.
+// Filter expects kustomize-style paths, not JSON paths.
+// Filter stores internal state and should not be reused.
+type Filter struct {
+	// FieldSpec contains the path to the value to set.
+	FieldSpec types.FieldSpec `yaml:"fieldSpec"`
+
+	// Set the field using this function
+	SetValue filtersutil.SetFn
+
+	// CreateKind defines the type of node to create if the field is not found
+	CreateKind yaml.Kind
+
+	CreateTag string
+
+	// path keeps internal state about the current path
+	path []string
+}
+
+func (fltr Filter) Filter(obj *yaml.RNode) (*yaml.RNode, error) {
+	// check if the FieldSpec applies to the object
+	if match := isMatchGVK(fltr.FieldSpec, obj); !match {
+		return obj, nil
+	}
+	fltr.path = utils.PathSplitter(fltr.FieldSpec.Path, "/")
+	if err := fltr.filter(obj); err != nil {
+		return nil, errors.WrapPrefixf(err,
+			"considering field '%s' of object %s", fltr.FieldSpec.Path, resid.FromRNode(obj))
+	}
+	return obj, nil
+}
+
+// Recursively called.
+func (fltr Filter) filter(obj *yaml.RNode) error {
+	if len(fltr.path) == 0 {
+		// found the field -- set its value
+		return fltr.SetValue(obj)
+	}
+	if obj.IsTaggedNull() || obj.IsNil() {
+		return nil
+	}
+	switch obj.YNode().Kind {
+	case yaml.SequenceNode:
+		return fltr.handleSequence(obj)
+	case yaml.MappingNode:
+		return fltr.handleMap(obj)
+	case yaml.AliasNode:
+		return fltr.filter(yaml.NewRNode(obj.YNode().Alias))
+	default:
+		return errors.Errorf("expected sequence or mapping node")
+	}
+}
+
+// handleMap calls filter on the map field matching the next path element
+func (fltr Filter) handleMap(obj *yaml.RNode) error {
+	fieldName, isSeq := isSequenceField(fltr.path[0])
+	if fieldName == "" {
+		return fmt.Errorf("cannot set or create an empty field name")
+	}
+	// lookup the field matching the next path element
+	var operation yaml.Filter
+	var kind yaml.Kind
+	tag := yaml.NodeTagEmpty
+	switch {
+	case !fltr.FieldSpec.CreateIfNotPresent || fltr.CreateKind == 0 || isSeq:
+		// don't create the field if we don't find it
+		operation = yaml.Lookup(fieldName)
+		if isSeq {
+			// The query path thinks this field should be a sequence;
+			// accept this hint for use later if the tag is NodeTagNull.
+			kind = yaml.SequenceNode
+		}
+	case len(fltr.path) <= 1:
+		// create the field if it is missing: use the provided node kind
+		operation = yaml.LookupCreate(fltr.CreateKind, fieldName)
+		kind = fltr.CreateKind
+		tag = fltr.CreateTag
+	default:
+		// create the field if it is missing: must be a mapping node
+		operation = yaml.LookupCreate(yaml.MappingNode, fieldName)
+		kind = yaml.MappingNode
+		tag = yaml.NodeTagMap
+	}
+
+	// locate (or maybe create) the field
+	field, err := obj.Pipe(operation)
+	if err != nil {
+		return errors.WrapPrefixf(err, "fieldName: %s", fieldName)
+	}
+	if field == nil {
+		// No error if field not found.
+		return nil
+	}
+
+	// if the value exists, but is null and kind is set,
+	// then change it to the creation type
+	// TODO: update yaml.LookupCreate to support this
+	if field.YNode().Tag == yaml.NodeTagNull && yaml.IsCreate(kind) {
+		field.YNode().Kind = kind
+		field.YNode().Tag = tag
+	}
+
+	// copy the current fltr and change the path on the copy
+	var next = fltr
+	// call filter for the next path element on the matching field
+	next.path = fltr.path[1:]
+	return next.filter(field)
+}
+
+// handleSequence calls filter on all sequence elements
+func (fltr Filter) handleSequence(obj *yaml.RNode) error {
+	if err := obj.VisitElements(func(node *yaml.RNode) error {
+		// set an accurate FieldPath for nested elements
+		node.AppendToFieldPath(obj.FieldPath()...)
+		// recurse on each element -- re-allocating a Filter is
+		// not strictly required, but is more consistent with handleMap
+		// and less likely to have side effects
+		// keep the entire path -- it does not contain parts for sequences
+		return fltr.filter(node)
+	}); err != nil {
+		return errors.WrapPrefixf(err,
+			"visit traversal on path: %v", fltr.path)
+	}
+	return nil
+}
+
+// isSequenceField returns true if the path element is for a sequence field.
+// It also returns the path element with the '[]' suffix trimmed.
+func isSequenceField(name string) (string, bool) {
+	shorter := strings.TrimSuffix(name, "[]")
+	return shorter, shorter != name
+}
+
+// isMatchGVK returns true if the fs.GVK matches the obj GVK.
+func isMatchGVK(fs types.FieldSpec, obj *yaml.RNode) bool {
+	if kind := obj.GetKind(); fs.Kind != "" && fs.Kind != kind {
+		// kind doesn't match
+		return false
+	}
+
+	// parse the group and version from the apiVersion field
+	group, version := resid.ParseGroupVersion(obj.GetApiVersion())
+
+	if fs.Group != "" && fs.Group != group {
+		// group doesn't match
+		return false
+	}
+
+	if fs.Version != "" && fs.Version != version {
+		// version doesn't match
+		return false
+	}
+
+	return true
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/filtersutil/setters.go b/vendor/sigs.k8s.io/kustomize/api/filters/filtersutil/setters.go
new file mode 100644
index 000000000..f77666785
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/filtersutil/setters.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package filtersutil
+
+import (
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// SetFn is a function that accepts an RNode to possibly modify.
+type SetFn func(*yaml.RNode) error
+
+// SetScalar returns a SetFn to set a scalar value
+func SetScalar(value string) SetFn {
+	return SetEntry("", value, yaml.NodeTagEmpty)
+}
+
+// SetEntry returns a SetFn to set a field or a map entry to a value.
+// It can be used with an empty name to set both a value and a tag on a scalar node.
+// When setting only a value on a scalar node, use SetScalar instead.
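+//
+// Illustrative example: SetEntry("app", "nginx", yaml.NodeTagString) returns
+// a SetFn that sets the "app" field of a mapping node to the string "nginx".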
+func SetEntry(name, value, tag string) SetFn { + n := &yaml.Node{ + Kind: yaml.ScalarNode, + Value: value, + Tag: tag, + } + return func(node *yaml.RNode) error { + return node.PipeE(yaml.FieldSetter{ + Name: name, + Value: yaml.NewRNode(n), + }) + } +} + +type TrackableSetter struct { + // SetValueCallback will be invoked each time a field is set + setValueCallback func(name, value, tag string, node *yaml.RNode) +} + +// WithMutationTracker registers a callback which will be invoked each time a field is mutated +func (s *TrackableSetter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) *TrackableSetter { + s.setValueCallback = callback + return s +} + +// SetScalar returns a SetFn to set a scalar value. +// if a mutation tracker has been registered, the tracker will be invoked each +// time a scalar is set +func (s TrackableSetter) SetScalar(value string) SetFn { + return s.SetEntry("", value, yaml.NodeTagEmpty) +} + +// SetScalarIfEmpty returns a SetFn to set a scalar value only if it isn't already set. +// If a mutation tracker has been registered, the tracker will be invoked each +// time a scalar is actually set. +func (s TrackableSetter) SetScalarIfEmpty(value string) SetFn { + return s.SetEntryIfEmpty("", value, yaml.NodeTagEmpty) +} + +// SetEntry returns a SetFn to set a field or a map entry to a value. +// It can be used with an empty name to set both a value and a tag on a scalar node. +// When setting only a value on a scalar node, use SetScalar instead. +// If a mutation tracker has been registered, the tracker will be invoked each +// time an entry is set. +func (s TrackableSetter) SetEntry(name, value, tag string) SetFn { + origSetEntry := SetEntry(name, value, tag) + return func(node *yaml.RNode) error { + if s.setValueCallback != nil { + s.setValueCallback(name, value, tag, node) + } + return origSetEntry(node) + } +} + +// SetEntryIfEmpty returns a SetFn to set a field or a map entry to a value only if it isn't already set. +// It can be used with an empty name to set both a value and a tag on a scalar node. +// When setting only a value on a scalar node, use SetScalar instead. +// If a mutation tracker has been registered, the tracker will be invoked each +// time an entry is actually set. +func (s TrackableSetter) SetEntryIfEmpty(key, value, tag string) SetFn { + origSetEntry := SetEntry(key, value, tag) + return func(node *yaml.RNode) error { + if hasExistingValue(node, key) { + return nil + } + if s.setValueCallback != nil { + s.setValueCallback(key, value, tag, node) + } + return origSetEntry(node) + } +} + +func hasExistingValue(node *yaml.RNode, key string) bool { + if node.IsNilOrEmpty() { + return false + } + if err := yaml.ErrorIfInvalid(node, yaml.ScalarNode); err == nil { + return yaml.GetValue(node) != "" + } + entry := node.Field(key) + if entry.IsNilOrEmpty() { + return false + } + return yaml.GetValue(entry.Value) != "" +} diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/doc.go new file mode 100644 index 000000000..b0f197722 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/doc.go @@ -0,0 +1,6 @@ +// Copyright 2020 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package fsslice contains a yaml.Filter to modify a resource if +// it matches one or more FieldSpec entries. 
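+//
+// A FieldSpec pairs a GVK with a path; e.g. (illustrative) kind: Deployment
+// with path: spec/replicas.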
+package fsslice
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/fsslice.go b/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/fsslice.go
new file mode 100644
index 000000000..9eb5c1313
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/fsslice.go
@@ -0,0 +1,47 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package fsslice
+
+import (
+	"sigs.k8s.io/kustomize/api/filters/fieldspec"
+	"sigs.k8s.io/kustomize/api/filters/filtersutil"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+var _ yaml.Filter = Filter{}
+
+// Filter ranges over an FsSlice to modify fields on a single object.
+// An FsSlice is a range of FieldSpecs. A FieldSpec is a GVK plus a path.
+type Filter struct {
+	// FsSlice is the list of FieldSpecs to set
+	FsSlice types.FsSlice `yaml:"fsSlice"`
+
+	// SetValue is called on each field that matches one of the FieldSpecs
+	SetValue filtersutil.SetFn
+
+	// CreateKind is used to create fields that do not exist
+	CreateKind yaml.Kind
+
+	// CreateTag is used to set the tag if encountering a null field
+	CreateTag string
+}
+
+func (fltr Filter) Filter(obj *yaml.RNode) (*yaml.RNode, error) {
+	for i := range fltr.FsSlice {
+		// apply this FieldSpec
+		// create a new filter for each iteration because they
+		// store internal state about the field paths
+		_, err := (&fieldspec.Filter{
+			FieldSpec:  fltr.FsSlice[i],
+			SetValue:   fltr.SetValue,
+			CreateKind: fltr.CreateKind,
+			CreateTag:  fltr.CreateTag,
+		}).Filter(obj)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return obj, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/doc.go
new file mode 100644
index 000000000..3fe20a6d6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2022 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package iampolicygenerator contains a kio.Filter that generates
+// IAM-policy-related resources for a given cloud provider.
+package iampolicygenerator
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/iampolicygenerator.go b/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/iampolicygenerator.go
new file mode 100644
index 000000000..97ea31693
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/iampolicygenerator.go
@@ -0,0 +1,55 @@
+// Copyright 2021 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package iampolicygenerator
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+type Filter struct {
+	IAMPolicyGenerator types.IAMPolicyGeneratorArgs `json:",inline,omitempty" yaml:",inline,omitempty"`
+}
+
+// Filter adds a GKE service account object to nodes
+func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	switch f.IAMPolicyGenerator.Cloud {
+	case types.GKE:
+		IAMPolicyResources, err := f.generateGkeIAMPolicyResources()
+		if err != nil {
+			return nil, err
+		}
+		nodes = append(nodes, IAMPolicyResources...)
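+	// Other cloud providers are not implemented; fall through to an error.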
+	default:
+		return nil, fmt.Errorf("cloud provider %s not supported yet", f.IAMPolicyGenerator.Cloud)
+	}
+	return nodes, nil
+}
+
+func (f Filter) generateGkeIAMPolicyResources() ([]*yaml.RNode, error) {
+	var result []*yaml.RNode
+	input := fmt.Sprintf(`
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations:
    iam.gke.io/gcp-service-account: %s@%s.iam.gserviceaccount.com
  name: %s
`, f.IAMPolicyGenerator.ServiceAccount.Name,
+		f.IAMPolicyGenerator.ProjectId,
+		f.IAMPolicyGenerator.KubernetesService.Name)
+
+	if f.IAMPolicyGenerator.Namespace != "" {
+		input += fmt.Sprintf("\n  namespace: %s", f.IAMPolicyGenerator.Namespace)
+	}
+
+	sa, err := yaml.Parse(input)
+	if err != nil {
+		return nil, err
+	}
+
+	return append(result, sa), nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/doc.go
new file mode 100644
index 000000000..d919491dd
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package imagetag contains two kio.Filter implementations to cover the
+// functionality of the kustomize imagetag transformer.
+//
+// Filter updates fields based on a FieldSpec and an ImageTag.
+//
+// LegacyFilter doesn't use a FieldSpec, and instead only updates image
+// references if the field is named image and it is underneath a field called
+// either containers or initContainers.
+package imagetag
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/imagetag.go b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/imagetag.go
new file mode 100644
index 000000000..24ab99f74
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/imagetag.go
@@ -0,0 +1,72 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package imagetag
+
+import (
+	"sigs.k8s.io/kustomize/api/filters/filtersutil"
+	"sigs.k8s.io/kustomize/api/filters/fsslice"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Filter modifies an "image tag", the value used to specify the
+// name, tag, version, digest, etc. of (docker) container images
+// used by a pod template.
+type Filter struct {
+	// ImageTag is the tag we want to apply to the inputs
+	// The name of the image is used as a key, and other fields
+	// can specify a new name, tag, etc.
+	ImageTag types.Image `json:"imageTag,omitempty" yaml:"imageTag,omitempty"`
+
+	// FsSlice contains the FieldSpecs to locate an image field,
+	// e.g. Path: "spec/myContainers[]/image"
+	FsSlice types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"`
+
+	trackableSetter filtersutil.TrackableSetter
+}
+
+var _ kio.Filter = Filter{}
+var _ kio.TrackableFilter = &Filter{}
+
+// WithMutationTracker registers a callback which will be invoked each time a field is mutated
+func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) {
+	f.trackableSetter.WithMutationTracker(callback)
+}
+
+func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	_, err := kio.FilterAll(yaml.FilterFunc(f.filter)).Filter(nodes)
+	return nodes, err
+}
+
+func (f Filter) filter(node *yaml.RNode) (*yaml.RNode, error) {
+	// FsSlice is an allowlist, not a denylist, so to deny
+	// something via configuration a new config mechanism is
+	// needed. Until then, hardcode it.
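+	// (At present the only hardcoded denial is for CRDs; see isOnDenyList.)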
+	if f.isOnDenyList(node) {
+		return node, nil
+	}
+	if err := node.PipeE(fsslice.Filter{
+		FsSlice: f.FsSlice,
+		SetValue: imageTagUpdater{
+			ImageTag:        f.ImageTag,
+			trackableSetter: f.trackableSetter,
+		}.SetImageValue,
+	}); err != nil {
+		return nil, err
+	}
+	return node, nil
+}
+
+func (f Filter) isOnDenyList(node *yaml.RNode) bool {
+	meta, err := node.GetMeta()
+	if err != nil {
+		// A missing 'meta' field will cause problems elsewhere;
+		// ignore it here to keep the signature simple.
+		return false
+	}
+	// Ignore CRDs
+	// https://github.com/kubernetes-sigs/kustomize/issues/890
+	return meta.Kind == `CustomResourceDefinition`
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/legacy.go b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/legacy.go
new file mode 100644
index 000000000..d6f5b33f2
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/legacy.go
@@ -0,0 +1,104 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package imagetag
+
+import (
+	"sigs.k8s.io/kustomize/api/internal/utils"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// LegacyFilter is an implementation of the kio.Filter interface
+// that scans through the provided kyaml data structure and updates
+// the value of any image field that is inside a sequence under
+// a field called either containers or initContainers. The field is only
+// updated if its value matches an image reference and the name
+// of the image matches the provided ImageTag.
+type LegacyFilter struct {
+	ImageTag types.Image `json:"imageTag,omitempty" yaml:"imageTag,omitempty"`
+}
+
+var _ kio.Filter = LegacyFilter{}
+
+func (lf LegacyFilter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	return kio.FilterAll(yaml.FilterFunc(lf.filter)).Filter(nodes)
+}
+
+func (lf LegacyFilter) filter(node *yaml.RNode) (*yaml.RNode, error) {
+	meta, err := node.GetMeta()
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not make any changes if the type of the resource
+	// is CustomResourceDefinition.
+	if meta.Kind == `CustomResourceDefinition` {
+		return node, nil
+	}
+
+	fff := findFieldsFilter{
+		fields:        []string{"containers", "initContainers"},
+		fieldCallback: checkImageTagsFn(lf.ImageTag),
+	}
+	if err := node.PipeE(fff); err != nil {
+		return nil, err
+	}
+	return node, nil
+}
+
+type fieldCallback func(node *yaml.RNode) error
+
+// findFieldsFilter is an implementation of the kio.Filter
+// interface. It will walk the data structure and look for fields
+// that match the provided list of field names. For each match,
+// the value of the field will be passed in as a parameter to the
+// provided fieldCallback.
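+// E.g. (illustrative) with fields ["containers", "initContainers"], the
+// callback receives the value of every field with one of those names, at
+// any depth.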
+// TODO: move this to kyaml/filterutils +type findFieldsFilter struct { + fields []string + + fieldCallback fieldCallback +} + +func (f findFieldsFilter) Filter(obj *yaml.RNode) (*yaml.RNode, error) { + return obj, f.walk(obj) +} + +func (f findFieldsFilter) walk(node *yaml.RNode) error { + switch node.YNode().Kind { + case yaml.MappingNode: + return node.VisitFields(func(n *yaml.MapNode) error { + err := f.walk(n.Value) + if err != nil { + return err + } + key := n.Key.YNode().Value + if utils.StringSliceContains(f.fields, key) { + return f.fieldCallback(n.Value) + } + return nil + }) + case yaml.SequenceNode: + return errors.Wrap(node.VisitElements(f.walk)) + } + return nil +} + +func checkImageTagsFn(imageTag types.Image) fieldCallback { + return func(node *yaml.RNode) error { + if node.YNode().Kind != yaml.SequenceNode { + return nil + } + + return node.VisitElements(func(n *yaml.RNode) error { + // Look up any fields on the provided node that is named + // image. + return n.PipeE(yaml.Get("image"), imageTagUpdater{ + ImageTag: imageTag, + }) + }) + } +} diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go new file mode 100644 index 000000000..d2a728498 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go @@ -0,0 +1,71 @@ +// Copyright 2020 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package imagetag + +import ( + "sigs.k8s.io/kustomize/api/filters/filtersutil" + + "sigs.k8s.io/kustomize/api/image" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// imageTagUpdater is an implementation of the kio.Filter interface +// that will update the value of the yaml node based on the provided +// ImageTag if the current value matches the format of an image reference. +type imageTagUpdater struct { + Kind string `yaml:"kind,omitempty"` + ImageTag types.Image `yaml:"imageTag,omitempty"` + trackableSetter filtersutil.TrackableSetter +} + +func (u imageTagUpdater) SetImageValue(rn *yaml.RNode) error { + if err := yaml.ErrorIfInvalid(rn, yaml.ScalarNode); err != nil { + return err + } + + value := rn.YNode().Value + + if !image.IsImageMatched(value, u.ImageTag.Name) { + return nil + } + + name, tag, digest := image.Split(value) + if u.ImageTag.NewName != "" { + name = u.ImageTag.NewName + } + + // overriding tag or digest will replace both original tag and digest values + switch { + case u.ImageTag.NewTag != "" && u.ImageTag.Digest != "": + tag = u.ImageTag.NewTag + digest = u.ImageTag.Digest + case u.ImageTag.NewTag != "": + tag = u.ImageTag.NewTag + digest = "" + case u.ImageTag.Digest != "": + tag = "" + digest = u.ImageTag.Digest + case u.ImageTag.TagSuffix != "": + tag += u.ImageTag.TagSuffix + digest = "" + } + + // build final image name + if tag != "" { + name += ":" + tag + } + if digest != "" { + name += "@" + digest + } + + return u.trackableSetter.SetScalar(name)(rn) +} + +func (u imageTagUpdater) Filter(rn *yaml.RNode) (*yaml.RNode, error) { + if err := u.SetImageValue(rn); err != nil { + return nil, err + } + return rn, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/labels/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/labels/doc.go new file mode 100644 index 000000000..978033c7e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/labels/doc.go @@ -0,0 +1,6 @@ +// Copyright 2020 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Package labels contains a kio.Filter implementation of the kustomize
+// labels transformer.
+package labels
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/labels/labels.go b/vendor/sigs.k8s.io/kustomize/api/filters/labels/labels.go
new file mode 100644
index 000000000..b67d4d4b1
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/labels/labels.go
@@ -0,0 +1,53 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package labels
+
+import (
+	"sigs.k8s.io/kustomize/api/filters/filtersutil"
+	"sigs.k8s.io/kustomize/api/filters/fsslice"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+type labelMap map[string]string
+
+// Filter sets labels.
+type Filter struct {
+	// Labels is the set of labels to apply to the inputs
+	Labels labelMap `yaml:"labels,omitempty"`
+
+	// FsSlice identifies the label fields.
+	FsSlice types.FsSlice
+
+	trackableSetter filtersutil.TrackableSetter
+}
+
+var _ kio.Filter = Filter{}
+var _ kio.TrackableFilter = &Filter{}
+
+// WithMutationTracker registers a callback which will be invoked each time a field is mutated
+func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) {
+	f.trackableSetter.WithMutationTracker(callback)
+}
+
+func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	keys := yaml.SortedMapKeys(f.Labels)
+	_, err := kio.FilterAll(yaml.FilterFunc(
+		func(node *yaml.RNode) (*yaml.RNode, error) {
+			for _, k := range keys {
+				if err := node.PipeE(fsslice.Filter{
+					FsSlice: f.FsSlice,
+					SetValue: f.trackableSetter.SetEntry(
+						k, f.Labels[k], yaml.NodeTagString),
+					CreateKind: yaml.MappingNode, // Labels are MappingNodes.
+					CreateTag:  yaml.NodeTagMap,
+				}); err != nil {
+					return nil, err
+				}
+			}
+			return node, nil
+		})).Filter(nodes)
+	return nodes, err
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/nameref/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/nameref/doc.go
new file mode 100644
index 000000000..78f938933
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/nameref/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2022 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package nameref contains a kio.Filter implementation of the kustomize
+// name reference transformer.
+package nameref
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go b/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go
new file mode 100644
index 000000000..4815f10a2
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go
@@ -0,0 +1,413 @@
+// Copyright 2022 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package nameref
+
+import (
+	"fmt"
+	"strings"
+
+	"sigs.k8s.io/kustomize/api/filters/fieldspec"
+	"sigs.k8s.io/kustomize/api/resmap"
+	"sigs.k8s.io/kustomize/api/resource"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/resid"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Filter updates name references.
+type Filter struct {
+	// Referrer refers to another resource X by X's name.
+	// E.g. A Deployment can refer to a ConfigMap.
+	// The Deployment is the Referrer,
+	// the ConfigMap is the ReferralTarget.
+	// This filter seeks to repair the reference in Deployment, given
+	// that the ConfigMap's name may have changed.
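+	//
+	// E.g. (hypothetical) a prefix transformer renames ConfigMap "cm" to
+	// "prod-cm"; this filter updates the Deployment's reference from "cm"
+	// to "prod-cm".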
+	Referrer *resource.Resource
+
+	// NameFieldToUpdate is the field in the Referrer
+	// that holds the name requiring an update.
+	// This is the field to write.
+	NameFieldToUpdate types.FieldSpec
+
+	// ReferralTarget is the source of the new value for
+	// the name, always in the 'metadata/name' field.
+	// This is the field to read.
+	ReferralTarget resid.Gvk
+
+	// Set of resources to scan to find the ReferralTarget.
+	ReferralCandidates resmap.ResMap
+}
+
+// At time of writing, in practice this is called with a slice with only
+// one entry, the node also referred to by the resource in the Referrer field.
+func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	return kio.FilterAll(yaml.FilterFunc(f.run)).Filter(nodes)
+}
+
+// The node passed in here is the same node as held in Referrer;
+// that's how the referrer's name field is updated.
+// Currently, however, this filter still needs the extra methods on Referrer
+// to consult things like the resource Id, its namespace, etc.
+// TODO(3455): No filter should use the Resource api; all information
+// about names should come from annotations, with helper methods
+// on the RNode object. Resource should get stupider, RNode smarter.
+func (f Filter) run(node *yaml.RNode) (*yaml.RNode, error) {
+	if err := f.confirmNodeMatchesReferrer(node); err != nil {
+		// sanity check.
+		return nil, err
+	}
+	f.NameFieldToUpdate.Gvk = f.Referrer.GetGvk()
+	if err := node.PipeE(fieldspec.Filter{
+		FieldSpec: f.NameFieldToUpdate,
+		SetValue:  f.set,
+	}); err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "updating name reference in '%s' field of '%s'",
+			f.NameFieldToUpdate.Path, f.Referrer.CurId().String())
+	}
+	return node, nil
+}
+
+// This function is called on the node found at FieldSpec.Path.
+// It's some node in the Referrer.
+func (f Filter) set(node *yaml.RNode) error {
+	if yaml.IsMissingOrNull(node) {
+		return nil
+	}
+	switch node.YNode().Kind {
+	case yaml.ScalarNode:
+		return f.setScalar(node)
+	case yaml.MappingNode:
+		return f.setMapping(node)
+	case yaml.SequenceNode:
+		return applyFilterToSeq(seqFilter{
+			setScalarFn:  f.setScalar,
+			setMappingFn: f.setMapping,
+		}, node)
+	default:
+		return fmt.Errorf("node must be a scalar, sequence or map")
+	}
+}
+
+// This method is used when NameFieldToUpdate doesn't lead to
+// one scalar field (typically called 'name'), but rather
+// leads to a map field (called anything). In this case we
+// must complete the field path, looking for both a 'name'
+// and a 'namespace' field to help select the proper
+// ReferralTarget to read the name and namespace from.
+func (f Filter) setMapping(node *yaml.RNode) error {
+	if node.YNode().Kind != yaml.MappingNode {
+		return fmt.Errorf("expected a mapping node")
+	}
+	nameNode, err := node.Pipe(yaml.FieldMatcher{Name: "name"})
+	if err != nil {
+		return errors.WrapPrefixf(err, "trying to match 'name' field")
+	}
+	if nameNode == nil {
+		// This is a _configuration_ error; the field path
+		// specified in NameFieldToUpdate.Path doesn't resolve
+		// to a map with a 'name' field, so we have no idea what
+		// field to update with a new name.
+		return fmt.Errorf("path config error; no 'name' field in node")
+	}
+	candidates, err := f.filterMapCandidatesByNamespace(node)
+	if err != nil {
+		return err
+	}
+	oldName := nameNode.YNode().Value
+	// use allNamesAndNamespacesAreTheSame to compare referral candidates for functional identity,
+	// because we source both name and namespace values from the referral in this case.
+ referral, err := f.selectReferral(oldName, candidates, allNamesAndNamespacesAreTheSame) + if err != nil || referral == nil { + // Nil referral means nothing to do. + return err + } + f.recordTheReferral(referral) + if referral.GetName() == oldName && referral.GetNamespace() == "" { + // The name has not changed, nothing to do. + return nil + } + if err = node.PipeE(yaml.FieldSetter{ + Name: "name", + StringValue: referral.GetName(), + }); err != nil { + return err + } + if referral.GetNamespace() == "" { + // Don't write an empty string into the namespace field, as + // it should not replace the value "default". The empty + // string is handled as a wild card here, not as an implicit + // specification of the "default" k8s namespace. + return nil + } + return node.PipeE(yaml.FieldSetter{ + Name: "namespace", + StringValue: referral.GetNamespace(), + }) +} + +func (f Filter) filterMapCandidatesByNamespace( + node *yaml.RNode) ([]*resource.Resource, error) { + namespaceNode, err := node.Pipe(yaml.FieldMatcher{Name: "namespace"}) + if err != nil { + return nil, errors.WrapPrefixf(err, "trying to match 'namespace' field") + } + if namespaceNode == nil { + return f.ReferralCandidates.Resources(), nil + } + namespace := namespaceNode.YNode().Value + nsMap := f.ReferralCandidates.GroupedByOriginalNamespace() + if candidates, ok := nsMap[namespace]; ok { + return candidates, nil + } + nsMap = f.ReferralCandidates.GroupedByCurrentNamespace() + // This could be nil, or an empty list. + return nsMap[namespace], nil +} + +func (f Filter) setScalar(node *yaml.RNode) error { + // use allNamesAreTheSame to compare referral candidates for functional identity, + // because we only source the name from the referral in this case. + referral, err := f.selectReferral( + node.YNode().Value, f.ReferralCandidates.Resources(), allNamesAreTheSame) + if err != nil || referral == nil { + // Nil referral means nothing to do. + return err + } + f.recordTheReferral(referral) + if referral.GetName() == node.YNode().Value { + // The name has not changed, nothing to do. + return nil + } + return node.PipeE(yaml.FieldSetter{StringValue: referral.GetName()}) +} + +// In the resource, make a note that it is referred to by the Referrer. +func (f Filter) recordTheReferral(referral *resource.Resource) { + referral.AppendRefBy(f.Referrer.CurId()) +} + +// getRoleRefGvk returns a Gvk in the roleRef field. Return error +// if the roleRef, roleRef/apiGroup or roleRef/kind is missing. +func getRoleRefGvk(n *resource.Resource) (*resid.Gvk, error) { + roleRef, err := n.Pipe(yaml.Lookup("roleRef")) + if err != nil { + return nil, err + } + if roleRef.IsNil() { + return nil, fmt.Errorf("roleRef cannot be found in %s", n.MustString()) + } + apiGroup, err := roleRef.Pipe(yaml.Lookup("apiGroup")) + if err != nil { + return nil, err + } + if apiGroup.IsNil() { + return nil, fmt.Errorf( + "apiGroup cannot be found in roleRef %s", roleRef.MustString()) + } + kind, err := roleRef.Pipe(yaml.Lookup("kind")) + if err != nil { + return nil, err + } + if kind.IsNil() { + return nil, fmt.Errorf( + "kind cannot be found in roleRef %s", roleRef.MustString()) + } + return &resid.Gvk{ + Group: apiGroup.YNode().Value, + Kind: kind.YNode().Value, + }, nil +} + +// sieveFunc returns true if the resource argument satisfies some criteria. +type sieveFunc func(*resource.Resource) bool + +// doSieve uses a function to accept or ignore resources from a list. +// If list is nil, returns immediately. +// It's a filter obviously, but that term is overloaded here. 
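+// E.g. (illustrative) doSieve(candidates, previousNameMatches("cm")) keeps
+// only the resources that were previously named "cm".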
+func doSieve(list []*resource.Resource, fn sieveFunc) (s []*resource.Resource) {
+	for _, r := range list {
+		if fn(r) {
+			s = append(s, r)
+		}
+	}
+	return
+}
+
+func acceptAll(r *resource.Resource) bool {
+	return true
+}
+
+func previousNameMatches(name string) sieveFunc {
+	return func(r *resource.Resource) bool {
+		for _, id := range r.PrevIds() {
+			if id.Name == name {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+func previousIdSelectedByGvk(gvk *resid.Gvk) sieveFunc {
+	return func(r *resource.Resource) bool {
+		for _, id := range r.PrevIds() {
+			if id.IsSelected(gvk) {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+// If we are updating a 'roleRef/name' field, the 'apiGroup' and 'kind'
+// fields in the same 'roleRef' map must be considered.
+// If either object is cluster-scoped, there can be a referral.
+// E.g. a RoleBinding (which exists in a namespace) can refer
+// to a ClusterRole (cluster-scoped) object.
+// https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole
+// Likewise, a ClusterRole can refer to a Secret (in a namespace).
+// Objects in different namespaces generally cannot refer to each other,
+// with some exceptions (e.g. RoleBinding and ServiceAccount are both
+// namespaceable, but the former can refer to accounts in other namespaces).
+func (f Filter) roleRefFilter() sieveFunc {
+	if !strings.HasSuffix(f.NameFieldToUpdate.Path, "roleRef/name") {
+		return acceptAll
+	}
+	roleRefGvk, err := getRoleRefGvk(f.Referrer)
+	if err != nil {
+		return acceptAll
+	}
+	return previousIdSelectedByGvk(roleRefGvk)
+}
+
+func prefixSuffixEquals(other resource.ResCtx) sieveFunc {
+	return func(r *resource.Resource) bool {
+		return r.PrefixesSuffixesEquals(other)
+	}
+}
+
+func (f Filter) sameCurrentNamespaceAsReferrer() sieveFunc {
+	referrerCurId := f.Referrer.CurId()
+	if referrerCurId.IsClusterScoped() {
+		// If the referrer is cluster-scoped, let anything through.
+		return acceptAll
+	}
+	return func(r *resource.Resource) bool {
+		if r.CurId().IsClusterScoped() {
+			// Allow cluster-scoped through.
+			return true
+		}
+		if r.GetKind() == "ServiceAccount" {
+			// Allow service accounts through, even though they
+			// are in a namespace. A RoleBinding in another namespace
+			// can reference them.
+			return true
+		}
+		return referrerCurId.IsNsEquals(r.CurId())
+	}
+}
+
+// selectReferral picks the best referral from a list of candidates.
+func (f Filter) selectReferral(
+	// The name referral that may need to be updated.
+	oldName string,
+	candidates []*resource.Resource,
+	// function that returns whether two referrals are identical for the purposes of the transformation
+	candidatesIdentical func(resources []*resource.Resource) bool) (*resource.Resource, error) {
+	candidates = doSieve(candidates, previousNameMatches(oldName))
+	candidates = doSieve(candidates, previousIdSelectedByGvk(&f.ReferralTarget))
+	candidates = doSieve(candidates, f.roleRefFilter())
+	candidates = doSieve(candidates, f.sameCurrentNamespaceAsReferrer())
+	if len(candidates) == 1 {
+		return candidates[0], nil
+	}
+	candidates = doSieve(candidates, prefixSuffixEquals(f.Referrer))
+	if len(candidates) == 1 {
+		return candidates[0], nil
+	}
+	if len(candidates) == 0 {
+		return nil, nil
+	}
+	if candidatesIdentical(candidates) {
+		// Just take the first one.
+		return candidates[0], nil
+	}
+	ids := getIds(candidates)
+	return nil, fmt.Errorf("found multiple possible referrals: %s\n%s", ids, f.failureDetails(candidates))
+}
+
+func (f Filter) failureDetails(resources []*resource.Resource) string {
+	msg := strings.Builder{}
+	msg.WriteString(fmt.Sprintf("\n**** Too many possible referral targets to referrer:\n%s\n", f.Referrer.MustYaml()))
+	for i, r := range resources {
+		msg.WriteString(fmt.Sprintf("--- possible referral %d:\n%s\n", i, r.MustYaml()))
+	}
+	return msg.String()
+}
+
+func allNamesAreTheSame(resources []*resource.Resource) bool {
+	name := resources[0].GetName()
+	for i := 1; i < len(resources); i++ {
+		if name != resources[i].GetName() {
+			return false
+		}
+	}
+	return true
+}
+
+func allNamesAndNamespacesAreTheSame(resources []*resource.Resource) bool {
+	name := resources[0].GetName()
+	namespace := resources[0].GetNamespace()
+	for i := 1; i < len(resources); i++ {
+		if name != resources[i].GetName() || namespace != resources[i].GetNamespace() {
+			return false
+		}
+	}
+	return true
+}
+
+func getIds(rs []*resource.Resource) string {
+	var result []string
+	for _, r := range rs {
+		result = append(result, r.CurId().String())
+	}
+	return strings.Join(result, ", ")
+}
+
+func checkEqual(k, a, b string) error {
+	if a != b {
+		return fmt.Errorf(
+			"node-referrerOriginal '%s' mismatch '%s' != '%s'",
+			k, a, b)
+	}
+	return nil
+}
+
+func (f Filter) confirmNodeMatchesReferrer(node *yaml.RNode) error {
+	meta, err := node.GetMeta()
+	if err != nil {
+		return err
+	}
+	gvk := f.Referrer.GetGvk()
+	if err = checkEqual(
+		"APIVersion", meta.APIVersion, gvk.ApiVersion()); err != nil {
+		return err
+	}
+	if err = checkEqual(
+		"Kind", meta.Kind, gvk.Kind); err != nil {
+		return err
+	}
+	if err = checkEqual(
+		"Name", meta.Name, f.Referrer.GetName()); err != nil {
+		return err
+	}
+	if err = checkEqual(
+		"Namespace", meta.Namespace, f.Referrer.GetNamespace()); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/nameref/seqfilter.go b/vendor/sigs.k8s.io/kustomize/api/filters/nameref/seqfilter.go
new file mode 100644
index 000000000..0caab4c9b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/nameref/seqfilter.go
@@ -0,0 +1,60 @@
+// Copyright 2022 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package nameref
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+type setFn func(*yaml.RNode) error
+
+type seqFilter struct {
+	setScalarFn  setFn
+	setMappingFn setFn
+}
+
+func (sf seqFilter) Filter(node *yaml.RNode) (*yaml.RNode, error) {
+	if yaml.IsMissingOrNull(node) {
+		return node, nil
+	}
+	switch node.YNode().Kind {
+	case yaml.ScalarNode:
+		// Kind: Role/ClusterRole
+		// FieldSpec is rules.resourceNames
+		err := sf.setScalarFn(node)
+		return node, err
+	case yaml.MappingNode:
+		// Kind: RoleBinding/ClusterRoleBinding
+		// FieldSpec is subjects
+		// Note: The corresponding fieldSpec had been changed from
+		// path: subjects/name to just path: subjects. This causes
+		// the mutating code to receive the whole map containing
+		// namespace and name instead of just a simple string field
+		// containing the name.
+		err := sf.setMappingFn(node)
+		return node, err
+	default:
+		return node, fmt.Errorf(
+			"%#v is expected to be either a string or a map of string", node)
+	}
+}
+
+// applyFilterToSeq will apply the filter to each element in the sequence node
+func applyFilterToSeq(filter yaml.Filter, node *yaml.RNode) error {
+	if node.YNode().Kind != yaml.SequenceNode {
+		return fmt.Errorf("expect a sequence node but got %v", node.YNode().Kind)
+	}
+
+	for _, elem := range node.Content() {
+		rnode := yaml.NewRNode(elem)
+		err := rnode.PipeE(filter)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/namespace/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/namespace/doc.go
new file mode 100644
index 000000000..539758b28
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/namespace/doc.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package namespace contains a kio.Filter implementation of the kustomize
+// namespace transformer.
+//
+// Special cases for known Kubernetes resources have been hardcoded in addition
+// to those defined by the FsSlice.
+package namespace
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/namespace/namespace.go b/vendor/sigs.k8s.io/kustomize/api/filters/namespace/namespace.go
new file mode 100644
index 000000000..9279abb9d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/namespace/namespace.go
@@ -0,0 +1,217 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package namespace
+
+import (
+	"sigs.k8s.io/kustomize/api/filters/filtersutil"
+	"sigs.k8s.io/kustomize/api/filters/fsslice"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/resid"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+type Filter struct {
+	// Namespace is the namespace to apply to the inputs
+	Namespace string `yaml:"namespace,omitempty"`
+
+	// FsSlice contains the FieldSpecs to locate the namespace field
+	FsSlice types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"`
+
+	// UnsetOnly means only blank namespace fields will be set
+	UnsetOnly bool `json:"unsetOnly" yaml:"unsetOnly"`
+
+	// SetRoleBindingSubjects determines which subject fields in RoleBinding and ClusterRoleBinding
+	// objects will have their namespace fields set. Overrides field specs provided for these types, if any.
+	// - defaultOnly (default): namespace will be set only on subjects named "default".
+	// - allServiceAccounts: namespace will be set on all subjects with "kind: ServiceAccount".
+	// - none: all subjects will be skipped.
+	SetRoleBindingSubjects RoleBindingSubjectMode `json:"setRoleBindingSubjects" yaml:"setRoleBindingSubjects"`
+
+	trackableSetter filtersutil.TrackableSetter
+}
+
+type RoleBindingSubjectMode string
+
+const (
+	DefaultSubjectsOnly       RoleBindingSubjectMode = "defaultOnly"
+	SubjectModeUnspecified    RoleBindingSubjectMode = ""
+	AllServiceAccountSubjects RoleBindingSubjectMode = "allServiceAccounts"
+	NoSubjects                RoleBindingSubjectMode = "none"
+)
+
+var _ kio.Filter = Filter{}
+var _ kio.TrackableFilter = &Filter{}
+
+// WithMutationTracker registers a callback which will be invoked each time a field is mutated
+func (ns *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) {
+	ns.trackableSetter.WithMutationTracker(callback)
+}
+
+func (ns Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	return kio.FilterAll(yaml.FilterFunc(ns.run)).Filter(nodes)
+}
+
+// run runs the filter on a single node rather than a slice
+func (ns Filter) run(node *yaml.RNode) (*yaml.RNode, error) {
+	// Special handling for metadata.namespace and metadata.name -- :(
+	// Never let SetEntry handle metadata.namespace -- it would incorrectly
+	// include cluster-scoped resources.
+	// Only update metadata.name if the apiVersion is the expected one, so
+	// that other resources of kind Namespace are left alone.
+	apiVersion := node.GetApiVersion()
+	ns.FsSlice = ns.removeUnneededMetaFieldSpecs(apiVersion, ns.FsSlice)
+	gvk := resid.GvkFromNode(node)
+	if err := ns.metaNamespaceHack(node, gvk); err != nil {
+		return nil, err
+	}
+
+	// Special handling for (cluster) role binding subjects -- :(
+	if isRoleBinding(gvk.Kind) {
+		ns.FsSlice = ns.removeRoleBindingSubjectFieldSpecs(ns.FsSlice)
+		if err := ns.roleBindingHack(node); err != nil {
+			return nil, err
+		}
+	}
+
+	// transformations based on data -- :)
+	err := node.PipeE(fsslice.Filter{
+		FsSlice:    ns.FsSlice,
+		SetValue:   ns.fieldSetter(),
+		CreateKind: yaml.ScalarNode, // Namespace is a ScalarNode
+		CreateTag:  yaml.NodeTagString,
+	})
+	invalidKindErr := &yaml.InvalidNodeKindError{}
+	if err != nil && errors.As(err, &invalidKindErr) && invalidKindErr.ActualNodeKind() != yaml.ScalarNode {
+		return nil, errors.WrapPrefixf(err, "namespace field specs must target scalar nodes")
+	}
+	return node, errors.WrapPrefixf(err, "namespace transformation failed")
+}
+
+// metaNamespaceHack is a hack for implementing the namespace transform
+// for the metadata.namespace field on namespace scoped resources.
+func (ns Filter) metaNamespaceHack(obj *yaml.RNode, gvk resid.Gvk) error {
+	if gvk.IsClusterScoped() {
+		return nil
+	}
+	f := fsslice.Filter{
+		FsSlice: []types.FieldSpec{
+			{Path: types.MetadataNamespacePath, CreateIfNotPresent: true},
+		},
+		SetValue:   ns.fieldSetter(),
+		CreateKind: yaml.ScalarNode, // Namespace is a ScalarNode
+	}
+	_, err := f.Filter(obj)
+	return err
+}
+
+// roleBindingHack is a hack for implementing the transformer's SetRoleBindingSubjects option
+// for RoleBinding and ClusterRoleBinding resource types.
+//
+// In NoSubjects mode, it does nothing.
+//
+// In AllServiceAccountSubjects mode, it sets the namespace on subjects with "kind: ServiceAccount".
+//
+// In DefaultSubjectsOnly mode (default mode), RoleBinding and ClusterRoleBinding have the namespace set on
+// elements of the "subjects" field if and only if the subject element's
+// "name" is "default". Otherwise the namespace is not set.
+// Example:
+//
+// kind: RoleBinding
+// subjects:
+// - name: "default" # this will have the namespace set
+//   ...
+// - name: "something-else" # this will not have the namespace set +// ... +func (ns Filter) roleBindingHack(obj *yaml.RNode) error { + var visitor filtersutil.SetFn + switch ns.SetRoleBindingSubjects { + case NoSubjects: + return nil + case DefaultSubjectsOnly, SubjectModeUnspecified: + visitor = ns.setSubjectsNamedDefault + case AllServiceAccountSubjects: + visitor = ns.setServiceAccountNamespaces + default: + return errors.Errorf("invalid value %q for setRoleBindingSubjects: "+ + "must be one of %q, %q or %q", ns.SetRoleBindingSubjects, + DefaultSubjectsOnly, NoSubjects, AllServiceAccountSubjects) + } + + // Lookup the subjects field on all elements. + obj, err := obj.Pipe(yaml.Lookup(subjectsField)) + if err != nil || yaml.IsMissingOrNull(obj) { + return err + } + // Use the appropriate visitor to set the namespace field on the correct subset of subjects + return errors.WrapPrefixf(obj.VisitElements(visitor), "setting namespace on (cluster)role binding subjects") +} + +func isRoleBinding(kind string) bool { + return kind == roleBindingKind || kind == clusterRoleBindingKind +} + +func (ns Filter) setServiceAccountNamespaces(o *yaml.RNode) error { + name, err := o.Pipe(yaml.Lookup("kind"), yaml.Match("ServiceAccount")) + if err != nil || yaml.IsMissingOrNull(name) { + return errors.WrapPrefixf(err, "looking up kind on (cluster)role binding subject") + } + return setNamespaceField(o, ns.fieldSetter()) +} + +func (ns Filter) setSubjectsNamedDefault(o *yaml.RNode) error { + name, err := o.Pipe(yaml.Lookup("name"), yaml.Match("default")) + if err != nil || yaml.IsMissingOrNull(name) { + return errors.WrapPrefixf(err, "looking up name on (cluster)role binding subject") + } + return setNamespaceField(o, ns.fieldSetter()) +} + +func setNamespaceField(node *yaml.RNode, setter filtersutil.SetFn) error { + node, err := node.Pipe(yaml.LookupCreate(yaml.ScalarNode, "namespace")) + if err != nil { + return errors.WrapPrefixf(err, "setting namespace field on (cluster)role binding subject") + } + return setter(node) +} + +// removeRoleBindingSubjectFieldSpecs removes from the list fieldspecs that +// have hardcoded implementations +func (ns Filter) removeRoleBindingSubjectFieldSpecs(fs types.FsSlice) types.FsSlice { + var val types.FsSlice + for i := range fs { + if isRoleBinding(fs[i].Kind) && fs[i].Path == subjectsNamespacePath { + continue + } + val = append(val, fs[i]) + } + return val +} + +func (ns Filter) removeUnneededMetaFieldSpecs(apiVersion string, fs types.FsSlice) types.FsSlice { + var val types.FsSlice + for i := range fs { + if fs[i].Path == types.MetadataNamespacePath { + continue + } + if apiVersion != types.MetadataNamespaceApiVersion && fs[i].Path == types.MetadataNamePath { + continue + } + val = append(val, fs[i]) + } + return val +} + +func (ns *Filter) fieldSetter() filtersutil.SetFn { + if ns.UnsetOnly { + return ns.trackableSetter.SetEntryIfEmpty("", ns.Namespace, yaml.NodeTagString) + } + return ns.trackableSetter.SetEntry("", ns.Namespace, yaml.NodeTagString) +} + +const ( + subjectsField = "subjects" + subjectsNamespacePath = "subjects/namespace" + roleBindingKind = "RoleBinding" + clusterRoleBindingKind = "ClusterRoleBinding" +) diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/doc.go new file mode 100644 index 000000000..ec4cfa821 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/doc.go @@ -0,0 +1,6 @@ +// Copyright 2020 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Package patchjson6902 contains a kio.Filter implementation of the kustomize
+// patchjson6902 transformer.
+package patchjson6902
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go b/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go
new file mode 100644
index 000000000..5749d6ddf
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go
@@ -0,0 +1,65 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package patchjson6902
+
+import (
+	"strings"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+	k8syaml "sigs.k8s.io/yaml"
+)
+
+type Filter struct {
+	Patch string
+
+	decodedPatch jsonpatch.Patch
+}
+
+var _ kio.Filter = Filter{}
+
+func (pf Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	decodedPatch, err := pf.decodePatch()
+	if err != nil {
+		return nil, err
+	}
+	pf.decodedPatch = decodedPatch
+	return kio.FilterAll(yaml.FilterFunc(pf.run)).Filter(nodes)
+}
+
+func (pf Filter) decodePatch() (jsonpatch.Patch, error) {
+	patch := pf.Patch
+	// If the patch doesn't look like a JSON6902 patch, we
+	// assume it is YAML and convert it to JSON.
+	if !strings.HasPrefix(pf.Patch, "[") {
+		p, err := k8syaml.YAMLToJSON([]byte(patch))
+		if err != nil {
+			return nil, err
+		}
+		patch = string(p)
+	}
+	decodedPatch, err := jsonpatch.DecodePatch([]byte(patch))
+	if err != nil {
+		return nil, err
+	}
+	return decodedPatch, nil
+}
+
+func (pf Filter) run(node *yaml.RNode) (*yaml.RNode, error) {
+	// We don't actually use the kyaml library for manipulating the
+	// yaml here. We just marshal it to json and rely on the
+	// jsonpatch library to take care of applying the patch.
+	// This means ordering might not be preserved with this filter.
+	b, err := node.MarshalJSON()
+	if err != nil {
+		return nil, err
+	}
+	res, err := pf.decodedPatch.Apply(b)
+	if err != nil {
+		return nil, err
+	}
+	err = node.UnmarshalJSON(res)
+	return node, err
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/doc.go
new file mode 100644
index 000000000..1733fd8a2
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package patchstrategicmerge contains a kio.Filter implementation of the
+// kustomize strategic merge patch transformer.
+package patchstrategicmerge
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/patchstrategicmerge.go b/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/patchstrategicmerge.go
new file mode 100644
index 000000000..1a70d19aa
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/patchstrategicmerge.go
@@ -0,0 +1,36 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package patchstrategicmerge
+
+import (
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+	"sigs.k8s.io/kustomize/kyaml/yaml/merge2"
+)
+
+type Filter struct {
+	Patch *yaml.RNode
+}
+
+var _ kio.Filter = Filter{}
+
+// Filter does a strategic merge patch, which can delete nodes.
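+// (A null value in the patch deletes the corresponding field in the target,
+// per strategic-merge-patch semantics.)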
+func (pf Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	var result []*yaml.RNode
+	for i := range nodes {
+		r, err := merge2.Merge(
+			pf.Patch, nodes[i],
+			yaml.MergeOptions{
+				ListIncreaseDirection: yaml.MergeOptionsListPrepend,
+			},
+		)
+		if err != nil {
+			return nil, err
+		}
+		if r != nil {
+			result = append(result, r)
+		}
+	}
+	return result, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/prefix/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/prefix/doc.go
new file mode 100644
index 000000000..95236859f
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/prefix/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package prefix contains a kio.Filter implementation of the kustomize
+// PrefixTransformer.
+package prefix
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/prefix/prefix.go b/vendor/sigs.k8s.io/kustomize/api/filters/prefix/prefix.go
new file mode 100644
index 000000000..daa375d1f
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/prefix/prefix.go
@@ -0,0 +1,50 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package prefix
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/kustomize/api/filters/fieldspec"
+	"sigs.k8s.io/kustomize/api/filters/filtersutil"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Filter applies resource name prefixes using the fieldSpecs.
+type Filter struct {
+	Prefix string `json:"prefix,omitempty" yaml:"prefix,omitempty"`
+
+	FieldSpec types.FieldSpec `json:"fieldSpec,omitempty" yaml:"fieldSpec,omitempty"`
+
+	trackableSetter filtersutil.TrackableSetter
+}
+
+var _ kio.Filter = Filter{}
+var _ kio.TrackableFilter = &Filter{}
+
+// WithMutationTracker registers a callback which will be invoked each time a field is mutated
+func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) {
+	f.trackableSetter.WithMutationTracker(callback)
+}
+
+func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	return kio.FilterAll(yaml.FilterFunc(f.run)).Filter(nodes)
+}
+
+func (f Filter) run(node *yaml.RNode) (*yaml.RNode, error) {
+	err := node.PipeE(fieldspec.Filter{
+		FieldSpec:  f.FieldSpec,
+		SetValue:   f.evaluateField,
+		CreateKind: yaml.ScalarNode, // Name is a ScalarNode
+		CreateTag:  yaml.NodeTagString,
+	})
+	return node, err
+}
+
+func (f Filter) evaluateField(node *yaml.RNode) error {
+	return f.trackableSetter.SetScalar(fmt.Sprintf(
+		"%s%s", f.Prefix, node.YNode().Value))(node)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/refvar/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/refvar/doc.go
new file mode 100644
index 000000000..e30719871
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/refvar/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2022 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package refvar contains a kio.Filter implementation of the kustomize
+// refvar transformer (find and replace $(FOO) style variables in strings).
+package refvar
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/refvar/expand.go b/vendor/sigs.k8s.io/kustomize/api/filters/refvar/expand.go
new file mode 100644
index 000000000..3bcbd7a53
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/refvar/expand.go
@@ -0,0 +1,147 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package refvar
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+const (
+	operator        = '$'
+	referenceOpener = '('
+	referenceCloser = ')'
+)
+
+// syntaxWrap returns the input string wrapped by the expansion syntax.
+func syntaxWrap(input string) string {
+	var sb strings.Builder
+	sb.WriteByte(operator)
+	sb.WriteByte(referenceOpener)
+	sb.WriteString(input)
+	sb.WriteByte(referenceCloser)
+	return sb.String()
+}
+
+// MappingFunc maps a string to anything.
+type MappingFunc func(string) interface{}
+
+// MakePrimitiveReplacer returns a MappingFunc that uses a map to do
+// replacements, and a histogram to count map hits.
+//
+// Func behavior:
+//
+// If the input key is NOT found in the map, the key is wrapped up
+// as a variable declaration string and returned, e.g. key FOO becomes $(FOO).
+// This string is presumably put back where it was found, and might get replaced
+// later.
+//
+// If the key is found in the map, the value is returned if it is a primitive
+// type (string, bool, number), and the hit is counted.
+//
+// If it's not a primitive type (e.g. a map, struct, func, etc.) then this
+// function doesn't know what to do with it and it returns the key wrapped up
+// again as if it had not been replaced. This should probably be an error.
+func MakePrimitiveReplacer(
+	counts map[string]int, someMap map[string]interface{}) MappingFunc {
+	return func(key string) interface{} {
+		if value, ok := someMap[key]; ok {
+			switch typedV := value.(type) {
+			case string, int, int32, int64, float32, float64, bool:
+				counts[key]++
+				return typedV
+			default:
+				// If the value is some complicated type (e.g. a map or struct),
+				// this function doesn't know how to jam it into a string,
+				// so just pretend it was a cache miss.
+				// Likely this should be an error instead of a silent failure,
+				// since the programmer passed an impossible value.
+				log.Printf(
+					"MakePrimitiveReplacer: bad replacement type=%T val=%v",
+					typedV, typedV)
+				return syntaxWrap(key)
+			}
+		}
+		// If unable to return the mapped variable, return it
+		// as it was found, and a later mapping might be able to
+		// replace it.
+		return syntaxWrap(key)
+	}
+}
+
+// DoReplacements replaces variable references in the input string
+// using the mapping function.
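+//
+// E.g. (illustrative) with a mapping of {"FOO": "bar"}, the input
+// "image-$(FOO)" yields "image-bar", while the bare input "$(FOO)" returns
+// the mapped value itself, preserving its type.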
+func DoReplacements(input string, mapping MappingFunc) interface{} { + var buf strings.Builder + checkpoint := 0 + for cursor := 0; cursor < len(input); cursor++ { + if input[cursor] == operator && cursor+1 < len(input) { + // Copy the portion of the input string since the last + // checkpoint into the buffer + buf.WriteString(input[checkpoint:cursor]) + + // Attempt to read the variable name as defined by the + // syntax from the input string + read, isVar, advance := tryReadVariableName(input[cursor+1:]) + + if isVar { + // We were able to read a variable name correctly; + // apply the mapping to the variable name and copy the + // bytes into the buffer + mapped := mapping(read) + if input == syntaxWrap(read) { + // Preserve the type of variable + return mapped + } + + // Variable is used in a middle of a string + buf.WriteString(fmt.Sprintf("%v", mapped)) + } else { + // Not a variable name; copy the read bytes into the buffer + buf.WriteString(read) + } + + // Advance the cursor in the input string to account for + // bytes consumed to read the variable name expression + cursor += advance + + // Advance the checkpoint in the input string + checkpoint = cursor + 1 + } + } + + // Return the buffer and any remaining unwritten bytes in the + // input string. + return buf.String() + input[checkpoint:] +} + +// tryReadVariableName attempts to read a variable name from the input +// string and returns the content read from the input, whether that content +// represents a variable name to perform mapping on, and the number of bytes +// consumed in the input string. +// +// The input string is assumed not to contain the initial operator. +func tryReadVariableName(input string) (string, bool, int) { + switch input[0] { + case operator: + // Escaped operator; return it. + return input[0:1], false, 1 + case referenceOpener: + // Scan to expression closer + for i := 1; i < len(input); i++ { + if input[i] == referenceCloser { + return input[1:i], true, i + 1 + } + } + + // Incomplete reference; return it. + return string(operator) + string(referenceOpener), false, 1 + default: + // Not the beginning of an expression, ie, an operator + // that doesn't begin an expression. Return the operator + // and the first rune in the string. + return string(operator) + string(input[0]), false, 1 + } +} diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/refvar/refvar.go b/vendor/sigs.k8s.io/kustomize/api/filters/refvar/refvar.go new file mode 100644 index 000000000..e00afafd7 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/refvar/refvar.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package refvar + +import ( + "fmt" + "strconv" + + "sigs.k8s.io/kustomize/api/filters/fieldspec" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Filter updates $(VAR) style variables with values. +// The fieldSpecs are the places to look for occurrences of $(VAR). 
+type Filter struct { + MappingFunc MappingFunc `json:"mappingFunc,omitempty" yaml:"mappingFunc,omitempty"` + FieldSpec types.FieldSpec `json:"fieldSpec,omitempty" yaml:"fieldSpec,omitempty"` +} + +func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + return kio.FilterAll(yaml.FilterFunc(f.run)).Filter(nodes) +} + +func (f Filter) run(node *yaml.RNode) (*yaml.RNode, error) { + err := node.PipeE(fieldspec.Filter{ + FieldSpec: f.FieldSpec, + SetValue: f.set, + }) + return node, err +} + +func (f Filter) set(node *yaml.RNode) error { + if yaml.IsMissingOrNull(node) { + return nil + } + switch node.YNode().Kind { + case yaml.ScalarNode: + return f.setScalar(node) + case yaml.MappingNode: + return f.setMap(node) + case yaml.SequenceNode: + return f.setSeq(node) + default: + return fmt.Errorf("invalid type encountered %v", node.YNode().Kind) + } +} + +func updateNodeValue(node *yaml.Node, newValue interface{}) { + switch newValue := newValue.(type) { + case int: + node.Value = strconv.FormatInt(int64(newValue), 10) + node.Tag = yaml.NodeTagInt + case int32: + node.Value = strconv.FormatInt(int64(newValue), 10) + node.Tag = yaml.NodeTagInt + case int64: + node.Value = strconv.FormatInt(newValue, 10) + node.Tag = yaml.NodeTagInt + case bool: + node.SetString(strconv.FormatBool(newValue)) + node.Tag = yaml.NodeTagBool + case float32: + node.SetString(strconv.FormatFloat(float64(newValue), 'f', -1, 32)) + node.Tag = yaml.NodeTagFloat + case float64: + node.SetString(strconv.FormatFloat(newValue, 'f', -1, 64)) + node.Tag = yaml.NodeTagFloat + default: + node.SetString(newValue.(string)) + node.Tag = yaml.NodeTagString + } + node.Style = 0 +} + +func (f Filter) setScalar(node *yaml.RNode) error { + if !yaml.IsYNodeString(node.YNode()) { + return nil + } + v := DoReplacements(node.YNode().Value, f.MappingFunc) + updateNodeValue(node.YNode(), v) + return nil +} + +func (f Filter) setMap(node *yaml.RNode) error { + contents := node.YNode().Content + for i := 0; i < len(contents); i += 2 { + if !yaml.IsYNodeString(contents[i]) { + return fmt.Errorf( + "invalid map key: value='%s', tag='%s'", + contents[i].Value, contents[i].Tag) + } + if !yaml.IsYNodeString(contents[i+1]) { + continue + } + newValue := DoReplacements(contents[i+1].Value, f.MappingFunc) + updateNodeValue(contents[i+1], newValue) + } + return nil +} + +func (f Filter) setSeq(node *yaml.RNode) error { + for _, item := range node.YNode().Content { + if !yaml.IsYNodeString(item) { + return fmt.Errorf("invalid value type expect a string") + } + newValue := DoReplacements(item.Value, f.MappingFunc) + updateNodeValue(item, newValue) + } + return nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/replacement/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/replacement/doc.go new file mode 100644 index 000000000..667c92893 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/replacement/doc.go @@ -0,0 +1,7 @@ +// Copyright 2022 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package replacement contains a kio.Filter implementation of the kustomize +// replacement transformer (accepts sources and looks for targets to replace +// their values with values from the sources). 
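+//
+// In a kustomization file a replacement looks roughly like this
+// (a sketch; the resource names and paths are illustrative):
+//
+//	replacements:
+//	- source:
+//	    kind: ConfigMap
+//	    name: app-config
+//	    fieldPath: data.image
+//	  targets:
+//	  - select:
+//	      kind: Deployment
+//	    fieldPaths:
+//	    - spec.template.spec.containers.0.image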
+package replacement diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go b/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go new file mode 100644 index 000000000..d4fea260b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go @@ -0,0 +1,244 @@ +// Copyright 2021 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package replacement + +import ( + "fmt" + "strings" + + "sigs.k8s.io/kustomize/api/internal/utils" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/resid" + kyaml_utils "sigs.k8s.io/kustomize/kyaml/utils" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +type Filter struct { + Replacements []types.Replacement `json:"replacements,omitempty" yaml:"replacements,omitempty"` +} + +// Filter replaces values of targets with values from sources +func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + for i, r := range f.Replacements { + if r.Source == nil || r.Targets == nil { + return nil, fmt.Errorf("replacements must specify a source and at least one target") + } + value, err := getReplacement(nodes, &f.Replacements[i]) + if err != nil { + return nil, err + } + nodes, err = applyReplacement(nodes, value, r.Targets) + if err != nil { + return nil, err + } + } + return nodes, nil +} + +func getReplacement(nodes []*yaml.RNode, r *types.Replacement) (*yaml.RNode, error) { + source, err := selectSourceNode(nodes, r.Source) + if err != nil { + return nil, err + } + + if r.Source.FieldPath == "" { + r.Source.FieldPath = types.DefaultReplacementFieldPath + } + fieldPath := kyaml_utils.SmarterPathSplitter(r.Source.FieldPath, ".") + + rn, err := source.Pipe(yaml.Lookup(fieldPath...)) + if err != nil { + return nil, fmt.Errorf("error looking up replacement source: %w", err) + } + if rn.IsNilOrEmpty() { + return nil, fmt.Errorf("fieldPath `%s` is missing for replacement source %s", r.Source.FieldPath, r.Source.ResId) + } + + return getRefinedValue(r.Source.Options, rn) +} + +// selectSourceNode finds the node that matches the selector, returning +// an error if multiple or none are found +func selectSourceNode(nodes []*yaml.RNode, selector *types.SourceSelector) (*yaml.RNode, error) { + var matches []*yaml.RNode + for _, n := range nodes { + ids, err := utils.MakeResIds(n) + if err != nil { + return nil, fmt.Errorf("error getting node IDs: %w", err) + } + for _, id := range ids { + if id.IsSelectedBy(selector.ResId) { + if len(matches) > 0 { + return nil, fmt.Errorf( + "multiple matches for selector %s", selector) + } + matches = append(matches, n) + break + } + } + } + if len(matches) == 0 { + return nil, fmt.Errorf("nothing selected by %s", selector) + } + return matches[0], nil +} + +func getRefinedValue(options *types.FieldOptions, rn *yaml.RNode) (*yaml.RNode, error) { + if options == nil || options.Delimiter == "" { + return rn, nil + } + if rn.YNode().Kind != yaml.ScalarNode { + return nil, fmt.Errorf("delimiter option can only be used with scalar nodes") + } + value := strings.Split(yaml.GetValue(rn), options.Delimiter) + if options.Index >= len(value) || options.Index < 0 { + return nil, fmt.Errorf("options.index %d is out of bounds for value %s", options.Index, yaml.GetValue(rn)) + } + n := rn.Copy() + n.YNode().Value = value[options.Index] + return n, nil +} + +func applyReplacement(nodes []*yaml.RNode, value *yaml.RNode, targetSelectors []*types.TargetSelector) ([]*yaml.RNode, error) { + 
for _, selector := range targetSelectors { + if selector.Select == nil { + return nil, errors.Errorf("target must specify resources to select") + } + if len(selector.FieldPaths) == 0 { + selector.FieldPaths = []string{types.DefaultReplacementFieldPath} + } + for _, possibleTarget := range nodes { + ids, err := utils.MakeResIds(possibleTarget) + if err != nil { + return nil, err + } + + // filter targets by label and annotation selectors + selectByAnnoAndLabel, err := selectByAnnoAndLabel(possibleTarget, selector) + if err != nil { + return nil, err + } + if !selectByAnnoAndLabel { + continue + } + + // filter targets by matching resource IDs + for i, id := range ids { + if id.IsSelectedBy(selector.Select.ResId) && !rejectId(selector.Reject, &ids[i]) { + err := copyValueToTarget(possibleTarget, value, selector) + if err != nil { + return nil, err + } + break + } + } + } + } + return nodes, nil +} + +func selectByAnnoAndLabel(n *yaml.RNode, t *types.TargetSelector) (bool, error) { + if matchesSelect, err := matchesAnnoAndLabelSelector(n, t.Select); !matchesSelect || err != nil { + return false, err + } + for _, reject := range t.Reject { + if reject.AnnotationSelector == "" && reject.LabelSelector == "" { + continue + } + if m, err := matchesAnnoAndLabelSelector(n, reject); m || err != nil { + return false, err + } + } + return true, nil +} + +func matchesAnnoAndLabelSelector(n *yaml.RNode, selector *types.Selector) (bool, error) { + r := resource.Resource{RNode: *n} + annoMatch, err := r.MatchesAnnotationSelector(selector.AnnotationSelector) + if err != nil { + return false, err + } + labelMatch, err := r.MatchesLabelSelector(selector.LabelSelector) + if err != nil { + return false, err + } + return annoMatch && labelMatch, nil +} + +func rejectId(rejects []*types.Selector, id *resid.ResId) bool { + for _, r := range rejects { + if !r.ResId.IsEmpty() && id.IsSelectedBy(r.ResId) { + return true + } + } + return false +} + +func copyValueToTarget(target *yaml.RNode, value *yaml.RNode, selector *types.TargetSelector) error { + for _, fp := range selector.FieldPaths { + createKind := yaml.Kind(0) // do not create + if selector.Options != nil && selector.Options.Create { + createKind = value.YNode().Kind + } + targetFieldList, err := target.Pipe(&yaml.PathMatcher{ + Path: kyaml_utils.SmarterPathSplitter(fp, "."), + Create: createKind}) + if err != nil { + return errors.WrapPrefixf(err, fieldRetrievalError(fp, createKind != 0)) + } + targetFields, err := targetFieldList.Elements() + if err != nil { + return errors.WrapPrefixf(err, fieldRetrievalError(fp, createKind != 0)) + } + if len(targetFields) == 0 { + return errors.Errorf(fieldRetrievalError(fp, createKind != 0)) + } + + for _, t := range targetFields { + if err := setFieldValue(selector.Options, t, value); err != nil { + return err + } + } + } + return nil +} + +func fieldRetrievalError(fieldPath string, isCreate bool) string { + if isCreate { + return fmt.Sprintf("unable to find or create field %q in replacement target", fieldPath) + } + return fmt.Sprintf("unable to find field %q in replacement target", fieldPath) +} + +func setFieldValue(options *types.FieldOptions, targetField *yaml.RNode, value *yaml.RNode) error { + value = value.Copy() + if options != nil && options.Delimiter != "" { + if targetField.YNode().Kind != yaml.ScalarNode { + return fmt.Errorf("delimiter option can only be used with scalar nodes") + } + tv := strings.Split(targetField.YNode().Value, options.Delimiter) + v := yaml.GetValue(value) + // TODO: Add a way to 
remove an element + switch { + case options.Index < 0: // prefix + tv = append([]string{v}, tv...) + case options.Index >= len(tv): // suffix + tv = append(tv, v) + default: // replace an element + tv[options.Index] = v + } + value.YNode().Value = strings.Join(tv, options.Delimiter) + } + + if targetField.YNode().Kind == yaml.ScalarNode { + // For scalar, only copy the value (leave any type intact to auto-convert int->string or string->int) + targetField.YNode().Value = value.YNode().Value + } else { + targetField.SetYNode(value.YNode()) + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/doc.go new file mode 100644 index 000000000..a22d13034 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/doc.go @@ -0,0 +1,6 @@ +// Copyright 2020 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package replicacount contains a kio.Filter implementation of the kustomize +// ReplicaCountTransformer. +package replicacount diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/replicacount.go b/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/replicacount.go new file mode 100644 index 000000000..ea5351f9b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/replicacount.go @@ -0,0 +1,48 @@ +// Copyright 2022 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package replicacount + +import ( + "strconv" + + "sigs.k8s.io/kustomize/api/filters/fieldspec" + "sigs.k8s.io/kustomize/api/filters/filtersutil" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Filter updates/sets replicas fields using the fieldSpecs +type Filter struct { + Replica types.Replica `json:"replica,omitempty" yaml:"replica,omitempty"` + FieldSpec types.FieldSpec `json:"fieldSpec,omitempty" yaml:"fieldSpec,omitempty"` + + trackableSetter filtersutil.TrackableSetter +} + +var _ kio.Filter = Filter{} +var _ kio.TrackableFilter = &Filter{} + +// WithMutationTracker registers a callback which will be invoked each time a field is mutated +func (rc *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) { + rc.trackableSetter.WithMutationTracker(callback) +} + +func (rc Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + return kio.FilterAll(yaml.FilterFunc(rc.run)).Filter(nodes) +} + +func (rc Filter) run(node *yaml.RNode) (*yaml.RNode, error) { + err := node.PipeE(fieldspec.Filter{ + FieldSpec: rc.FieldSpec, + SetValue: rc.set, + CreateKind: yaml.ScalarNode, // replicas is a ScalarNode + CreateTag: yaml.NodeTagInt, + }) + return node, err +} + +func (rc Filter) set(node *yaml.RNode) error { + return rc.trackableSetter.SetEntry("", strconv.FormatInt(rc.Replica.Count, 10), yaml.NodeTagInt)(node) +} diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/suffix/doc.go b/vendor/sigs.k8s.io/kustomize/api/filters/suffix/doc.go new file mode 100644 index 000000000..18be62dfd --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/filters/suffix/doc.go @@ -0,0 +1,6 @@ +// Copyright 2021 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package suffix contains a kio.Filter implementation of the kustomize +// SuffixTransformer. 
+package suffix
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/suffix/suffix.go b/vendor/sigs.k8s.io/kustomize/api/filters/suffix/suffix.go
new file mode 100644
index 000000000..babc257be
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/suffix/suffix.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package suffix
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/kustomize/api/filters/fieldspec"
+	"sigs.k8s.io/kustomize/api/filters/filtersutil"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Filter applies resource name suffixes using the fieldSpecs
+type Filter struct {
+	Suffix string `json:"suffix,omitempty" yaml:"suffix,omitempty"`
+
+	FieldSpec types.FieldSpec `json:"fieldSpec,omitempty" yaml:"fieldSpec,omitempty"`
+
+	trackableSetter filtersutil.TrackableSetter
+}
+
+var _ kio.Filter = Filter{}
+var _ kio.TrackableFilter = &Filter{}
+
+// WithMutationTracker registers a callback which will be invoked each time a field is mutated
+func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) {
+	f.trackableSetter.WithMutationTracker(callback)
+}
+
+func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	return kio.FilterAll(yaml.FilterFunc(f.run)).Filter(nodes)
+}
+
+func (f Filter) run(node *yaml.RNode) (*yaml.RNode, error) {
+	err := node.PipeE(fieldspec.Filter{
+		FieldSpec:  f.FieldSpec,
+		SetValue:   f.evaluateField,
+		CreateKind: yaml.ScalarNode, // Name is a ScalarNode
+		CreateTag:  yaml.NodeTagString,
+	})
+	return node, err
+}
+
+func (f Filter) evaluateField(node *yaml.RNode) error {
+	return f.trackableSetter.SetScalar(fmt.Sprintf(
+		"%s%s", node.YNode().Value, f.Suffix))(node)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/filters/valueadd/valueadd.go b/vendor/sigs.k8s.io/kustomize/api/filters/valueadd/valueadd.go
new file mode 100644
index 000000000..f8e6b2f82
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/filters/valueadd/valueadd.go
@@ -0,0 +1,134 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package valueadd
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/filesys"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// An 'Add' operation aspiring to IETF RFC 6902 JSON.
+//
+// The filter tries to add a value to a node at a particular field path.
+//
+// Kinds of target fields:
+//
+// - Non-existent target field.
+//
+//   The field will be added and the value inserted.
+//
+// - Existing field, scalar or map.
+//
+//   E.g. 'spec/template/spec/containers/[name:nginx]/image'
+//
+//   This behaves like an IETF RFC 6902 Replace operation would;
+//   the existing value is replaced without complaint, even though
+//   this is an Add operation. In contrast, a Replace operation
+//   must fail (report an error) if the field doesn't exist.
+//
+// - Existing field, list (array)
+//   Not supported yet.
+//   TODO: Honor fields with RFC-6902-style array indices
+//   TODO: like 'spec/template/spec/containers/2'
+//   TODO: Modify kyaml/yaml/PathGetter to allow this.
+//   The value will be inserted into the array at the given position,
+//   shifting other contents. To instead replace an array entry, use
+//   an implementation of an IETF RFC 6902 Replace operation.
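+//
+// For reference, the analogous RFC 6902 Add operation would be written
+// roughly as (a sketch; the path and value are illustrative):
+//
+//	{"op": "add", "path": "/metadata/namespace", "value": "my-project"}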
+//
+// For the common case of a filepath in the field value, and a desire
+// to add the value to the filepath (rather than replace the filepath),
+// use a non-zero value of FilePathPosition (see below).
+type Filter struct {
+	// Value is the value to add.
+	//
+	// Empty values are disallowed, i.e. this filter isn't intended
+	// for use in erasing or removing fields. For that, use a filter
+	// more aligned with the IETF RFC 6902 JSON Remove operation.
+	//
+	// At the time of writing, Value's value should be a simple string,
+	// not a JSON document. This particular filter focuses on easing
+	// injection of a single-sourced cloud project and/or cluster name
+	// into various fields, especially namespace and various filepath
+	// specifications.
+	Value string
+
+	// FieldPath is a JSON-style path to the field intended to hold the value.
+	FieldPath string
+
+	// FilePathPosition is a filepath field index.
+	//
+	// Call the value of this field _i_.
+	//
+	// If _i_ is zero, negative or unspecified, this field has no effect.
+	//
+	// If _i_ is > 0, then it's assumed that
+	// - 'Value' is a string that can work as a directory or file name,
+	// - the field value intended for replacement holds a filepath.
+	//
+	// The filepath is split into a string slice, the value is inserted
+	// at position [i-1], shifting the rest of the path to the right.
+	// A value of i==1 puts the new value at the start of the path.
+	// This change never converts an absolute path to a relative path,
+	// meaning adding a new field at position i==1 will preserve a
+	// leading slash. E.g. if Value == 'PEACH'
+	//
+	//   OLD              : NEW                    : FilePathPosition
+	//   --------------------------------------------------------
+	//   {empty}          : PEACH                  : irrelevant
+	//   /                : /PEACH                 : irrelevant
+	//   pie              : PEACH/pie              : 1 (or less to prefix)
+	//   /pie             : /PEACH/pie             : 1 (or less to prefix)
+	//   raw              : raw/PEACH              : 2 (or more to postfix)
+	//   /raw             : /raw/PEACH             : 2 (or more to postfix)
+	//   a/nice/warm/pie  : a/nice/warm/PEACH/pie  : 4
+	//   /a/nice/warm/pie : /a/nice/warm/PEACH/pie : 4
+	//
+	// For robustness (liberal input, conservative output) FilePathPosition
+	// values that are too large to index the split filepath result in a
+	// postfix rather than an error. So use 1 to prefix, 9999 to postfix.
+	FilePathPosition int `json:"filePathPosition,omitempty" yaml:"filePathPosition,omitempty"`
+}
+
+var _ kio.Filter = Filter{}
+
+func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+	_, err := kio.FilterAll(yaml.FilterFunc(
+		func(node *yaml.RNode) (*yaml.RNode, error) {
+			var fields []string
+			// If there is a forward slash '/' in the field name, a backslash '\'
+			// will be used to escape it.
+			for _, f := range strings.Split(f.FieldPath, "/") {
+				if len(fields) > 0 && strings.HasSuffix(fields[len(fields)-1], "\\") {
+					concatField := strings.TrimSuffix(fields[len(fields)-1], "\\") + "/" + f
+					fields = append(fields[:len(fields)-1], concatField)
+				} else {
+					fields = append(fields, f)
+				}
+			}
+			// TODO: support SequenceNode.
+			// Presumably here one could look for array indices (digits) at
+			// the end of the field path (as described in IETF RFC 6902 JSON),
+			// and if found, take it as a signal that this should be a
+			// SequenceNode instead of a ScalarNode, and insert the value
+			// into the proper slot, shifting everything over.
+ n, err := node.Pipe(yaml.LookupCreate(yaml.ScalarNode, fields...)) + if err != nil { + return node, err + } + // TODO: allow more kinds + if err := yaml.ErrorIfInvalid(n, yaml.ScalarNode); err != nil { + return nil, err + } + newValue := f.Value + if f.FilePathPosition > 0 { + newValue = filesys.InsertPathPart( + n.YNode().Value, f.FilePathPosition-1, newValue) + } + return n.Pipe(yaml.FieldSetter{StringValue: newValue}) + })).Filter(nodes) + return nodes, err +} diff --git a/vendor/sigs.k8s.io/kustomize/api/hasher/hasher.go b/vendor/sigs.k8s.io/kustomize/api/hasher/hasher.go new file mode 100644 index 000000000..aef436d91 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/hasher/hasher.go @@ -0,0 +1,155 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package hasher + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "sort" + + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// SortArrayAndComputeHash sorts a string array and +// returns a hash for it +func SortArrayAndComputeHash(s []string) (string, error) { + sort.Strings(s) + data, err := json.Marshal(s) + if err != nil { + return "", err + } + return encode(hex256(string(data))) +} + +// Copied from https://github.com/kubernetes/kubernetes +// /blob/master/pkg/kubectl/util/hash/hash.go +func encode(hex string) (string, error) { + if len(hex) < 10 { + return "", fmt.Errorf( + "input length must be at least 10") + } + enc := []rune(hex[:10]) + for i := range enc { + switch enc[i] { + case '0': + enc[i] = 'g' + case '1': + enc[i] = 'h' + case '3': + enc[i] = 'k' + case 'a': + enc[i] = 'm' + case 'e': + enc[i] = 't' + } + } + return string(enc), nil +} + +// hex256 returns the hex form of the sha256 of the argument. +func hex256(data string) string { + return fmt.Sprintf("%x", sha256.Sum256([]byte(data))) +} + +// Hasher computes the hash of an RNode. +type Hasher struct{} + +// Hash returns a hash of the argument. +func (h *Hasher) Hash(node *yaml.RNode) (r string, err error) { + var encoded string + switch node.GetKind() { + case "ConfigMap": + encoded, err = encodeConfigMap(node) + case "Secret": + encoded, err = encodeSecret(node) + default: + var encodedBytes []byte + encodedBytes, err = json.Marshal(node.YNode()) + encoded = string(encodedBytes) + } + if err != nil { + return "", err + } + return encode(hex256(encoded)) +} + +func getNodeValues( + node *yaml.RNode, paths []string) (map[string]interface{}, error) { + values := make(map[string]interface{}) + for _, p := range paths { + vn, err := node.Pipe(yaml.Lookup(p)) + if err != nil { + return map[string]interface{}{}, err + } + if vn == nil { + values[p] = "" + continue + } + if vn.YNode().Kind != yaml.ScalarNode { + vs, err := vn.MarshalJSON() + if err != nil { + return map[string]interface{}{}, err + } + // data, binaryData and stringData are all maps + var v map[string]interface{} + json.Unmarshal(vs, &v) + values[p] = v + } else { + values[p] = vn.YNode().Value + } + } + return values, nil +} + +// encodeConfigMap encodes a ConfigMap. +// Data, Kind, and Name are taken into account. +// BinaryData is included if it's not empty to avoid useless key in output. 
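+// For example (a sketch), a ConfigMap named "cm" with data {a: b} encodes as
+//
+//	{"data":{"a":"b"},"kind":"ConfigMap","name":"cm"}
+//
+// so the resulting hash is independent of key ordering in the input YAML.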
+func encodeConfigMap(node *yaml.RNode) (string, error) { + // get fields + paths := []string{"metadata/name", "data", "binaryData"} + values, err := getNodeValues(node, paths) + if err != nil { + return "", err + } + m := map[string]interface{}{ + "kind": "ConfigMap", + "name": values["metadata/name"], + "data": values["data"], + } + if _, ok := values["binaryData"].(map[string]interface{}); ok { + m["binaryData"] = values["binaryData"] + } + + // json.Marshal sorts the keys in a stable order in the encoding + data, err := json.Marshal(m) + if err != nil { + return "", err + } + return string(data), nil +} + +// encodeSecret encodes a Secret. +// Data, Kind, Name, and Type are taken into account. +// StringData is included if it's not empty to avoid useless key in output. +func encodeSecret(node *yaml.RNode) (string, error) { + // get fields + paths := []string{"type", "metadata/name", "data", "stringData"} + values, err := getNodeValues(node, paths) + if err != nil { + return "", err + } + m := map[string]interface{}{"kind": "Secret", "type": values["type"], + "name": values["metadata/name"], "data": values["data"]} + if _, ok := values["stringData"].(map[string]interface{}); ok { + m["stringData"] = values["stringData"] + } + + // json.Marshal sorts the keys in a stable order in the encoding + data, err := json.Marshal(m) + if err != nil { + return "", err + } + return string(data), nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go b/vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go new file mode 100644 index 000000000..4cd62dd59 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go @@ -0,0 +1,56 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package ifc holds miscellaneous interfaces used by kustomize. +package ifc + +import ( + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Validator provides functions to validate annotations and labels +type Validator interface { + MakeAnnotationValidator() func(map[string]string) error + MakeAnnotationNameValidator() func([]string) error + MakeLabelValidator() func(map[string]string) error + MakeLabelNameValidator() func([]string) error + ValidateNamespace(string) []string + ErrIfInvalidKey(string) error + IsEnvVarName(k string) error +} + +// KvLoader reads and validates KV pairs. +type KvLoader interface { + Validator() Validator + Load(args types.KvPairSources) (all []types.Pair, err error) +} + +// Loader interface exposes methods to read bytes. +type Loader interface { + + // Repo returns the repo location if this Loader was created from a url + // or the empty string otherwise. + Repo() string + + // Root returns the root location for this Loader. + Root() string + + // New returns Loader located at newRoot. + New(newRoot string) (Loader, error) + + // Load returns the bytes read from the location or an error. + Load(location string) ([]byte, error) + + // Cleanup cleans the loader + Cleanup() error +} + +// KustHasher returns a hash of the argument +// or an error. +type KustHasher interface { + Hash(*yaml.RNode) (string, error) +} + +// See core.v1.SecretTypeOpaque +const SecretTypeOpaque = "Opaque" diff --git a/vendor/sigs.k8s.io/kustomize/api/image/image.go b/vendor/sigs.k8s.io/kustomize/api/image/image.go new file mode 100644 index 000000000..4a88050b4 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/image/image.go @@ -0,0 +1,66 @@ +// Copyright 2020 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package image + +import ( + "regexp" + "strings" +) + +// IsImageMatched returns true if the value of t is identical to the +// image name in the full image name and tag as given by s. +func IsImageMatched(s, t string) bool { + // Tag values are limited to [a-zA-Z0-9_.{}-]. + // Some tools like Bazel rules_k8s allow tag patterns with {} characters. + // More info: https://github.com/bazelbuild/rules_k8s/pull/423 + pattern, _ := regexp.Compile("^" + t + "(:[a-zA-Z0-9_.{}-]*)?(@sha256:[a-zA-Z0-9_.{}-]*)?$") + return pattern.MatchString(s) +} + +// Split separates and returns the name and tag parts +// from the image string using either colon `:` or at `@` separators. +// image reference pattern: [[host[:port]/]component/]component[:tag][@digest] +func Split(imageName string) (name string, tag string, digest string) { + // check if image name contains a domain + // if domain is present, ignore domain and check for `:` + searchName := imageName + slashIndex := strings.Index(imageName, "/") + if slashIndex > 0 { + searchName = imageName[slashIndex:] + } else { + slashIndex = 0 + } + + id := strings.Index(searchName, "@") + ic := strings.Index(searchName, ":") + + // no tag or digest + if ic < 0 && id < 0 { + return imageName, "", "" + } + + // digest only + if id >= 0 && (id < ic || ic < 0) { + id += slashIndex + name = imageName[:id] + digest = strings.TrimPrefix(imageName[id:], "@") + return name, "", digest + } + + // tag and digest + if id >= 0 && ic >= 0 { + id += slashIndex + ic += slashIndex + name = imageName[:ic] + tag = strings.TrimPrefix(imageName[ic:id], ":") + digest = strings.TrimPrefix(imageName[id:], "@") + return name, tag, digest + } + + // tag only + ic += slashIndex + name = imageName[:ic] + tag = strings.TrimPrefix(imageName[ic:], ":") + return name, tag, "" +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go new file mode 100644 index 000000000..afe6da2e7 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go @@ -0,0 +1,198 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package accumulator + +import ( + "encoding/json" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/filesys" + "sigs.k8s.io/kustomize/kyaml/resid" + "sigs.k8s.io/yaml" +) + +// OpenAPIDefinition describes single type. +// Normally these definitions are auto-generated using gen-openapi. +// Same as in k8s.io / kube-openapi / pkg / common. 
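+// In a CRD's OpenAPI document this corresponds to one entry under
+// "definitions"; only entries whose properties include "kind",
+// "apiVersion" and "metadata" are treated as k8s types below.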
+type OpenAPIDefinition struct {
+	Schema       spec.Schema
+	Dependencies []string
+}
+
+type myProperties = map[string]spec.Schema
+type nameToApiMap map[string]OpenAPIDefinition
+
+// LoadConfigFromCRDs parses CRD schemas from paths into a TransformerConfig
+func LoadConfigFromCRDs(
+	ldr ifc.Loader, paths []string) (*builtinconfig.TransformerConfig, error) {
+	tc := builtinconfig.MakeEmptyConfig()
+	for _, path := range paths {
+		content, err := ldr.Load(path)
+		if err != nil {
+			return nil, err
+		}
+		m, err := makeNameToApiMap(content)
+		if err != nil {
+			return nil, errors.WrapPrefixf(err, "unable to parse open API definition from '%s'", path)
+		}
+		otherTc, err := makeConfigFromApiMap(m)
+		if err != nil {
+			return nil, err
+		}
+		tc, err = tc.Merge(otherTc)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return tc, nil
+}
+
+func makeNameToApiMap(content []byte) (result nameToApiMap, err error) {
+	if content[0] == '{' {
+		err = json.Unmarshal(content, &result)
+	} else {
+		err = yaml.Unmarshal(content, &result)
+	}
+	return
+}
+
+func makeConfigFromApiMap(m nameToApiMap) (*builtinconfig.TransformerConfig, error) {
+	result := builtinconfig.MakeEmptyConfig()
+	for name, api := range m {
+		if !looksLikeAk8sType(api.Schema.SchemaProps.Properties) {
+			continue
+		}
+		tc := builtinconfig.MakeEmptyConfig()
+		err := loadCrdIntoConfig(
+			tc, makeGvkFromTypeName(name), m, name, []string{})
+		if err != nil {
+			return result, err
+		}
+		result, err = result.Merge(tc)
+		if err != nil {
+			return result, err
+		}
+	}
+	return result, nil
+}
+
+// TODO: Get Group and Version for CRD from the
+// openAPI definition once
+// "x-kubernetes-group-version-kind" is available in CRD
+func makeGvkFromTypeName(n string) resid.Gvk {
+	names := strings.Split(n, filesys.SelfDir)
+	kind := names[len(names)-1]
+	return resid.Gvk{Kind: kind}
+}
+
+func looksLikeAk8sType(properties myProperties) bool {
+	_, ok := properties["kind"]
+	if !ok {
+		return false
+	}
+	_, ok = properties["apiVersion"]
+	if !ok {
+		return false
+	}
+	_, ok = properties["metadata"]
+	return ok
+}
+
+const (
+	// "x-kubernetes-annotation": ""
+	xAnnotation = "x-kubernetes-annotation"
+
+	// "x-kubernetes-label-selector": ""
+	xLabelSelector = "x-kubernetes-label-selector"
+
+	// "x-kubernetes-identity": ""
+	xIdentity = "x-kubernetes-identity"
+
+	// "x-kubernetes-object-ref-api-version":
+	xVersion = "x-kubernetes-object-ref-api-version"
+
+	// "x-kubernetes-object-ref-kind":
+	xKind = "x-kubernetes-object-ref-kind"
+
+	// "x-kubernetes-object-ref-name-key": "name"
+	// default is "name"
+	xNameKey = "x-kubernetes-object-ref-name-key"
+)
+
+// loadCrdIntoConfig loads a CRD spec into a TransformerConfig
+func loadCrdIntoConfig(
+	theConfig *builtinconfig.TransformerConfig, theGvk resid.Gvk, theMap nameToApiMap,
+	typeName string, path []string) (err error) {
+	api, ok := theMap[typeName]
+	if !ok {
+		return nil
+	}
+	for propName, property := range api.Schema.SchemaProps.Properties {
+		_, annotate := property.Extensions.GetString(xAnnotation)
+		if annotate {
+			err = theConfig.AddAnnotationFieldSpec(
+				makeFs(theGvk, append(path, propName)))
+			if err != nil {
+				return
+			}
+		}
+		_, label := property.Extensions.GetString(xLabelSelector)
+		if label {
+			err = theConfig.AddLabelFieldSpec(
+				makeFs(theGvk, append(path, propName)))
+			if err != nil {
+				return
+			}
+		}
+		_, identity := property.Extensions.GetString(xIdentity)
+		if identity {
+			err = theConfig.AddPrefixFieldSpec(
+				makeFs(theGvk, append(path, propName)))
+			if err != nil {
+				return
+			}
+		}
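+		// A property carrying both x-kubernetes-object-ref-api-version and
+		// x-kubernetes-object-ref-kind declares a name reference to another
+		// object; record it so renames of the referenced object propagate.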
version, ok := property.Extensions.GetString(xVersion) + if ok { + kind, ok := property.Extensions.GetString(xKind) + if ok { + nameKey, ok := property.Extensions.GetString(xNameKey) + if !ok { + nameKey = "name" + } + err = theConfig.AddNamereferenceFieldSpec( + builtinconfig.NameBackReferences{ + Gvk: resid.Gvk{Kind: kind, Version: version}, + Referrers: []types.FieldSpec{ + makeFs(theGvk, append(path, propName, nameKey))}, + }) + if err != nil { + return + } + } + } + if property.Ref.GetURL() != nil { + err = loadCrdIntoConfig( + theConfig, theGvk, theMap, + property.Ref.String(), append(path, propName)) + if err != nil { + return + } + } + } + return nil +} + +func makeFs(in resid.Gvk, path []string) types.FieldSpec { + return types.FieldSpec{ + CreateIfNotPresent: false, + Gvk: in, + Path: strings.Join(path, "/"), + } +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/namereferencetransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/namereferencetransformer.go new file mode 100644 index 000000000..f8f12b4f2 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/namereferencetransformer.go @@ -0,0 +1,164 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package accumulator + +import ( + "fmt" + "log" + + "sigs.k8s.io/kustomize/api/filters/nameref" + "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/kyaml/resid" +) + +type nameReferenceTransformer struct { + backRefs []builtinconfig.NameBackReferences +} + +const doDebug = false + +var _ resmap.Transformer = &nameReferenceTransformer{} + +type filterMap map[*resource.Resource][]nameref.Filter + +// newNameReferenceTransformer constructs a nameReferenceTransformer +// with a given slice of NameBackReferences. +func newNameReferenceTransformer( + br []builtinconfig.NameBackReferences) resmap.Transformer { + if br == nil { + log.Fatal("backrefs not expected to be nil") + } + return &nameReferenceTransformer{backRefs: br} +} + +// Transform updates name references in resource A that +// refer to resource B, given that B's name may have +// changed. +// +// For example, a HorizontalPodAutoscaler (HPA) +// necessarily refers to a Deployment, the thing that +// an HPA scales. In this case: +// +// - the HPA instance is the Referrer, +// - the Deployment instance is the ReferralTarget. +// +// If the Deployment's name changes, e.g. a prefix is added, +// then the HPA's reference to the Deployment must be fixed. +// +func (t *nameReferenceTransformer) Transform(m resmap.ResMap) error { + fMap := t.determineFilters(m.Resources()) + debug(fMap) + for r, fList := range fMap { + c, err := m.SubsetThatCouldBeReferencedByResource(r) + if err != nil { + return err + } + for _, f := range fList { + f.Referrer = r + f.ReferralCandidates = c + if err := f.Referrer.ApplyFilter(f); err != nil { + return err + } + } + } + return nil +} + +func debug(fMap filterMap) { + if !doDebug { + return + } + fmt.Printf("filterMap has %d entries:\n", len(fMap)) + rCount := 0 + for r, fList := range fMap { + yml, _ := r.AsYAML() + rCount++ + fmt.Printf(` +---- %3d. 
possible referrer -------------
+%s
+---------`, rCount, string(yml),
+		)
+		for i, f := range fList {
+			fmt.Printf(`
+%3d/%3d update: %s
+          from: %s
+`, rCount, i+1, f.NameFieldToUpdate.Path, f.ReferralTarget,
+			)
+		}
+	}
+}
+
+// Produce a map from referrer resources that might need to be fixed
+// to filters that might fix them. The keys to this map are potential
+// referrers, so won't include resources like ConfigMap or Secret.
+//
+// In the inner loop over the resources below, say we
+// encounter an HPA instance. Then, in scanning the set
+// of all known backrefs, we encounter an entry like
+//
+//   - kind: Deployment
+//     fieldSpecs:
+//     - kind: HorizontalPodAutoscaler
+//       path: spec/scaleTargetRef/name
+//
+// This entry says that an HPA, via its
+// 'spec/scaleTargetRef/name' field, may refer to a
+// Deployment.
+//
+// This means that a filter will need to hunt for the right Deployment,
+// obtain its new name, and write that name into the HPA's
+// 'spec/scaleTargetRef/name' field. Return a filter that can do that.
+func (t *nameReferenceTransformer) determineFilters(
+	resources []*resource.Resource) (fMap filterMap) {
+	// We cache the resource OrgId values because they don't change and otherwise are very visible in a memory pprof
+	resourceOrgIds := make([]resid.ResId, len(resources))
+	for i, resource := range resources {
+		resourceOrgIds[i] = resource.OrgId()
+	}
+
+	fMap = make(filterMap)
+	for _, backReference := range t.backRefs {
+		for _, referrerSpec := range backReference.Referrers {
+			for i, res := range resources {
+				if resourceOrgIds[i].IsSelected(&referrerSpec.Gvk) {
+					// If this is true, the res might be a referrer, and if
+					// so, the name reference it holds might need an update.
+					if resHasField(res, referrerSpec.Path) {
+						// Optimization - the referrer has the field
+						// that might need updating.
+						fMap[res] = append(fMap[res], nameref.Filter{
+							// Name field to write in the Referrer.
+							// If the path specified here isn't found in
+							// the Referrer, nothing happens (no error,
+							// no field creation).
+							NameFieldToUpdate: referrerSpec,
+							// Specification of object class to read from.
+							// Always read from metadata/name field.
+							ReferralTarget: backReference.Gvk,
+						})
+					}
+				}
+			}
+		}
+	}
+	return fMap
+}
+
+// TODO: check res for field existence here to avoid extra work.
+// res.GetFieldValue, which uses yaml.Lookup under the hood, doesn't know
+// how to parse fieldspec-style paths that make no distinction
+// between maps and sequences. This means it cannot lookup commonly
+// used "indeterminate" paths like
+//    spec/containers/env/valueFrom/configMapKeyRef/name
+// ('containers' is a list, not a map).
+// However, the fieldspec filter does know how to handle this;
+// extract that code and call it here?
+func resHasField(res *resource.Resource, path string) bool {
+	return true
+	// fld := strings.Join(utils.PathSplitter(path), ".")
+	// _, e := res.GetFieldValue(fld)
+	// return e == nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/refvartransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/refvartransformer.go
new file mode 100644
index 000000000..a02edc4fb
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/refvartransformer.go
@@ -0,0 +1,57 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0 + +package accumulator + +import ( + "sigs.k8s.io/kustomize/api/filters/refvar" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" +) + +type refVarTransformer struct { + varMap map[string]interface{} + replacementCounts map[string]int + fieldSpecs []types.FieldSpec +} + +// newRefVarTransformer returns a new refVarTransformer +// that replaces $(VAR) style variables with values. +// The fieldSpecs are the places to look for occurrences of $(VAR). +func newRefVarTransformer( + varMap map[string]interface{}, fs []types.FieldSpec) *refVarTransformer { + return &refVarTransformer{ + varMap: varMap, + fieldSpecs: fs, + } +} + +// UnusedVars returns slice of Var names that were unused +// after a Transform run. +func (rv *refVarTransformer) UnusedVars() []string { + var unused []string + for k := range rv.varMap { + if _, ok := rv.replacementCounts[k]; !ok { + unused = append(unused, k) + } + } + return unused +} + +// Transform replaces $(VAR) style variables with values. +func (rv *refVarTransformer) Transform(m resmap.ResMap) error { + rv.replacementCounts = make(map[string]int) + mf := refvar.MakePrimitiveReplacer(rv.replacementCounts, rv.varMap) + for _, res := range m.Resources() { + for _, fieldSpec := range rv.fieldSpecs { + err := res.ApplyFilter(refvar.Filter{ + MappingFunc: mf, + FieldSpec: fieldSpec, + }) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go new file mode 100644 index 000000000..0f4008c97 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go @@ -0,0 +1,190 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package accumulator + +import ( + "fmt" + "log" + "strings" + + "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/resid" +) + +// ResAccumulator accumulates resources and the rules +// used to customize those resources. It's a ResMap +// plus stuff needed to modify the ResMap. +type ResAccumulator struct { + resMap resmap.ResMap + tConfig *builtinconfig.TransformerConfig + varSet types.VarSet +} + +func MakeEmptyAccumulator() *ResAccumulator { + ra := &ResAccumulator{} + ra.resMap = resmap.New() + ra.tConfig = &builtinconfig.TransformerConfig{} + ra.varSet = types.NewVarSet() + return ra +} + +// ResMap returns a copy of the internal resMap. +func (ra *ResAccumulator) ResMap() resmap.ResMap { + return ra.resMap.ShallowCopy() +} + +// Vars returns a copy of underlying vars. +func (ra *ResAccumulator) Vars() []types.Var { + return ra.varSet.AsSlice() +} + +func (ra *ResAccumulator) AppendAll(resources resmap.ResMap) error { + return ra.resMap.AppendAll(resources) +} + +func (ra *ResAccumulator) AbsorbAll(resources resmap.ResMap) error { + return ra.resMap.AbsorbAll(resources) +} + +func (ra *ResAccumulator) MergeConfig( + tConfig *builtinconfig.TransformerConfig) (err error) { + ra.tConfig, err = ra.tConfig.Merge(tConfig) + return err +} + +func (ra *ResAccumulator) GetTransformerConfig() *builtinconfig.TransformerConfig { + return ra.tConfig +} + +// MergeVars accumulates vars into ResAccumulator. +// A Var is a tuple of name, object reference and field reference. 
+// This func takes a list of vars from the current kustomization file and +// annotates the accumulated resources with the names of the vars that match +// those resources. E.g. if there's a var named "sam" that wants to get +// its data from a ConfigMap named "james", and the resource list contains a +// ConfigMap named "james", then that ConfigMap will be annotated with the +// var name "sam". Later this annotation is used to find the data for "sam" +// by digging into a particular fieldpath of "james". +func (ra *ResAccumulator) MergeVars(incoming []types.Var) error { + for _, v := range incoming { + targetId := resid.NewResIdWithNamespace(v.ObjRef.GVK(), v.ObjRef.Name, v.ObjRef.Namespace) + idMatcher := targetId.GvknEquals + if targetId.Namespace != "" || targetId.IsClusterScoped() { + // Preserve backward compatibility. An empty namespace means + // wildcard search on the namespace hence we still use GvknEquals + idMatcher = targetId.Equals + } + matched := ra.resMap.GetMatchingResourcesByAnyId(idMatcher) + if len(matched) > 1 { + return fmt.Errorf( + "found %d resId matches for var %s "+ + "(unable to disambiguate)", + len(matched), v) + } + if len(matched) == 1 { + matched[0].AppendRefVarName(v) + } + } + return ra.varSet.MergeSlice(incoming) +} + +func (ra *ResAccumulator) MergeAccumulator(other *ResAccumulator) (err error) { + err = ra.AppendAll(other.resMap) + if err != nil { + return err + } + err = ra.MergeConfig(other.tConfig) + if err != nil { + return err + } + return ra.varSet.MergeSet(other.varSet) +} + +func (ra *ResAccumulator) findVarValueFromResources(v types.Var) (interface{}, error) { + for _, res := range ra.resMap.Resources() { + for _, varName := range res.GetRefVarNames() { + if varName == v.Name { + s, err := res.GetFieldValue(v.FieldRef.FieldPath) + if err != nil { + return "", fmt.Errorf( + "field specified in var '%v' "+ + "not found in corresponding resource", v) + } + return s, nil + } + } + } + return "", fmt.Errorf( + "var '%v' cannot be mapped to a field "+ + "in the set of known resources", v) +} + +// makeVarReplacementMap returns a map of Var names to +// their final values. The values are strings intended +// for substitution wherever the $(var.Name) occurs. +func (ra *ResAccumulator) makeVarReplacementMap() (map[string]interface{}, error) { + result := map[string]interface{}{} + for _, v := range ra.Vars() { + s, err := ra.findVarValueFromResources(v) + if err != nil { + return nil, err + } + result[v.Name] = s + } + return result, nil +} + +func (ra *ResAccumulator) Transform(t resmap.Transformer) error { + return t.Transform(ra.resMap) +} + +func (ra *ResAccumulator) ResolveVars() error { + replacementMap, err := ra.makeVarReplacementMap() + if err != nil { + return err + } + if len(replacementMap) == 0 { + return nil + } + t := newRefVarTransformer( + replacementMap, ra.tConfig.VarReference) + err = ra.Transform(t) + if len(t.UnusedVars()) > 0 { + log.Printf( + "well-defined vars that were never replaced: %s\n", + strings.Join(t.UnusedVars(), ",")) + } + return err +} + +func (ra *ResAccumulator) FixBackReferences() (err error) { + if ra.tConfig.NameReference == nil { + return nil + } + return ra.Transform( + newNameReferenceTransformer(ra.tConfig.NameReference)) +} + +// Intersection drops the resources which "other" does not have. 
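+// E.g. (a sketch) if the accumulator holds {A, B, C} and other holds
+// {B, C, D}, the accumulator is reduced to {B, C}; comparison is by ResId.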
+func (ra *ResAccumulator) Intersection(other resmap.ResMap) error { + otherIds := other.AllIds() + for _, curId := range ra.resMap.AllIds() { + toDelete := true + for _, otherId := range otherIds { + if otherId == curId { + toDelete = false + break + } + } + if toDelete { + err := ra.resMap.Remove(curId) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/AnnotationsTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/AnnotationsTransformer.go new file mode 100644 index 000000000..0910c472b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/AnnotationsTransformer.go @@ -0,0 +1,38 @@ +// Code generated by pluginator on AnnotationsTransformer; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "sigs.k8s.io/kustomize/api/filters/annotations" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/yaml" +) + +// Add the given annotations to the given field specifications. +type AnnotationsTransformerPlugin struct { + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` +} + +func (p *AnnotationsTransformerPlugin) Config( + _ *resmap.PluginHelpers, c []byte) (err error) { + p.Annotations = nil + p.FieldSpecs = nil + return yaml.Unmarshal(c, p) +} + +func (p *AnnotationsTransformerPlugin) Transform(m resmap.ResMap) error { + if len(p.Annotations) == 0 { + return nil + } + return m.ApplyFilter(annotations.Filter{ + Annotations: p.Annotations, + FsSlice: p.FieldSpecs, + }) +} + +func NewAnnotationsTransformerPlugin() resmap.TransformerPlugin { + return &AnnotationsTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ConfigMapGenerator.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ConfigMapGenerator.go new file mode 100644 index 000000000..dc18bce85 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ConfigMapGenerator.go @@ -0,0 +1,39 @@ +// Code generated by pluginator on ConfigMapGenerator; DO NOT EDIT. 
+// pluginator {(devel) unknown } + +package builtins + +import ( + "sigs.k8s.io/kustomize/api/kv" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/yaml" +) + +type ConfigMapGeneratorPlugin struct { + h *resmap.PluginHelpers + types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + types.ConfigMapArgs +} + +func (p *ConfigMapGeneratorPlugin) Config(h *resmap.PluginHelpers, config []byte) (err error) { + p.ConfigMapArgs = types.ConfigMapArgs{} + err = yaml.Unmarshal(config, p) + if p.ConfigMapArgs.Name == "" { + p.ConfigMapArgs.Name = p.Name + } + if p.ConfigMapArgs.Namespace == "" { + p.ConfigMapArgs.Namespace = p.Namespace + } + p.h = h + return +} + +func (p *ConfigMapGeneratorPlugin) Generate() (resmap.ResMap, error) { + return p.h.ResmapFactory().FromConfigMapArgs( + kv.NewLoader(p.h.Loader(), p.h.Validator()), p.ConfigMapArgs) +} + +func NewConfigMapGeneratorPlugin() resmap.GeneratorPlugin { + return &ConfigMapGeneratorPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HashTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HashTransformer.go new file mode 100644 index 000000000..ec2331265 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HashTransformer.go @@ -0,0 +1,40 @@ +// Code generated by pluginator on HashTransformer; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "fmt" + + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/resmap" +) + +type HashTransformerPlugin struct { + hasher ifc.KustHasher +} + +func (p *HashTransformerPlugin) Config( + h *resmap.PluginHelpers, _ []byte) (err error) { + p.hasher = h.ResmapFactory().RF().Hasher() + return nil +} + +// Transform appends hash to generated resources. +func (p *HashTransformerPlugin) Transform(m resmap.ResMap) error { + for _, res := range m.Resources() { + if res.NeedHashSuffix() { + h, err := res.Hash(p.hasher) + if err != nil { + return err + } + res.StorePreviousId() + res.SetName(fmt.Sprintf("%s-%s", res.GetName(), h)) + } + } + return nil +} + +func NewHashTransformerPlugin() resmap.TransformerPlugin { + return &HashTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go new file mode 100644 index 000000000..113f56ea7 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go @@ -0,0 +1,329 @@ +// Code generated by pluginator on HelmChartInflationGenerator; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/imdario/mergo" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/yaml" +) + +// Generate resources from a remote or local helm chart. 
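+//
+// Typically driven by a kustomization entry such as (a sketch; the chart
+// coordinates are illustrative):
+//
+//	helmCharts:
+//	- name: minecraft
+//	  repo: https://itzg.github.io/minecraft-server-charts
+//	  version: 3.1.3
+//	  valuesInline:
+//	    minecraftServer:
+//	      eula: true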
+type HelmChartInflationGeneratorPlugin struct {
+	h *resmap.PluginHelpers
+	types.HelmGlobals
+	types.HelmChart
+	tmpDir string
+}
+
+const (
+	valuesMergeOptionMerge    = "merge"
+	valuesMergeOptionOverride = "override"
+	valuesMergeOptionReplace  = "replace"
+)
+
+var legalMergeOptions = []string{
+	valuesMergeOptionMerge,
+	valuesMergeOptionOverride,
+	valuesMergeOptionReplace,
+}
+
+// Config uses the input plugin configurations `config` to set up the
+// generator options
+func (p *HelmChartInflationGeneratorPlugin) Config(
+	h *resmap.PluginHelpers, config []byte) (err error) {
+	if h.GeneralConfig() == nil {
+		return fmt.Errorf("unable to access general config")
+	}
+	if !h.GeneralConfig().HelmConfig.Enabled {
+		return fmt.Errorf("must specify --enable-helm")
+	}
+	if h.GeneralConfig().HelmConfig.Command == "" {
+		return fmt.Errorf("must specify --helm-command")
+	}
+	p.h = h
+	if err = yaml.Unmarshal(config, p); err != nil {
+		return
+	}
+	return p.validateArgs()
+}
+
+// This uses the real file system since tmpDir may be used
+// by the helm subprocess. Cannot use a chroot jail or fake
+// filesystem since we allow the user to use previously
+// downloaded charts. This is safe since this plugin is
+// owned by kustomize.
+func (p *HelmChartInflationGeneratorPlugin) establishTmpDir() (err error) {
+	if p.tmpDir != "" {
+		// already done.
+		return nil
+	}
+	p.tmpDir, err = os.MkdirTemp("", "kustomize-helm-")
+	return err
+}
+
+func (p *HelmChartInflationGeneratorPlugin) validateArgs() (err error) {
+	if p.Name == "" {
+		return fmt.Errorf("chart name cannot be empty")
+	}
+
+	// ChartHome might be consulted by the plugin (to read
+	// values files below it), so it must be located under
+	// the loader root (unless root restrictions are
+	// disabled, in which case this can be an absolute path).
+	if p.ChartHome == "" {
+		p.ChartHome = types.HelmDefaultHome
+	}
+
+	// The ValuesFile(s) may be consulted by the plugin, so it must
+	// be under the loader root (unless root restrictions are
+	// disabled).
+	if p.ValuesFile == "" {
+		p.ValuesFile = filepath.Join(p.ChartHome, p.Name, "values.yaml")
+	}
+	for i, file := range p.AdditionalValuesFiles {
+		// use Load() to enforce root restrictions
+		if _, err := p.h.Loader().Load(file); err != nil {
+			return errors.WrapPrefixf(err, "could not load additionalValuesFile")
+		}
+		// the additional values filepaths must be relative to the kust root
+		p.AdditionalValuesFiles[i] = filepath.Join(p.h.Loader().Root(), file)
+	}
+
+	if err = p.errIfIllegalValuesMerge(); err != nil {
+		return err
+	}
+
+	// ConfigHome is not loaded by the plugin, and can be located anywhere.
+	if p.ConfigHome == "" {
+		if err = p.establishTmpDir(); err != nil {
+			return errors.WrapPrefixf(
+				err, "unable to create tmp dir for HELM_CONFIG_HOME")
+		}
+		p.ConfigHome = filepath.Join(p.tmpDir, "helm")
+	}
+	return nil
+}
+
+func (p *HelmChartInflationGeneratorPlugin) errIfIllegalValuesMerge() error {
+	if p.ValuesMerge == "" {
+		// Use the default.
+		p.ValuesMerge = valuesMergeOptionOverride
+		return nil
+	}
+	for _, opt := range legalMergeOptions {
+		if p.ValuesMerge == opt {
+			return nil
+		}
+	}
+	return fmt.Errorf("valuesMerge must be one of %v", legalMergeOptions)
+}
+
+func (p *HelmChartInflationGeneratorPlugin) absChartHome() string {
+	if filepath.IsAbs(p.ChartHome) {
+		return p.ChartHome
+	}
+	return filepath.Join(p.h.Loader().Root(), p.ChartHome)
+}
+
+func (p *HelmChartInflationGeneratorPlugin) runHelmCommand(
+	args []string) ([]byte, error) {
+	stdout := new(bytes.Buffer)
+	stderr := new(bytes.Buffer)
+	cmd := exec.Command(p.h.GeneralConfig().HelmConfig.Command, args...)
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+	env := []string{
+		fmt.Sprintf("HELM_CONFIG_HOME=%s", p.ConfigHome),
+		fmt.Sprintf("HELM_CACHE_HOME=%s/.cache", p.ConfigHome),
+		fmt.Sprintf("HELM_DATA_HOME=%s/.data", p.ConfigHome)}
+	cmd.Env = append(os.Environ(), env...)
+	err := cmd.Run()
+	if err != nil {
+		helm := p.h.GeneralConfig().HelmConfig.Command
+		err = errors.WrapPrefixf(
+			fmt.Errorf(
+				"unable to run: '%s %s' with env=%s (is '%s' installed?): %w",
+				helm, strings.Join(args, " "), env, helm, err),
+			stderr.String(),
+		)
+	}
+	return stdout.Bytes(), err
+}
+
+// createNewMergedValuesFile replaces/merges original values file with ValuesInline.
+func (p *HelmChartInflationGeneratorPlugin) createNewMergedValuesFile() (
+	path string, err error) {
+	if p.ValuesMerge == valuesMergeOptionMerge ||
+		p.ValuesMerge == valuesMergeOptionOverride {
+		if err = p.replaceValuesInline(); err != nil {
+			return "", err
+		}
+	}
+	var b []byte
+	b, err = yaml.Marshal(p.ValuesInline)
+	if err != nil {
+		return "", err
+	}
+	return p.writeValuesBytes(b)
+}
+
+func (p *HelmChartInflationGeneratorPlugin) replaceValuesInline() error {
+	pValues, err := p.h.Loader().Load(p.ValuesFile)
+	if err != nil {
+		return err
+	}
+	chValues := make(map[string]interface{})
+	if err = yaml.Unmarshal(pValues, &chValues); err != nil {
+		return err
+	}
+	switch p.ValuesMerge {
+	case valuesMergeOptionOverride:
+		err = mergo.Merge(
+			&chValues, p.ValuesInline, mergo.WithOverride)
+	case valuesMergeOptionMerge:
+		err = mergo.Merge(&chValues, p.ValuesInline)
+	}
+	p.ValuesInline = chValues
+	return err
+}
+
+// copyValuesFile to avoid branching. TODO: get rid of this.
+func (p *HelmChartInflationGeneratorPlugin) copyValuesFile() (string, error) {
+	b, err := p.h.Loader().Load(p.ValuesFile)
+	if err != nil {
+		return "", err
+	}
+	return p.writeValuesBytes(b)
+}
+
+// Write an absolute-path file in the tmp file system.
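+// The file lands at <tmpDir>/<Name>-kustomize-values.yaml.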
+func (p *HelmChartInflationGeneratorPlugin) writeValuesBytes( + b []byte) (string, error) { + if err := p.establishTmpDir(); err != nil { + return "", fmt.Errorf("cannot create tmp dir to write helm values") + } + path := filepath.Join(p.tmpDir, p.Name+"-kustomize-values.yaml") + return path, errors.WrapPrefixf(os.WriteFile(path, b, 0644), "failed to write values file") +} + +func (p *HelmChartInflationGeneratorPlugin) cleanup() { + if p.tmpDir != "" { + os.RemoveAll(p.tmpDir) + } +} + +// Generate implements generator +func (p *HelmChartInflationGeneratorPlugin) Generate() (rm resmap.ResMap, err error) { + defer p.cleanup() + if err = p.checkHelmVersion(); err != nil { + return nil, err + } + if path, exists := p.chartExistsLocally(); !exists { + if p.Repo == "" { + return nil, fmt.Errorf( + "no repo specified for pull, no chart found at '%s'", path) + } + if _, err := p.runHelmCommand(p.pullCommand()); err != nil { + return nil, err + } + } + if len(p.ValuesInline) > 0 { + p.ValuesFile, err = p.createNewMergedValuesFile() + } else { + p.ValuesFile, err = p.copyValuesFile() + } + if err != nil { + return nil, err + } + var stdout []byte + stdout, err = p.runHelmCommand(p.AsHelmArgs(p.absChartHome())) + if err != nil { + return nil, err + } + + rm, resMapErr := p.h.ResmapFactory().NewResMapFromBytes(stdout) + if resMapErr == nil { + return rm, nil + } + // try to remove the contents before first "---" because + // helm may produce messages to stdout before it + r := &kio.ByteReader{Reader: bytes.NewBufferString(string(stdout)), OmitReaderAnnotations: true} + nodes, err := r.Read() + if err != nil { + return nil, fmt.Errorf("error reading helm output: %w", err) + } + + if len(nodes) != 0 { + rm, err = p.h.ResmapFactory().NewResMapFromRNodeSlice(nodes) + if err != nil { + return nil, fmt.Errorf("could not parse rnode slice into resource map: %w", err) + } + return rm, nil + } + return nil, fmt.Errorf("could not parse bytes into resource map: %w", resMapErr) +} + +func (p *HelmChartInflationGeneratorPlugin) pullCommand() []string { + args := []string{ + "pull", + "--untar", + "--untardir", p.absChartHome(), + "--repo", p.Repo, + p.Name} + if p.Version != "" { + args = append(args, "--version", p.Version) + } + return args +} + +// chartExistsLocally will return true if the chart does exist in +// local chart home. 
+func (p *HelmChartInflationGeneratorPlugin) chartExistsLocally() (string, bool) { + path := filepath.Join(p.absChartHome(), p.Name) + s, err := os.Stat(path) + if err != nil { + return "", false + } + return path, s.IsDir() +} + +// checkHelmVersion will return an error if the helm version is not V3 +func (p *HelmChartInflationGeneratorPlugin) checkHelmVersion() error { + stdout, err := p.runHelmCommand([]string{"version", "-c", "--short"}) + if err != nil { + return err + } + r, err := regexp.Compile(`v?\d+(\.\d+)+`) + if err != nil { + return err + } + v := r.FindString(string(stdout)) + if v == "" { + return fmt.Errorf("cannot find version string in %s", string(stdout)) + } + if v[0] == 'v' { + v = v[1:] + } + majorVersion := strings.Split(v, ".")[0] + if majorVersion != "3" { + return fmt.Errorf("this plugin requires helm V3 but got v%s", v) + } + return nil +} + +func NewHelmChartInflationGeneratorPlugin() resmap.GeneratorPlugin { + return &HelmChartInflationGeneratorPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/IAMPolicyGenerator.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/IAMPolicyGenerator.go new file mode 100644 index 000000000..cfb1fa81b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/IAMPolicyGenerator.go @@ -0,0 +1,33 @@ +// Code generated by pluginator on IAMPolicyGenerator; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "sigs.k8s.io/kustomize/api/filters/iampolicygenerator" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/yaml" +) + +type IAMPolicyGeneratorPlugin struct { + types.IAMPolicyGeneratorArgs +} + +func (p *IAMPolicyGeneratorPlugin) Config(h *resmap.PluginHelpers, config []byte) (err error) { + p.IAMPolicyGeneratorArgs = types.IAMPolicyGeneratorArgs{} + err = yaml.Unmarshal(config, p) + return +} + +func (p *IAMPolicyGeneratorPlugin) Generate() (resmap.ResMap, error) { + r := resmap.New() + err := r.ApplyFilter(iampolicygenerator.Filter{ + IAMPolicyGenerator: p.IAMPolicyGeneratorArgs, + }) + return r, err +} + +func NewIAMPolicyGeneratorPlugin() resmap.GeneratorPlugin { + return &IAMPolicyGeneratorPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ImageTagTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ImageTagTransformer.go new file mode 100644 index 000000000..ffde73a7a --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ImageTagTransformer.go @@ -0,0 +1,41 @@ +// Code generated by pluginator on ImageTagTransformer; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "sigs.k8s.io/kustomize/api/filters/imagetag" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/yaml" +) + +// Find matching image declarations and replace +// the name, tag and/or digest. 
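+// Both the legacy path-based filter and the general fieldSpec-driven
+// filter are applied (see Transform below).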
+type ImageTagTransformerPlugin struct { + ImageTag types.Image `json:"imageTag,omitempty" yaml:"imageTag,omitempty"` + FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` +} + +func (p *ImageTagTransformerPlugin) Config( + _ *resmap.PluginHelpers, c []byte) (err error) { + p.ImageTag = types.Image{} + p.FieldSpecs = nil + return yaml.Unmarshal(c, p) +} + +func (p *ImageTagTransformerPlugin) Transform(m resmap.ResMap) error { + if err := m.ApplyFilter(imagetag.LegacyFilter{ + ImageTag: p.ImageTag, + }); err != nil { + return err + } + return m.ApplyFilter(imagetag.Filter{ + ImageTag: p.ImageTag, + FsSlice: p.FieldSpecs, + }) +} + +func NewImageTagTransformerPlugin() resmap.TransformerPlugin { + return &ImageTagTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LabelTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LabelTransformer.go new file mode 100644 index 000000000..c45731b54 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LabelTransformer.go @@ -0,0 +1,38 @@ +// Code generated by pluginator on LabelTransformer; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "sigs.k8s.io/kustomize/api/filters/labels" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/yaml" +) + +// Add the given labels to the given field specifications. +type LabelTransformerPlugin struct { + Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` + FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` +} + +func (p *LabelTransformerPlugin) Config( + _ *resmap.PluginHelpers, c []byte) (err error) { + p.Labels = nil + p.FieldSpecs = nil + return yaml.Unmarshal(c, p) +} + +func (p *LabelTransformerPlugin) Transform(m resmap.ResMap) error { + if len(p.Labels) == 0 { + return nil + } + return m.ApplyFilter(labels.Filter{ + Labels: p.Labels, + FsSlice: p.FieldSpecs, + }) +} + +func NewLabelTransformerPlugin() resmap.TransformerPlugin { + return &LabelTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go new file mode 100644 index 000000000..30a88340f --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go @@ -0,0 +1,74 @@ +// Code generated by pluginator on NamespaceTransformer; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "fmt" + + "sigs.k8s.io/kustomize/api/filters/namespace" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/yaml" +) + +// Change or set the namespace of non-cluster level resources. 
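+// After transformation, two resources that map to the same current ID
+// are reported as a conflict (see Transform).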
+type NamespaceTransformerPlugin struct { + types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` + UnsetOnly bool `json:"unsetOnly" yaml:"unsetOnly"` + SetRoleBindingSubjects namespace.RoleBindingSubjectMode `json:"setRoleBindingSubjects" yaml:"setRoleBindingSubjects"` +} + +func (p *NamespaceTransformerPlugin) Config( + _ *resmap.PluginHelpers, c []byte) (err error) { + p.Namespace = "" + p.FieldSpecs = nil + if err := yaml.Unmarshal(c, p); err != nil { + return errors.WrapPrefixf(err, "unmarshalling NamespaceTransformer config") + } + switch p.SetRoleBindingSubjects { + case namespace.AllServiceAccountSubjects, namespace.DefaultSubjectsOnly, namespace.NoSubjects: + // valid + case namespace.SubjectModeUnspecified: + p.SetRoleBindingSubjects = namespace.DefaultSubjectsOnly + default: + return errors.Errorf("invalid value %q for setRoleBindingSubjects: "+ + "must be one of %q, %q or %q", p.SetRoleBindingSubjects, + namespace.DefaultSubjectsOnly, namespace.NoSubjects, namespace.AllServiceAccountSubjects) + } + + return nil +} + +func (p *NamespaceTransformerPlugin) Transform(m resmap.ResMap) error { + if len(p.Namespace) == 0 { + return nil + } + for _, r := range m.Resources() { + if r.IsNilOrEmpty() { + // Don't mutate empty objects? + continue + } + r.StorePreviousId() + if err := r.ApplyFilter(namespace.Filter{ + Namespace: p.Namespace, + FsSlice: p.FieldSpecs, + SetRoleBindingSubjects: p.SetRoleBindingSubjects, + UnsetOnly: p.UnsetOnly, + }); err != nil { + return err + } + matches := m.GetMatchingResourcesByCurrentId(r.CurId().Equals) + if len(matches) != 1 { + return fmt.Errorf( + "namespace transformation produces ID conflict: %+v", matches) + } + } + return nil +} + +func NewNamespaceTransformerPlugin() resmap.TransformerPlugin { + return &NamespaceTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go new file mode 100644 index 000000000..5ee5353f4 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go @@ -0,0 +1,105 @@ +// Code generated by pluginator on PatchJson6902Transformer; DO NOT EDIT. 
+// pluginator {(devel) unknown }
+
+package builtins
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+ "sigs.k8s.io/kustomize/api/filters/patchjson6902"
+ "sigs.k8s.io/kustomize/api/ifc"
+ "sigs.k8s.io/kustomize/api/resmap"
+ "sigs.k8s.io/kustomize/api/types"
+ "sigs.k8s.io/kustomize/kyaml/errors"
+ "sigs.k8s.io/kustomize/kyaml/kio/kioutil"
+ "sigs.k8s.io/yaml"
+)
+
+type PatchJson6902TransformerPlugin struct {
+ ldr ifc.Loader
+ decodedPatch jsonpatch.Patch
+ Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"`
+ Path string `json:"path,omitempty" yaml:"path,omitempty"`
+ JsonOp string `json:"jsonOp,omitempty" yaml:"jsonOp,omitempty"`
+}
+
+func (p *PatchJson6902TransformerPlugin) Config(
+ h *resmap.PluginHelpers, c []byte) (err error) {
+ p.ldr = h.Loader()
+ err = yaml.Unmarshal(c, p)
+ if err != nil {
+ return err
+ }
+ if p.Target.Name == "" {
+ return fmt.Errorf("must specify the target name")
+ }
+ if p.Path == "" && p.JsonOp == "" {
+ return fmt.Errorf("empty file path and empty jsonOp")
+ }
+ if p.Path != "" {
+ if p.JsonOp != "" {
+ return fmt.Errorf("must specify a file path or jsonOp, not both")
+ }
+ rawOp, err := p.ldr.Load(p.Path)
+ if err != nil {
+ return err
+ }
+ p.JsonOp = string(rawOp)
+ if p.JsonOp == "" {
+ return fmt.Errorf("patch file '%s' seems to be empty", p.Path)
+ }
+ }
+ if p.JsonOp[0] != '[' {
+ // if it doesn't seem to be JSON, imagine
+ // it is YAML, and convert to JSON.
+ op, err := yaml.YAMLToJSON([]byte(p.JsonOp))
+ if err != nil {
+ return err
+ }
+ p.JsonOp = string(op)
+ }
+ p.decodedPatch, err = jsonpatch.DecodePatch([]byte(p.JsonOp))
+ if err != nil {
+ return errors.WrapPrefixf(err, "decoding %s", p.JsonOp)
+ }
+ if len(p.decodedPatch) == 0 {
+ return fmt.Errorf(
+ "patch appears to be empty; file=%s, JsonOp=%s", p.Path, p.JsonOp)
+ }
+ return err
+}
+
+func (p *PatchJson6902TransformerPlugin) Transform(m resmap.ResMap) error {
+ if p.Target == nil {
+ return fmt.Errorf("must specify a target for patch %s", p.JsonOp)
+ }
+ resources, err := m.Select(*p.Target)
+ if err != nil {
+ return err
+ }
+ for _, res := range resources {
+ internalAnnotations := kioutil.GetInternalAnnotations(&res.RNode)
+
+ err = res.ApplyFilter(patchjson6902.Filter{
+ Patch: p.JsonOp,
+ })
+ if err != nil {
+ return err
+ }
+
+ annotations := res.GetAnnotations()
+ for key, value := range internalAnnotations {
+ annotations[key] = value
+ }
+ err = res.SetAnnotations(annotations)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func NewPatchJson6902TransformerPlugin() resmap.TransformerPlugin {
+ return &PatchJson6902TransformerPlugin{}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchStrategicMergeTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchStrategicMergeTransformer.go
new file mode 100644
index 000000000..d68f2425e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchStrategicMergeTransformer.go
@@ -0,0 +1,89 @@
+// Code generated by pluginator on PatchStrategicMergeTransformer; DO NOT EDIT.
+// pluginator {(devel) unknown } + +package builtins + +import ( + "fmt" + + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/yaml" +) + +type PatchStrategicMergeTransformerPlugin struct { + loadedPatches []*resource.Resource + Paths []types.PatchStrategicMerge `json:"paths,omitempty" yaml:"paths,omitempty"` + Patches string `json:"patches,omitempty" yaml:"patches,omitempty"` +} + +func (p *PatchStrategicMergeTransformerPlugin) Config( + h *resmap.PluginHelpers, c []byte) (err error) { + err = yaml.Unmarshal(c, p) + if err != nil { + return err + } + if len(p.Paths) == 0 && p.Patches == "" { + return fmt.Errorf("empty file path and empty patch content") + } + if len(p.Paths) != 0 { + patches, err := loadFromPaths(h, p.Paths) + if err != nil { + return err + } + p.loadedPatches = append(p.loadedPatches, patches...) + } + if p.Patches != "" { + patches, err := h.ResmapFactory().RF().SliceFromBytes([]byte(p.Patches)) + if err != nil { + return err + } + p.loadedPatches = append(p.loadedPatches, patches...) + } + if len(p.loadedPatches) == 0 { + return fmt.Errorf( + "patch appears to be empty; files=%v, Patch=%s", p.Paths, p.Patches) + } + return nil +} + +func loadFromPaths( + h *resmap.PluginHelpers, + paths []types.PatchStrategicMerge) ( + result []*resource.Resource, err error) { + var patches []*resource.Resource + for _, path := range paths { + // For legacy reasons, attempt to treat the path string as + // actual patch content. + patches, err = h.ResmapFactory().RF().SliceFromBytes([]byte(path)) + if err != nil { + // Failing that, treat it as a file path. + patches, err = h.ResmapFactory().RF().SliceFromPatches( + h.Loader(), []types.PatchStrategicMerge{path}) + if err != nil { + return + } + } + result = append(result, patches...) + } + return +} + +func (p *PatchStrategicMergeTransformerPlugin) Transform(m resmap.ResMap) error { + for _, patch := range p.loadedPatches { + target, err := m.GetById(patch.OrgId()) + if err != nil { + return err + } + if err = m.ApplySmPatch( + resource.MakeIdSet([]*resource.Resource{target}), patch); err != nil { + return err + } + } + return nil +} + +func NewPatchStrategicMergeTransformerPlugin() resmap.TransformerPlugin { + return &PatchStrategicMergeTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go new file mode 100644 index 000000000..45be1ef25 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go @@ -0,0 +1,153 @@ +// Code generated by pluginator on PatchTransformer; DO NOT EDIT. 
+// pluginator {(devel) unknown } + +package builtins + +import ( + "fmt" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "sigs.k8s.io/kustomize/api/filters/patchjson6902" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/yaml" +) + +type PatchTransformerPlugin struct { + loadedPatch *resource.Resource + decodedPatch jsonpatch.Patch + Path string `json:"path,omitempty" yaml:"path,omitempty"` + Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` + Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` + Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"` +} + +func (p *PatchTransformerPlugin) Config( + h *resmap.PluginHelpers, c []byte) error { + err := yaml.Unmarshal(c, p) + if err != nil { + return err + } + p.Patch = strings.TrimSpace(p.Patch) + if p.Patch == "" && p.Path == "" { + return fmt.Errorf( + "must specify one of patch and path in\n%s", string(c)) + } + if p.Patch != "" && p.Path != "" { + return fmt.Errorf( + "patch and path can't be set at the same time\n%s", string(c)) + } + if p.Path != "" { + loaded, loadErr := h.Loader().Load(p.Path) + if loadErr != nil { + return loadErr + } + p.Patch = string(loaded) + } + + patchSM, errSM := h.ResmapFactory().RF().FromBytes([]byte(p.Patch)) + patchJson, errJson := jsonPatchFromBytes([]byte(p.Patch)) + if (errSM == nil && errJson == nil) || + (patchSM != nil && patchJson != nil) { + return fmt.Errorf( + "illegally qualifies as both an SM and JSON patch: [%v]", + p.Patch) + } + if errSM != nil && errJson != nil { + return fmt.Errorf( + "unable to parse SM or JSON patch from [%v]", p.Patch) + } + if errSM == nil { + p.loadedPatch = patchSM + if p.Options["allowNameChange"] { + p.loadedPatch.AllowNameChange() + } + if p.Options["allowKindChange"] { + p.loadedPatch.AllowKindChange() + } + } else { + p.decodedPatch = patchJson + } + return nil +} + +func (p *PatchTransformerPlugin) Transform(m resmap.ResMap) error { + if p.loadedPatch == nil { + return p.transformJson6902(m, p.decodedPatch) + } + // The patch was a strategic merge patch + return p.transformStrategicMerge(m, p.loadedPatch) +} + +// transformStrategicMerge applies the provided strategic merge patch +// to all the resources in the ResMap that match either the Target or +// the identifier of the patch. +func (p *PatchTransformerPlugin) transformStrategicMerge(m resmap.ResMap, patch *resource.Resource) error { + if p.Target == nil { + target, err := m.GetById(patch.OrgId()) + if err != nil { + return err + } + return target.ApplySmPatch(patch) + } + selected, err := m.Select(*p.Target) + if err != nil { + return err + } + return m.ApplySmPatch(resource.MakeIdSet(selected), patch) +} + +// transformJson6902 applies the provided json6902 patch +// to all the resources in the ResMap that match the Target. 
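+// Internal kioutil annotations are captured before the patch is applied and
+// restored afterwards, so a patch cannot strip them.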
+func (p *PatchTransformerPlugin) transformJson6902(m resmap.ResMap, patch jsonpatch.Patch) error {
+ if p.Target == nil {
+ return fmt.Errorf("must specify a target for patch %s", p.Patch)
+ }
+ resources, err := m.Select(*p.Target)
+ if err != nil {
+ return err
+ }
+ for _, res := range resources {
+ res.StorePreviousId()
+ internalAnnotations := kioutil.GetInternalAnnotations(&res.RNode)
+ err = res.ApplyFilter(patchjson6902.Filter{
+ Patch: p.Patch,
+ })
+ if err != nil {
+ return err
+ }
+
+ annotations := res.GetAnnotations()
+ for key, value := range internalAnnotations {
+ annotations[key] = value
+ }
+ err = res.SetAnnotations(annotations)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// jsonPatchFromBytes loads a Json 6902 patch from
+// a bytes input
+func jsonPatchFromBytes(
+ in []byte) (jsonpatch.Patch, error) {
+ ops := string(in)
+ if ops == "" {
+ return nil, fmt.Errorf("empty json patch operations")
+ }
+
+ if ops[0] != '[' {
+ jsonOps, err := yaml.YAMLToJSON(in)
+ if err != nil {
+ return nil, err
+ }
+ ops = string(jsonOps)
+ }
+ return jsonpatch.DecodePatch([]byte(ops))
+}
+
+func NewPatchTransformerPlugin() resmap.TransformerPlugin {
+ return &PatchTransformerPlugin{}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PrefixTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PrefixTransformer.go
new file mode 100644
index 000000000..33700bb4b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PrefixTransformer.go
@@ -0,0 +1,96 @@
+// Code generated by pluginator on PrefixTransformer; DO NOT EDIT.
+// pluginator {(devel) unknown }
+
+package builtins
+
+import (
+ "errors"
+
+ "sigs.k8s.io/kustomize/api/filters/prefix"
+ "sigs.k8s.io/kustomize/api/resmap"
+ "sigs.k8s.io/kustomize/api/types"
+ "sigs.k8s.io/kustomize/kyaml/resid"
+ "sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Add the given prefix to the field
+type PrefixTransformerPlugin struct {
+ Prefix string `json:"prefix,omitempty" yaml:"prefix,omitempty"`
+ FieldSpecs types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"`
+}
+
+// TODO: Make this gvk skip list part of the config.
+var prefixFieldSpecsToSkip = types.FsSlice{
+ {Gvk: resid.Gvk{Kind: "CustomResourceDefinition"}},
+ {Gvk: resid.Gvk{Group: "apiregistration.k8s.io", Kind: "APIService"}},
+ {Gvk: resid.Gvk{Kind: "Namespace"}},
+}
+
+func (p *PrefixTransformerPlugin) Config(
+ _ *resmap.PluginHelpers, c []byte) (err error) {
+ p.Prefix = ""
+ p.FieldSpecs = nil
+ err = yaml.Unmarshal(c, p)
+ if err != nil {
+ return
+ }
+ if p.FieldSpecs == nil {
+ return errors.New("fieldSpecs is not expected to be nil")
+ }
+ return
+}
+
+func (p *PrefixTransformerPlugin) Transform(m resmap.ResMap) error {
+ // Even if the Prefix is empty we want to proceed with the
+ // transformation. This allows adding contextual information
+ // to the resources (AddNamePrefix).
+ for _, r := range m.Resources() {
+ // TODO: move this test into the filter (i.e. make a better filter)
+ if p.shouldSkip(r.OrgId()) {
+ continue
+ }
+ id := r.OrgId()
+ // current default configuration contains
+ // only one entry: "metadata/name" with no GVK
+ for _, fs := range p.FieldSpecs {
+ // TODO: this is redundant to filter (but needed for now)
+ if !id.IsSelected(&fs.Gvk) {
+ continue
+ }
+ // TODO: move this test into the filter.
+ if fs.Path == "metadata/name" {
+ // "metadata/name" is the only field.
+ // this will add a prefix to the resource
+ // even if it is empty
+
+ r.AddNamePrefix(p.Prefix)
+ if p.Prefix != "" {
+ // TODO: There are multiple transformers that can change a resource's name, and each makes a call to
+ // StorePreviousID(). We should make it so that we only call StorePreviousID once per kustomization layer
+ // to avoid storing intermediate names between transformations, to prevent intermediate name conflicts.
+ r.StorePreviousId()
+ }
+ }
+ if err := r.ApplyFilter(prefix.Filter{
+ Prefix: p.Prefix,
+ FieldSpec: fs,
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (p *PrefixTransformerPlugin) shouldSkip(id resid.ResId) bool {
+ for _, path := range prefixFieldSpecsToSkip {
+ if id.IsSelected(&path.Gvk) {
+ return true
+ }
+ }
+ return false
+}
+
+func NewPrefixTransformerPlugin() resmap.TransformerPlugin {
+ return &PrefixTransformerPlugin{}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go
new file mode 100644
index 000000000..02cb1927a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go
@@ -0,0 +1,78 @@
+// Code generated by pluginator on ReplacementTransformer; DO NOT EDIT.
+// pluginator {(devel) unknown }
+
+package builtins
+
+import (
+ "fmt"
+ "reflect"
+
+ "sigs.k8s.io/kustomize/api/filters/replacement"
+ "sigs.k8s.io/kustomize/api/resmap"
+ "sigs.k8s.io/kustomize/api/types"
+ "sigs.k8s.io/yaml"
+)
+
+// Replace values in targets with values from a source
+type ReplacementTransformerPlugin struct {
+ ReplacementList []types.ReplacementField `json:"replacements,omitempty" yaml:"replacements,omitempty"`
+ Replacements []types.Replacement `json:"omitempty" yaml:"omitempty"`
+}
+
+func (p *ReplacementTransformerPlugin) Config(
+ h *resmap.PluginHelpers, c []byte) (err error) {
+ p.ReplacementList = []types.ReplacementField{}
+ if err := yaml.Unmarshal(c, p); err != nil {
+ return err
+ }
+
+ for _, r := range p.ReplacementList {
+ if r.Path != "" && (r.Source != nil || len(r.Targets) != 0) {
+ return fmt.Errorf("cannot specify both path and inline replacement")
+ }
+ if r.Path != "" {
+ // load the replacement from the path
+ content, err := h.Loader().Load(r.Path)
+ if err != nil {
+ return err
+ }
+ // find if the path contains a list of replacements or a single replacement
+ var replacement interface{}
+ err = yaml.Unmarshal(content, &replacement)
+ if err != nil {
+ return err
+ }
+ items := reflect.ValueOf(replacement)
+ switch items.Kind() {
+ case reflect.Slice:
+ repl := []types.Replacement{}
+ if err := yaml.Unmarshal(content, &repl); err != nil {
+ return err
+ }
+ p.Replacements = append(p.Replacements, repl...)
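+ // A single replacement document unmarshals to a map.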
+ case reflect.Map: + repl := types.Replacement{} + if err := yaml.Unmarshal(content, &repl); err != nil { + return err + } + p.Replacements = append(p.Replacements, repl) + default: + return fmt.Errorf("unsupported replacement type encountered within replacement path: %v", items.Kind()) + } + } else { + // replacement information is already loaded + p.Replacements = append(p.Replacements, r.Replacement) + } + } + return nil +} + +func (p *ReplacementTransformerPlugin) Transform(m resmap.ResMap) (err error) { + return m.ApplyFilter(replacement.Filter{ + Replacements: p.Replacements, + }) +} + +func NewReplacementTransformerPlugin() resmap.TransformerPlugin { + return &ReplacementTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplicaCountTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplicaCountTransformer.go new file mode 100644 index 000000000..c87d64251 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplicaCountTransformer.go @@ -0,0 +1,73 @@ +// Code generated by pluginator on ReplicaCountTransformer; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "fmt" + + "sigs.k8s.io/kustomize/api/filters/replicacount" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/resid" + "sigs.k8s.io/yaml" +) + +// Find matching replicas declarations and replace the count. +// Eases the kustomization configuration of replica changes. +type ReplicaCountTransformerPlugin struct { + Replica types.Replica `json:"replica,omitempty" yaml:"replica,omitempty"` + FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` +} + +func (p *ReplicaCountTransformerPlugin) Config( + _ *resmap.PluginHelpers, c []byte) (err error) { + p.Replica = types.Replica{} + p.FieldSpecs = nil + return yaml.Unmarshal(c, p) +} + +func (p *ReplicaCountTransformerPlugin) Transform(m resmap.ResMap) error { + found := false + for _, fs := range p.FieldSpecs { + matcher := p.createMatcher(fs) + resList := m.GetMatchingResourcesByAnyId(matcher) + if len(resList) > 0 { + found = true + for _, r := range resList { + // There are redundant checks in the filter + // that we'll live with until resolution of + // https://github.com/kubernetes-sigs/kustomize/issues/2506 + err := r.ApplyFilter(replicacount.Filter{ + Replica: p.Replica, + FieldSpec: fs, + }) + if err != nil { + return err + } + } + } + } + + if !found { + gvks := make([]string, len(p.FieldSpecs)) + for i, replicaSpec := range p.FieldSpecs { + gvks[i] = replicaSpec.Gvk.String() + } + return fmt.Errorf("resource with name %s does not match a config with the following GVK %v", + p.Replica.Name, gvks) + } + + return nil +} + +// Match Replica.Name and FieldSpec +func (p *ReplicaCountTransformerPlugin) createMatcher(fs types.FieldSpec) resmap.IdMatcher { + return func(r resid.ResId) bool { + return r.Name == p.Replica.Name && r.Gvk.IsSelected(&fs.Gvk) + } +} + +func NewReplicaCountTransformerPlugin() resmap.TransformerPlugin { + return &ReplicaCountTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SecretGenerator.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SecretGenerator.go new file mode 100644 index 000000000..2a4ef1c90 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SecretGenerator.go @@ -0,0 +1,39 @@ +// Code generated by pluginator on SecretGenerator; DO NOT EDIT. 
+// pluginator {(devel) unknown }
+
+package builtins
+
+import (
+ "sigs.k8s.io/kustomize/api/kv"
+ "sigs.k8s.io/kustomize/api/resmap"
+ "sigs.k8s.io/kustomize/api/types"
+ "sigs.k8s.io/yaml"
+)
+
+type SecretGeneratorPlugin struct {
+ h *resmap.PluginHelpers
+ types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ types.SecretArgs
+}
+
+func (p *SecretGeneratorPlugin) Config(h *resmap.PluginHelpers, config []byte) (err error) {
+ p.SecretArgs = types.SecretArgs{}
+ err = yaml.Unmarshal(config, p)
+ if p.SecretArgs.Name == "" {
+ p.SecretArgs.Name = p.Name
+ }
+ if p.SecretArgs.Namespace == "" {
+ p.SecretArgs.Namespace = p.Namespace
+ }
+ p.h = h
+ return
+}
+
+func (p *SecretGeneratorPlugin) Generate() (resmap.ResMap, error) {
+ return p.h.ResmapFactory().FromSecretArgs(
+ kv.NewLoader(p.h.Loader(), p.h.Validator()), p.SecretArgs)
+}
+
+func NewSecretGeneratorPlugin() resmap.GeneratorPlugin {
+ return &SecretGeneratorPlugin{}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go
new file mode 100644
index 000000000..687f21a60
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SortOrderTransformer.go
@@ -0,0 +1,244 @@
+// Code generated by pluginator on SortOrderTransformer; DO NOT EDIT.
+// pluginator {(devel) unknown }
+
+package builtins
+
+import (
+ "sort"
+ "strings"
+
+ "sigs.k8s.io/kustomize/api/resmap"
+ "sigs.k8s.io/kustomize/api/resource"
+ "sigs.k8s.io/kustomize/api/types"
+ "sigs.k8s.io/kustomize/kyaml/errors"
+ "sigs.k8s.io/kustomize/kyaml/resid"
+ "sigs.k8s.io/yaml"
+)
+
+// Sort the resources using a customizable ordering based on Kind.
+// Defaults to the ordering of the GVK struct, which puts cluster-wide basic
+// resources with no dependencies (like Namespace, StorageClass, etc.) first,
+// and resources with a high number of dependencies
+// (like ValidatingWebhookConfiguration) last.
+type SortOrderTransformerPlugin struct {
+ SortOptions *types.SortOptions `json:"sortOptions,omitempty" yaml:"sortOptions,omitempty"`
+}
+
+func (p *SortOrderTransformerPlugin) Config(
+ _ *resmap.PluginHelpers, c []byte) error {
+ return errors.WrapPrefixf(yaml.Unmarshal(c, p), "Failed to unmarshal SortOrderTransformer config")
+}
+
+func (p *SortOrderTransformerPlugin) applyDefaults() {
+ // Default to FIFO sort, aka no-op.
+ if p.SortOptions == nil {
+ p.SortOptions = &types.SortOptions{
+ Order: types.FIFOSortOrder,
+ }
+ }
+
+ // If legacy sort is selected and no options are given, default to
+ // hardcoded order.
+ if p.SortOptions.Order == types.LegacySortOrder && p.SortOptions.LegacySortOptions == nil {
+ p.SortOptions.LegacySortOptions = &types.LegacySortOptions{
+ OrderFirst: defaultOrderFirst,
+ OrderLast: defaultOrderLast,
+ }
+ }
+}
+
+func (p *SortOrderTransformerPlugin) validate() error {
+ // Check valid values for SortOrder
+ if p.SortOptions.Order != types.FIFOSortOrder && p.SortOptions.Order != types.LegacySortOrder {
+ return errors.Errorf("the field 'sortOptions.order' must be one of [%s, %s]",
+ types.FIFOSortOrder, types.LegacySortOrder)
+ }
+
+ // Validate that the only options set are the ones corresponding to the
+ // selected sort order.
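+ // (FIFO is the default and takes no options, so legacySortOptions must be nil.)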
+ if p.SortOptions.Order == types.FIFOSortOrder && + p.SortOptions.LegacySortOptions != nil { + return errors.Errorf("the field 'sortOptions.legacySortOptions' is"+ + " set but the selected sort order is '%v', not 'legacy'", + p.SortOptions.Order) + } + return nil +} + +func (p *SortOrderTransformerPlugin) Transform(m resmap.ResMap) (err error) { + p.applyDefaults() + err = p.validate() + if err != nil { + return err + } + + // Sort + if p.SortOptions.Order == types.LegacySortOrder { + s := newLegacyIDSorter(m.AllIds(), p.SortOptions.LegacySortOptions) + sort.Sort(s) + err = applyOrdering(m, s.resids) + if err != nil { + return err + } + } + return nil +} + +// applyOrdering takes resources (given in ResMap) and a desired ordering given +// as a sequence of ResIds, and updates the ResMap's resources to match the +// ordering. +func applyOrdering(m resmap.ResMap, ordering []resid.ResId) error { + var err error + resources := make([]*resource.Resource, m.Size()) + // Clear and refill with the correct order + for i, id := range ordering { + resources[i], err = m.GetByCurrentId(id) + if err != nil { + return errors.WrapPrefixf(err, "expected match for sorting") + } + } + m.Clear() + for _, r := range resources { + err = m.Append(r) + if err != nil { + return errors.WrapPrefixf(err, "SortOrderTransformer: Failed to append to resources") + } + } + return nil +} + +// Code for legacy sorting. +// Legacy sorting is a "fixed" order sorting maintained for backwards +// compatibility. + +// legacyIDSorter sorts resources based on two priority lists: +// - orderFirst: Resources that should be placed in the start, in the given order. +// - orderLast: Resources that should be placed in the end, in the given order. +type legacyIDSorter struct { + // resids only stores the metadata of the object. This is an optimization as + // it's expensive to compute these again and again during ordering. + resids []resid.ResId + typeOrders map[string]int +} + +func newLegacyIDSorter( + resids []resid.ResId, + options *types.LegacySortOptions) *legacyIDSorter { + // Precalculate a resource ranking based on the priority lists. + var typeOrders = func() map[string]int { + m := map[string]int{} + for i, n := range options.OrderFirst { + m[n] = -len(options.OrderFirst) + i + } + for i, n := range options.OrderLast { + m[n] = 1 + i + } + return m + }() + return &legacyIDSorter{ + resids: resids, + typeOrders: typeOrders, + } +} + +var _ sort.Interface = legacyIDSorter{} + +func (a legacyIDSorter) Len() int { return len(a.resids) } +func (a legacyIDSorter) Swap(i, j int) { + a.resids[i], a.resids[j] = a.resids[j], a.resids[i] +} +func (a legacyIDSorter) Less(i, j int) bool { + if !a.resids[i].Gvk.Equals(a.resids[j].Gvk) { + return gvkLessThan(a.resids[i].Gvk, a.resids[j].Gvk, a.typeOrders) + } + return legacyResIDSortString(a.resids[i]) < legacyResIDSortString(a.resids[j]) +} + +func gvkLessThan(gvk1, gvk2 resid.Gvk, typeOrders map[string]int) bool { + index1 := typeOrders[gvk1.Kind] + index2 := typeOrders[gvk2.Kind] + if index1 != index2 { + return index1 < index2 + } + return legacyGVKSortString(gvk1) < legacyGVKSortString(gvk2) +} + +// legacyGVKSortString returns a string representation of given GVK used for +// stable sorting. 
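+// Empty fields map to sentinel strings ("~G", "~V", "~K") so that missing
+// values still sort deterministically.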
+func legacyGVKSortString(x resid.Gvk) string {
+ legacyNoGroup := "~G"
+ legacyNoVersion := "~V"
+ legacyNoKind := "~K"
+ legacyFieldSeparator := "_"
+
+ g := x.Group
+ if g == "" {
+ g = legacyNoGroup
+ }
+ v := x.Version
+ if v == "" {
+ v = legacyNoVersion
+ }
+ k := x.Kind
+ if k == "" {
+ k = legacyNoKind
+ }
+ return strings.Join([]string{g, v, k}, legacyFieldSeparator)
+}
+
+// legacyResIDSortString returns a string representation of given ResID used for
+// stable sorting.
+func legacyResIDSortString(id resid.ResId) string {
+ legacyNoNamespace := "~X"
+ legacyNoName := "~N"
+ legacySeparator := "|"
+
+ ns := id.Namespace
+ if ns == "" {
+ ns = legacyNoNamespace
+ }
+ nm := id.Name
+ if nm == "" {
+ nm = legacyNoName
+ }
+ return strings.Join(
+ []string{id.Gvk.String(), ns, nm}, legacySeparator)
+}
+
+// DO NOT CHANGE!
+// Final legacy ordering provided as a default by kustomize.
+// Originally an attempt to apply resources in the correct order, an effort
+// which later proved impossible as not all types are known beforehand.
+// See: https://github.com/kubernetes-sigs/kustomize/issues/3913
+var defaultOrderFirst = []string{ //nolint:gochecknoglobals
+ "Namespace",
+ "ResourceQuota",
+ "StorageClass",
+ "CustomResourceDefinition",
+ "ServiceAccount",
+ "PodSecurityPolicy",
+ "Role",
+ "ClusterRole",
+ "RoleBinding",
+ "ClusterRoleBinding",
+ "ConfigMap",
+ "Secret",
+ "Endpoints",
+ "Service",
+ "LimitRange",
+ "PriorityClass",
+ "PersistentVolume",
+ "PersistentVolumeClaim",
+ "Deployment",
+ "StatefulSet",
+ "CronJob",
+ "PodDisruptionBudget",
+}
+var defaultOrderLast = []string{ //nolint:gochecknoglobals
+ "MutatingWebhookConfiguration",
+ "ValidatingWebhookConfiguration",
+}
+
+func NewSortOrderTransformerPlugin() resmap.TransformerPlugin {
+ return &SortOrderTransformerPlugin{}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SuffixTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SuffixTransformer.go
new file mode 100644
index 000000000..31b5b8fea
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SuffixTransformer.go
@@ -0,0 +1,96 @@
+// Code generated by pluginator on SuffixTransformer; DO NOT EDIT.
+// pluginator {(devel) unknown }
+
+package builtins
+
+import (
+ "errors"
+
+ "sigs.k8s.io/kustomize/api/filters/suffix"
+ "sigs.k8s.io/kustomize/api/resmap"
+ "sigs.k8s.io/kustomize/api/types"
+ "sigs.k8s.io/kustomize/kyaml/resid"
+ "sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Add the given suffix to the field
+type SuffixTransformerPlugin struct {
+ Suffix string `json:"suffix,omitempty" yaml:"suffix,omitempty"`
+ FieldSpecs types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"`
+}
+
+// TODO: Make this gvk skip list part of the config.
+var suffixFieldSpecsToSkip = types.FsSlice{
+ {Gvk: resid.Gvk{Kind: "CustomResourceDefinition"}},
+ {Gvk: resid.Gvk{Group: "apiregistration.k8s.io", Kind: "APIService"}},
+ {Gvk: resid.Gvk{Kind: "Namespace"}},
+}
+
+func (p *SuffixTransformerPlugin) Config(
+ _ *resmap.PluginHelpers, c []byte) (err error) {
+ p.Suffix = ""
+ p.FieldSpecs = nil
+ err = yaml.Unmarshal(c, p)
+ if err != nil {
+ return
+ }
+ if p.FieldSpecs == nil {
+ return errors.New("fieldSpecs is not expected to be nil")
+ }
+ return
+}
+
+func (p *SuffixTransformerPlugin) Transform(m resmap.ResMap) error {
+ // Even if the Suffix is empty we want to proceed with the
+ // transformation. This allows adding contextual information
+ // to the resources (AddNameSuffix).
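+ // The skip list above (CRDs, APIService, Namespace) is honored before
+ // any field is touched.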
+ for _, r := range m.Resources() { + // TODO: move this test into the filter (i.e. make a better filter) + if p.shouldSkip(r.OrgId()) { + continue + } + id := r.OrgId() + // current default configuration contains + // only one entry: "metadata/name" with no GVK + for _, fs := range p.FieldSpecs { + // TODO: this is redundant to filter (but needed for now) + if !id.IsSelected(&fs.Gvk) { + continue + } + // TODO: move this test into the filter. + if fs.Path == "metadata/name" { + // "metadata/name" is the only field. + // this will add a suffix to the resource + // even if it is empty + + r.AddNameSuffix(p.Suffix) + if p.Suffix != "" { + // TODO: There are multiple transformers that can change a resource's name, and each makes a call to + // StorePreviousID(). We should make it so that we only call StorePreviousID once per kustomization layer + // to avoid storing intermediate names between transformations, to prevent intermediate name conflicts. + r.StorePreviousId() + } + } + if err := r.ApplyFilter(suffix.Filter{ + Suffix: p.Suffix, + FieldSpec: fs, + }); err != nil { + return err + } + } + } + return nil +} + +func (p *SuffixTransformerPlugin) shouldSkip(id resid.ResId) bool { + for _, path := range suffixFieldSpecsToSkip { + if id.IsSelected(&path.Gvk) { + return true + } + } + return false +} + +func NewSuffixTransformerPlugin() resmap.TransformerPlugin { + return &SuffixTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ValueAddTransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ValueAddTransformer.go new file mode 100644 index 000000000..1d70c98c2 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ValueAddTransformer.go @@ -0,0 +1,141 @@ +// Code generated by pluginator on ValueAddTransformer; DO NOT EDIT. +// pluginator {(devel) unknown } + +package builtins + +import ( + "fmt" + "path/filepath" + "strings" + + "sigs.k8s.io/kustomize/api/filters/namespace" + "sigs.k8s.io/kustomize/api/filters/valueadd" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/yaml" +) + +// An 'Add' transformer inspired by the IETF RFC 6902 JSON spec Add operation. +type ValueAddTransformerPlugin struct { + // Value is the value to add. + // Defaults to base name of encompassing kustomization root. + Value string `json:"value,omitempty" yaml:"value,omitempty"` + + // Targets is a slice of targets that should have the value added. + Targets []Target `json:"targets,omitempty" yaml:"targets,omitempty"` + + // TargetFilePath is a file path. If specified, the file will be parsed into + // a slice of Target, and appended to anything that was specified in the + // Targets field. This is just a means to share common target specifications. + TargetFilePath string `json:"targetFilePath,omitempty" yaml:"targetFilePath,omitempty"` +} + +// Target describes where to put the value. +type Target struct { + // Selector selects the resources to modify. + Selector *types.Selector `json:"selector,omitempty" yaml:"selector,omitempty"` + + // NotSelector selects the resources to exclude + // from those included by overly broad selectors. + // TODO: implement this? + // NotSelector *types.Selector `json:"notSelector,omitempty" yaml:"notSelector,omitempty"` + + // FieldPath is a JSON-style path to the field intended to hold the value. + FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"` + + // FilePathPosition is passed to the filter directly. 
Look there for doc. + FilePathPosition int `json:"filePathPosition,omitempty" yaml:"filePathPosition,omitempty"` +} + +func (p *ValueAddTransformerPlugin) Config(h *resmap.PluginHelpers, c []byte) error { + err := yaml.Unmarshal(c, p) + if err != nil { + return err + } + p.Value = strings.TrimSpace(p.Value) + if p.Value == "" { + p.Value = filepath.Base(h.Loader().Root()) + } + if p.TargetFilePath != "" { + bytes, err := h.Loader().Load(p.TargetFilePath) + if err != nil { + return err + } + var targets struct { + Targets []Target `json:"targets,omitempty" yaml:"targets,omitempty"` + } + err = yaml.Unmarshal(bytes, &targets) + if err != nil { + return err + } + p.Targets = append(p.Targets, targets.Targets...) + } + if len(p.Targets) == 0 { + return fmt.Errorf("must specify at least one target") + } + for _, target := range p.Targets { + if err = validateSelector(target.Selector); err != nil { + return err + } + // TODO: call validateSelector(target.NotSelector) if field added. + if err = validateJsonFieldPath(target.FieldPath); err != nil { + return err + } + if target.FilePathPosition < 0 { + return fmt.Errorf( + "value of FilePathPosition (%d) cannot be negative", + target.FilePathPosition) + } + } + return nil +} + +// TODO: implement +func validateSelector(_ *types.Selector) error { + return nil +} + +// TODO: Enforce RFC 6902? +func validateJsonFieldPath(p string) error { + if len(p) == 0 { + return fmt.Errorf("fieldPath cannot be empty") + } + return nil +} + +func (p *ValueAddTransformerPlugin) Transform(m resmap.ResMap) (err error) { + for _, t := range p.Targets { + var resources []*resource.Resource + if t.Selector == nil { + resources = m.Resources() + } else { + resources, err = m.Select(*t.Selector) + if err != nil { + return err + } + } + // TODO: consider t.NotSelector if implemented + for _, res := range resources { + if t.FieldPath == types.MetadataNamespacePath { + err = res.ApplyFilter(namespace.Filter{ + Namespace: p.Value, + }) + } else { + err = res.ApplyFilter(valueadd.Filter{ + Value: p.Value, + FieldPath: t.FieldPath, + FilePathPosition: t.FilePathPosition, + }) + } + if err != nil { + return err + } + } + } + return nil +} + +func NewValueAddTransformerPlugin() resmap.TransformerPlugin { + return &ValueAddTransformerPlugin{} +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/builtins/doc.go b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/doc.go new file mode 100644 index 000000000..37a8dc6e7 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/builtins/doc.go @@ -0,0 +1,8 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package builtins holds code generated from the builtin plugins. +// The "builtin" plugins are written as normal plugins and can +// be used as such, but they are also used to generate the code +// in this package so they can be statically linked to client code. +package builtins diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/generators/configmap.go b/vendor/sigs.k8s.io/kustomize/api/internal/generators/configmap.go new file mode 100644 index 000000000..47498aaa5 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/generators/configmap.go @@ -0,0 +1,52 @@ +// Copyright 2020 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package generators + +import ( + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// MakeConfigMap makes a configmap. 
+// +// ConfigMap: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#configmap-v1-core +// +// ConfigMaps and Secrets are similar. +// +// Both objects have a `data` field, which contains a map from keys to +// values that must be UTF-8 valid strings. Such data might be simple text, +// or whoever made the data may have done so by performing a base64 encoding +// on binary data. Regardless, k8s has no means to know this, so it treats +// the data field as a string. +// +// The ConfigMap has an additional field `binaryData`, also a map, but its +// values are _intended_ to be interpreted as a base64 encoding of []byte, +// by whatever makes use of the ConfigMap. +// +// In a ConfigMap, any key used in `data` cannot also be used in `binaryData` +// and vice-versa. A key must be unique across both maps. +func MakeConfigMap( + ldr ifc.KvLoader, args *types.ConfigMapArgs) (rn *yaml.RNode, err error) { + rn, err = makeBaseNode("ConfigMap", args.Name, args.Namespace) + if err != nil { + return nil, err + } + m, err := makeValidatedDataMap(ldr, args.Name, args.KvPairSources) + if err != nil { + return nil, err + } + if err = rn.LoadMapIntoConfigMapData(m); err != nil { + return nil, err + } + err = copyLabelsAndAnnotations(rn, args.Options) + if err != nil { + return nil, err + } + err = setImmutable(rn, args.Options) + if err != nil { + return nil, err + } + return rn, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/generators/secret.go b/vendor/sigs.k8s.io/kustomize/api/internal/generators/secret.go new file mode 100644 index 000000000..9afaff156 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/generators/secret.go @@ -0,0 +1,59 @@ +// Copyright 2020 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package generators + +import ( + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// MakeSecret makes a kubernetes Secret. +// +// Secret: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#secret-v1-core +// +// ConfigMaps and Secrets are similar. +// +// Like a ConfigMap, a Secret has a `data` field, but unlike a ConfigMap it has +// no `binaryData` field. +// +// All of a Secret's data is assumed to be opaque in nature, and assumed to be +// base64 encoded from its original representation, regardless of whether the +// original data was UTF-8 text or binary. +// +// This encoding provides no secrecy. It's just a neutral, common means to +// represent opaque text and binary data. Beneath the base64 encoding +// is presumably further encoding under control of the Secret's consumer. +// +// A Secret has string field `type` which holds an identifier, used by the +// client, to choose the algorithm to interpret the `data` field. Kubernetes +// cannot make use of this data; it's up to a controller or some pod's service +// to interpret the value, using `type` as a clue as to how to do this. 
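+//
+// When args.Type is empty, the generated Secret defaults to type "Opaque".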
+func MakeSecret(
+ ldr ifc.KvLoader, args *types.SecretArgs) (rn *yaml.RNode, err error) {
+ rn, err = makeBaseNode("Secret", args.Name, args.Namespace)
+ if err != nil {
+ return nil, err
+ }
+ t := "Opaque"
+ if args.Type != "" {
+ t = args.Type
+ }
+ if _, err := rn.Pipe(
+ yaml.FieldSetter{
+ Name: "type",
+ Value: yaml.NewStringRNode(t)}); err != nil {
+ return nil, err
+ }
+ m, err := makeValidatedDataMap(ldr, args.Name, args.KvPairSources)
+ if err != nil {
+ return nil, err
+ }
+ if err = rn.LoadMapIntoSecretData(m); err != nil {
+ return nil, err
+ }
+ if err = copyLabelsAndAnnotations(rn, args.Options); err != nil {
+ return nil, err
+ }
+ if err = setImmutable(rn, args.Options); err != nil {
+ return nil, err
+ }
+ return rn, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/generators/utils.go b/vendor/sigs.k8s.io/kustomize/api/internal/generators/utils.go
new file mode 100644
index 000000000..b570c7e91
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/generators/utils.go
@@ -0,0 +1,124 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package generators
+
+import (
+ "fmt"
+ "path"
+ "strings"
+
+ "github.com/go-errors/errors"
+ "sigs.k8s.io/kustomize/api/ifc"
+ "sigs.k8s.io/kustomize/api/types"
+ "sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+func makeBaseNode(kind, name, namespace string) (*yaml.RNode, error) {
+ rn, err := yaml.Parse(fmt.Sprintf(`
+apiVersion: v1
+kind: %s
+`, kind))
+ if err != nil {
+ return nil, err
+ }
+ if name == "" {
+ return nil, errors.Errorf("a configmap must have a name")
+ }
+ if _, err := rn.Pipe(yaml.SetK8sName(name)); err != nil {
+ return nil, err
+ }
+ if namespace != "" {
+ if _, err := rn.Pipe(yaml.SetK8sNamespace(namespace)); err != nil {
+ return nil, err
+ }
+ }
+ return rn, nil
+}
+
+func makeValidatedDataMap(
+ ldr ifc.KvLoader, name string, sources types.KvPairSources) (map[string]string, error) {
+ pairs, err := ldr.Load(sources)
+ if err != nil {
+ return nil, errors.WrapPrefix(err, "loading KV pairs", 0)
+ }
+ knownKeys := make(map[string]string)
+ for _, p := range pairs {
+ // legal key: alphanumeric characters, '-', '_' or '.'
+ if err := ldr.Validator().ErrIfInvalidKey(p.Key); err != nil {
+ return nil, err
+ }
+ if _, ok := knownKeys[p.Key]; ok {
+ return nil, errors.Errorf(
+ "configmap %s illegally repeats the key `%s`", name, p.Key)
+ }
+ knownKeys[p.Key] = p.Value
+ }
+ return knownKeys, nil
+}
+
+// copyLabelsAndAnnotations copies labels and annotations from
+// GeneratorOptions into the given object.
+func copyLabelsAndAnnotations(
+ rn *yaml.RNode, opts *types.GeneratorOptions) error {
+ if opts == nil {
+ return nil
+ }
+ for _, k := range yaml.SortedMapKeys(opts.Labels) {
+ v := opts.Labels[k]
+ if _, err := rn.Pipe(yaml.SetLabel(k, v)); err != nil {
+ return err
+ }
+ }
+ for _, k := range yaml.SortedMapKeys(opts.Annotations) {
+ v := opts.Annotations[k]
+ if _, err := rn.Pipe(yaml.SetAnnotation(k, v)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func setImmutable(
+ rn *yaml.RNode, opts *types.GeneratorOptions) error {
+ if opts == nil {
+ return nil
+ }
+ if opts.Immutable {
+ n := &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "true",
+ Tag: yaml.NodeTagBool,
+ }
+ if _, err := rn.Pipe(yaml.FieldSetter{Name: "immutable", Value: yaml.NewRNode(n)}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ParseFileSource parses the source given.
+//
+// Acceptable formats include:
+// 1. source-path: the basename will become the key name
+// 2. source-name=source-path: the source-name will become the key name and
+// source-path is the path to the key file.
+//
+// Key names cannot include '='.
+func ParseFileSource(source string) (keyName, filePath string, err error) {
+ numSeparators := strings.Count(source, "=")
+ switch {
+ case numSeparators == 0:
+ return path.Base(source), source, nil
+ case numSeparators == 1 && strings.HasPrefix(source, "="):
+ return "", "", errors.Errorf("missing key name for file path %q in source %q", strings.TrimPrefix(source, "="), source)
+ case numSeparators == 1 && strings.HasSuffix(source, "="):
+ return "", "", errors.Errorf("missing file path for key name %q in source %q", strings.TrimSuffix(source, "="), source)
+ case numSeparators > 1:
+ return "", "", errors.Errorf("source %q key name or file path contains '='", source)
+ default:
+ components := strings.Split(source, "=")
+ return components[0], components[1], nil
+ }
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go b/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go
new file mode 100644
index 000000000..d15fa7590
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go
@@ -0,0 +1,50 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package git
+
+import (
+ "sigs.k8s.io/kustomize/kyaml/filesys"
+)
+
+// Cloner is a function that can clone a git repo.
+type Cloner func(repoSpec *RepoSpec) error
+
+// ClonerUsingGitExec uses a local git install, as opposed
+// to say, some remote API, to obtain a local clone of
+// a remote repo.
+func ClonerUsingGitExec(repoSpec *RepoSpec) error {
+ r, err := newCmdRunner(repoSpec.Timeout)
+ if err != nil {
+ return err
+ }
+ repoSpec.Dir = r.dir
+ if err = r.run("init"); err != nil {
+ return err
+ }
+ ref := "HEAD"
+ if repoSpec.Ref != "" {
+ ref = repoSpec.Ref
+ }
+ if err = r.run("fetch", "--depth=1", repoSpec.CloneSpec(), ref); err != nil {
+ return err
+ }
+ if err = r.run("checkout", "FETCH_HEAD"); err != nil {
+ return err
+ }
+ if repoSpec.Submodules {
+ return r.run("submodule", "update", "--init", "--recursive")
+ }
+ return nil
+}
+
+// DoNothingCloner returns a cloner that only sets
+// the cloneDir field in the repoSpec. It's assumed that
+// the cloneDir is associated with some fake filesystem
+// used in a test.
+func DoNothingCloner(dir filesys.ConfirmedDir) Cloner {
+ return func(rs *RepoSpec) error {
+ rs.Dir = dir
+ return nil
+ }
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/git/gitrunner.go b/vendor/sigs.k8s.io/kustomize/api/internal/git/gitrunner.go
new file mode 100644
index 000000000..134eb41c5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/git/gitrunner.go
@@ -0,0 +1,55 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package git
+
+import (
+ "os/exec"
+ "time"
+
+ "sigs.k8s.io/kustomize/api/internal/utils"
+ "sigs.k8s.io/kustomize/kyaml/errors"
+ "sigs.k8s.io/kustomize/kyaml/filesys"
+)
+
+// gitRunner runs the external git binary.
+type gitRunner struct {
+ gitProgram string
+ duration time.Duration
+ dir filesys.ConfirmedDir
+}
+
+// newCmdRunner returns a gitRunner if it can find the binary.
+// It also creates a temp directory for cloning repos.
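+// That directory becomes the runner's working dir (cmd.Dir) for all git calls.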
+func newCmdRunner(timeout time.Duration) (*gitRunner, error) {
+ gitProgram, err := exec.LookPath("git")
+ if err != nil {
+ return nil, errors.WrapPrefixf(err, "no 'git' program on path")
+ }
+ dir, err := filesys.NewTmpConfirmedDir()
+ if err != nil {
+ return nil, err
+ }
+ return &gitRunner{
+ gitProgram: gitProgram,
+ duration: timeout,
+ dir: dir,
+ }, nil
+}
+
+// run a command with a timeout.
+func (r gitRunner) run(args ...string) error {
+ //nolint: gosec
+ cmd := exec.Command(r.gitProgram, args...)
+ cmd.Dir = r.dir.String()
+ return utils.TimedCall(
+ cmd.String(),
+ r.duration,
+ func() error {
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return errors.WrapPrefixf(err, "failed to run '%s': %s", cmd.String(), string(out))
+ }
+ return err
+ })
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/git/repospec.go b/vendor/sigs.k8s.io/kustomize/api/internal/git/repospec.go
new file mode 100644
index 000000000..ba6156cc5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/git/repospec.go
@@ -0,0 +1,387 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package git
+
+import (
+ "fmt"
+ "log"
+ "net/url"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "sigs.k8s.io/kustomize/kyaml/errors"
+ "sigs.k8s.io/kustomize/kyaml/filesys"
+)
+
+// Used as a temporary non-empty occupant of the cloneDir
+// field, as something distinguishable from the empty string
+// in various outputs (especially tests). Not using an
+// actual directory name here, as that's a temporary directory
+// with a unique name that isn't created until clone time.
+const notCloned = filesys.ConfirmedDir("/notCloned")
+
+// RepoSpec specifies a git repository and a branch and path therein.
+type RepoSpec struct {
+ // Raw, original spec, used to look for cycles.
+ // TODO(monopole): Drop raw, use processed fields instead.
+ raw string
+
+ // Host, e.g. https://github.com/
+ Host string
+
+ // RepoPath name (Path to repository),
+ // e.g. kubernetes-sigs/kustomize
+ RepoPath string
+
+ // Dir is where the repository is cloned to.
+ Dir filesys.ConfirmedDir
+
+ // Relative path in the repository, and in the cloneDir,
+ // to a Kustomization.
+ KustRootPath string
+
+ // Branch or tag reference.
+ Ref string
+
+ // Submodules indicates whether or not to clone git submodules.
+ Submodules bool
+
+ // Timeout is the maximum duration allowed for execing git commands.
+ Timeout time.Duration
+}
+
+// CloneSpec returns a string suitable for "git clone {spec}".
+func (x *RepoSpec) CloneSpec() string {
+ return x.Host + x.RepoPath
+}
+
+func (x *RepoSpec) CloneDir() filesys.ConfirmedDir {
+ return x.Dir
+}
+
+func (x *RepoSpec) Raw() string {
+ return x.raw
+}
+
+func (x *RepoSpec) AbsPath() string {
+ return x.Dir.Join(x.KustRootPath)
+}
+
+func (x *RepoSpec) Cleaner(fSys filesys.FileSystem) func() error {
+ return func() error { return fSys.RemoveAll(x.Dir.String()) }
+}
+
+const (
+ refQuery = "?ref="
+ gitSuffix = ".git"
+ gitRootDelimiter = "_git/"
+ pathSeparator = "/" // do not use filepath.Separator, as this is a URL
+)
+
+// NewRepoSpecFromURL parses git-like URLs.
+// From strings like git@github.com:someOrg/someRepo.git or
+// https://github.com/someOrg/someRepo?ref=someHash, it extracts
+// the different parts of the URL, sets them into a RepoSpec object,
+// and returns that object.
+// It MUST return an error if the input is not a git-like URL, as this is used by some code paths
+// to distinguish between local and remote paths.
+// +// In particular, NewRepoSpecFromURL separates the URL used to clone the repo from the +// elements Kustomize uses for other purposes (e.g. query params that turn into args, and +// the path to the kustomization root within the repo). +func NewRepoSpecFromURL(n string) (*RepoSpec, error) { + repoSpec := &RepoSpec{raw: n, Dir: notCloned, Timeout: defaultTimeout, Submodules: defaultSubmodules} + if filepath.IsAbs(n) { + return nil, fmt.Errorf("uri looks like abs path: %s", n) + } + + // Parse the query first. This is safe because according to rfc3986 "?" is only allowed in the + // query and is not recognized %-encoded. + // Note that parseQuery returns default values for empty parameters. + n, query, _ := strings.Cut(n, "?") + repoSpec.Ref, repoSpec.Timeout, repoSpec.Submodules = parseQuery(query) + + var err error + + // Parse the host (e.g. scheme, username, domain) segment. + repoSpec.Host, n, err = extractHost(n) + if err != nil { + return nil, err + } + + // In some cases, we're given a path to a git repo + a path to the kustomization root within + // that repo. We need to split them so that we can ultimately give the repo only to the cloner. + repoSpec.RepoPath, repoSpec.KustRootPath, err = parsePathParts(n, defaultRepoPathLength(repoSpec.Host)) + if err != nil { + return nil, err + } + + return repoSpec, nil +} + +const allSegments = -999999 +const orgRepoSegments = 2 + +func defaultRepoPathLength(host string) int { + if strings.HasPrefix(host, fileScheme) { + return allSegments + } + return orgRepoSegments +} + +// parsePathParts splits the repo path that will ultimately be passed to git to clone the +// repo from the kustomization root path, which Kustomize will execute the build in after the repo +// is cloned. +// +// We first try to do this based on explicit markers in the URL (e.g. _git, .git or //). +// If none are present, we try to apply a historical default repo path length that is derived from +// Github URLs. If there aren't enough segments, we have historically considered the URL invalid. +func parsePathParts(n string, defaultSegmentLength int) (string, string, error) { + repoPath, kustRootPath, success := tryExplicitMarkerSplit(n) + if !success { + repoPath, kustRootPath, success = tryDefaultLengthSplit(n, defaultSegmentLength) + } + + // Validate the result + if !success || len(repoPath) == 0 { + return "", "", fmt.Errorf("failed to parse repo path segment") + } + if kustRootPathExitsRepo(kustRootPath) { + return "", "", fmt.Errorf("url path exits repo: %s", n) + } + + return repoPath, strings.TrimPrefix(kustRootPath, pathSeparator), nil +} + +func tryExplicitMarkerSplit(n string) (string, string, bool) { + // Look for the _git delimiter, which by convention is expected to be ONE directory above the repo root. + // If found, split on the NEXT path element, which is the repo root. + // Example: https://username@dev.azure.com/org/project/_git/repo/path/to/kustomization/root + if gitRootIdx := strings.Index(n, gitRootDelimiter); gitRootIdx >= 0 { + gitRootPath := n[:gitRootIdx+len(gitRootDelimiter)] + subpathSegments := strings.Split(n[gitRootIdx+len(gitRootDelimiter):], pathSeparator) + return gitRootPath + subpathSegments[0], strings.Join(subpathSegments[1:], pathSeparator), true + + // Look for a double-slash in the path, which if present separates the repo root from the kust path. + // It is a convention, not a real path element, so do not preserve it in the returned value. 
+	// Example: https://github.com/org/repo//path/to/kustomization/root
+	} else if repoRootIdx := strings.Index(n, "//"); repoRootIdx >= 0 {
+		return n[:repoRootIdx], n[repoRootIdx+2:], true
+
+		// Look for .git in the path, which if present is part of the directory name of the git repo.
+		// This means we want to grab everything up to and including that suffix.
+		// Example: https://github.com/org/repo.git/path/to/kustomization/root
+	} else if gitSuffixIdx := strings.Index(n, gitSuffix); gitSuffixIdx >= 0 {
+		upToGitSuffix := n[:gitSuffixIdx+len(gitSuffix)]
+		afterGitSuffix := n[gitSuffixIdx+len(gitSuffix):]
+		return upToGitSuffix, afterGitSuffix, true
+	}
+	return "", "", false
+}
+
+func tryDefaultLengthSplit(n string, defaultSegmentLength int) (string, string, bool) {
+	// If the default is to take all segments, do so.
+	if defaultSegmentLength == allSegments {
+		return n, "", true
+
+		// If the default is N segments, make sure we have at least that many and take them if so.
+		// If we have less than N, we have historically considered the URL invalid.
+	} else if segments := strings.Split(n, pathSeparator); len(segments) >= defaultSegmentLength {
+		firstNSegments := strings.Join(segments[:defaultSegmentLength], pathSeparator)
+		rest := strings.Join(segments[defaultSegmentLength:], pathSeparator)
+		return firstNSegments, rest, true
+	}
+	return "", "", false
+}
+
+func kustRootPathExitsRepo(kustRootPath string) bool {
+	cleanedPath := filepath.Clean(strings.TrimPrefix(kustRootPath, string(filepath.Separator)))
+	pathElements := strings.Split(cleanedPath, string(filepath.Separator))
+	return len(pathElements) > 0 &&
+		pathElements[0] == filesys.ParentDir
+}
+
+// Clone git submodules by default.
+const defaultSubmodules = true
+
+// Arbitrary, but non-infinite, timeout for running commands.
+const defaultTimeout = 27 * time.Second
+
+func parseQuery(query string) (string, time.Duration, bool) {
+	values, err := url.ParseQuery(query)
+	// in event of parse failure, return defaults
+	if err != nil {
+		return "", defaultTimeout, defaultSubmodules
+	}
+
+	// ref is the desired git ref to target. Can be specified in a git URL
+	// with ?ref= or ?version=, although ref takes precedence.
+	ref := values.Get("version")
+	if queryValue := values.Get("ref"); queryValue != "" {
+		ref = queryValue
+	}
+
+	// timeout is the desired git exec timeout. Can be specified in a git URL
+	// with ?timeout=.
+	duration := defaultTimeout
+	if queryValue := values.Get("timeout"); queryValue != "" {
+		// Attempt to first parse as a number of integer seconds (like "61"),
+		// and then attempt to parse as a suffixed duration (like "61s").
+		if intValue, err := strconv.Atoi(queryValue); err == nil && intValue > 0 {
+			duration = time.Duration(intValue) * time.Second
+		} else if durationValue, err := time.ParseDuration(queryValue); err == nil && durationValue > 0 {
+			duration = durationValue
+		}
+	}
+
+	// submodules indicates if git submodule cloning is desired. Can be
+	// specified in a git URL with ?submodules=.
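+	// For example (illustrative): "ref=main&timeout=120&submodules=false"
+	// yields ("main", 120*time.Second, false), and "version=v1&ref=v2"
+	// yields ref "v2", since ref takes precedence over version.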
+	submodules := defaultSubmodules
+	if queryValue := values.Get("submodules"); queryValue != "" {
+		if boolValue, err := strconv.ParseBool(queryValue); err == nil {
+			submodules = boolValue
+		}
+	}
+
+	return ref, duration, submodules
+}
+
+func extractHost(n string) (string, string, error) {
+	n = ignoreForcedGitProtocol(n)
+	scheme, n := extractScheme(n)
+	username, n := extractUsername(n)
+	stdGithub := isStandardGithubHost(n)
+	acceptSCP := acceptSCPStyle(scheme, username, stdGithub)
+
+	// Validate the username and scheme before attempting host/path parsing, because if the parsing
+	// so far has not succeeded, we will not be able to extract the host and path correctly.
+	if err := validateScheme(scheme, acceptSCP); err != nil {
+		return "", "", err
+	}
+
+	// Now that we have extracted a valid scheme+username, we can parse the host itself.
+
+	// The file protocol specifies an absolute path to a local git repo.
+	// Everything after the scheme (including any 'username' we found) is actually part of that path.
+	if scheme == fileScheme {
+		return scheme, username + n, nil
+	}
+	var host, rest = n, ""
+	if sepIndex := findPathSeparator(n, acceptSCP); sepIndex >= 0 {
+		host, rest = n[:sepIndex+1], n[sepIndex+1:]
+	}
+
+	// Github URLs are strictly normalized in a way that may discard scheme and username components.
+	if stdGithub {
+		scheme, username, host = normalizeGithubHostParts(scheme, username)
+	}
+
+	// Host is required, so do not concat the scheme and username if we didn't find one.
+	if host == "" {
+		return "", "", errors.Errorf("failed to parse host segment")
+	}
+	return scheme + username + host, rest, nil
+}
+
+// ignoreForcedGitProtocol strips the "git::" prefix from URLs.
+// We used to use go-getter to handle our URLs: https://github.com/hashicorp/go-getter.
+// The git:: prefix signaled go-getter to use the git protocol to fetch the URL's contents.
+// We silently strip this prefix to allow these go-getter-style URLs to continue to work,
+// although the git protocol (which is insecure and unsupported on many platforms, including Github)
+// will not actually be used as intended.
+func ignoreForcedGitProtocol(n string) string {
+	n, found := trimPrefixIgnoreCase(n, "git::")
+	if found {
+		log.Println("Warning: Forcing the git protocol using the 'git::' URL prefix is not supported. " +
+			"Kustomize currently strips this invalid prefix, but will stop doing so in a future release. " +
+			"Please remove the 'git::' prefix from your configuration.")
+	}
+	return n
+}
+
+// acceptSCPStyle returns true if the scheme and username indicate potential use of an SCP-style URL.
+// With this style, the scheme is not explicit and the path is delimited by a colon.
+// Strictly speaking the username is optional in SCP-like syntax, but Kustomize has always
+// required it for non-Github URLs.
+// Example: user@host.xz:path/to/repo.git/
+func acceptSCPStyle(scheme, username string, isGithubURL bool) bool {
+	return scheme == "" && (username != "" || isGithubURL)
+}
+
+func validateScheme(scheme string, acceptSCPStyle bool) error {
+	// see https://git-scm.com/docs/git-fetch#_git_urls for info relevant to these validations
+	switch scheme {
+	case "":
+		// Empty scheme is only ok if it's a Github URL or if it looks like SCP-style syntax
+		if !acceptSCPStyle {
+			return fmt.Errorf("failed to parse scheme")
+		}
+	case sshScheme, fileScheme, httpsScheme, httpScheme:
+		// These are all supported schemes
+	default:
+		// At time of writing, we should never end up here because we do not parse out
+		// unsupported schemes to begin with.
+		return fmt.Errorf("unsupported scheme %q", scheme)
+	}
+	return nil
+}
+
+const fileScheme = "file://"
+const httpScheme = "http://"
+const httpsScheme = "https://"
+const sshScheme = "ssh://"
+
+func extractScheme(s string) (string, string) {
+	for _, prefix := range []string{sshScheme, httpsScheme, httpScheme, fileScheme} {
+		if rest, found := trimPrefixIgnoreCase(s, prefix); found {
+			return prefix, rest
+		}
+	}
+	return "", s
+}
+
+func extractUsername(s string) (string, string) {
+	var userRegexp = regexp.MustCompile(`^([a-zA-Z][a-zA-Z0-9-]*)@`)
+	if m := userRegexp.FindStringSubmatch(s); m != nil {
+		username := m[1] + "@"
+		return username, s[len(username):]
+	}
+	return "", s
+}
+
+func isStandardGithubHost(s string) bool {
+	lowerCased := strings.ToLower(s)
+	return strings.HasPrefix(lowerCased, "github.com/") ||
+		strings.HasPrefix(lowerCased, "github.com:")
+}
+
+// trimPrefixIgnoreCase returns the rest of s and true if prefix, ignoring case, prefixes s.
+// Otherwise, trimPrefixIgnoreCase returns s and false.
+func trimPrefixIgnoreCase(s, prefix string) (string, bool) {
+	if len(prefix) <= len(s) && strings.ToLower(s[:len(prefix)]) == prefix {
+		return s[len(prefix):], true
+	}
+	return s, false
+}
+
+func findPathSeparator(hostPath string, acceptSCP bool) int {
+	sepIndex := strings.Index(hostPath, pathSeparator)
+	if acceptSCP {
+		colonIndex := strings.Index(hostPath, ":")
+		// The colon acts as a delimiter in scp-style ssh URLs only if not prefixed by '/'.
+		if sepIndex == -1 || (colonIndex > 0 && colonIndex < sepIndex) {
+			sepIndex = colonIndex
+		}
+	}
+	return sepIndex
+}
+
+func normalizeGithubHostParts(scheme, username string) (string, string, string) {
+	if strings.HasPrefix(scheme, sshScheme) || username != "" {
+		return "", username, "github.com:"
+	}
+	return httpsScheme, "", "github.com/"
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/kusterr/yamlformaterror.go b/vendor/sigs.k8s.io/kustomize/api/internal/kusterr/yamlformaterror.go
new file mode 100644
index 000000000..aa76d1dd7
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/kusterr/yamlformaterror.go
@@ -0,0 +1,55 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package kusterr has contextual error types.
+package kusterr
+
+import (
+	"fmt"
+	"strings"
+)
+
+// YamlFormatError represents a JSON/YAML format error, together with
+// the name of the YAML file in which it occurred.
+type YamlFormatError struct {
+	Path     string
+	ErrorMsg string
+}
+
+func (e YamlFormatError) Error() string {
+	return fmt.Sprintf("YAML file [%s] encounters a format error.\n%s\n", e.Path, e.ErrorMsg)
+}
+
+// MalformedYamlError represents an error that occurred while trying to decode a given YAML.
+type MalformedYamlError struct {
+	Path     string
+	ErrorMsg string
+}
+
+func (e MalformedYamlError) Error() string {
+	return fmt.Sprintf("%s in File: %s", e.ErrorMsg, e.Path)
+}
+
+// Handler wraps a YAML parsing error as a YamlFormatError or
+// MalformedYamlError, as appropriate.
+func Handler(e error, path string) error {
+	if isYAMLSyntaxError(e) {
+		return YamlFormatError{
+			Path:     path,
+			ErrorMsg: e.Error(),
+		}
+	}
+	if IsMalformedYAMLError(e) {
+		return MalformedYamlError{
+			Path:     path,
+			ErrorMsg: e.Error(),
+		}
+	}
+	return e
+}
+
+func isYAMLSyntaxError(e error) bool {
+	return strings.Contains(e.Error(), "error converting YAML to JSON") || strings.Contains(e.Error(), "error unmarshaling JSON")
+}
+
+func IsMalformedYAMLError(e error) bool {
+	return strings.Contains(e.Error(), "MalformedYAMLError")
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/doc.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/doc.go
new file mode 100644
index 000000000..f41f79b0c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package builtinconfig provides legacy methods for
+// configuring builtin plugins from a common config file.
+// As a user, it's better to configure plugins individually
+// with plugin config files specified in the `transformers:`
+// or `generators:` field than to use this legacy
+// configuration technique.
+package builtinconfig
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go
new file mode 100644
index 000000000..bf5e3f8a3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinconfig
+
+import (
+	"sigs.k8s.io/kustomize/api/ifc"
+	"sigs.k8s.io/yaml"
+)
+
+// loadDefaultConfig returns a TransformerConfig
+// object from a list of files.
+func loadDefaultConfig(
+	ldr ifc.Loader, paths []string) (*TransformerConfig, error) {
+	result := &TransformerConfig{}
+	for _, path := range paths {
+		data, err := ldr.Load(path)
+		if err != nil {
+			return nil, err
+		}
+		t, err := makeTransformerConfigFromBytes(data)
+		if err != nil {
+			return nil, err
+		}
+		result, err = result.Merge(t)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return result, nil
+}
+
+// makeTransformerConfigFromBytes returns a TransformerConfig object from bytes
+func makeTransformerConfigFromBytes(data []byte) (*TransformerConfig, error) {
+	var t TransformerConfig
+	err := yaml.Unmarshal(data, &t)
+	if err != nil {
+		return nil, err
+	}
+	t.sortFields()
+	return &t, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go
new file mode 100644
index 000000000..36ef42c27
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go
@@ -0,0 +1,99 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinconfig
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/resid"
+)
+
+// NameBackReferences is an association between a gvk.GVK (a ReferralTarget)
+// and a list of Referrers that could refer to it.
+//
+// It is used to handle name changes, and can be thought of as
+// a contact list. If you change your own contact info (name,
+// phone number, etc.), you must tell your contacts or they won't
+// know about the change.
+//
+// For example, ConfigMaps can be used by Pods and everything that
+// contains a Pod: Deployment, Job, StatefulSet, etc.
+// The ConfigMap is the ReferralTarget, the others are Referrers.
+//
+// If the name of a ConfigMap instance changed from 'alice' to 'bob',
+// one must
+// - visit all objects that could refer to the ConfigMap (the Referrers)
+// - see if they mention 'alice',
+// - if so, change the Referrer's name reference to 'bob'.
+//
+// The NameBackReferences instance to aid in this could look like
+//	{
+//	  kind: ConfigMap
+//	  version: v1
+//	  fieldSpecs:
+//	  - kind: Pod
+//	    version: v1
+//	    path: spec/volumes/configMap/name
+//	  - kind: Deployment
+//	    path: spec/template/spec/volumes/configMap/name
+//	  - kind: Job
+//	    path: spec/template/spec/volumes/configMap/name
+//	  (etc.)
+//	}
type NameBackReferences struct {
+	resid.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"`
+	// TODO: rename json 'fieldSpecs' to 'referrers' for clarity.
+	// This will, however, break anyone using a custom config.
+	Referrers types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"`
+}
+
+func (n NameBackReferences) String() string {
+	var r []string
+	for _, f := range n.Referrers {
+		r = append(r, f.String())
+	}
+	return n.Gvk.String() + ":  (\n" +
+		strings.Join(r, "\n") + "\n)"
+}
+
+type nbrSlice []NameBackReferences
+
+func (s nbrSlice) Len() int      { return len(s) }
+func (s nbrSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nbrSlice) Less(i, j int) bool {
+	return s[i].Gvk.IsLessThan(s[j].Gvk)
+}
+
+func (s nbrSlice) mergeAll(o nbrSlice) (result nbrSlice, err error) {
+	result = s
+	for _, r := range o {
+		result, err = result.mergeOne(r)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return result, nil
+}
+
+func (s nbrSlice) mergeOne(other NameBackReferences) (nbrSlice, error) {
+	var result nbrSlice
+	var err error
+	found := false
+	for _, c := range s {
+		if c.Gvk.Equals(other.Gvk) {
+			c.Referrers, err = c.Referrers.MergeAll(other.Referrers)
+			if err != nil {
+				return nil, err
+			}
+			found = true
+		}
+		result = append(result, c)
+	}
+
+	if !found {
+		result = append(result, other)
+	}
+	return result, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go
new file mode 100644
index 000000000..69b8bd4fb
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go
@@ -0,0 +1,156 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinconfig
+
+import (
+	"log"
+	"sort"
+
+	"sigs.k8s.io/kustomize/api/ifc"
+	"sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+)
+
+// TransformerConfig holds the data needed to perform transformations.
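+//
+// A custom transformer configuration file, referenced from a
+// kustomization's `configurations:` field, supplies entries for these
+// fields in YAML form; for example (illustrative):
+//
+//	namePrefix:
+//	- path: metadata/name
+//	  kind: MyCustomKind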
+type TransformerConfig struct { + NamePrefix types.FsSlice `json:"namePrefix,omitempty" yaml:"namePrefix,omitempty"` + NameSuffix types.FsSlice `json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"` + NameSpace types.FsSlice `json:"namespace,omitempty" yaml:"namespace,omitempty"` + CommonLabels types.FsSlice `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"` + TemplateLabels types.FsSlice `json:"templateLabels,omitempty" yaml:"templateLabels,omitempty"` + CommonAnnotations types.FsSlice `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"` + NameReference nbrSlice `json:"nameReference,omitempty" yaml:"nameReference,omitempty"` + VarReference types.FsSlice `json:"varReference,omitempty" yaml:"varReference,omitempty"` + Images types.FsSlice `json:"images,omitempty" yaml:"images,omitempty"` + Replicas types.FsSlice `json:"replicas,omitempty" yaml:"replicas,omitempty"` +} + +// MakeEmptyConfig returns an empty TransformerConfig object +func MakeEmptyConfig() *TransformerConfig { + return &TransformerConfig{} +} + +// MakeDefaultConfig returns a default TransformerConfig. +func MakeDefaultConfig() *TransformerConfig { + c, err := makeTransformerConfigFromBytes( + builtinpluginconsts.GetDefaultFieldSpecs()) + if err != nil { + log.Fatalf("Unable to make default transformconfig: %v", err) + } + return c +} + +// MakeTransformerConfig returns a merger of custom config, +// if any, with default config. +func MakeTransformerConfig( + ldr ifc.Loader, paths []string) (*TransformerConfig, error) { + t1 := MakeDefaultConfig() + if len(paths) == 0 { + return t1, nil + } + t2, err := loadDefaultConfig(ldr, paths) + if err != nil { + return nil, err + } + return t1.Merge(t2) +} + +// sortFields provides determinism in logging, tests, etc. 
+func (t *TransformerConfig) sortFields() {
+	sort.Sort(t.NamePrefix)
+	sort.Sort(t.NameSuffix)
+	sort.Sort(t.NameSpace)
+	sort.Sort(t.CommonLabels)
+	sort.Sort(t.TemplateLabels)
+	sort.Sort(t.CommonAnnotations)
+	sort.Sort(t.NameReference)
+	sort.Sort(t.VarReference)
+	sort.Sort(t.Images)
+	sort.Sort(t.Replicas)
+}
+
+// AddPrefixFieldSpec adds a FieldSpec to NamePrefix
+func (t *TransformerConfig) AddPrefixFieldSpec(fs types.FieldSpec) (err error) {
+	t.NamePrefix, err = t.NamePrefix.MergeOne(fs)
+	return err
+}
+
+// AddSuffixFieldSpec adds a FieldSpec to NameSuffix
+func (t *TransformerConfig) AddSuffixFieldSpec(fs types.FieldSpec) (err error) {
+	t.NameSuffix, err = t.NameSuffix.MergeOne(fs)
+	return err
+}
+
+// AddLabelFieldSpec adds a FieldSpec to CommonLabels
+func (t *TransformerConfig) AddLabelFieldSpec(fs types.FieldSpec) (err error) {
+	t.CommonLabels, err = t.CommonLabels.MergeOne(fs)
+	return err
+}
+
+// AddAnnotationFieldSpec adds a FieldSpec to CommonAnnotations
+func (t *TransformerConfig) AddAnnotationFieldSpec(fs types.FieldSpec) (err error) {
+	t.CommonAnnotations, err = t.CommonAnnotations.MergeOne(fs)
+	return err
+}
+
+// AddNamereferenceFieldSpec adds a NameBackReferences to NameReference
+func (t *TransformerConfig) AddNamereferenceFieldSpec(
+	nbrs NameBackReferences) (err error) {
+	t.NameReference, err = t.NameReference.mergeOne(nbrs)
+	return err
+}
+
+// Merge merges two TransformerConfig objects into
+// a new TransformerConfig object
+func (t *TransformerConfig) Merge(input *TransformerConfig) (
+	merged *TransformerConfig, err error) {
+	if input == nil {
+		return t, nil
+	}
+	merged = &TransformerConfig{}
+	merged.NamePrefix, err = t.NamePrefix.MergeAll(input.NamePrefix)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge NamePrefix fieldSpec")
+	}
+	merged.NameSuffix, err = t.NameSuffix.MergeAll(input.NameSuffix)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge NameSuffix fieldSpec")
+	}
+	merged.NameSpace, err = t.NameSpace.MergeAll(input.NameSpace)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge NameSpace fieldSpec")
+	}
+	merged.CommonAnnotations, err = t.CommonAnnotations.MergeAll(
+		input.CommonAnnotations)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge CommonAnnotations fieldSpec")
+	}
+	merged.CommonLabels, err = t.CommonLabels.MergeAll(input.CommonLabels)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge CommonLabels fieldSpec")
+	}
+	merged.TemplateLabels, err = t.TemplateLabels.MergeAll(input.TemplateLabels)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge TemplateLabels fieldSpec")
+	}
+	merged.VarReference, err = t.VarReference.MergeAll(input.VarReference)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge VarReference fieldSpec")
+	}
+	merged.NameReference, err = t.NameReference.mergeAll(input.NameReference)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge NameReference fieldSpec")
+	}
+	merged.Images, err = t.Images.MergeAll(input.Images)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge Images fieldSpec")
+	}
+	merged.Replicas, err = t.Replicas.MergeAll(input.Replicas)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "failed to merge Replicas fieldSpec")
+	}
+	merged.sortFields()
+	return merged, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtinplugintype_string.go
b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtinplugintype_string.go new file mode 100644 index 000000000..ddcfcc190 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtinplugintype_string.go @@ -0,0 +1,41 @@ +// Code generated by "stringer -type=BuiltinPluginType"; DO NOT EDIT. + +package builtinhelpers + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Unknown-0] + _ = x[AnnotationsTransformer-1] + _ = x[ConfigMapGenerator-2] + _ = x[IAMPolicyGenerator-3] + _ = x[HashTransformer-4] + _ = x[ImageTagTransformer-5] + _ = x[LabelTransformer-6] + _ = x[NamespaceTransformer-7] + _ = x[PatchJson6902Transformer-8] + _ = x[PatchStrategicMergeTransformer-9] + _ = x[PatchTransformer-10] + _ = x[PrefixSuffixTransformer-11] + _ = x[PrefixTransformer-12] + _ = x[SuffixTransformer-13] + _ = x[ReplicaCountTransformer-14] + _ = x[SecretGenerator-15] + _ = x[ValueAddTransformer-16] + _ = x[HelmChartInflationGenerator-17] + _ = x[ReplacementTransformer-18] +} + +const _BuiltinPluginType_name = "UnknownAnnotationsTransformerConfigMapGeneratorIAMPolicyGeneratorHashTransformerImageTagTransformerLabelTransformerNamespaceTransformerPatchJson6902TransformerPatchStrategicMergeTransformerPatchTransformerPrefixSuffixTransformerPrefixTransformerSuffixTransformerReplicaCountTransformerSecretGeneratorValueAddTransformerHelmChartInflationGeneratorReplacementTransformer" + +var _BuiltinPluginType_index = [...]uint16{0, 7, 29, 47, 65, 80, 99, 115, 135, 159, 189, 205, 228, 245, 262, 285, 300, 319, 346, 368} + +func (i BuiltinPluginType) String() string { + if i < 0 || i >= BuiltinPluginType(len(_BuiltinPluginType_index)-1) { + return "BuiltinPluginType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BuiltinPluginType_name[_BuiltinPluginType_index[i]:_BuiltinPluginType_index[i+1]] +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtins.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtins.go new file mode 100644 index 000000000..a98a23d39 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtins.go @@ -0,0 +1,115 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinhelpers
+
+import (
+	"sigs.k8s.io/kustomize/api/internal/builtins"
+	"sigs.k8s.io/kustomize/api/resmap"
+)
+
+//go:generate stringer -type=BuiltinPluginType
+type BuiltinPluginType int
+
+const (
+	Unknown BuiltinPluginType = iota
+	AnnotationsTransformer
+	ConfigMapGenerator
+	IAMPolicyGenerator
+	HashTransformer
+	ImageTagTransformer
+	LabelTransformer
+	NamespaceTransformer
+	PatchJson6902Transformer
+	PatchStrategicMergeTransformer
+	PatchTransformer
+	PrefixSuffixTransformer
+	PrefixTransformer
+	SuffixTransformer
+	ReplicaCountTransformer
+	SecretGenerator
+	ValueAddTransformer
+	HelmChartInflationGenerator
+	ReplacementTransformer
+)
+
+var stringToBuiltinPluginTypeMap map[string]BuiltinPluginType
+
+func init() { //nolint:gochecknoinits
+	stringToBuiltinPluginTypeMap = makeStringToBuiltinPluginTypeMap()
+}
+
+func makeStringToBuiltinPluginTypeMap() (result map[string]BuiltinPluginType) {
+	result = make(map[string]BuiltinPluginType, 23)
+	for k := range GeneratorFactories {
+		result[k.String()] = k
+	}
+	for k := range TransformerFactories {
+		result[k.String()] = k
+	}
+	return
+}
+
+func GetBuiltinPluginType(n string) BuiltinPluginType {
+	result, ok := stringToBuiltinPluginTypeMap[n]
+	if ok {
+		return result
+	}
+	return Unknown
+}
+
+var GeneratorFactories = map[BuiltinPluginType]func() resmap.GeneratorPlugin{
+	ConfigMapGenerator:          builtins.NewConfigMapGeneratorPlugin,
+	IAMPolicyGenerator:          builtins.NewIAMPolicyGeneratorPlugin,
+	SecretGenerator:             builtins.NewSecretGeneratorPlugin,
+	HelmChartInflationGenerator: builtins.NewHelmChartInflationGeneratorPlugin,
+}
+
+type MultiTransformer struct {
+	transformers []resmap.TransformerPlugin
+}
+
+func (t *MultiTransformer) Transform(m resmap.ResMap) error {
+	for _, transformer := range t.transformers {
+		if err := transformer.Transform(m); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *MultiTransformer) Config(h *resmap.PluginHelpers, b []byte) error {
+	for _, transformer := range t.transformers {
+		if err := transformer.Config(h, b); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func NewMultiTransformer() resmap.TransformerPlugin {
+	return &MultiTransformer{[]resmap.TransformerPlugin{
+		builtins.NewPrefixTransformerPlugin(),
+		builtins.NewSuffixTransformerPlugin(),
+	}}
+}
+
+var TransformerFactories = map[BuiltinPluginType]func() resmap.TransformerPlugin{
+	AnnotationsTransformer:         builtins.NewAnnotationsTransformerPlugin,
+	HashTransformer:                builtins.NewHashTransformerPlugin,
+	ImageTagTransformer:            builtins.NewImageTagTransformerPlugin,
+	LabelTransformer:               builtins.NewLabelTransformerPlugin,
+	NamespaceTransformer:           builtins.NewNamespaceTransformerPlugin,
+	PatchJson6902Transformer:       builtins.NewPatchJson6902TransformerPlugin,
+	PatchStrategicMergeTransformer: builtins.NewPatchStrategicMergeTransformerPlugin,
+	PatchTransformer:               builtins.NewPatchTransformerPlugin,
+	PrefixSuffixTransformer:        NewMultiTransformer,
+	PrefixTransformer:              builtins.NewPrefixTransformerPlugin,
+	SuffixTransformer:              builtins.NewSuffixTransformerPlugin,
+	ReplacementTransformer:         builtins.NewReplacementTransformerPlugin,
+	ReplicaCountTransformer:        builtins.NewReplicaCountTransformerPlugin,
+	ValueAddTransformer:            builtins.NewValueAddTransformerPlugin,
+	// Do not wire SortOrderTransformer as a builtin plugin.
+	// We only want it to be available in the top-level kustomization.
+	// See: https://github.com/kubernetes-sigs/kustomize/issues/3913
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go
new file mode 100644
index 000000000..001731f78
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go
@@ -0,0 +1,191 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package execplugin
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"runtime"
+	"strings"
+
+	"github.com/google/shlex"
+
+	"sigs.k8s.io/kustomize/api/internal/plugins/utils"
+	"sigs.k8s.io/kustomize/api/resmap"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/yaml"
+)
+
+const (
+	tmpConfigFilePrefix = "kust-plugin-config-"
+)
+
+// ExecPlugin records the name and args of an executable.
+// It triggers the executable generator and transformer.
+type ExecPlugin struct {
+	// absolute path of the executable
+	path string
+
+	// Optional command line arguments to the executable
+	// pulled from specially named fields in cfg.
+	// This is for executables that don't want to parse YAML.
+	args []string
+
+	// Plugin configuration data.
+	cfg []byte
+
+	// PluginHelpers
+	h *resmap.PluginHelpers
+}
+
+func NewExecPlugin(p string) *ExecPlugin {
+	return &ExecPlugin{path: p}
+}
+
+func (p *ExecPlugin) ErrIfNotExecutable() error {
+	f, err := os.Stat(p.path)
+	if err != nil {
+		return err
+	}
+	// On Windows, it is not possible to determine whether a
+	// file is executable through file mode.
+	// TODO: provide for setting the executable FileMode bit on Windows
+	// The (fs *fileStat) Mode() (m FileMode) {} function in
+	// https://golang.org/src/os/types_windows.go
+	// lacks the ability to set the FileMode executable bit in response
+	// to file data on Windows.
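+	// 0111 selects the owner, group, and other execute bits; a file with
+	// mode 0644, for example, has none of them set and is rejected here
+	// (except on Windows, per the note above).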
+ if f.Mode()&0111 == 0000 && runtime.GOOS != "windows" { + return fmt.Errorf("unexecutable plugin at: %s", p.path) + } + return nil +} + +func (p *ExecPlugin) Path() string { + return p.path +} + +func (p *ExecPlugin) Args() []string { + return p.args +} + +func (p *ExecPlugin) Cfg() []byte { + return p.cfg +} + +func (p *ExecPlugin) Config(h *resmap.PluginHelpers, config []byte) error { + p.h = h + p.cfg = config + return p.processOptionalArgsFields() +} + +type argsConfig struct { + ArgsOneLiner string `json:"argsOneLiner,omitempty" yaml:"argsOneLiner,omitempty"` + ArgsFromFile string `json:"argsFromFile,omitempty" yaml:"argsFromFile,omitempty"` +} + +func (p *ExecPlugin) processOptionalArgsFields() error { + var c argsConfig + err := yaml.Unmarshal(p.cfg, &c) + if err != nil { + return err + } + if c.ArgsOneLiner != "" { + p.args, _ = shlex.Split(c.ArgsOneLiner) + } + if c.ArgsFromFile != "" { + content, err := p.h.Loader().Load(c.ArgsFromFile) + if err != nil { + return err + } + for _, x := range strings.Split(string(content), "\n") { + x := strings.TrimLeft(x, " ") + if x != "" { + p.args = append(p.args, x) + } + } + } + return nil +} + +func (p *ExecPlugin) Generate() (resmap.ResMap, error) { + output, err := p.invokePlugin(nil) + if err != nil { + return nil, err + } + rm, err := p.h.ResmapFactory().NewResMapFromBytes(output) + if err != nil { + return nil, err + } + return utils.UpdateResourceOptions(rm) +} + +func (p *ExecPlugin) Transform(rm resmap.ResMap) error { + // add ResIds as annotations to all objects so that we can add them back + inputRM, err := utils.GetResMapWithIDAnnotation(rm) + if err != nil { + return err + } + + // encode the ResMap so it can be fed to the plugin + resources, err := inputRM.AsYaml() + if err != nil { + return err + } + + // invoke the plugin with resources as the input + output, err := p.invokePlugin(resources) + if err != nil { + return fmt.Errorf("%v %s", err, string(output)) + } + + // update the original ResMap based on the output + return utils.UpdateResMapValues(p.path, p.h, output, rm) +} + +// invokePlugin writes plugin config to a temp file, then +// passes the full temp file path as the first arg to a process +// running the plugin binary. Process output is returned. +func (p *ExecPlugin) invokePlugin(input []byte) ([]byte, error) { + f, err := os.CreateTemp("", tmpConfigFilePrefix) + if err != nil { + return nil, errors.WrapPrefixf( + err, "creating tmp plugin config file") + } + _, err = f.Write(p.cfg) + if err != nil { + return nil, errors.WrapPrefixf( + err, "writing plugin config to "+f.Name()) + } + err = f.Close() + if err != nil { + return nil, errors.WrapPrefixf( + err, "closing plugin config file "+f.Name()) + } + //nolint:gosec + cmd := exec.Command( + p.path, append([]string{f.Name()}, p.args...)...) 
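+	// getEnv (below) exposes the raw plugin config to the child process via
+	// the KUSTOMIZE_PLUGIN_CONFIG_STRING and KUSTOMIZE_PLUGIN_CONFIG_ROOT
+	// environment variables.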
+	cmd.Env = p.getEnv()
+	cmd.Stdin = bytes.NewReader(input)
+	cmd.Stderr = os.Stderr
+	if _, err := os.Stat(p.h.Loader().Root()); err == nil {
+		cmd.Dir = p.h.Loader().Root()
+	}
+	result, err := cmd.Output()
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "failure in plugin configured via %s; %v",
+			f.Name(), err.Error())
+	}
+	return result, os.Remove(f.Name())
+}
+
+func (p *ExecPlugin) getEnv() []string {
+	env := os.Environ()
+	env = append(env,
+		"KUSTOMIZE_PLUGIN_CONFIG_STRING="+string(p.cfg),
+		"KUSTOMIZE_PLUGIN_CONFIG_ROOT="+p.h.Loader().Root())
+	return env
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/fnplugin/fnplugin.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/fnplugin/fnplugin.go
new file mode 100644
index 000000000..3a033d727
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/fnplugin/fnplugin.go
@@ -0,0 +1,202 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package fnplugin
+
+import (
+	"bytes"
+	"fmt"
+
+	"sigs.k8s.io/kustomize/kyaml/errors"
+
+	"sigs.k8s.io/kustomize/api/internal/plugins/utils"
+	"sigs.k8s.io/kustomize/api/resmap"
+	"sigs.k8s.io/kustomize/api/resource"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
+	"sigs.k8s.io/kustomize/kyaml/runfn"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// FnPlugin is the struct to hold function information
+type FnPlugin struct {
+	// Function runner
+	runFns runfn.RunFns
+
+	// Plugin configuration data.
+	cfg []byte
+
+	// Plugin name cache for error output
+	pluginName string
+
+	// PluginHelpers
+	h *resmap.PluginHelpers
+}
+
+func bytesToRNode(yml []byte) (*yaml.RNode, error) {
+	rnode, err := yaml.Parse(string(yml))
+	if err != nil {
+		return nil, err
+	}
+	return rnode, nil
+}
+
+func resourceToRNode(res *resource.Resource) (*yaml.RNode, error) {
+	yml, err := res.AsYAML()
+	if err != nil {
+		return nil, err
+	}
+
+	return bytesToRNode(yml)
+}
+
+// GetFunctionSpec returns the function spec if there is one.
+// Otherwise it returns nil.
+func GetFunctionSpec(res *resource.Resource) (*runtimeutil.FunctionSpec, error) {
+	rnode, err := resourceToRNode(res)
+	if err != nil {
+		return nil, fmt.Errorf("could not convert resource to RNode: %w", err)
+	}
+	functionSpec, err := runtimeutil.GetFunctionSpec(rnode)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get FunctionSpec: %w", err)
+	}
+	return functionSpec, nil
+}
+
+func toStorageMounts(mounts []string) []runtimeutil.StorageMount {
+	var sms []runtimeutil.StorageMount
+	for _, mount := range mounts {
+		sms = append(sms, runtimeutil.StringToStorageMount(mount))
+	}
+	return sms
+}
+
+// NewFnPlugin creates a FnPlugin struct
+func NewFnPlugin(o *types.FnPluginLoadingOptions) *FnPlugin {
+	return &FnPlugin{
+		runFns: runfn.RunFns{
+			Functions:      []*yaml.RNode{},
+			Network:        o.Network,
+			EnableStarlark: o.EnableStar,
+			EnableExec:     o.EnableExec,
+			StorageMounts:  toStorageMounts(o.Mounts),
+			Env:            o.Env,
+			AsCurrentUser:  o.AsCurrentUser,
+			WorkingDir:     o.WorkingDir,
+		},
+	}
+}
+
+// Cfg returns function config
+func (p *FnPlugin) Cfg() []byte {
+	return p.cfg
+}
+
+// Config is called by kustomize to pass in config information
+func (p *FnPlugin) Config(h *resmap.PluginHelpers, config []byte) error {
+	p.h = h
+	p.cfg = config
+
+	fn, err := bytesToRNode(p.cfg)
+	if err != nil {
+		return err
+	}
+
+	meta, err := fn.GetMeta()
+	if err != nil {
+		return err
+	}
+
+	p.pluginName = fmt.Sprintf("api: %s, kind: %s, name: %s",
+		meta.APIVersion, meta.Kind, meta.Name)
+
+	return nil
+}
+
+// Generate is called when run as generator
+func (p *FnPlugin) Generate() (resmap.ResMap, error) {
+	output, err := p.invokePlugin(nil)
+	if err != nil {
+		return nil, err
+	}
+	rm, err := p.h.ResmapFactory().NewResMapFromBytes(output)
+	if err != nil {
+		return nil, err
+	}
+	return utils.UpdateResourceOptions(rm)
+}
+
+// Transform is called when run as transformer
+func (p *FnPlugin) Transform(rm resmap.ResMap) error {
+	// add ResIds as annotations to all objects so that we can add them back
+	inputRM, err := utils.GetResMapWithIDAnnotation(rm)
+	if err != nil {
+		return err
+	}
+
+	// encode the ResMap so it can be fed to the plugin
+	resources, err := inputRM.AsYaml()
+	if err != nil {
+		return err
+	}
+
+	// invoke the plugin with resources as the input
+	output, err := p.invokePlugin(resources)
+	if err != nil {
+		return fmt.Errorf("%v %s", err, string(output))
+	}
+
+	// update the original ResMap based on the output
+	return utils.UpdateResMapValues(p.pluginName, p.h, output, rm)
+}
+
+func injectAnnotation(input *yaml.RNode, k, v string) error {
+	err := input.PipeE(yaml.SetAnnotation(k, v))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// invokePlugin uses Function runner to run function as plugin
+func (p *FnPlugin) invokePlugin(input []byte) ([]byte, error) {
+	// get function config rnode
+	functionConfig, err := bytesToRNode(p.cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	// This annotation will let kustomize ignore this item in the output
+	err = injectAnnotation(functionConfig, "config.kubernetes.io/local-config", "true")
+	if err != nil {
+		return nil, err
+	}
+	// We need to add config as input for generators:
+	// some of them don't work with FunctionConfig, and in addition
+	// kio.Pipeline won't create anything if there are no objects.
+	// See https://github.com/kubernetes-sigs/kustomize/blob/master/kyaml/kio/kio.go#L93
+	// Since we added the `local-config` annotation, the config will be
+	// ignored in the generator output.
+	// TODO(donnyxia): This is actually not used by generator and only used to bypass a kio limitation.
+	// Need better solution.
+	if input == nil {
+		yml, err := functionConfig.String()
+		if err != nil {
+			return nil, err
+		}
+		input = []byte(yml)
+	}
+
+	// Configure and Execute Fn. We don't need to convert resources to ResourceList here
+	// because function runtime will do that. See kyaml/fn/runtime/runtimeutil/runtimeutil.go
+	var outputBuffer bytes.Buffer
+	p.runFns.Input = bytes.NewReader(input)
+	p.runFns.Functions = append(p.runFns.Functions, functionConfig)
+	p.runFns.Output = &outputBuffer
+
+	err = p.runFns.Execute()
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "couldn't execute function")
+	}
+
+	return outputBuffer.Bytes(), nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go
new file mode 100644
index 000000000..1758e5cf6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go
@@ -0,0 +1,332 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package loader
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"plugin"
+	"reflect"
+	"strings"
+
+	"sigs.k8s.io/kustomize/api/ifc"
+	"sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers"
+	"sigs.k8s.io/kustomize/api/internal/plugins/execplugin"
+	"sigs.k8s.io/kustomize/api/internal/plugins/fnplugin"
+	"sigs.k8s.io/kustomize/api/internal/plugins/utils"
+	"sigs.k8s.io/kustomize/api/konfig"
+	"sigs.k8s.io/kustomize/api/resmap"
+	"sigs.k8s.io/kustomize/api/resource"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/filesys"
+	"sigs.k8s.io/kustomize/kyaml/resid"
+)
+
+// Loader loads plugins using a file loader (a different loader).
+type Loader struct {
+	pc *types.PluginConfig
+	rf *resmap.Factory
+	fs filesys.FileSystem
+
+	// absolutePluginHome caches the location of a valid plugin root directory.
+	// It should only be set once the directory's existence has been confirmed.
+	absolutePluginHome string
+}
+
+func NewLoader(
+	pc *types.PluginConfig, rf *resmap.Factory, fs filesys.FileSystem) *Loader {
+	return &Loader{pc: pc, rf: rf, fs: fs}
+}
+
+// LoaderWithWorkingDir returns a loader after setting its working directory.
+// NOTE: This is not really a new loader since some of the Loader struct fields are pointers.
+func (l *Loader) LoaderWithWorkingDir(wd string) *Loader {
+	lpc := &types.PluginConfig{
+		PluginRestrictions: l.pc.PluginRestrictions,
+		BpLoadingOptions:   l.pc.BpLoadingOptions,
+		FnpLoadingOptions:  l.pc.FnpLoadingOptions,
+		HelmConfig:         l.pc.HelmConfig,
+	}
+	lpc.FnpLoadingOptions.WorkingDir = wd
+	return &Loader{pc: lpc, rf: l.rf, fs: l.fs}
+}
+
+// Config provides the global (not plugin specific) PluginConfig data.
+func (l *Loader) Config() *types.PluginConfig { + return l.pc +} + +func (l *Loader) LoadGenerators( + ldr ifc.Loader, v ifc.Validator, rm resmap.ResMap) ( + result []*resmap.GeneratorWithProperties, err error) { + for _, res := range rm.Resources() { + g, err := l.LoadGenerator(ldr, v, res) + if err != nil { + return nil, fmt.Errorf("failed to load generator: %w", err) + } + generatorOrigin, err := resource.OriginFromCustomPlugin(res) + if err != nil { + return nil, fmt.Errorf("failed to get origin from CustomPlugin: %w", err) + } + result = append(result, &resmap.GeneratorWithProperties{Generator: g, Origin: generatorOrigin}) + } + return result, nil +} + +func (l *Loader) LoadGenerator( + ldr ifc.Loader, v ifc.Validator, res *resource.Resource) (resmap.Generator, error) { + c, err := l.loadAndConfigurePlugin(ldr, v, res) + if err != nil { + return nil, err + } + g, ok := c.(resmap.Generator) + if !ok { + return nil, fmt.Errorf("plugin %s not a generator", res.OrgId()) + } + return g, nil +} + +func (l *Loader) LoadTransformers( + ldr ifc.Loader, v ifc.Validator, rm resmap.ResMap) ([]*resmap.TransformerWithProperties, error) { + var result []*resmap.TransformerWithProperties + for _, res := range rm.Resources() { + t, err := l.LoadTransformer(ldr, v, res) + if err != nil { + return nil, err + } + transformerOrigin, err := resource.OriginFromCustomPlugin(res) + if err != nil { + return nil, err + } + result = append(result, &resmap.TransformerWithProperties{Transformer: t, Origin: transformerOrigin}) + } + return result, nil +} + +func (l *Loader) LoadTransformer( + ldr ifc.Loader, v ifc.Validator, res *resource.Resource) (*resmap.TransformerWithProperties, error) { + c, err := l.loadAndConfigurePlugin(ldr, v, res) + if err != nil { + return nil, err + } + t, ok := c.(resmap.Transformer) + if !ok { + return nil, fmt.Errorf("plugin %s not a transformer", res.OrgId()) + } + return &resmap.TransformerWithProperties{Transformer: t}, nil +} + +func relativePluginPath(id resid.ResId) string { + return filepath.Join( + id.Group, + id.Version, + strings.ToLower(id.Kind)) +} + +func (l *Loader) AbsolutePluginPath(id resid.ResId) (string, error) { + pluginHome, err := l.absPluginHome() + if err != nil { + return "", err + } + return filepath.Join(pluginHome, relativePluginPath(id), id.Kind), nil +} + +// absPluginHome is the home of kustomize Exec and Go plugins. +// Kustomize plugin configuration files are k8s-style objects +// containing the fields 'apiVersion' and 'kind', e.g. +// +// apiVersion: apps/v1 +// kind: Deployment +// +// kustomize reads plugin configuration data from a file path +// specified in the 'generators:' or 'transformers:' field of a +// kustomization file. For Exec and Go plugins, kustomize +// uses this data to both locate the plugin and configure it. +// Each Exec or Go plugin (its code, its tests, its supporting data +// files, etc.) must be housed in its own directory at +// +// ${absPluginHome}/${pluginApiVersion}/LOWERCASE(${pluginKind}) +// +// where +// - ${absPluginHome} is an absolute path, defined below. +// - ${pluginApiVersion} is taken from the plugin config file. +// - ${pluginKind} is taken from the plugin config file. +func (l *Loader) absPluginHome() (string, error) { + // External plugins are disabled--return the dummy plugin root. + if l.pc.PluginRestrictions != types.PluginRestrictionsNone { + return konfig.NoPluginHomeSentinal, nil + } + // We've already determined plugin home--use the cached value. 
+ if l.absolutePluginHome != "" { + return l.absolutePluginHome, nil + } + + // Check default locations for a valid plugin root, and cache it if found. + dir, err := konfig.DefaultAbsPluginHome(l.fs) + if err != nil { + return "", err + } + l.absolutePluginHome = dir + return l.absolutePluginHome, nil +} + +func isBuiltinPlugin(res *resource.Resource) bool { + // TODO: the special string should appear in Group, not Version. + return res.GetGvk().Group == "" && + res.GetGvk().Version == konfig.BuiltinPluginApiVersion +} + +func (l *Loader) loadAndConfigurePlugin( + ldr ifc.Loader, + v ifc.Validator, + res *resource.Resource) (c resmap.Configurable, err error) { + if isBuiltinPlugin(res) { + switch l.pc.BpLoadingOptions { + case types.BploLoadFromFileSys: + c, err = l.loadPlugin(res) + case types.BploUseStaticallyLinked: + // Instead of looking for and loading a .so file, + // instantiate the plugin from a generated factory + // function (see "pluginator"). Being able to do this + // is what makes a plugin "builtin". + c, err = l.makeBuiltinPlugin(res.GetGvk()) + default: + err = fmt.Errorf( + "unknown plugin loader behavior specified: %v", + l.pc.BpLoadingOptions) + } + } else { + switch l.pc.PluginRestrictions { + case types.PluginRestrictionsNone: + c, err = l.loadPlugin(res) + case types.PluginRestrictionsBuiltinsOnly: + err = types.NewErrOnlyBuiltinPluginsAllowed(res.OrgId().Kind) + default: + err = fmt.Errorf( + "unknown plugin restriction specified: %v", + l.pc.PluginRestrictions) + } + } + if err != nil { + return nil, err + } + yaml, err := res.AsYAML() + if err != nil { + return nil, errors.WrapPrefixf(err, "marshalling yaml from res %s", res.OrgId()) + } + err = c.Config(resmap.NewPluginHelpers(ldr, v, l.rf, l.pc), yaml) + if err != nil { + return nil, errors.WrapPrefixf( + err, "plugin %s fails configuration", res.OrgId()) + } + return c, nil +} + +func (l *Loader) makeBuiltinPlugin(r resid.Gvk) (resmap.Configurable, error) { + bpt := builtinhelpers.GetBuiltinPluginType(r.Kind) + if f, ok := builtinhelpers.GeneratorFactories[bpt]; ok { + return f(), nil + } + if f, ok := builtinhelpers.TransformerFactories[bpt]; ok { + return f(), nil + } + return nil, errors.Errorf("unable to load builtin %s", r) +} + +func (l *Loader) loadPlugin(res *resource.Resource) (resmap.Configurable, error) { + spec, err := fnplugin.GetFunctionSpec(res) + if err != nil { + return nil, fmt.Errorf("loader: %w", err) + } + if spec != nil { + // validation check that function mounts are under the current kustomization directory + for _, mount := range spec.Container.StorageMounts { + if filepath.IsAbs(mount.Src) { + return nil, errors.Errorf("plugin %s with mount path '%s' is not permitted; "+ + "mount paths must be relative to the current kustomization directory", res.OrgId(), mount.Src) + } + if strings.HasPrefix(filepath.Clean(mount.Src), "../") { + return nil, errors.Errorf("plugin %s with mount path '%s' is not permitted; "+ + "mount paths must be under the current kustomization directory", res.OrgId(), mount.Src) + } + } + return fnplugin.NewFnPlugin(&l.pc.FnpLoadingOptions), nil + } + return l.loadExecOrGoPlugin(res.OrgId()) +} + +func (l *Loader) loadExecOrGoPlugin(resId resid.ResId) (resmap.Configurable, error) { + absPluginPath, err := l.AbsolutePluginPath(resId) + if err != nil { + return nil, err + } + // First try to load the plugin as an executable. 
+ p := execplugin.NewExecPlugin(absPluginPath) + if err = p.ErrIfNotExecutable(); err == nil { + return p, nil + } + if !os.IsNotExist(err) { + // The file exists, but something else is wrong, + // likely it's not executable. + // Assume the user forgot to set the exec bit, + // and return an error, rather than adding ".so" + // to the name and attempting to load it as a Go + // plugin, which will likely fail and result + // in an obscure message. + return nil, err + } + // Failing the above, try loading it as a Go plugin. + c, err := l.loadGoPlugin(resId, absPluginPath+".so") + if err != nil { + return nil, err + } + return c, nil +} + +// registry is a means to avoid trying to load the same .so file +// into memory more than once, which results in an error. +// Each test makes its own loader, and tries to load its own plugins, +// but the loaded .so files are in shared memory, so one will get +// "this plugin already loaded" errors if the registry is maintained +// as a Loader instance variable. So make it a package variable. +var registry = make(map[string]resmap.Configurable) + +func (l *Loader) loadGoPlugin(id resid.ResId, absPath string) (resmap.Configurable, error) { + regId := relativePluginPath(id) + if c, ok := registry[regId]; ok { + return copyPlugin(c), nil + } + if !utils.FileExists(absPath) { + return nil, fmt.Errorf( + "expected file with Go object code at: %s", absPath) + } + log.Printf("Attempting plugin load from '%s'", absPath) + p, err := plugin.Open(absPath) + if err != nil { + return nil, errors.WrapPrefixf(err, "plugin %s fails to load", absPath) + } + symbol, err := p.Lookup(konfig.PluginSymbol) + if err != nil { + return nil, errors.WrapPrefixf( + err, "plugin %s doesn't have symbol %s", + regId, konfig.PluginSymbol) + } + c, ok := symbol.(resmap.Configurable) + if !ok { + return nil, fmt.Errorf("plugin '%s' not configurable", regId) + } + registry[regId] = c + return copyPlugin(c), nil +} + +func copyPlugin(c resmap.Configurable) resmap.Configurable { + indirect := reflect.Indirect(reflect.ValueOf(c)) + newIndirect := reflect.New(indirect.Type()) + newIndirect.Elem().Set(reflect.ValueOf(indirect.Interface())) + newNamed := newIndirect.Interface() + return newNamed.(resmap.Configurable) +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/plugins/utils/utils.go b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/utils/utils.go new file mode 100644 index 000000000..8182f203e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/plugins/utils/utils.go @@ -0,0 +1,240 @@ +// Copyright 2020 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "strconv" + "time" + + "sigs.k8s.io/kustomize/api/konfig" + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/filesys" + "sigs.k8s.io/yaml" +) + +const ( + idAnnotation = "kustomize.config.k8s.io/id" + HashAnnotation = "kustomize.config.k8s.io/needs-hash" + BehaviorAnnotation = "kustomize.config.k8s.io/behavior" +) + +func GoBin() string { + return filepath.Join(runtime.GOROOT(), "bin", "go") +} + +// DeterminePluginSrcRoot guesses where the user +// has her ${g}/${v}/$lower(${k})/${k}.go files. 
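+// For a hypothetical plugin with group example.com, version v1 and kind
+// MyPlugin, that layout is example.com/v1/myplugin/MyPlugin.go under the
+// plugin source root.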
+func DeterminePluginSrcRoot(fSys filesys.FileSystem) (string, error) {
+	return konfig.FirstDirThatExistsElseError(
+		"plugin src root", fSys, []konfig.NotedFunc{
+			{
+				Note: "relative to unit test",
+				F: func() string {
+					return filepath.Clean(
+						filepath.Join(
+							os.Getenv("PWD"),
+							"..", "..",
+							konfig.RelPluginHome))
+				},
+			},
+			{
+				Note: "relative to unit test (internal pkg)",
+				F: func() string {
+					return filepath.Clean(
+						filepath.Join(
+							os.Getenv("PWD"),
+							"..", "..", "..", "..",
+							konfig.RelPluginHome))
+				},
+			},
+			{
+				Note: "relative to api package",
+				F: func() string {
+					return filepath.Clean(
+						filepath.Join(
+							os.Getenv("PWD"),
+							"..", "..", "..",
+							konfig.RelPluginHome))
+				},
+			},
+			{
+				Note: "old style $GOPATH",
+				F: func() string {
+					return filepath.Join(
+						os.Getenv("GOPATH"),
+						"src", konfig.DomainName,
+						konfig.ProgramName, konfig.RelPluginHome)
+				},
+			},
+			{
+				Note: "HOME with literal 'gopath'",
+				F: func() string {
+					return filepath.Join(
+						konfig.HomeDir(), "gopath",
+						"src", konfig.DomainName,
+						konfig.ProgramName, konfig.RelPluginHome)
+				},
+			},
+			{
+				Note: "home directory",
+				F: func() string {
+					return filepath.Join(
+						konfig.HomeDir(), konfig.DomainName,
+						konfig.ProgramName, konfig.RelPluginHome)
+				},
+			},
+		})
+}
+
+// FileYoungerThan returns true if the file both exists and has an
+// age <= the Duration argument.
+func FileYoungerThan(path string, d time.Duration) bool {
+	fi, err := os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	}
+	return time.Since(fi.ModTime()) <= d
+}
+
+// FileModifiedAfter returns true if the file both exists and was
+// modified after the given time.
+func FileModifiedAfter(path string, t time.Time) bool {
+	fi, err := os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	}
+	return fi.ModTime().After(t)
+}
+
+func FileExists(path string) bool {
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	}
+	return true
+}
+
+// GetResMapWithIDAnnotation returns a new copy of the given ResMap with the ResIds annotated in each Resource
+func GetResMapWithIDAnnotation(rm resmap.ResMap) (resmap.ResMap, error) {
+	inputRM := rm.DeepCopy()
+	for _, r := range inputRM.Resources() {
+		idString, err := yaml.Marshal(r.CurId())
+		if err != nil {
+			return nil, err
+		}
+		annotations := r.GetAnnotations()
+		annotations[idAnnotation] = string(idString)
+		if err = r.SetAnnotations(annotations); err != nil {
+			return nil, err
+		}
+	}
+	return inputRM, nil
+}
+
+// UpdateResMapValues updates the Resource value in the given ResMap
+// with the emitted Resource values in output.
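+// The update is three-way: resources emitted by the plugin replace their
+// counterparts in rm (matched by current ResId), new resources are appended,
+// and resources present in rm but absent from the output are removed.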
+func UpdateResMapValues(pluginName string, h *resmap.PluginHelpers, output []byte, rm resmap.ResMap) error { + mapFactory := h.ResmapFactory() + resFactory := mapFactory.RF() + resources, err := resFactory.SliceFromBytes(output) + if err != nil { + return err + } + // Don't use resources here, or error message will be unfriendly to plugin builders + newMap, err := mapFactory.NewResMapFromBytes([]byte{}) + if err != nil { + return err + } + + for _, r := range resources { + // stale--not manipulated by plugin transformers + if err = removeIDAnnotation(r); err != nil { + return err + } + + // Add to the new map, checking for duplicates + if err := newMap.Append(r); err != nil { + prettyID, err := json.Marshal(r.CurId()) + if err != nil { + prettyID = []byte(r.CurId().String()) + } + return fmt.Errorf("plugin %s generated duplicate resource: %s", pluginName, prettyID) + } + + // Add to or update the old map + oldIdx, err := rm.GetIndexOfCurrentId(r.CurId()) + if err != nil { + return err + } + if oldIdx != -1 { + rm.GetByIndex(oldIdx).ResetRNode(r) + } else { + if err := rm.Append(r); err != nil { + return err + } + } + } + + // Remove items the transformer deleted from the old map + for _, id := range rm.AllIds() { + newIdx, _ := newMap.GetIndexOfCurrentId(id) + if newIdx == -1 { + if err = rm.Remove(id); err != nil { + return err + } + } + } + + return nil +} + +func removeIDAnnotation(r *resource.Resource) error { + // remove the annotation set by Kustomize to track the resource + annotations := r.GetAnnotations() + delete(annotations, idAnnotation) + return r.SetAnnotations(annotations) +} + +// UpdateResourceOptions updates the generator options for each resource in the +// given ResMap based on plugin provided annotations. +func UpdateResourceOptions(rm resmap.ResMap) (resmap.ResMap, error) { + for _, r := range rm.Resources() { + // Disable name hashing by default and require plugin to explicitly + // request it for each resource. + annotations := r.GetAnnotations() + behavior := annotations[BehaviorAnnotation] + var needsHash bool + if val, ok := annotations[HashAnnotation]; ok { + b, err := strconv.ParseBool(val) + if err != nil { + return nil, fmt.Errorf( + "the annotation %q contains an invalid value (%q)", + HashAnnotation, val) + } + needsHash = b + } + delete(annotations, HashAnnotation) + delete(annotations, BehaviorAnnotation) + if err := r.SetAnnotations(annotations); err != nil { + return nil, err + } + if needsHash { + r.EnableHashSuffix() + } + r.SetBehavior(types.NewGenerationBehavior(behavior)) + } + return rm, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/target/errmissingkustomization.go b/vendor/sigs.k8s.io/kustomize/api/internal/target/errmissingkustomization.go new file mode 100644 index 000000000..8b939db1e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/internal/target/errmissingkustomization.go @@ -0,0 +1,44 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+package target
+
+import (
+	"fmt"
+	"strings"
+
+	"sigs.k8s.io/kustomize/api/konfig"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+)
+
+type errMissingKustomization struct {
+	path string
+}
+
+func (e *errMissingKustomization) Error() string {
+	return fmt.Sprintf(
+		"unable to find one of %v in directory '%s'",
+		commaOr(quoted(konfig.RecognizedKustomizationFileNames())),
+		e.path)
+}
+
+func IsMissingKustomizationFileError(err error) bool {
+	e := &errMissingKustomization{}
+	return errors.As(err, &e)
+}
+
+func NewErrMissingKustomization(p string) *errMissingKustomization {
+	return &errMissingKustomization{path: p}
+}
+
+func quoted(l []string) []string {
+	r := make([]string, len(l))
+	for i, v := range l {
+		r[i] = "'" + v + "'"
+	}
+	return r
+}
+
+func commaOr(q []string) string {
+	return strings.Join(q[:len(q)-1], ", ") + " or " + q[len(q)-1]
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go
new file mode 100644
index 000000000..6e06a14ba
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go
@@ -0,0 +1,574 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package target
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+
+	"sigs.k8s.io/kustomize/api/ifc"
+	"sigs.k8s.io/kustomize/api/internal/accumulator"
+	"sigs.k8s.io/kustomize/api/internal/builtins"
+	"sigs.k8s.io/kustomize/api/internal/kusterr"
+	"sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig"
+	"sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers"
+	"sigs.k8s.io/kustomize/api/internal/plugins/loader"
+	"sigs.k8s.io/kustomize/api/internal/utils"
+	"sigs.k8s.io/kustomize/api/konfig"
+	load "sigs.k8s.io/kustomize/api/loader"
+	"sigs.k8s.io/kustomize/api/resmap"
+	"sigs.k8s.io/kustomize/api/resource"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/yaml"
+)
+
+// KustTarget encapsulates the entirety of a kustomization build.
+type KustTarget struct {
+	kustomization *types.Kustomization
+	kustFileName  string
+	ldr           ifc.Loader
+	validator     ifc.Validator
+	rFactory      *resmap.Factory
+	pLdr          *loader.Loader
+	origin        *resource.Origin
+}
+
+// NewKustTarget returns a new instance of KustTarget.
+func NewKustTarget(
+	ldr ifc.Loader,
+	validator ifc.Validator,
+	rFactory *resmap.Factory,
+	pLdr *loader.Loader) *KustTarget {
+	return &KustTarget{
+		ldr:       ldr,
+		validator: validator,
+		rFactory:  rFactory,
+		pLdr:      pLdr.LoaderWithWorkingDir(ldr.Root()),
+	}
+}
+
+// Load attempts to load the target's kustomization file.
+func (kt *KustTarget) Load() error {
+	content, kustFileName, err := LoadKustFile(kt.ldr)
+	if err != nil {
+		return err
+	}
+
+	var k types.Kustomization
+	if err := k.Unmarshal(content); err != nil {
+		return err
+	}
+
+	// show warning message when using deprecated fields.
+	if warningMessages := k.CheckDeprecatedFields(); warningMessages != nil {
+		for _, msg := range *warningMessages {
+			fmt.Fprintf(os.Stderr, "%v\n", msg)
+		}
+	}
+
+	k.FixKustomization()
+
+	// error out if the kustomization is empty
+	if err := k.CheckEmpty(); err != nil {
+		return err
+	}
+
+	errs := k.EnforceFields()
+	if len(errs) > 0 {
+		return fmt.Errorf(
+			"Failed to read kustomization file under %s:\n"+
+				strings.Join(errs, "\n"), kt.ldr.Root())
+	}
+	kt.kustomization = &k
+	kt.kustFileName = kustFileName
+	return nil
+}
+
+// Kustomization returns a copy of the immutable, internal kustomization object.
+func (kt *KustTarget) Kustomization() types.Kustomization {
+	var result types.Kustomization
+	b, _ := json.Marshal(*kt.kustomization)
+	json.Unmarshal(b, &result)
+	return result
+}
+
+func LoadKustFile(ldr ifc.Loader) ([]byte, string, error) {
+	var content []byte
+	match := 0
+	var kustFileName string
+	for _, kf := range konfig.RecognizedKustomizationFileNames() {
+		c, err := ldr.Load(kf)
+		if err == nil {
+			match++
+			content = c
+			kustFileName = kf
+		}
+	}
+	switch match {
+	case 0:
+		return nil, "", NewErrMissingKustomization(ldr.Root())
+	case 1:
+		return content, kustFileName, nil
+	default:
+		return nil, "", fmt.Errorf(
+			"Found multiple kustomization files under: %s\n", ldr.Root())
+	}
+}
+
+// MakeCustomizedResMap creates a fully customized ResMap
+// per the instructions contained in its kustomization instance.
+func (kt *KustTarget) MakeCustomizedResMap() (resmap.ResMap, error) {
+	return kt.makeCustomizedResMap()
+}
+
+func (kt *KustTarget) makeCustomizedResMap() (resmap.ResMap, error) {
+	var origin *resource.Origin
+	if len(kt.kustomization.BuildMetadata) != 0 {
+		origin = &resource.Origin{}
+	}
+	kt.origin = origin
+	ra, err := kt.AccumulateTarget()
+	if err != nil {
+		return nil, err
+	}
+
+	// The following steps must be done last, not as part of
+	// the recursion implicit in AccumulateTarget.
+
+	err = kt.addHashesToNames(ra)
+	if err != nil {
+		return nil, err
+	}
+
+	// Given that names have changed (prefixes/suffixes added),
+	// fix all the back references to those names.
+	err = ra.FixBackReferences()
+	if err != nil {
+		return nil, err
+	}
+
+	// With all the back references fixed, it's OK to resolve Vars.
+	err = ra.ResolveVars()
+	if err != nil {
+		return nil, err
+	}
+
+	err = kt.IgnoreLocal(ra)
+	if err != nil {
+		return nil, err
+	}
+
+	return ra.ResMap(), nil
+}
+
+func (kt *KustTarget) addHashesToNames(
+	ra *accumulator.ResAccumulator) error {
+	p := builtins.NewHashTransformerPlugin()
+	err := kt.configureBuiltinPlugin(p, nil, builtinhelpers.HashTransformer)
+	if err != nil {
+		return err
+	}
+	return ra.Transform(p)
+}
+
+// AccumulateTarget returns a new ResAccumulator,
+// holding customized resources and the data/rules used
+// to do so. The name back references and vars are
+// not yet fixed.
+// The origin parameter is used through the recursive calls
+// to annotate each resource with information about where
+// the resource came from, e.g. the file and/or the repository
+// it originated from.
+// As an entrypoint, one can pass an empty resource.Origin object to
+// AccumulateTarget. As AccumulateTarget moves recursively
+// through kustomization directories, it updates `origin.path`
+// accordingly. When a remote base is found, it updates `origin.repo`
+// and `origin.ref` accordingly.
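+//
+// A minimal sketch (editorial; kt.origin is package-internal state, set
+// for real builds by makeCustomizedResMap when buildMetadata asks for
+// origin annotations):
+//
+//	kt.origin = &resource.Origin{}   // enable origin tracking
+//	ra, err := kt.AccumulateTarget() // resources now carry origin info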
+func (kt *KustTarget) AccumulateTarget() (
+	ra *accumulator.ResAccumulator, err error) {
+	return kt.accumulateTarget(accumulator.MakeEmptyAccumulator())
+}
+
+// ra should be empty when this KustTarget is a Kustomization, or the ra of the parent if this KustTarget is a Component
+// (or empty if the Component does not have a parent).
+func (kt *KustTarget) accumulateTarget(ra *accumulator.ResAccumulator) (
+	resRa *accumulator.ResAccumulator, err error) {
+	ra, err = kt.accumulateResources(ra, kt.kustomization.Resources)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "accumulating resources")
+	}
+	ra, err = kt.accumulateComponents(ra, kt.kustomization.Components)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err, "accumulating components")
+	}
+	tConfig, err := builtinconfig.MakeTransformerConfig(
+		kt.ldr, kt.kustomization.Configurations)
+	if err != nil {
+		return nil, err
+	}
+	err = ra.MergeConfig(tConfig)
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "merging config %v", tConfig)
+	}
+	crdTc, err := accumulator.LoadConfigFromCRDs(kt.ldr, kt.kustomization.Crds)
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "loading CRDs %v", kt.kustomization.Crds)
+	}
+	err = ra.MergeConfig(crdTc)
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "merging CRDs %v", crdTc)
+	}
+	err = kt.runGenerators(ra)
+	if err != nil {
+		return nil, err
+	}
+	err = kt.runTransformers(ra)
+	if err != nil {
+		return nil, err
+	}
+	err = kt.runValidators(ra)
+	if err != nil {
+		return nil, err
+	}
+	err = ra.MergeVars(kt.kustomization.Vars)
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "merging vars %v", kt.kustomization.Vars)
+	}
+	return ra, nil
+}
+
+// IgnoreLocal drops the local resource by checking the annotation "config.kubernetes.io/local-config".
+func (kt *KustTarget) IgnoreLocal(ra *accumulator.ResAccumulator) error {
+	rf := kt.rFactory.RF()
+	if rf.IncludeLocalConfigs {
+		return nil
+	}
+	remainRes, err := rf.DropLocalNodes(ra.ResMap().ToRNodeSlice())
+	if err != nil {
+		return err
+	}
+	return ra.Intersection(kt.rFactory.FromResourceSlice(remainRes))
+}
+
+func (kt *KustTarget) runGenerators(
+	ra *accumulator.ResAccumulator) error {
+	var generators []*resmap.GeneratorWithProperties
+	gs, err := kt.configureBuiltinGenerators()
+	if err != nil {
+		return err
+	}
+	generators = append(generators, gs...)
+
+	gs, err = kt.configureExternalGenerators()
+	if err != nil {
+		return errors.WrapPrefixf(err, "loading generator plugins")
+	}
+	generators = append(generators, gs...)
+	for i, g := range generators {
+		resMap, err := g.Generate()
+		if err != nil {
+			return err
+		}
+		if resMap != nil {
+			err = resMap.AddOriginAnnotation(generators[i].Origin)
+			if err != nil {
+				return errors.WrapPrefixf(err, "adding origin annotations for generator %v", g)
+			}
+		}
+		err = ra.AbsorbAll(resMap)
+		if err != nil {
+			return errors.WrapPrefixf(err, "merging from generator %v", g)
+		}
+	}
+	return nil
+}
+
+func (kt *KustTarget) configureExternalGenerators() (
+	[]*resmap.GeneratorWithProperties, error) {
+	ra := accumulator.MakeEmptyAccumulator()
+	var generatorPaths []string
+	for _, p := range kt.kustomization.Generators {
+		// handle inline generators
+		rm, err := kt.rFactory.NewResMapFromBytes([]byte(p))
+		if err != nil {
+			// not an inline config
+			generatorPaths = append(generatorPaths, p)
+			continue
+		}
+		// inline config, track the origin
+		if kt.origin != nil {
+			resources := rm.Resources()
+			for _, r := range resources {
+				r.SetOrigin(kt.origin.Append(kt.kustFileName))
+				rm.Replace(r)
+			}
+		}
+		if err = ra.AppendAll(rm); err != nil {
+			return nil, errors.WrapPrefixf(err, "configuring external generator")
+		}
+	}
+	ra, err := kt.accumulateResources(ra, generatorPaths)
+	if err != nil {
+		return nil, err
+	}
+	return kt.pLdr.LoadGenerators(kt.ldr, kt.validator, ra.ResMap())
+}
+
+func (kt *KustTarget) runTransformers(ra *accumulator.ResAccumulator) error {
+	var r []*resmap.TransformerWithProperties
+	tConfig := ra.GetTransformerConfig()
+	lts, err := kt.configureBuiltinTransformers(tConfig)
+	if err != nil {
+		return err
+	}
+	r = append(r, lts...)
+	lts, err = kt.configureExternalTransformers(kt.kustomization.Transformers)
+	if err != nil {
+		return err
+	}
+	r = append(r, lts...)
+	return ra.Transform(newMultiTransformer(r))
+}
+
+func (kt *KustTarget) configureExternalTransformers(transformers []string) ([]*resmap.TransformerWithProperties, error) {
+	ra := accumulator.MakeEmptyAccumulator()
+	var transformerPaths []string
+	for _, p := range transformers {
+		// handle inline transformers
+		rm, err := kt.rFactory.NewResMapFromBytes([]byte(p))
+		if err != nil {
+			// not an inline config
+			transformerPaths = append(transformerPaths, p)
+			continue
+		}
+		// inline config, track the origin
+		if kt.origin != nil {
+			resources := rm.Resources()
+			for _, r := range resources {
+				r.SetOrigin(kt.origin.Append(kt.kustFileName))
+				rm.Replace(r)
+			}
+		}
+
+		if err = ra.AppendAll(rm); err != nil {
+			return nil, errors.WrapPrefixf(err, "configuring external transformer")
+		}
+	}
+	ra, err := kt.accumulateResources(ra, transformerPaths)
+	if err != nil {
+		return nil, err
+	}
+	return kt.pLdr.LoadTransformers(kt.ldr, kt.validator, ra.ResMap())
+}
+
+func (kt *KustTarget) runValidators(ra *accumulator.ResAccumulator) error {
+	validators, err := kt.configureExternalTransformers(kt.kustomization.Validators)
+	if err != nil {
+		return err
+	}
+	for _, v := range validators {
+		// Validators shouldn't modify the resource map
+		original := ra.ResMap().DeepCopy()
+		err = v.Transform(ra.ResMap())
+		if err != nil {
+			return err
+		}
+		newMap := ra.ResMap().DeepCopy()
+		if err = kt.removeValidatedByLabel(newMap); err != nil {
+			return err
+		}
+		if err = original.ErrorIfNotEqualSets(newMap); err != nil {
+			return fmt.Errorf("validator shouldn't modify the resource map: %v", err)
+		}
+	}
+	return nil
+}
+
+func (kt *KustTarget) removeValidatedByLabel(rm resmap.ResMap) error {
+	resources := rm.Resources()
+	for _, r := range resources {
+		labels := r.GetLabels()
+		if _, found := labels[konfig.ValidatedByLabelKey]; !found {
+			continue
+		}
+		delete(labels, konfig.ValidatedByLabelKey)
+		if err := r.SetLabels(labels); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// accumulateResources fills the given resourceAccumulator
+// with resources read from the given list of paths.
+func (kt *KustTarget) accumulateResources(
+	ra *accumulator.ResAccumulator, paths []string) (*accumulator.ResAccumulator, error) {
+	for _, path := range paths {
+		// try loading resource as file then as base (directory or git repository)
+		if errF := kt.accumulateFile(ra, path); errF != nil {
+			// not much we can do if the error is an HTTP error so we bail out
+			if errors.Is(errF, load.ErrHTTP) {
+				return nil, errF
+			}
+			ldr, err := kt.ldr.New(path)
+			if err != nil {
+				if kusterr.IsMalformedYAMLError(errF) { // Some error occurred while trying to decode YAML file
+					return nil, errF
+				}
+				return nil, errors.WrapPrefixf(
+					err, "accumulation err='%s'", errF.Error())
+			}
+			// store the origin, we'll need it later
+			origin := kt.origin.Copy()
+			if kt.origin != nil {
+				kt.origin = kt.origin.Append(path)
+				ra, err = kt.accumulateDirectory(ra, ldr, false)
+				// after we are done recursing through the directory, reset the origin
+				kt.origin = &origin
+			} else {
+				ra, err = kt.accumulateDirectory(ra, ldr, false)
+			}
+			if err != nil {
+				if kusterr.IsMalformedYAMLError(errF) { // Some error occurred while trying to decode YAML file
+					return nil, errF
+				}
+				return nil, errors.WrapPrefixf(
+					err, "accumulation err='%s'", errF.Error())
+			}
+		}
+	}
+	return ra, nil
+}
+
+// accumulateComponents fills the given resourceAccumulator
+// with resources read from the given list of component paths.
+func (kt *KustTarget) accumulateComponents(
+	ra *accumulator.ResAccumulator, paths []string) (*accumulator.ResAccumulator, error) {
+	for _, path := range paths {
+		// Components always refer to directories
+		ldr, errL := kt.ldr.New(path)
+		if errL != nil {
+			return nil, fmt.Errorf("loader.New %q", errL)
+		}
+		var errD error
+		// store the origin, we'll need it later
+		origin := kt.origin.Copy()
+		if kt.origin != nil {
+			kt.origin = kt.origin.Append(path)
+			ra, errD = kt.accumulateDirectory(ra, ldr, true)
+			// after we are done recursing through the directory, reset the origin
+			kt.origin = &origin
+		} else {
+			ra, errD = kt.accumulateDirectory(ra, ldr, true)
+		}
+		if errD != nil {
+			return nil, fmt.Errorf("accumulateDirectory: %q", errD)
+		}
+	}
+	return ra, nil
+}
+
+func (kt *KustTarget) accumulateDirectory(
+	ra *accumulator.ResAccumulator, ldr ifc.Loader, isComponent bool) (*accumulator.ResAccumulator, error) {
+	defer ldr.Cleanup()
+	subKt := NewKustTarget(ldr, kt.validator, kt.rFactory, kt.pLdr)
+	err := subKt.Load()
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "couldn't make target for path '%s'", ldr.Root())
+	}
+	subKt.kustomization.BuildMetadata = kt.kustomization.BuildMetadata
+	subKt.origin = kt.origin
+	var bytes []byte
+	if openApiPath, exists := subKt.Kustomization().OpenAPI["path"]; exists {
+		bytes, err = ldr.Load(openApiPath)
+		if err != nil {
+			return nil, err
+		}
+	}
+	err = openapi.SetSchema(subKt.Kustomization().OpenAPI, bytes, false)
+	if err != nil {
+		return nil, err
+	}
+	if isComponent && subKt.kustomization.Kind != types.ComponentKind {
+		return nil, fmt.Errorf(
+			"expected kind '%s' for path '%s' but got '%s'", types.ComponentKind, ldr.Root(), subKt.kustomization.Kind)
+	} else if !isComponent && subKt.kustomization.Kind == types.ComponentKind {
+		return nil, fmt.Errorf(
+			"expected kind != '%s' for path '%s'", types.ComponentKind, ldr.Root())
+	}
+
+	var subRa *accumulator.ResAccumulator
+	if isComponent {
+		// Components don't create a new accumulator: the kustomization directives are added to the current accumulator
+		subRa, err = subKt.accumulateTarget(ra)
+		ra = accumulator.MakeEmptyAccumulator()
+	} else {
+		// Child Kustomizations create a new accumulator which resolves their kustomization directives, which will later
+		// be merged into the current accumulator.
+		subRa, err = subKt.AccumulateTarget()
+	}
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "recursed accumulation of path '%s'", ldr.Root())
+	}
+	err = ra.MergeAccumulator(subRa)
+	if err != nil {
+		return nil, errors.WrapPrefixf(
+			err, "recursed merging from path '%s'", ldr.Root())
+	}
+	return ra, nil
+}
+
+func (kt *KustTarget) accumulateFile(
+	ra *accumulator.ResAccumulator, path string) error {
+	resources, err := kt.rFactory.FromFile(kt.ldr, path)
+	if err != nil {
+		return errors.WrapPrefixf(err, "accumulating resources from '%s'", path)
+	}
+	if kt.origin != nil {
+		originAnno, err := kt.origin.Append(path).String()
+		if err != nil {
+			return errors.WrapPrefixf(err, "cannot add path annotation for '%s'", path)
+		}
+		err = resources.AnnotateAll(utils.OriginAnnotationKey, originAnno)
+		if err != nil || originAnno == "" {
+			return errors.WrapPrefixf(err, "cannot add path annotation for '%s'", path)
+		}
+	}
+	err = ra.AppendAll(resources)
+	if err != nil {
+		return errors.WrapPrefixf(err, "merging resources from '%s'", path)
+	}
+	return nil
+}
+
+func (kt *KustTarget) configureBuiltinPlugin(
+	p resmap.Configurable, c interface{}, bpt builtinhelpers.BuiltinPluginType) (err error) {
+	var y []byte
+	if c != nil {
+		y, err = yaml.Marshal(c)
+		if err != nil {
+			return errors.WrapPrefixf(
+				err, "builtin %s marshal", bpt)
+		}
+	}
+	err = p.Config(
+		resmap.NewPluginHelpers(
+			kt.ldr, kt.validator, kt.rFactory, kt.pLdr.Config()),
+		y)
+	if err != nil {
+		return errors.WrapPrefixf(
+			err, "trouble configuring builtin %s with config: `\n%s`", bpt, string(y))
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go
new file mode 100644
index 000000000..b589961e5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go
@@ -0,0 +1,446 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package target
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig"
+	"sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers"
+	"sigs.k8s.io/kustomize/api/resmap"
+	"sigs.k8s.io/kustomize/api/resource"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Functions dedicated to configuring the builtin
+// transformer and generator plugins using config data
+// read from a kustomization file and from the
+// config.TransformerConfig, whose data may be a
+// mix of hardcoded values and data read from file.
+//
+// Non-builtin plugins will get their configuration
+// from their own dedicated structs and YAML files.
+//
+// There are some loops in the functions below because
+// the kustomization file would, say, allow someone to
+// request multiple secrets be made, or run multiple
+// image tag transforms. In these cases, we'll need
+// N plugin instances with differing configurations.
+
+func (kt *KustTarget) configureBuiltinGenerators() (
+	result []*resmap.GeneratorWithProperties, err error) {
+	for _, bpt := range []builtinhelpers.BuiltinPluginType{
+		builtinhelpers.ConfigMapGenerator,
+		builtinhelpers.SecretGenerator,
+		builtinhelpers.HelmChartInflationGenerator,
+	} {
+		r, err := generatorConfigurators[bpt](
+			kt, bpt, builtinhelpers.GeneratorFactories[bpt])
+		if err != nil {
+			return nil, err
+		}
+
+		var generatorOrigin *resource.Origin
+		if kt.origin != nil {
+			generatorOrigin = &resource.Origin{
+				Repo:         kt.origin.Repo,
+				Ref:          kt.origin.Ref,
+				ConfiguredIn: filepath.Join(kt.origin.Path, kt.kustFileName),
+				ConfiguredBy: yaml.ResourceIdentifier{
+					TypeMeta: yaml.TypeMeta{
+						APIVersion: "builtin",
+						Kind:       bpt.String(),
+					},
+				},
+			}
+		}
+
+		for i := range r {
+			result = append(result, &resmap.GeneratorWithProperties{Generator: r[i], Origin: generatorOrigin})
+		}
+	}
+	return result, nil
+}
+
+func (kt *KustTarget) configureBuiltinTransformers(
+	tc *builtinconfig.TransformerConfig) (
+	result []*resmap.TransformerWithProperties, err error) {
+	for _, bpt := range []builtinhelpers.BuiltinPluginType{
+		builtinhelpers.PatchStrategicMergeTransformer,
+		builtinhelpers.PatchTransformer,
+		builtinhelpers.NamespaceTransformer,
+		builtinhelpers.PrefixTransformer,
+		builtinhelpers.SuffixTransformer,
+		builtinhelpers.LabelTransformer,
+		builtinhelpers.AnnotationsTransformer,
+		builtinhelpers.PatchJson6902Transformer,
+		builtinhelpers.ReplicaCountTransformer,
+		builtinhelpers.ImageTagTransformer,
+		builtinhelpers.ReplacementTransformer,
+	} {
+		r, err := transformerConfigurators[bpt](
+			kt, bpt, builtinhelpers.TransformerFactories[bpt], tc)
+		if err != nil {
+			return nil, err
+		}
+		var transformerOrigin *resource.Origin
+		if kt.origin != nil {
+			transformerOrigin = &resource.Origin{
+				Repo:         kt.origin.Repo,
+				Ref:          kt.origin.Ref,
+				ConfiguredIn: filepath.Join(kt.origin.Path, kt.kustFileName),
+				ConfiguredBy: yaml.ResourceIdentifier{
+					TypeMeta: yaml.TypeMeta{
+						APIVersion: "builtin",
+						Kind:       bpt.String(),
+					},
+				},
+			}
+		}
+		for i := range r {
+			result = append(result, &resmap.TransformerWithProperties{Transformer: r[i], Origin: transformerOrigin})
+		}
+	}
+	return result, nil
+}
+
+type gFactory func() resmap.GeneratorPlugin
+
+var generatorConfigurators = map[builtinhelpers.BuiltinPluginType]func(
+	kt *KustTarget,
+	bpt builtinhelpers.BuiltinPluginType,
+	factory gFactory) (result []resmap.Generator, err error){
+	builtinhelpers.SecretGenerator: func(kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f gFactory) (
+		result []resmap.Generator, err error) {
+		var c struct {
+			types.SecretArgs
+		}
+		for _, args := range kt.kustomization.SecretGenerator {
+			c.SecretArgs = args
+			c.SecretArgs.Options = types.MergeGlobalOptionsIntoLocal(
+				c.SecretArgs.Options, kt.kustomization.GeneratorOptions)
+			p := f()
+			err := kt.configureBuiltinPlugin(p, c, bpt)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, p)
+		}
+		return
+	},
+
+	builtinhelpers.ConfigMapGenerator: func(kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f gFactory) (
+		result []resmap.Generator, err error) {
+		var c struct {
+			types.ConfigMapArgs
+		}
+		for _, args := range kt.kustomization.ConfigMapGenerator {
+			c.ConfigMapArgs = args
+			c.ConfigMapArgs.Options = types.MergeGlobalOptionsIntoLocal(
+				c.ConfigMapArgs.Options, kt.kustomization.GeneratorOptions)
+			p := f()
+			err := kt.configureBuiltinPlugin(p, c, bpt)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, p)
+		}
+		return
+	},
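+
+	// Editorial note (not part of upstream kustomize): the secret and
+	// configMap configurators above fold the kustomization-wide
+	// generatorOptions into each generator's local Options via
+	// types.MergeGlobalOptionsIntoLocal, so a global setting such as
+	//
+	//	generatorOptions:
+	//	  disableNameSuffixHash: true
+	//
+	// reaches every generated ConfigMap and Secret unless overridden locally.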
+
+	builtinhelpers.HelmChartInflationGenerator: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f gFactory) (
+		result []resmap.Generator, err error) {
+		var c struct {
+			types.HelmGlobals
+			types.HelmChart
+		}
+		var globals types.HelmGlobals
+		if kt.kustomization.HelmGlobals != nil {
+			globals = *kt.kustomization.HelmGlobals
+		}
+		for _, chart := range kt.kustomization.HelmCharts {
+			c.HelmGlobals = globals
+			c.HelmChart = chart
+			p := f()
+			if err = kt.configureBuiltinPlugin(p, c, bpt); err != nil {
+				return nil, err
+			}
+			result = append(result, p)
+		}
+		return
+	},
+}
+
+type tFactory func() resmap.TransformerPlugin
+
+var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func(
+	kt *KustTarget,
+	bpt builtinhelpers.BuiltinPluginType,
+	f tFactory,
+	tc *builtinconfig.TransformerConfig) (result []resmap.Transformer, err error){
+	builtinhelpers.NamespaceTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		if kt.kustomization.Namespace == "" {
+			return
+		}
+		var c struct {
+			types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
+			FieldSpecs       []types.FieldSpec
+		}
+		c.Namespace = kt.kustomization.Namespace
+		c.FieldSpecs = tc.NameSpace
+		p := f()
+		err = kt.configureBuiltinPlugin(p, c, bpt)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, p)
+		return
+	},
+
+	builtinhelpers.PatchJson6902Transformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, _ *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		var c struct {
+			Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"`
+			Path   string          `json:"path,omitempty" yaml:"path,omitempty"`
+			JsonOp string          `json:"jsonOp,omitempty" yaml:"jsonOp,omitempty"`
+		}
+		for _, args := range kt.kustomization.PatchesJson6902 {
+			c.Target = args.Target
+			c.Path = args.Path
+			c.JsonOp = args.Patch
+			p := f()
+			err = kt.configureBuiltinPlugin(p, c, bpt)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, p)
+		}
+		return
+	},
+	builtinhelpers.PatchStrategicMergeTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, _ *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		if len(kt.kustomization.PatchesStrategicMerge) == 0 {
+			return
+		}
+		var c struct {
+			Paths []types.PatchStrategicMerge `json:"paths,omitempty" yaml:"paths,omitempty"`
+		}
+		c.Paths = kt.kustomization.PatchesStrategicMerge
+		p := f()
+		err = kt.configureBuiltinPlugin(p, c, bpt)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, p)
+		return
+	},
+	builtinhelpers.PatchTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, _ *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		if len(kt.kustomization.Patches) == 0 {
+			return
+		}
+		var c struct {
+			Path    string          `json:"path,omitempty" yaml:"path,omitempty"`
+			Patch   string          `json:"patch,omitempty" yaml:"patch,omitempty"`
+			Target  *types.Selector `json:"target,omitempty" yaml:"target,omitempty"`
+			Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"`
+		}
+		for _, pc := range kt.kustomization.Patches {
+			c.Target = pc.Target
+			c.Patch = pc.Patch
+			c.Path = pc.Path
+			c.Options = pc.Options
+			p := f()
+			err = kt.configureBuiltinPlugin(p, c, bpt)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, p)
+		}
+		return
+	},
+	builtinhelpers.LabelTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		if len(kt.kustomization.Labels) == 0 && len(kt.kustomization.CommonLabels) == 0 {
+			return
+		}
+		for _, label := range kt.kustomization.Labels {
+			var c struct {
+				Labels     map[string]string
+				FieldSpecs []types.FieldSpec
+			}
+			c.Labels = label.Pairs
+			fss := types.FsSlice(label.FieldSpecs)
+			// merge the custom fieldSpecs with the default
+			if label.IncludeSelectors {
+				fss, err = fss.MergeAll(tc.CommonLabels)
+			} else {
+				// merge spec/template/metadata fieldSpecs if includeTemplates flag is true
+				if label.IncludeTemplates {
+					fss, err = fss.MergeAll(tc.TemplateLabels)
+					if err != nil {
+						return nil, errors.WrapPrefixf(err, "failed to merge template fieldSpec")
+					}
+				}
+				// only add to metadata by default
+				fss, err = fss.MergeOne(types.FieldSpec{Path: "metadata/labels", CreateIfNotPresent: true})
+			}
+			if err != nil {
+				return nil, err
+			}
+			c.FieldSpecs = fss
+			p := f()
+			err = kt.configureBuiltinPlugin(p, c, bpt)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, p)
+		}
+		var c struct {
+			Labels     map[string]string
+			FieldSpecs []types.FieldSpec
+		}
+		c.Labels = kt.kustomization.CommonLabels
+		c.FieldSpecs = tc.CommonLabels
+		p := f()
+		err = kt.configureBuiltinPlugin(p, c, bpt)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, p)
+		return
+	},
+	builtinhelpers.AnnotationsTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		if len(kt.kustomization.CommonAnnotations) == 0 {
+			return
+		}
+		var c struct {
+			Annotations map[string]string
+			FieldSpecs  []types.FieldSpec
+		}
+		c.Annotations = kt.kustomization.CommonAnnotations
+		c.FieldSpecs = tc.CommonAnnotations
+		p := f()
+		err = kt.configureBuiltinPlugin(p, c, bpt)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, p)
+		return
+	},
+	builtinhelpers.PrefixTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		if kt.kustomization.NamePrefix == "" {
+			return
+		}
+		var c struct {
+			Prefix     string            `json:"prefix,omitempty" yaml:"prefix,omitempty"`
+			FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"`
+		}
+		c.Prefix = kt.kustomization.NamePrefix
+		c.FieldSpecs = tc.NamePrefix
+		p := f()
+		err = kt.configureBuiltinPlugin(p, c, bpt)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, p)
+		return
+	},
+	builtinhelpers.SuffixTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		if kt.kustomization.NameSuffix == "" {
+			return
+		}
+		var c struct {
+			Suffix     string            `json:"suffix,omitempty" yaml:"suffix,omitempty"`
+			FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"`
+		}
+		c.Suffix = kt.kustomization.NameSuffix
+		c.FieldSpecs = tc.NameSuffix
+		p := f()
+		err = kt.configureBuiltinPlugin(p, c, bpt)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, p)
+		return
+	},
+	builtinhelpers.ImageTagTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		var c struct {
+			ImageTag   types.Image
+			FieldSpecs []types.FieldSpec
+		}
+		for _, args := range kt.kustomization.Images {
+			c.ImageTag = args
+			c.FieldSpecs = tc.Images
+			p := f()
+			err = kt.configureBuiltinPlugin(p, c, bpt)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, p)
+		}
+		return
+	},
+	builtinhelpers.ReplacementTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, _ *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		if len(kt.kustomization.Replacements) == 0 {
+			return
+		}
+		var c struct {
+			Replacements []types.ReplacementField
+		}
+		c.Replacements = kt.kustomization.Replacements
+		p := f()
+		err = kt.configureBuiltinPlugin(p, c, bpt)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, p)
+		return result, nil
+	},
+	builtinhelpers.ReplicaCountTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		var c struct {
+			Replica    types.Replica
+			FieldSpecs []types.FieldSpec
+		}
+		for _, args := range kt.kustomization.Replicas {
+			c.Replica = args
+			c.FieldSpecs = tc.Replicas
+			p := f()
+			err = kt.configureBuiltinPlugin(p, c, bpt)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, p)
+		}
+		return
+	},
+	// No kustomization file keyword for this yet.
+	builtinhelpers.ValueAddTransformer: func(
+		kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) (
+		result []resmap.Transformer, err error) {
+		return nil, fmt.Errorf("valueadd keyword not yet defined")
+	},
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/target/multitransformer.go b/vendor/sigs.k8s.io/kustomize/api/internal/target/multitransformer.go
new file mode 100644
index 000000000..3bc0a8715
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/target/multitransformer.go
@@ -0,0 +1,41 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package target
+
+import (
+	"sigs.k8s.io/kustomize/api/resmap"
+)
+
+// multiTransformer contains a list of transformers.
+type multiTransformer struct {
+	transformers []*resmap.TransformerWithProperties
+}
+
+var _ resmap.Transformer = &multiTransformer{}
+
+// newMultiTransformer constructs a multiTransformer.
+func newMultiTransformer(t []*resmap.TransformerWithProperties) resmap.Transformer {
+	r := &multiTransformer{
+		transformers: make([]*resmap.TransformerWithProperties, len(t)),
+	}
+	copy(r.transformers, t)
+	return r
+}
+
+// Transform applies the member transformers in order to the resources,
+// optionally detecting and erroring on commutation conflict.
+func (o *multiTransformer) Transform(m resmap.ResMap) error {
+	for _, t := range o.transformers {
+		if err := t.Transform(m); err != nil {
+			return err
+		}
+		if t.Origin != nil {
+			if err := m.AddTransformerAnnotation(t.Origin); err != nil {
+				return err
+			}
+		}
+		m.DropEmpties()
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/utils/annotations.go b/vendor/sigs.k8s.io/kustomize/api/internal/utils/annotations.go
new file mode 100644
index 000000000..8a2d5bb4b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/utils/annotations.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package utils
+
+import "sigs.k8s.io/kustomize/api/konfig"
+
+const (
+	// build annotations
+	BuildAnnotationPreviousKinds      = konfig.ConfigAnnoDomain + "/previousKinds"
+	BuildAnnotationPreviousNames      = konfig.ConfigAnnoDomain + "/previousNames"
+	BuildAnnotationPrefixes           = konfig.ConfigAnnoDomain + "/prefixes"
+	BuildAnnotationSuffixes           = konfig.ConfigAnnoDomain + "/suffixes"
+	BuildAnnotationPreviousNamespaces = konfig.ConfigAnnoDomain + "/previousNamespaces"
+	BuildAnnotationsRefBy             = konfig.ConfigAnnoDomain + "/refBy"
+	BuildAnnotationsGenBehavior       = konfig.ConfigAnnoDomain + "/generatorBehavior"
+	BuildAnnotationsGenAddHashSuffix  = konfig.ConfigAnnoDomain + "/needsHashSuffix"
+
+	// the following are only for patches, to specify whether they can change names
+	// and kinds of their targets
+	BuildAnnotationAllowNameChange = konfig.ConfigAnnoDomain + "/allowNameChange"
+	BuildAnnotationAllowKindChange = konfig.ConfigAnnoDomain + "/allowKindChange"
+
+	// for keeping track of origin and transformer data
+	OriginAnnotationKey      = "config.kubernetes.io/origin"
+	TransformerAnnotationKey = "alpha.config.kubernetes.io/transformations"
+
+	Enabled = "enabled"
+)
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go b/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go
new file mode 100644
index 000000000..c1761d2c6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go
@@ -0,0 +1,29 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package utils
+
+import (
+	"fmt"
+	"time"
+
+	"sigs.k8s.io/kustomize/kyaml/errors"
+)
+
+type errTimeOut struct {
+	duration time.Duration
+	cmd      string
+}
+
+// NewErrTimeOut returns a *errTimeOut so that the errors.As check in
+// IsErrTimeout (which looks for a *errTimeOut in the chain) can match it.
+func NewErrTimeOut(d time.Duration, c string) *errTimeOut {
+	return &errTimeOut{duration: d, cmd: c}
+}
+
+func (e errTimeOut) Error() string {
+	return fmt.Sprintf("hit %s timeout running '%s'", e.duration, e.cmd)
+}
+
+func IsErrTimeout(err error) bool {
+	e := &errTimeOut{}
+	return errors.As(err, &e)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/utils/makeResIds.go b/vendor/sigs.k8s.io/kustomize/api/internal/utils/makeResIds.go
new file mode 100644
index 000000000..ec7320c40
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/utils/makeResIds.go
@@ -0,0 +1,68 @@
+// Copyright 2022 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package utils
+
+import (
+	"fmt"
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/resid"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// MakeResIds returns all of an RNode's current and previous Ids
+func MakeResIds(n *yaml.RNode) ([]resid.ResId, error) {
+	var result []resid.ResId
+	apiVersion := n.Field(yaml.APIVersionField)
+	var group, version string
+	if apiVersion != nil {
+		group, version = resid.ParseGroupVersion(yaml.GetValue(apiVersion.Value))
+	}
+	result = append(result, resid.NewResIdWithNamespace(
+		resid.Gvk{Group: group, Version: version, Kind: n.GetKind()}, n.GetName(), n.GetNamespace()),
+	)
+	prevIds, err := PrevIds(n)
+	if err != nil {
+		return nil, err
+	}
+	result = append(result, prevIds...)
+	return result, nil
+}
+
+// PrevIds returns all of an RNode's previous Ids
+func PrevIds(n *yaml.RNode) ([]resid.ResId, error) {
+	var ids []resid.ResId
+	// TODO: merge previous names and namespaces into one list of
+	// pairs on one annotation so there is no chance of error
+	annotations := n.GetAnnotations(
+		BuildAnnotationPreviousNames,
+		BuildAnnotationPreviousNamespaces,
+		BuildAnnotationPreviousKinds)
+	if _, ok := annotations[BuildAnnotationPreviousNames]; !ok {
+		return nil, nil
+	}
+	names := strings.Split(annotations[BuildAnnotationPreviousNames], ",")
+	ns := strings.Split(annotations[BuildAnnotationPreviousNamespaces], ",")
+	kinds := strings.Split(annotations[BuildAnnotationPreviousKinds], ",")
+	// This should never happen
+	if len(names) != len(ns) || len(names) != len(kinds) {
+		return nil, fmt.Errorf(
+			"number of previous names, " +
+				"number of previous namespaces, " +
+				"number of previous kinds not equal")
+	}
+	apiVersion := n.GetApiVersion()
+	group, version := resid.ParseGroupVersion(apiVersion)
+	ids = make([]resid.ResId, 0, len(names))
+	for i := range names {
+		gvk := resid.Gvk{
+			Group:   group,
+			Version: version,
+			Kind:    kinds[i],
+		}
+		ids = append(ids, resid.NewResIdWithNamespace(
+			gvk, names[i], ns[i]))
+	}
+	return ids, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/utils/stringslice.go b/vendor/sigs.k8s.io/kustomize/api/internal/utils/stringslice.go
new file mode 100644
index 000000000..3dc422725
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/utils/stringslice.go
@@ -0,0 +1,44 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package utils
+
+// StringSliceIndex returns the index of the str, else -1.
+func StringSliceIndex(slice []string, str string) int {
+	for i := range slice {
+		if slice[i] == str {
+			return i
+		}
+	}
+	return -1
+}
+
+// StringSliceContains returns true if the slice has the string.
+func StringSliceContains(slice []string, str string) bool {
+	for _, s := range slice {
+		if s == str {
+			return true
+		}
+	}
+	return false
+}
+
+// SameEndingSubSlice returns true if the slices end the same way, e.g.
+// {"a", "b", "c"}, {"b", "c"} => true
+// {"a", "b", "c"}, {"a", "b"} => false
+// If one slice is empty and the other is not, return false.
+func SameEndingSubSlice(shortest, longest []string) bool {
+	if len(shortest) > len(longest) {
+		longest, shortest = shortest, longest
+	}
+	diff := len(longest) - len(shortest)
+	if len(shortest) == 0 {
+		return diff == 0
+	}
+	for i := len(shortest) - 1; i >= 0; i-- {
+		if longest[i+diff] != shortest[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go b/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go
new file mode 100644
index 000000000..0afadd0c3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package utils
+
+import (
+	"time"
+)
+
+// TimedCall runs fn, failing if it doesn't complete in the given duration.
+// The description is used in the timeout error message.
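+//
+// Usage sketch (editorial, not upstream documentation; fetchRemote is a
+// hypothetical slow operation):
+//
+//	err := TimedCall("git fetch", 30*time.Second, func() error {
+//		return fetchRemote()
+//	})
+//
+// Note that fn is not cancelled on timeout; because done is unbuffered,
+// a timed-out fn's goroutine blocks forever on its send and is leaked.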
+func TimedCall(description string, d time.Duration, fn func() error) error {
+	done := make(chan error)
+	timer := time.NewTimer(d)
+	defer timer.Stop()
+	go func() { done <- fn() }()
+	select {
+	case err := <-done:
+		return err
+	case <-timer.C:
+		return NewErrTimeOut(d, description)
+	}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/internal/validate/fieldvalidator.go b/vendor/sigs.k8s.io/kustomize/api/internal/validate/fieldvalidator.go
new file mode 100644
index 000000000..5ccfc3ce7
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/internal/validate/fieldvalidator.go
@@ -0,0 +1,68 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package validate
+
+import (
+	"sigs.k8s.io/kustomize/api/ifc"
+)
+
+// FieldValidator implements ifc.Validator to check
+// the values of various KRM string fields,
+// e.g. labels, annotations, names, namespaces.
+//
+// TODO: Have this use kyaml/yaml/internal/k8sgen/pkg/labels
+// which has label and annotation validation code, but is internal
+// so this impl would need to move to kyaml (a fine idea).
+type FieldValidator struct {
+}
+
+var _ ifc.Validator = (*FieldValidator)(nil)
+
+func NewFieldValidator() *FieldValidator {
+	return &FieldValidator{}
+}
+
+// TODO(#FieldValidator): implement MakeAnnotationValidator
+func (f FieldValidator) MakeAnnotationValidator() func(map[string]string) error {
+	return func(x map[string]string) error {
+		return nil
+	}
+}
+
+// TODO(#FieldValidator): implement MakeAnnotationNameValidator
+func (f FieldValidator) MakeAnnotationNameValidator() func([]string) error {
+	return func(x []string) error {
+		return nil
+	}
+}
+
+// TODO(#FieldValidator): implement MakeLabelValidator
+func (f FieldValidator) MakeLabelValidator() func(map[string]string) error {
+	return func(x map[string]string) error {
+		return nil
+	}
+}
+
+// TODO(#FieldValidator): implement MakeLabelNameValidator
+func (f FieldValidator) MakeLabelNameValidator() func([]string) error {
+	return func(x []string) error {
+		return nil
+	}
+}
+
+// TODO(#FieldValidator): implement ValidateNamespace
+func (f FieldValidator) ValidateNamespace(s string) []string {
+	var errs []string
+	return errs
+}
+
+// TODO(#FieldValidator): implement ErrIfInvalidKey
+func (f FieldValidator) ErrIfInvalidKey(s string) error {
+	return nil
+}
+
+// TODO(#FieldValidator): implement IsEnvVarName
+func (f FieldValidator) IsEnvVarName(k string) error {
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go
new file mode 100644
index 000000000..97c1d6b22
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go
@@ -0,0 +1,47 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinpluginconsts
+
+const commonAnnotationFieldSpecs = `
+commonAnnotations:
+- path: metadata/annotations
+  create: true
+
+- path: spec/template/metadata/annotations
+  create: true
+  version: v1
+  kind: ReplicationController
+
+- path: spec/template/metadata/annotations
+  create: true
+  kind: Deployment
+
+- path: spec/template/metadata/annotations
+  create: true
+  kind: ReplicaSet
+
+- path: spec/template/metadata/annotations
+  create: true
+  kind: DaemonSet
+
+- path: spec/template/metadata/annotations
+  create: true
+  kind: StatefulSet
+
+- path: spec/template/metadata/annotations
+  create: true
+  group: batch
+  kind: Job
+
+- path: spec/jobTemplate/metadata/annotations
+  create: true
+  group: batch
+  kind: CronJob
+
+- path: spec/jobTemplate/spec/template/metadata/annotations
+  create: true
+  group: batch
+  kind: CronJob
+
+`
diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go
new file mode 100644
index 000000000..b2a78f568
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go
@@ -0,0 +1,113 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinpluginconsts
+
+const commonLabelFieldSpecs = `
+commonLabels:
+- path: spec/selector
+  create: true
+  version: v1
+  kind: Service
+
+- path: spec/selector
+  create: true
+  version: v1
+  kind: ReplicationController
+- path: spec/selector/matchLabels
+  create: true
+  kind: Deployment
+
+- path: spec/template/spec/affinity/podAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/template/spec/affinity/podAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/template/spec/affinity/podAntiAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/template/spec/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/template/spec/topologySpreadConstraints/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/selector/matchLabels
+  create: true
+  kind: ReplicaSet
+
+- path: spec/selector/matchLabels
+  create: true
+  kind: DaemonSet
+
+- path: spec/selector/matchLabels
+  create: true
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/affinity/podAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/affinity/podAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/affinity/podAntiAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/topologySpreadConstraints/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/selector/matchLabels
+  create: false
+  group: batch
+  kind: Job
+
+- path: spec/jobTemplate/spec/selector/matchLabels
+  create: false
+  group: batch
+  kind: CronJob
+
+- path: spec/selector/matchLabels
+  create: false
+  group: policy
+  kind: PodDisruptionBudget
+
+- path: spec/podSelector/matchLabels
+  create: false
+  group: networking.k8s.io
+  kind: NetworkPolicy
+
+- path: spec/ingress/from/podSelector/matchLabels
+  create: false
+  group: networking.k8s.io
+  kind: NetworkPolicy
+
+- path: spec/egress/to/podSelector/matchLabels
+  create: false
+  group: networking.k8s.io
+  kind: NetworkPolicy
+` + metadataLabelsFieldSpecs
diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go
new file mode 100644
index 000000000..2f220cb29
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinpluginconsts
+
+import (
+	"bytes"
+)
+
+// GetDefaultFieldSpecs returns default fieldSpecs.
+func GetDefaultFieldSpecs() []byte {
+	configData := [][]byte{
+		[]byte(namePrefixFieldSpecs),
+		[]byte(nameSuffixFieldSpecs),
+		[]byte(commonLabelFieldSpecs),
+		[]byte(templateLabelFieldSpecs),
+		[]byte(commonAnnotationFieldSpecs),
+		[]byte(namespaceFieldSpecs),
+		[]byte(varReferenceFieldSpecs),
+		[]byte(nameReferenceFieldSpecs),
+		[]byte(imagesFieldSpecs),
+		[]byte(replicasFieldSpecs),
+	}
+	return bytes.Join(configData, []byte("\n"))
+}
+
+// GetDefaultFieldSpecsAsMap returns default fieldSpecs
+// as a string->string map.
+func GetDefaultFieldSpecsAsMap() map[string]string {
+	result := make(map[string]string)
+	result["nameprefix"] = namePrefixFieldSpecs
+	result["namesuffix"] = nameSuffixFieldSpecs
+	result["commonlabels"] = commonLabelFieldSpecs
+	result["templatelabels"] = templateLabelFieldSpecs
+	result["commonannotations"] = commonAnnotationFieldSpecs
+	result["namespace"] = namespaceFieldSpecs
+	result["varreference"] = varReferenceFieldSpecs
+	result["namereference"] = nameReferenceFieldSpecs
+	result["images"] = imagesFieldSpecs
+	result["replicas"] = replicasFieldSpecs
+	return result
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go
new file mode 100644
index 000000000..4b7b5faac
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package builtinpluginconsts provides builtin plugin
+// configuration data. Builtin plugins can also be
+// configured individually with plugin config files,
+// in which case the constants in this package are ignored.
+package builtinpluginconsts
diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go
new file mode 100644
index 000000000..b8d8bf1e3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go
@@ -0,0 +1,18 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinpluginconsts
+
+const (
+	imagesFieldSpecs = `
+images:
+- path: spec/containers[]/image
+  create: true
+- path: spec/initContainers[]/image
+  create: true
+- path: spec/template/spec/containers[]/image
+  create: true
+- path: spec/template/spec/initContainers[]/image
+  create: true
+`
+)
diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/metadatalabels.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/metadatalabels.go
new file mode 100644
index 000000000..d070cca4b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/metadatalabels.go
@@ -0,0 +1,51 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinpluginconsts
+
+const metadataLabelsFieldSpecs = `
+- path: metadata/labels
+  create: true
+
+- path: spec/template/metadata/labels
+  create: true
+  version: v1
+  kind: ReplicationController
+
+- path: spec/template/metadata/labels
+  create: true
+  kind: Deployment
+
+- path: spec/template/metadata/labels
+  create: true
+  kind: ReplicaSet
+
+- path: spec/template/metadata/labels
+  create: true
+  kind: DaemonSet
+
+- path: spec/template/metadata/labels
+  create: true
+  group: apps
+  kind: StatefulSet
+
+- path: spec/volumeClaimTemplates[]/metadata/labels
+  create: true
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/metadata/labels
+  create: true
+  group: batch
+  kind: Job
+
+- path: spec/jobTemplate/metadata/labels
+  create: true
+  group: batch
+  kind: CronJob
+
+- path: spec/jobTemplate/spec/template/metadata/labels
+  create: true
+  group: batch
+  kind: CronJob
+`
diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go
new file mode 100644
index 000000000..59a25a61f
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinpluginconsts
+
+const (
+	namePrefixFieldSpecs = `
+namePrefix:
+- path: metadata/name
+`
+)
diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go
new file mode 100644
index 000000000..658b0c34b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go
@@ -0,0 +1,427 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package builtinpluginconsts
+
+// LINT.IfChange
+const (
+	nameReferenceFieldSpecs = `
+nameReference:
+- kind: Deployment
+  fieldSpecs:
+  - path: spec/scaleTargetRef/name
+    kind: HorizontalPodAutoscaler
+
+- kind: ReplicationController
+  fieldSpecs:
+  - path: spec/scaleTargetRef/name
+    kind: HorizontalPodAutoscaler
+
+- kind: ReplicaSet
+  fieldSpecs:
+  - path: spec/scaleTargetRef/name
+    kind: HorizontalPodAutoscaler
+
+- kind: StatefulSet
+  fieldSpecs:
+  - path: spec/scaleTargetRef/name
+    kind: HorizontalPodAutoscaler
+
+- kind: ConfigMap
+  version: v1
+  fieldSpecs:
+  - path: spec/volumes/configMap/name
+    version: v1
+    kind: Pod
+  - path: spec/containers/env/valueFrom/configMapKeyRef/name
+    version: v1
+    kind: Pod
+  - path: spec/initContainers/env/valueFrom/configMapKeyRef/name
+    version: v1
+    kind: Pod
+  - path: spec/containers/envFrom/configMapRef/name
+    version: v1
+    kind: Pod
+  - path: spec/initContainers/envFrom/configMapRef/name
+    version: v1
+    kind: Pod
+  - path: spec/volumes/projected/sources/configMap/name
+    version: v1
+    kind: Pod
+  - path: template/spec/volumes/configMap/name
+    kind: PodTemplate
+  - path: template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: PodTemplate
+  - path: template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: PodTemplate
+  - path: template/spec/containers/envFrom/configMapRef/name
+    kind: PodTemplate
+  - path: template/spec/initContainers/envFrom/configMapRef/name
+    kind: PodTemplate
+  - path: template/spec/volumes/projected/sources/configMap/name
+    kind: PodTemplate
+  - path: spec/template/spec/volumes/configMap/name
+    kind: Deployment
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: Deployment
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: Deployment
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: Deployment
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: Deployment
+  - path: spec/template/spec/volumes/projected/sources/configMap/name
+    kind: Deployment
+  - path: spec/template/spec/volumes/configMap/name
+    kind: ReplicaSet
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/volumes/projected/sources/configMap/name
+    kind: ReplicaSet
+  - path: spec/template/spec/volumes/configMap/name
+    kind: DaemonSet
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/volumes/projected/sources/configMap/name
+    kind: DaemonSet
+  - path: spec/template/spec/volumes/configMap/name
+    kind: StatefulSet
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/projected/sources/configMap/name
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/configMap/name
+    kind: Job
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: Job
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: Job
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: Job
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: Job
+  - path: spec/template/spec/volumes/projected/sources/configMap/name
+    kind: Job
+  - path: spec/jobTemplate/spec/template/spec/volumes/configMap/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/volumes/projected/sources/configMap/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/containers/envFrom/configMapRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: CronJob
+  - path: spec/configSource/configMap
+    kind: Node
+  - path: rules/resourceNames
+    kind: Role
+  - path: rules/resourceNames
+    kind: ClusterRole
+  - path: metadata/annotations/nginx.ingress.kubernetes.io\/fastcgi-params-configmap
+    kind: Ingress
+
+- kind: Secret
+  version: v1
+  fieldSpecs:
+  - path: spec/volumes/secret/secretName
+    version: v1
+    kind: Pod
+  - path: spec/containers/env/valueFrom/secretKeyRef/name
+    version: v1
+    kind: Pod
+  - path: spec/initContainers/env/valueFrom/secretKeyRef/name
+    version: v1
+    kind: Pod
+  - path: spec/containers/envFrom/secretRef/name
+    version: v1
+    kind: Pod
+  - path: spec/initContainers/envFrom/secretRef/name
+    version: v1
+    kind: Pod
+  - path: spec/imagePullSecrets/name
+    version: v1
+    kind: Pod
+  - path: spec/volumes/projected/sources/secret/name
+    version: v1
+    kind: Pod
+  - path: template/spec/volumes/secret/secretName
+    kind: PodTemplate
+  - path: template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: PodTemplate
+  - path: template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: PodTemplate
+  - path: template/spec/containers/envFrom/secretRef/name
+    kind: PodTemplate
+  - path: template/spec/initContainers/envFrom/secretRef/name
+    kind: PodTemplate
+  - path: template/spec/imagePullSecrets/name
+    kind: PodTemplate
+  - path: template/spec/volumes/projected/sources/secret/name
+    kind: PodTemplate
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: Deployment
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: Deployment
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: Deployment
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: Deployment
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: Deployment
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: Deployment
+  - path: spec/template/spec/volumes/projected/sources/secret/name
+    kind: Deployment
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: ReplicaSet
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: ReplicaSet
+  - path: spec/template/spec/volumes/projected/sources/secret/name
+    kind: ReplicaSet
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: DaemonSet
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: DaemonSet
+  - path: spec/template/spec/volumes/projected/sources/secret/name
+    kind: DaemonSet
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: StatefulSet
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/projected/sources/secret/name
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: Job
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: Job
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: Job
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: Job
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: Job
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: Job
+  - path: spec/template/spec/volumes/projected/sources/secret/name
+    kind: Job
+  - path: spec/jobTemplate/spec/template/spec/volumes/secret/secretName
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/volumes/projected/sources/secret/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/containers/envFrom/secretRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/imagePullSecrets/name
+    kind: CronJob
+  - path: spec/tls/secretName
+    kind: Ingress
+  - path: metadata/annotations/ingress.kubernetes.io\/auth-secret
+    kind: Ingress
+  - path: metadata/annotations/nginx.ingress.kubernetes.io\/auth-secret
+    kind: Ingress
+  - path: metadata/annotations/nginx.ingress.kubernetes.io\/auth-tls-secret
+    kind: Ingress
+  - path: spec/tls/secretName
+    kind: Ingress
+  - path: imagePullSecrets/name
+    kind: ServiceAccount
+  - path: parameters/secretName
+    kind: StorageClass
+  - path: parameters/adminSecretName
+    kind: StorageClass
+  - path: parameters/userSecretName
+    kind: StorageClass
+  - path: parameters/secretRef
+    kind: StorageClass
+  - path: rules/resourceNames
+    kind: Role
+  - path: rules/resourceNames
+    kind: ClusterRole
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: Service
+    group: serving.knative.dev
+    version: v1
+  - path: spec/azureFile/secretName
+    kind: PersistentVolume
+
+- kind: Service
+  version: v1
+  fieldSpecs:
+  - path: spec/serviceName
+    kind: StatefulSet
+    group: apps
+  - path: spec/rules/http/paths/backend/serviceName
+    kind: Ingress
+  - path: spec/backend/serviceName
+    kind: Ingress
+  - path: spec/rules/http/paths/backend/service/name
+    kind: Ingress
+  - path: spec/defaultBackend/service/name
+    kind: Ingress
+  - path: spec/service/name
+    kind: APIService
+    group: apiregistration.k8s.io
+  - path: webhooks/clientConfig/service
+    kind: ValidatingWebhookConfiguration
+    group: admissionregistration.k8s.io
+  - path: webhooks/clientConfig/service
+    kind: MutatingWebhookConfiguration
+    group: admissionregistration.k8s.io
+
+- kind: Role
+  group: rbac.authorization.k8s.io
+  fieldSpecs:
+  - path: roleRef/name
+    kind: RoleBinding
+    group: rbac.authorization.k8s.io
+
+- kind: ClusterRole
+  group: rbac.authorization.k8s.io
+  fieldSpecs:
+  - path: roleRef/name
+    kind: RoleBinding
+    group: rbac.authorization.k8s.io
+  - path: roleRef/name
+    kind: ClusterRoleBinding
+    group: rbac.authorization.k8s.io
+
+- kind: ServiceAccount
+  version: v1
+  fieldSpecs:
+  - path: subjects
+    kind: RoleBinding
+    group: rbac.authorization.k8s.io
+  - path: subjects
+    kind: ClusterRoleBinding
+    group: rbac.authorization.k8s.io
+  - path: spec/serviceAccountName
+    kind: Pod
+  - path: spec/template/spec/serviceAccountName
+    kind: StatefulSet
+  - path: spec/template/spec/serviceAccountName
+    kind: Deployment
+  - path: spec/template/spec/serviceAccountName
+    kind: ReplicationController
+  - path: spec/jobTemplate/spec/template/spec/serviceAccountName
+    kind: CronJob
+  - path: spec/template/spec/serviceAccountName
+    kind: Job
+  - path: spec/template/spec/serviceAccountName
+    kind: DaemonSet
+
+- kind: PersistentVolumeClaim
+  version: v1
+  fieldSpecs:
+  - path: spec/volumes/persistentVolumeClaim/claimName
+    kind: Pod
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: Deployment
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: ReplicationController
+  - path: spec/jobTemplate/spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: CronJob
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: Job
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: DaemonSet
+
+- kind: PersistentVolume
+  version: v1
+  fieldSpecs:
+  - path: spec/volumeName
+    kind: PersistentVolumeClaim
+  - path: rules/resourceNames
+    kind: ClusterRole
+
+- kind: StorageClass
+  version: v1
+  group: storage.k8s.io
+  fieldSpecs:
+  - path: spec/storageClassName
+    kind: PersistentVolume
+  - path: spec/storageClassName
+    kind: PersistentVolumeClaim
+  - path: spec/volumeClaimTemplates/spec/storageClassName
+    kind: StatefulSet
+
+- kind: PriorityClass
+  version: v1
+  group: scheduling.k8s.io
+  fieldSpecs:
+  - path: spec/priorityClassName
+    kind: Pod
+  - path: spec/template/spec/priorityClassName
+    kind: StatefulSet
+  - path: spec/template/spec/priorityClassName
+    kind: Deployment
+  - path: spec/template/spec/priorityClassName
+    kind: ReplicationController
+  - path: spec/jobTemplate/spec/template/spec/priorityClassName
+    kind: CronJob
+  - path: spec/template/spec/priorityClassName
+    kind: Job
+  - path: spec/template/spec/priorityClassName
+    kind: DaemonSet
+
+- kind: IngressClass
+  version: v1
+  group: networking.k8s.io
+  fieldSpecs:
+  - path: spec/ingressClassName
+    kind: Ingress
+`
+)
+
+//
LINT.ThenChange(/examples/transformerconfigs/README.md) diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go new file mode 100644 index 000000000..35774a7db --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go @@ -0,0 +1,20 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package builtinpluginconsts + +const ( + namespaceFieldSpecs = ` +namespace: +- path: metadata/name + kind: Namespace + create: true +- path: spec/service/namespace + group: apiregistration.k8s.io + kind: APIService + create: true +- path: spec/conversion/webhook/clientConfig/service/namespace + group: apiextensions.k8s.io + kind: CustomResourceDefinition +` +) diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go new file mode 100644 index 000000000..11592bd2b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package builtinpluginconsts + +const ( + nameSuffixFieldSpecs = ` +nameSuffix: +- path: metadata/name +` +) diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go new file mode 100644 index 000000000..76549c21f --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go @@ -0,0 +1,23 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package builtinpluginconsts + +const replicasFieldSpecs = ` +replicas: +- path: spec/replicas + create: true + kind: Deployment + +- path: spec/replicas + create: true + kind: ReplicationController + +- path: spec/replicas + create: true + kind: ReplicaSet + +- path: spec/replicas + create: true + kind: StatefulSet +` diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/templatelabels.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/templatelabels.go new file mode 100644 index 000000000..5d3c9c195 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/templatelabels.go @@ -0,0 +1,8 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package builtinpluginconsts + +const templateLabelFieldSpecs = ` +templateLabels: +` + metadataLabelsFieldSpecs diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go new file mode 100644 index 000000000..f4011d825 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go @@ -0,0 +1,223 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package builtinpluginconsts + +const ( + varReferenceFieldSpecs = ` +varReference: +- path: spec/jobTemplate/spec/template/spec/containers/args + kind: CronJob + +- path: spec/jobTemplate/spec/template/spec/containers/command + kind: CronJob + +- path: spec/jobTemplate/spec/template/spec/containers/env/value + kind: CronJob + +- path: spec/jobTemplate/spec/template/spec/containers/volumeMounts/mountPath + kind: CronJob + +- path: spec/jobTemplate/spec/template/spec/initContainers/args + kind: CronJob + +- path: spec/jobTemplate/spec/template/spec/initContainers/command + kind: CronJob + +- path: spec/jobTemplate/spec/template/spec/initContainers/env/value + kind: CronJob + +- path: spec/jobTemplate/spec/template/spec/initContainers/volumeMounts/mountPath + kind: CronJob + +- path: spec/jobTemplate/spec/template/volumes/nfs/server + kind: CronJob + +- path: spec/template/spec/containers/args + kind: DaemonSet + +- path: spec/template/spec/containers/command + kind: DaemonSet + +- path: spec/template/spec/containers/env/value + kind: DaemonSet + +- path: spec/template/spec/containers/volumeMounts/mountPath + kind: DaemonSet + +- path: spec/template/spec/initContainers/args + kind: DaemonSet + +- path: spec/template/spec/initContainers/command + kind: DaemonSet + +- path: spec/template/spec/initContainers/env/value + kind: DaemonSet + +- path: spec/template/spec/initContainers/volumeMounts/mountPath + kind: DaemonSet + +- path: spec/template/spec/volumes/nfs/server + kind: DaemonSet + +- path: spec/template/spec/containers/args + kind: Deployment + +- path: spec/template/spec/containers/command + kind: Deployment + +- path: spec/template/spec/containers/env/value + kind: Deployment + +- path: spec/template/spec/containers/volumeMounts/mountPath + kind: Deployment + +- path: spec/template/spec/initContainers/args + kind: Deployment + +- path: spec/template/spec/initContainers/command + kind: Deployment + +- path: spec/template/spec/initContainers/env/value + kind: Deployment + +- path: spec/template/spec/initContainers/volumeMounts/mountPath + kind: Deployment + +- path: spec/template/spec/volumes/nfs/server + kind: Deployment + +- path: spec/template/metadata/annotations + kind: Deployment + +- path: spec/rules/host + kind: Ingress + +- path: spec/tls/hosts + kind: Ingress + +- path: spec/tls/secretName + kind: Ingress + +- path: spec/template/spec/containers/args + kind: Job + +- path: spec/template/spec/containers/command + kind: Job + +- path: spec/template/spec/containers/env/value + kind: Job + +- path: spec/template/spec/containers/volumeMounts/mountPath + kind: Job + +- path: spec/template/spec/initContainers/args + kind: Job + +- path: spec/template/spec/initContainers/command + kind: Job + +- path: spec/template/spec/initContainers/env/value + kind: Job + +- path: spec/template/spec/initContainers/volumeMounts/mountPath + kind: Job + +- path: spec/template/spec/volumes/nfs/server + kind: Job + +- path: spec/containers/args + kind: Pod + +- path: spec/containers/command + kind: Pod + +- path: spec/containers/env/value + kind: Pod + +- path: spec/containers/volumeMounts/mountPath + kind: Pod + +- path: spec/initContainers/args + kind: Pod + +- path: spec/initContainers/command + kind: Pod + +- path: spec/initContainers/env/value + kind: Pod + +- path: spec/initContainers/volumeMounts/mountPath + kind: Pod + +- path: spec/volumes/nfs/server + kind: Pod + +- path: spec/template/spec/containers/args + kind: ReplicaSet + +- path: 
spec/template/spec/containers/command + kind: ReplicaSet + +- path: spec/template/spec/containers/env/value + kind: ReplicaSet + +- path: spec/template/spec/containers/volumeMounts/mountPath + kind: ReplicaSet + +- path: spec/template/spec/initContainers/args + kind: ReplicaSet + +- path: spec/template/spec/initContainers/command + kind: ReplicaSet + +- path: spec/template/spec/initContainers/env/value + kind: ReplicaSet + +- path: spec/template/spec/initContainers/volumeMounts/mountPath + kind: ReplicaSet + +- path: spec/template/spec/volumes/nfs/server + kind: ReplicaSet + +- path: spec/ports/port + kind: Service + +- path: spec/ports/targetPort + kind: Service + +- path: spec/template/spec/containers/args + kind: StatefulSet + +- path: spec/template/spec/containers/command + kind: StatefulSet + +- path: spec/template/spec/containers/env/value + kind: StatefulSet + +- path: spec/template/spec/containers/volumeMounts/mountPath + kind: StatefulSet + +- path: spec/template/spec/initContainers/args + kind: StatefulSet + +- path: spec/template/spec/initContainers/command + kind: StatefulSet + +- path: spec/template/spec/initContainers/env/value + kind: StatefulSet + +- path: spec/template/spec/initContainers/volumeMounts/mountPath + kind: StatefulSet + +- path: spec/volumeClaimTemplates/spec/nfs/server + kind: StatefulSet + +- path: spec/nfs/server + kind: PersistentVolume + +- path: metadata/labels + +- path: metadata/annotations +` +) diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/doc.go b/vendor/sigs.k8s.io/kustomize/api/konfig/doc.go new file mode 100644 index 000000000..8c5f8f2cd --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/konfig/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package konfig provides configuration methods and constants +// for the kustomize API, e.g. the set of file names to look for +// to identify a kustomization root. +package konfig diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/general.go b/vendor/sigs.k8s.io/kustomize/api/konfig/general.go new file mode 100644 index 000000000..712bfe789 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/konfig/general.go @@ -0,0 +1,49 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package konfig + +// RecognizedKustomizationFileNames is a list of file names +// that kustomize recognizes. +// To avoid ambiguity, a kustomization directory may not +// contain more than one match to this list. +func RecognizedKustomizationFileNames() []string { + return []string{ + "kustomization.yaml", + "kustomization.yml", + "Kustomization", + } +} + +func DefaultKustomizationFileName() string { + return RecognizedKustomizationFileNames()[0] +} + +const ( + // An environment variable to consult for kustomization + // configuration data. See: + // https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html + XdgConfigHomeEnv = "XDG_CONFIG_HOME" + + // Use this when XdgConfigHomeEnv not defined. + XdgConfigHomeEnvDefault = ".config" + + // A program name, for use in help, finding the XDG_CONFIG_DIR, etc. + ProgramName = "kustomize" + + // ConfigAnnoDomain is internal configuration-related annotation namespace. + // See https://github.com/kubernetes-sigs/kustomize/blob/master/cmd/config/docs/api-conventions/functions-spec.md. + ConfigAnnoDomain = "internal.config.kubernetes.io" + + // If a resource has this annotation, kustomize will drop it. 
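+	// For example (illustrative), a resource annotated with
+	//
+	//	metadata:
+	//	  annotations:
+	//	    config.kubernetes.io/local-config: "true"
+	//
+	// is treated as local-only configuration and dropped from the build output.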
+ IgnoredByKustomizeAnnotation = "config.kubernetes.io/local-config" + + // Label key that indicates the resources are built from Kustomize + ManagedbyLabelKey = "app.kubernetes.io/managed-by" + + // An environment variable to turn on/off adding the ManagedByLabelKey + EnableManagedbyLabelEnv = "KUSTOMIZE_ENABLE_MANAGEDBY_LABEL" + + // Label key that indicates the resources are validated by a validator + ValidatedByLabelKey = "validated-by" +) diff --git a/vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go b/vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go new file mode 100644 index 000000000..30bd3b6e3 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go @@ -0,0 +1,138 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package konfig + +import ( + "os" + "path/filepath" + "runtime" + + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +const ( + // Symbol that must be used inside Go plugins. + PluginSymbol = "KustomizePlugin" + + // Name of environment variable used to set AbsPluginHome. + // See that variable for an explanation. + KustomizePluginHomeEnv = "KUSTOMIZE_PLUGIN_HOME" + + // Relative path below XDG_CONFIG_HOME/kustomize to find plugins. + // e.g. AbsPluginHome = XDG_CONFIG_HOME/kustomize/plugin + RelPluginHome = "plugin" + + // Location of builtin plugins below AbsPluginHome. + BuiltinPluginPackage = "builtin" + + // The value of kubernetes ApiVersion to use in configuration + // files for builtin plugins. + // The value for non-builtins can be anything. + BuiltinPluginApiVersion = BuiltinPluginPackage + + // Domain from which kustomize code is imported, for locating + // plugin source code under $GOPATH when GOPATH is defined. + DomainName = "sigs.k8s.io" + + // Injected into plugin paths when plugins are disabled. + // Provides a clue in flows that shouldn't happen. + NoPluginHomeSentinal = "/No/non-builtin/plugins!" +) + +type NotedFunc struct { + Note string + F func() string +} + +// DefaultAbsPluginHome returns the absolute path in the given file +// system to first directory that looks like a good candidate for +// the home of kustomize plugins. +func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) { + return FirstDirThatExistsElseError( + "plugin root", fSys, []NotedFunc{ + { + Note: "homed in $" + KustomizePluginHomeEnv, + F: func() string { + return os.Getenv(KustomizePluginHomeEnv) + }, + }, + { + Note: "homed in $" + XdgConfigHomeEnv, + F: func() string { + if root := os.Getenv(XdgConfigHomeEnv); root != "" { + return filepath.Join(root, ProgramName, RelPluginHome) + } + // do not look in "kustomize/plugin" if XdgConfigHomeEnv is unset + return "" + }, + }, + { + Note: "homed in default value of $" + XdgConfigHomeEnv, + F: func() string { + return filepath.Join( + HomeDir(), XdgConfigHomeEnvDefault, + ProgramName, RelPluginHome) + }, + }, + { + Note: "homed in home directory", + F: func() string { + return filepath.Join( + HomeDir(), ProgramName, RelPluginHome) + }, + }, + }) +} + +// FirstDirThatExistsElseError tests different path functions for +// existence, returning the first that works, else error if all fail. 
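+//
+// As used by DefaultAbsPluginHome above, for example (illustrative):
+// with KUSTOMIZE_PLUGIN_HOME unset, the candidates are tried in order
+//
+//	$XDG_CONFIG_HOME/kustomize/plugin
+//	$HOME/.config/kustomize/plugin
+//	$HOME/kustomize/plugin
+//
+// and the first directory that exists on the given filesystem wins.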
+func FirstDirThatExistsElseError(
+	what string,
+	fSys filesys.FileSystem,
+	pathFuncs []NotedFunc) (string, error) {
+	var nope []types.Pair
+	for _, dt := range pathFuncs {
+		if dir := dt.F(); dir != "" {
+			if fSys.Exists(dir) {
+				return dir, nil
+			}
+			nope = append(nope, types.Pair{Key: dt.Note, Value: dir})
+		} else {
+			nope = append(nope, types.Pair{Key: dt.Note, Value: ""})
+		}
+	}
+	return "", types.NewErrUnableToFind(what, nope)
+}
+
+func HomeDir() string {
+	home := os.Getenv(homeEnv())
+	if len(home) > 0 {
+		return home
+	}
+	return "~"
+}
+
+func homeEnv() string {
+	if runtime.GOOS == "windows" {
+		return "USERPROFILE"
+	}
+	return "HOME"
+}
+
+func CurrentWorkingDir() string {
+	// Try for full path first to be explicit.
+	pwd := os.Getenv(pwdEnv())
+	if len(pwd) > 0 {
+		return pwd
+	}
+	return filesys.SelfDir
+}
+
+func pwdEnv() string {
+	if runtime.GOOS == "windows" {
+		return "CD"
+	}
+	return "PWD"
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/krusty/doc.go b/vendor/sigs.k8s.io/kustomize/api/krusty/doc.go
new file mode 100644
index 000000000..bf516ca94
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/krusty/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package krusty is intended as the entry point package
+// for those seeking to add kustomize ability to other
+// programs.
+//
+// To use, follow the example of the kustomize CLI's 'build'
+// command. Also, see the high-level tests in this package,
+// which serve a dual purpose as examples.
+package krusty
diff --git a/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go b/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go
new file mode 100644
index 000000000..8c7684054
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go
@@ -0,0 +1,163 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package krusty
+
+import (
+	"fmt"
+	"log"
+
+	"sigs.k8s.io/kustomize/api/internal/builtins"
+	pLdr "sigs.k8s.io/kustomize/api/internal/plugins/loader"
+	"sigs.k8s.io/kustomize/api/internal/target"
+	"sigs.k8s.io/kustomize/api/internal/utils"
+	"sigs.k8s.io/kustomize/api/konfig"
+	fLdr "sigs.k8s.io/kustomize/api/loader"
+	"sigs.k8s.io/kustomize/api/provenance"
+	"sigs.k8s.io/kustomize/api/provider"
+	"sigs.k8s.io/kustomize/api/resmap"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/filesys"
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+)
+
+// Kustomizer performs kustomizations.
+//
+// It's meant to behave similarly to the kustomize CLI, and can be
+// used instead of performing an exec to a kustomize CLI subprocess.
+// To use, load a filesystem with kustomization files (any
+// number of overlays and bases), make a Kustomizer, then
+// call Run with that filesystem.
+type Kustomizer struct {
+	options     *Options
+	depProvider *provider.DepProvider
+}
+
+// MakeKustomizer returns an instance of Kustomizer.
+func MakeKustomizer(o *Options) *Kustomizer {
+	return &Kustomizer{
+		options:     o,
+		depProvider: provider.NewDepProvider(),
+	}
+}
+
+// Run performs a kustomization.
+//
+// It reads the given path from the given file system, interprets it as
+// a kustomization.yaml file, performs the kustomization it represents,
+// and returns the resulting resources.
+//
+// Any files referenced by the kustomization must be present on the
+// filesystem. One may call Run any number of times, on any number
+// of internal paths (e.g.
the filesystem may contain multiple overlays, +// and Run can be called on each of them). +func (b *Kustomizer) Run( + fSys filesys.FileSystem, path string) (resmap.ResMap, error) { + resmapFactory := resmap.NewFactory(b.depProvider.GetResourceFactory()) + lr := fLdr.RestrictionNone + if b.options.LoadRestrictions == types.LoadRestrictionsRootOnly { + lr = fLdr.RestrictionRootOnly + } + ldr, err := fLdr.NewLoader(lr, path, fSys) + if err != nil { + return nil, err + } + defer ldr.Cleanup() + kt := target.NewKustTarget( + ldr, + b.depProvider.GetFieldValidator(), + resmapFactory, + // The plugin configs are always located on disk, regardless of the fSys passed in + pLdr.NewLoader(b.options.PluginConfig, resmapFactory, filesys.MakeFsOnDisk()), + ) + err = kt.Load() + if err != nil { + return nil, err + } + var bytes []byte + if openApiPath, exists := kt.Kustomization().OpenAPI["path"]; exists { + bytes, err = ldr.Load(openApiPath) + if err != nil { + return nil, err + } + } + err = openapi.SetSchema(kt.Kustomization().OpenAPI, bytes, true) + if err != nil { + return nil, err + } + var m resmap.ResMap + m, err = kt.MakeCustomizedResMap() + if err != nil { + return nil, err + } + err = b.applySortOrder(m, kt) + if err != nil { + return nil, err + } + if b.options.AddManagedbyLabel || utils.StringSliceContains(kt.Kustomization().BuildMetadata, types.ManagedByLabelOption) { + t := builtins.LabelTransformerPlugin{ + Labels: map[string]string{ + konfig.ManagedbyLabelKey: fmt.Sprintf("kustomize-%s", provenance.GetProvenance().Semver()), + }, + FieldSpecs: []types.FieldSpec{{ + Path: "metadata/labels", + CreateIfNotPresent: true, + }}, + } + err = t.Transform(m) + if err != nil { + return nil, err + } + } + m.RemoveBuildAnnotations() + if !utils.StringSliceContains(kt.Kustomization().BuildMetadata, types.OriginAnnotations) { + err = m.RemoveOriginAnnotations() + if err != nil { + return nil, errors.WrapPrefixf(err, "failed to clean up origin tracking annotations") + } + } + if !utils.StringSliceContains(kt.Kustomization().BuildMetadata, types.TransformerAnnotations) { + err = m.RemoveTransformerAnnotations() + if err != nil { + return nil, errors.WrapPrefixf(err, "failed to clean up transformer annotations") + } + } + return m, nil +} + +func (b *Kustomizer) applySortOrder(m resmap.ResMap, kt *target.KustTarget) error { + // Sort order can be defined in two places: + // - (new) kustomization file + // - (old) CLI flag + // + // We want the kustomization file to take precedence over the CLI flag. + // Eventually, we may want to move away from having a CLI flag altogether: + // https://github.com/kubernetes-sigs/kustomize/issues/3947 + + // Case 1: Sort order set in kustomization file. + if kt.Kustomization().SortOptions != nil { + // If set in CLI flag too, warn the user. + if b.options.Reorder != ReorderOptionUnspecified { + log.Println("Warning: Sorting order is set both in 'kustomization.yaml'" + + " ('sortOptions') and in a CLI flag ('--reorder'). Using the" + + " kustomization file over the CLI flag.") + } + pl := &builtins.SortOrderTransformerPlugin{ + SortOptions: kt.Kustomization().SortOptions, + } + err := pl.Transform(m) + if err != nil { + return errors.Wrap(err) + } + } else if b.options.Reorder == ReorderOptionLegacy || b.options.Reorder == ReorderOptionUnspecified { + // Case 2: Sort order set in CLI flag only or not at all. 
+		pl := &builtins.SortOrderTransformerPlugin{
+			SortOptions: &types.SortOptions{
+				Order: types.LegacySortOrder,
+			},
+		}
+		return errors.Wrap(pl.Transform(m))
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/krusty/options.go b/vendor/sigs.k8s.io/kustomize/api/krusty/options.go
new file mode 100644
index 000000000..086242c8e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/krusty/options.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package krusty
+
+import (
+	"sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers"
+	"sigs.k8s.io/kustomize/api/types"
+)
+
+type ReorderOption string
+
+const (
+	ReorderOptionLegacy      ReorderOption = "legacy"
+	ReorderOptionNone        ReorderOption = "none"
+	ReorderOptionUnspecified ReorderOption = "unspecified"
+)
+
+// Options holds high-level kustomize configuration options,
+// e.g. are plugins enabled, should the loader be restricted
+// to the kustomization root, etc.
+type Options struct {
+	// Sort the resources before emitting them. Possible values:
+	// - "legacy": Use a fixed order that kustomize provides for backwards
+	//   compatibility.
+	// - "none": Respect the depth-first resource input order as specified by the
+	//   kustomization file.
+	// - "unspecified": The user didn't specify any preference. Kustomize will
+	//   select the appropriate default.
+	Reorder ReorderOption
+
+	// When true, a label
+	//     app.kubernetes.io/managed-by: kustomize-<version>
+	// is added to all the resources in the build output.
+	AddManagedbyLabel bool
+
+	// Restrictions on what can be loaded from the file system.
+	// See type definition.
+	LoadRestrictions types.LoadRestrictions
+
+	// Options related to kustomize plugins.
+	PluginConfig *types.PluginConfig
+}
+
+// MakeDefaultOptions returns a default instance of Options.
+func MakeDefaultOptions() *Options {
+	return &Options{
+		Reorder:           ReorderOptionNone,
+		AddManagedbyLabel: false,
+		LoadRestrictions:  types.LoadRestrictionsRootOnly,
+		PluginConfig:      types.DisabledPluginConfig(),
+	}
+}
+
+// GetBuiltinPluginNames returns a list of builtin plugin names.
+func GetBuiltinPluginNames() []string {
+	var ret []string
+	for k := range builtinhelpers.GeneratorFactories {
+		ret = append(ret, k.String())
+	}
+	for k := range builtinhelpers.TransformerFactories {
+		ret = append(ret, k.String())
+	}
+	return ret
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/kv/kv.go b/vendor/sigs.k8s.io/kustomize/api/kv/kv.go
new file mode 100644
index 000000000..61a0463c3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/kv/kv.go
@@ -0,0 +1,197 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package kv
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"sigs.k8s.io/kustomize/api/ifc"
+	"sigs.k8s.io/kustomize/api/internal/generators"
+	"sigs.k8s.io/kustomize/api/types"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+)
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+// loader reads and validates KV pairs.
+type loader struct {
+	// Used to read the filesystem.
+	ldr ifc.Loader
+
+	// Used to validate various k8s data fields.
+	validator ifc.Validator
+}
+
+func NewLoader(ldr ifc.Loader, v ifc.Validator) ifc.KvLoader {
+	return &loader{ldr: ldr, validator: v}
+}
+
+func (kvl *loader) Validator() ifc.Validator {
+	return kvl.validator
+}
+
+func (kvl *loader) Load(
+	args types.KvPairSources) (all []types.Pair, err error) {
+	pairs, err := kvl.keyValuesFromEnvFiles(args.EnvSources)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err,
+			"env source files: %v",
+			args.EnvSources)
+	}
+	all = append(all, pairs...)
+
+	pairs, err = keyValuesFromLiteralSources(args.LiteralSources)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err,
+			"literal sources %v", args.LiteralSources)
+	}
+	all = append(all, pairs...)
+
+	pairs, err = kvl.keyValuesFromFileSources(args.FileSources)
+	if err != nil {
+		return nil, errors.WrapPrefixf(err,
+			"file sources: %v", args.FileSources)
+	}
+	return append(all, pairs...), nil
+}
+
+func keyValuesFromLiteralSources(sources []string) ([]types.Pair, error) {
+	var kvs []types.Pair
+	for _, s := range sources {
+		k, v, err := parseLiteralSource(s)
+		if err != nil {
+			return nil, err
+		}
+		kvs = append(kvs, types.Pair{Key: k, Value: v})
+	}
+	return kvs, nil
+}
+
+func (kvl *loader) keyValuesFromFileSources(sources []string) ([]types.Pair, error) {
+	var kvs []types.Pair
+	for _, s := range sources {
+		k, fPath, err := generators.ParseFileSource(s)
+		if err != nil {
+			return nil, err
+		}
+		content, err := kvl.ldr.Load(fPath)
+		if err != nil {
+			return nil, err
+		}
+		kvs = append(kvs, types.Pair{Key: k, Value: string(content)})
+	}
+	return kvs, nil
+}
+
+func (kvl *loader) keyValuesFromEnvFiles(paths []string) ([]types.Pair, error) {
+	var kvs []types.Pair
+	for _, p := range paths {
+		content, err := kvl.ldr.Load(p)
+		if err != nil {
+			return nil, err
+		}
+		more, err := kvl.keyValuesFromLines(content)
+		if err != nil {
+			return nil, err
+		}
+		kvs = append(kvs, more...)
+	}
+	return kvs, nil
+}
+
+// keyValuesFromLines parses given content into a list of key-value pairs.
+func (kvl *loader) keyValuesFromLines(content []byte) ([]types.Pair, error) {
+	var kvs []types.Pair
+
+	scanner := bufio.NewScanner(bytes.NewReader(content))
+	currentLine := 0
+	for scanner.Scan() {
+		// Process the current line, retrieving a key/value pair if
+		// possible.
+		scannedBytes := scanner.Bytes()
+		kv, err := kvl.keyValuesFromLine(scannedBytes, currentLine)
+		if err != nil {
+			return nil, err
+		}
+		currentLine++
+
+		if len(kv.Key) == 0 {
+			// no key means line was empty or a comment
+			continue
+		}
+
+		kvs = append(kvs, kv)
+	}
+	return kvs, nil
+}
+
+// keyValuesFromLine returns a kv with blank key if the line is empty or a comment.
+func (kvl *loader) keyValuesFromLine(line []byte, currentLine int) (types.Pair, error) {
+	kv := types.Pair{}
+
+	if !utf8.Valid(line) {
+		return kv, fmt.Errorf("line %d has invalid utf8 bytes: %v", currentLine, string(line))
+	}
+
+	// We trim UTF8 BOM from the first line of the file but no others
+	if currentLine == 0 {
+		line = bytes.TrimPrefix(line, utf8bom)
+	}
+
+	// trim the line from all leading whitespace first
+	line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+
+	// If the line is empty or a comment, we return a blank key/value pair.
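+	// Illustrative cases (not exhaustive), assuming the validator accepts
+	// the key as an env var name:
+	//
+	//	"# comment" -> blank key; the caller skips the line
+	//	"FOO=bar"   -> Pair{Key: "FOO", Value: "bar"}
+	//	"FOO=a=b"   -> Pair{Key: "FOO", Value: "a=b"} (split on first '=')
+	//	"FOO"       -> Pair{Key: "FOO", Value: ""}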
+ if len(line) == 0 || line[0] == '#' { + return kv, nil + } + + data := strings.SplitN(string(line), "=", 2) + key := data[0] + if err := kvl.validator.IsEnvVarName(key); err != nil { + return kv, err + } + + if len(data) == 2 { + kv.Value = data[1] + } else { + // If there is no value (no `=` in the line), we set the value to an empty string + kv.Value = "" + } + kv.Key = key + return kv, nil +} + +// ParseLiteralSource parses the source key=val pair into its component pieces. +// This functionality is distinguished from strings.SplitN(source, "=", 2) since +// it returns an error in the case of empty keys, values, or a missing equals sign. +func parseLiteralSource(source string) (keyName, value string, err error) { + // leading equal is invalid + if strings.Index(source, "=") == 0 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + // split after the first equal (so values can have the = character) + items := strings.SplitN(source, "=", 2) + if len(items) != 2 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + return items[0], removeQuotes(items[1]), nil +} + +// removeQuotes removes the surrounding quotes from the provided string only if it is surrounded on both sides +// rather than blindly trimming all quotation marks on either side. +func removeQuotes(str string) string { + if len(str) < 2 || str[0] != str[len(str)-1] { + return str + } + if str[0] == '"' || str[0] == '\'' { + return str[1 : len(str)-1] + } + return str +} diff --git a/vendor/sigs.k8s.io/kustomize/api/loader/errors.go b/vendor/sigs.k8s.io/kustomize/api/loader/errors.go new file mode 100644 index 000000000..2463debdb --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/loader/errors.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package loader + +import "sigs.k8s.io/kustomize/kyaml/errors" + +var ( + ErrHTTP = errors.Errorf("HTTP Error") + ErrRtNotDir = errors.Errorf("must build at directory") +) diff --git a/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go b/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go new file mode 100644 index 000000000..672ca29b3 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go @@ -0,0 +1,349 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package loader + +import ( + "fmt" + "io" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" + + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/internal/git" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +// IsRemoteFile returns whether path has a url scheme that kustomize allows for +// remote files. See https://github.com/kubernetes-sigs/kustomize/blob/master/examples/remoteBuild.md +func IsRemoteFile(path string) bool { + u, err := url.Parse(path) + return err == nil && (u.Scheme == "http" || u.Scheme == "https") +} + +// fileLoader is a kustomization's interface to files. +// +// The directory in which a kustomization file sits +// is referred to below as the kustomization's _root_. +// +// An instance of fileLoader has an immutable root, +// and offers a `New` method returning a new loader +// with a new root. +// +// A kustomization file refers to two kinds of files: +// +// * supplemental data paths +// +// `Load` is used to visit these paths. +// +// These paths refer to resources, patches, +// data for ConfigMaps and Secrets, etc. 
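+//
+// For example (illustrative): a loader rooted at /app resolves
+// Load("patch.yaml") to /app/patch.yaml, while an http(s) URL
+// (see IsRemoteFile) is fetched over the network instead.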
+// +// The loadRestrictor may disallow certain paths +// or classes of paths. +// +// * bases (other kustomizations) +// +// `New` is used to load bases. +// +// A base can be either a remote git repo URL, or +// a directory specified relative to the current +// root. In the former case, the repo is locally +// cloned, and the new loader is rooted on a path +// in that clone. +// +// As loaders create new loaders, a root history +// is established, and used to disallow: +// +// - A base that is a repository that, in turn, +// specifies a base repository seen previously +// in the loading stack (a cycle). +// +// - An overlay depending on a base positioned at +// or above it. I.e. '../foo' is OK, but '.', +// '..', '../..', etc. are disallowed. Allowing +// such a base has no advantages and encourages +// cycles, particularly if some future change +// were to introduce globbing to file +// specifications in the kustomization file. +// +// These restrictions assure that kustomizations +// are self-contained and relocatable, and impose +// some safety when relying on remote kustomizations, +// e.g. a remotely loaded ConfigMap generator specified +// to read from /etc/passwd will fail. +// +type fileLoader struct { + // Loader that spawned this loader. + // Used to avoid cycles. + referrer *fileLoader + + // An absolute, cleaned path to a directory. + // The Load function will read non-absolute + // paths relative to this directory. + root filesys.ConfirmedDir + + // Restricts behavior of Load function. + loadRestrictor LoadRestrictorFunc + + // If this is non-nil, the files were + // obtained from the given repository. + repoSpec *git.RepoSpec + + // File system utilities. + fSys filesys.FileSystem + + // Used to load from HTTP + http *http.Client + + // Used to clone repositories. + cloner git.Cloner + + // Used to clean up, as needed. + cleaner func() error +} + +// NewFileLoaderAtCwd returns a loader that loads from PWD. +// A convenience for kustomize edit commands. +func NewFileLoaderAtCwd(fSys filesys.FileSystem) *fileLoader { + return newLoaderOrDie( + RestrictionRootOnly, fSys, filesys.SelfDir) +} + +// NewFileLoaderAtRoot returns a loader that loads from "/". +// A convenience for tests. +func NewFileLoaderAtRoot(fSys filesys.FileSystem) *fileLoader { + return newLoaderOrDie( + RestrictionRootOnly, fSys, filesys.Separator) +} + +// Repo returns the absolute path to the repo that contains Root if this fileLoader was created from a url +// or the empty string otherwise. +func (fl *fileLoader) Repo() string { + if fl.repoSpec != nil { + return fl.repoSpec.Dir.String() + } + return "" +} + +// Root returns the absolute path that is prepended to any +// relative paths used in Load. +func (fl *fileLoader) Root() string { + return fl.root.String() +} + +func newLoaderOrDie( + lr LoadRestrictorFunc, + fSys filesys.FileSystem, path string) *fileLoader { + root, err := filesys.ConfirmDir(fSys, path) + if err != nil { + log.Fatalf("unable to make loader at '%s'; %v", path, err) + } + return newLoaderAtConfirmedDir( + lr, root, fSys, nil, git.ClonerUsingGitExec) +} + +// newLoaderAtConfirmedDir returns a new fileLoader with given root. 
+func newLoaderAtConfirmedDir( + lr LoadRestrictorFunc, + root filesys.ConfirmedDir, fSys filesys.FileSystem, + referrer *fileLoader, cloner git.Cloner) *fileLoader { + return &fileLoader{ + loadRestrictor: lr, + root: root, + referrer: referrer, + fSys: fSys, + cloner: cloner, + cleaner: func() error { return nil }, + } +} + +// New returns a new Loader, rooted relative to current loader, +// or rooted in a temp directory holding a git repo clone. +func (fl *fileLoader) New(path string) (ifc.Loader, error) { + if path == "" { + return nil, errors.Errorf("new root cannot be empty") + } + + repoSpec, err := git.NewRepoSpecFromURL(path) + if err == nil { + // Treat this as git repo clone request. + if err = fl.errIfRepoCycle(repoSpec); err != nil { + return nil, err + } + return newLoaderAtGitClone( + repoSpec, fl.fSys, fl, fl.cloner) + } + + if filepath.IsAbs(path) { + return nil, fmt.Errorf("new root '%s' cannot be absolute", path) + } + root, err := filesys.ConfirmDir(fl.fSys, fl.root.Join(path)) + if err != nil { + return nil, errors.WrapPrefixf(err, ErrRtNotDir.Error()) + } + if err = fl.errIfGitContainmentViolation(root); err != nil { + return nil, err + } + if err = fl.errIfArgEqualOrHigher(root); err != nil { + return nil, err + } + return newLoaderAtConfirmedDir( + fl.loadRestrictor, root, fl.fSys, fl, fl.cloner), nil +} + +// newLoaderAtGitClone returns a new Loader pinned to a temporary +// directory holding a cloned git repo. +func newLoaderAtGitClone( + repoSpec *git.RepoSpec, fSys filesys.FileSystem, + referrer *fileLoader, cloner git.Cloner) (ifc.Loader, error) { + cleaner := repoSpec.Cleaner(fSys) + err := cloner(repoSpec) + if err != nil { + cleaner() + return nil, err + } + root, f, err := fSys.CleanedAbs(repoSpec.AbsPath()) + if err != nil { + cleaner() + return nil, err + } + // We don't know that the path requested in repoSpec + // is a directory until we actually clone it and look + // inside. That just happened, hence the error check + // is here. + if f != "" { + cleaner() + return nil, fmt.Errorf( + "'%s' refers to file '%s'; expecting directory", + repoSpec.AbsPath(), f) + } + // Path in repo can contain symlinks that exit repo. We can only + // check for this after cloning repo. + if !root.HasPrefix(repoSpec.CloneDir()) { + _ = cleaner() + return nil, fmt.Errorf("%q refers to directory outside of repo %q", repoSpec.AbsPath(), + repoSpec.CloneDir()) + } + return &fileLoader{ + // Clones never allowed to escape root. + loadRestrictor: RestrictionRootOnly, + root: root, + referrer: referrer, + repoSpec: repoSpec, + fSys: fSys, + cloner: cloner, + cleaner: cleaner, + }, nil +} + +func (fl *fileLoader) errIfGitContainmentViolation( + base filesys.ConfirmedDir) error { + containingRepo := fl.containingRepo() + if containingRepo == nil { + return nil + } + if !base.HasPrefix(containingRepo.CloneDir()) { + return fmt.Errorf( + "security; bases in kustomizations found in "+ + "cloned git repos must be within the repo, "+ + "but base '%s' is outside '%s'", + base, containingRepo.CloneDir()) + } + return nil +} + +// Looks back through referrers for a git repo, returning nil +// if none found. +func (fl *fileLoader) containingRepo() *git.RepoSpec { + if fl.repoSpec != nil { + return fl.repoSpec + } + if fl.referrer == nil { + return nil + } + return fl.referrer.containingRepo() +} + +// errIfArgEqualOrHigher tests whether the argument, +// is equal to or above the root of any ancestor. 
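+//
+// For example (illustrative): a loader rooted at /app/overlays/prod
+// must reject a candidate root of /app or /app/overlays, since either
+// contains a root already on the loading stack.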
+func (fl *fileLoader) errIfArgEqualOrHigher( + candidateRoot filesys.ConfirmedDir) error { + if fl.root.HasPrefix(candidateRoot) { + return fmt.Errorf( + "cycle detected: candidate root '%s' contains visited root '%s'", + candidateRoot, fl.root) + } + if fl.referrer == nil { + return nil + } + return fl.referrer.errIfArgEqualOrHigher(candidateRoot) +} + +// TODO(monopole): Distinguish branches? +// I.e. Allow a distinction between git URI with +// path foo and tag bar and a git URI with the same +// path but a different tag? +func (fl *fileLoader) errIfRepoCycle(newRepoSpec *git.RepoSpec) error { + // TODO(monopole): Use parsed data instead of Raw(). + if fl.repoSpec != nil && + strings.HasPrefix(fl.repoSpec.Raw(), newRepoSpec.Raw()) { + return fmt.Errorf( + "cycle detected: URI '%s' referenced by previous URI '%s'", + newRepoSpec.Raw(), fl.repoSpec.Raw()) + } + if fl.referrer == nil { + return nil + } + return fl.referrer.errIfRepoCycle(newRepoSpec) +} + +// Load returns the content of file at the given path, +// else an error. Relative paths are taken relative +// to the root. +func (fl *fileLoader) Load(path string) ([]byte, error) { + if IsRemoteFile(path) { + return fl.httpClientGetContent(path) + } + if !filepath.IsAbs(path) { + path = fl.root.Join(path) + } + path, err := fl.loadRestrictor(fl.fSys, fl.root, path) + if err != nil { + return nil, err + } + return fl.fSys.ReadFile(path) +} + +func (fl *fileLoader) httpClientGetContent(path string) ([]byte, error) { + var hc *http.Client + if fl.http != nil { + hc = fl.http + } else { + hc = &http.Client{} + } + resp, err := hc.Get(path) + if err != nil { + return nil, errors.Wrap(err) + } + defer resp.Body.Close() + // response unsuccessful + if resp.StatusCode < 200 || resp.StatusCode > 299 { + _, err = git.NewRepoSpecFromURL(path) + if err == nil { + return nil, errors.Errorf("URL is a git repository") + } + return nil, fmt.Errorf("%w: status code %d (%s)", ErrHTTP, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + content, err := io.ReadAll(resp.Body) + return content, errors.Wrap(err) +} + +// Cleanup runs the cleaner. +func (fl *fileLoader) Cleanup() error { + return fl.cleaner() +} diff --git a/vendor/sigs.k8s.io/kustomize/api/loader/loader.go b/vendor/sigs.k8s.io/kustomize/api/loader/loader.go new file mode 100644 index 000000000..e10885b9b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/loader/loader.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package loader has a data loading interface and various implementations. +package loader + +import ( + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/internal/git" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +// NewLoader returns a Loader pointed at the given target. +// If the target is remote, the loader will be restricted +// to the root and below only. If the target is local, the +// loader will have the restrictions passed in. Regardless, +// if a local target attempts to transitively load remote bases, +// the remote bases will all be root-only restricted. +func NewLoader( + lr LoadRestrictorFunc, + target string, fSys filesys.FileSystem) (ifc.Loader, error) { + repoSpec, err := git.NewRepoSpecFromURL(target) + if err == nil { + // The target qualifies as a remote git target. 
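+		// An illustrative (hypothetical) remote target:
+		//
+		//	https://github.com/org/repo//overlays/prod?ref=v1.0.0
+		//
+		// The repo is cloned locally and the returned loader is rooted
+		// on the requested path inside that clone.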
+ return newLoaderAtGitClone( + repoSpec, fSys, nil, git.ClonerUsingGitExec) + } + root, err := filesys.ConfirmDir(fSys, target) + if err != nil { + return nil, errors.WrapPrefixf(err, ErrRtNotDir.Error()) + } + return newLoaderAtConfirmedDir( + lr, root, fSys, nil, git.ClonerUsingGitExec), nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go b/vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go new file mode 100644 index 000000000..a016a9625 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package loader + +import ( + "fmt" + + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +type LoadRestrictorFunc func( + filesys.FileSystem, filesys.ConfirmedDir, string) (string, error) + +func RestrictionRootOnly( + fSys filesys.FileSystem, root filesys.ConfirmedDir, path string) (string, error) { + d, f, err := fSys.CleanedAbs(path) + if err != nil { + return "", err + } + if f == "" { + return "", fmt.Errorf("'%s' must resolve to a file", path) + } + if !d.HasPrefix(root) { + return "", fmt.Errorf( + "security; file '%s' is not in or below '%s'", + path, root) + } + return d.Join(f), nil +} + +func RestrictionNone( + _ filesys.FileSystem, _ filesys.ConfirmedDir, path string) (string, error) { + return path, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go b/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go new file mode 100644 index 000000000..0e24fc36c --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go @@ -0,0 +1,84 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package provenance + +import ( + "fmt" + "runtime" + "runtime/debug" + "strings" +) + +// These variables are set at build time using ldflags. +// +//nolint:gochecknoglobals +var ( + // During a release, this will be set to the release tag, e.g. "kustomize/v4.5.7" + version = developmentVersion + // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + buildDate = "unknown" +) + +// This default value, (devel), matches +// the value debug.BuildInfo uses for an unset main module version. +const developmentVersion = "(devel)" + +// Provenance holds information about the build of an executable. +type Provenance struct { + // Version of the kustomize binary. + Version string `json:"version,omitempty" yaml:"version,omitempty"` + // GitCommit is a git commit + GitCommit string `json:"gitCommit,omitempty" yaml:"gitCommit,omitempty"` + // BuildDate is date of the build. + BuildDate string `json:"buildDate,omitempty" yaml:"buildDate,omitempty"` + // GoOs holds OS name. + GoOs string `json:"goOs,omitempty" yaml:"goOs,omitempty"` + // GoArch holds architecture name. + GoArch string `json:"goArch,omitempty" yaml:"goArch,omitempty"` + // GoVersion holds Go version. + GoVersion string `json:"goVersion,omitempty" yaml:"goVersion,omitempty"` +} + +// GetProvenance returns an instance of Provenance. +func GetProvenance() Provenance { + p := Provenance{ + BuildDate: buildDate, + Version: version, + GitCommit: "unknown", + GoOs: runtime.GOOS, + GoArch: runtime.GOARCH, + GoVersion: runtime.Version(), + } + info, ok := debug.ReadBuildInfo() + if !ok { + return p + } + + for _, setting := range info.Settings { + // For now, the git commit is the only information of interest. + // We could consider adding other info such as the commit date in the future. 
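+		// For example (illustrative), a binary built from a git checkout
+		// may carry {Key: "vcs.revision", Value: "abc1234..."}, in which
+		// case GitCommit is set to that revision below.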
+ if setting.Key == "vcs.revision" { + p.GitCommit = setting.Value + } + } + return p +} + +// Short returns the shortened provenance stamp. +func (v Provenance) Short() string { + return fmt.Sprintf( + "%v", + Provenance{ + Version: v.Version, + BuildDate: v.BuildDate, + }) +} + +// Semver returns the semantic version of kustomize. +// kustomize version is set in format "kustomize/vX.X.X" in every release. +// X.X.X is a semver. If the version string is not in this format, +// return the original version string +func (v Provenance) Semver() string { + return strings.TrimPrefix(v.Version, "kustomize/") +} diff --git a/vendor/sigs.k8s.io/kustomize/api/provider/depprovider.go b/vendor/sigs.k8s.io/kustomize/api/provider/depprovider.go new file mode 100644 index 000000000..0102c89ce --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/provider/depprovider.go @@ -0,0 +1,42 @@ +// Copyright 2020 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package provider + +import ( + "sigs.k8s.io/kustomize/api/hasher" + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/internal/validate" + "sigs.k8s.io/kustomize/api/resource" +) + +// DepProvider is a dependency provider, injecting different +// implementations depending on the context. +type DepProvider struct { + resourceFactory *resource.Factory + // implemented by api/internal/validate.FieldValidator + // See TODO inside the validator for status. + // At time of writing, this is a do-nothing + // validator as it's not critical to kustomize function. + fieldValidator ifc.Validator +} + +func NewDepProvider() *DepProvider { + rf := resource.NewFactory(&hasher.Hasher{}) + return &DepProvider{ + resourceFactory: rf, + fieldValidator: validate.NewFieldValidator(), + } +} + +func NewDefaultDepProvider() *DepProvider { + return NewDepProvider() +} + +func (dp *DepProvider) GetResourceFactory() *resource.Factory { + return dp.resourceFactory +} + +func (dp *DepProvider) GetFieldValidator() ifc.Validator { + return dp.fieldValidator +} diff --git a/vendor/sigs.k8s.io/kustomize/api/resmap/factory.go b/vendor/sigs.k8s.io/kustomize/api/resmap/factory.go new file mode 100644 index 000000000..9cc860749 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/resmap/factory.go @@ -0,0 +1,145 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package resmap + +import ( + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/internal/kusterr" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Factory makes instances of ResMap. +type Factory struct { + // Makes resources. + resF *resource.Factory +} + +// NewFactory returns a new resmap.Factory. +func NewFactory(rf *resource.Factory) *Factory { + return &Factory{resF: rf} +} + +// RF returns a resource.Factory. +func (rmF *Factory) RF() *resource.Factory { + return rmF.resF +} + +func New() ResMap { + return newOne() +} + +// FromResource returns a ResMap with one entry. +func (rmF *Factory) FromResource(res *resource.Resource) ResMap { + m, err := newResMapFromResourceSlice([]*resource.Resource{res}) + if err != nil { + panic(err) + } + return m +} + +// FromResourceSlice returns a ResMap with a slice of resources. +func (rmF *Factory) FromResourceSlice(ress []*resource.Resource) ResMap { + m, err := newResMapFromResourceSlice(ress) + if err != nil { + panic(err) + } + return m +} + +// FromFile returns a ResMap given a resource path. 
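+//
+// For example (illustrative), a file holding two YAML documents
+// separated by "---" yields a ResMap of size 2, one entry per document.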
+func (rmF *Factory) FromFile( + loader ifc.Loader, path string) (ResMap, error) { + content, err := loader.Load(path) + if err != nil { + return nil, err + } + m, err := rmF.NewResMapFromBytes(content) + if err != nil { + return nil, kusterr.Handler(err, path) + } + return m, nil +} + +// NewResMapFromBytes decodes a list of objects in byte array format. +func (rmF *Factory) NewResMapFromBytes(b []byte) (ResMap, error) { + resources, err := rmF.resF.SliceFromBytes(b) + if err != nil { + return nil, err + } + return newResMapFromResourceSlice(resources) +} + +// NewResMapFromConfigMapArgs returns a Resource slice given +// a configmap metadata slice from kustomization file. +func (rmF *Factory) NewResMapFromConfigMapArgs( + kvLdr ifc.KvLoader, argList []types.ConfigMapArgs) (ResMap, error) { + var resources []*resource.Resource + for i := range argList { + res, err := rmF.resF.MakeConfigMap(kvLdr, &argList[i]) + if err != nil { + return nil, errors.WrapPrefixf(err, "NewResMapFromConfigMapArgs") + } + resources = append(resources, res) + } + return newResMapFromResourceSlice(resources) +} + +// FromConfigMapArgs creates a new ResMap containing one ConfigMap. +func (rmF *Factory) FromConfigMapArgs( + kvLdr ifc.KvLoader, args types.ConfigMapArgs) (ResMap, error) { + res, err := rmF.resF.MakeConfigMap(kvLdr, &args) + if err != nil { + return nil, err + } + return rmF.FromResource(res), nil +} + +// NewResMapFromSecretArgs takes a SecretArgs slice, generates +// secrets from each entry, and accumulates them in a ResMap. +func (rmF *Factory) NewResMapFromSecretArgs( + kvLdr ifc.KvLoader, argsList []types.SecretArgs) (ResMap, error) { + var resources []*resource.Resource + for i := range argsList { + res, err := rmF.resF.MakeSecret(kvLdr, &argsList[i]) + if err != nil { + return nil, errors.WrapPrefixf(err, "NewResMapFromSecretArgs") + } + resources = append(resources, res) + } + return newResMapFromResourceSlice(resources) +} + +// FromSecretArgs creates a new ResMap containing one secret. +func (rmF *Factory) FromSecretArgs( + kvLdr ifc.KvLoader, args types.SecretArgs) (ResMap, error) { + res, err := rmF.resF.MakeSecret(kvLdr, &args) + if err != nil { + return nil, err + } + return rmF.FromResource(res), nil +} + +func newResMapFromResourceSlice( + resources []*resource.Resource) (ResMap, error) { + result := New() + for _, res := range resources { + err := result.Append(res) + if err != nil { + return nil, err + } + } + return result, nil +} + +// NewResMapFromRNodeSlice returns a ResMap from a slice of RNodes +func (rmF *Factory) NewResMapFromRNodeSlice(s []*yaml.RNode) (ResMap, error) { + rs, err := rmF.resF.ResourcesFromRNodes(s) + if err != nil { + return nil, err + } + return newResMapFromResourceSlice(rs) +} diff --git a/vendor/sigs.k8s.io/kustomize/api/resmap/resmap.go b/vendor/sigs.k8s.io/kustomize/api/resmap/resmap.go new file mode 100644 index 000000000..ea913ba6b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/resmap/resmap.go @@ -0,0 +1,333 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package resmap implements a map from ResId to Resource that +// tracks all resources in a kustomization. +package resmap + +import ( + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/resid" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// A Transformer modifies an instance of ResMap. 
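+//
+// A minimal illustrative implementation (hypothetical, not part of this
+// package) that annotates every resource in the map:
+//
+//	type annotateAll struct{ key, value string }
+//
+//	func (t annotateAll) Transform(m ResMap) error {
+//		return m.AnnotateAll(t.key, t.value)
+//	}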
+type Transformer interface {
+	// Transform modifies data in the argument,
+	// e.g. adding labels to resources that can be labelled.
+	Transform(m ResMap) error
+}
+
+// A TransformerWithProperties contains a Transformer and stores
+// some of its properties
+type TransformerWithProperties struct {
+	Transformer
+	Origin *resource.Origin
+}
+
+// A Generator creates an instance of ResMap.
+type Generator interface {
+	Generate() (ResMap, error)
+}
+
+// A GeneratorWithProperties contains a Generator and stores
+// some of its properties
+type GeneratorWithProperties struct {
+	Generator
+	Origin *resource.Origin
+}
+
+// Something that's configurable accepts an
+// instance of PluginHelpers and a raw config
+// object (YAML in []byte form).
+type Configurable interface {
+	Config(h *PluginHelpers, config []byte) error
+}
+
+// NewPluginHelpers makes an instance of PluginHelpers.
+func NewPluginHelpers(
+	ldr ifc.Loader, v ifc.Validator, rf *Factory,
+	pc *types.PluginConfig) *PluginHelpers {
+	return &PluginHelpers{ldr: ldr, v: v, rf: rf, pc: pc}
+}
+
+// PluginHelpers holds things that any or all plugins might need.
+// This should be available to each plugin, in addition to
+// any plugin-specific configuration.
+type PluginHelpers struct {
+	ldr ifc.Loader
+	v   ifc.Validator
+	rf  *Factory
+	pc  *types.PluginConfig
+}
+
+func (c *PluginHelpers) GeneralConfig() *types.PluginConfig {
+	return c.pc
+}
+
+func (c *PluginHelpers) Loader() ifc.Loader {
+	return c.ldr
+}
+
+func (c *PluginHelpers) ResmapFactory() *Factory {
+	return c.rf
+}
+
+func (c *PluginHelpers) Validator() ifc.Validator {
+	return c.v
+}
+
+type GeneratorPlugin interface {
+	Generator
+	Configurable
+}
+
+type TransformerPlugin interface {
+	Transformer
+	Configurable
+}
+
+// ResMap is an interface describing operations on the
+// core kustomize data structure, a list of Resources.
+//
+// Every Resource has two ResIds: OrgId and CurId.
+//
+// In a ResMap, no two resources may have the same CurId,
+// but they may have the same OrgId. The latter can happen
+// when two or more different overlays apply different
+// transformations to a common base. When looking for a
+// resource to transform, try the OrgId first, and if this
+// fails or finds too many, it might make sense to then try
+// the CurId. Depends on the situation.
+//
+// TODO: get rid of this interface (use bare resWrangler).
+// There aren't multiple implementations any more.
+type ResMap interface {
+	// Size reports the number of resources.
+	Size() int
+
+	// Resources provides a discardable slice
+	// of resource pointers, returned in the order
+	// as appended.
+	Resources() []*resource.Resource
+
+	// Append adds a Resource. Error on CurId collision.
+	//
+	// A class invariant of ResMap is that all of its
+	// resources must differ in their value of
+	// CurId(), aka current Id. The Id is the tuple
+	// of {namespace, group, version, kind, name}
+	// (see ResId).
+	//
+	// This invariant reflects the invariant of a
+	// kubernetes cluster, where if one tries to add
+	// a resource to the cluster whose Id matches
+	// that of a resource already in the cluster,
+	// only two outcomes are allowed. Either the
+	// incoming resource is _merged_ into the existing
+	// one, or the incoming resource is rejected.
+	// One cannot end up with two resources
+	// in the cluster with the same Id.
+	Append(*resource.Resource) error
+
+	// AppendAll appends another ResMap to self,
+	// failing on any CurId collision.
+	AppendAll(ResMap) error
+
+	// AbsorbAll appends, replaces or merges the contents
+	// of another ResMap into self,
+	// allowing and sometimes demanding ID collisions.
+	// A collision would be demanded, say, when a generated
+	// ConfigMap has the "replace" option in its generation
+	// instructions, meaning it _must_ replace
+	// something in the known set of resources.
+	// If a resource id for resource X is found to already
+	// be in self, then the behavior field for X must
+	// be BehaviorMerge or BehaviorReplace. If X is not in
+	// self, then its behavior _cannot_ be merge or replace.
+	AbsorbAll(ResMap) error
+
+	// AddOriginAnnotation will add the provided origin as
+	// an origin annotation to all resources in the ResMap, if
+	// the origin is not nil.
+	AddOriginAnnotation(origin *resource.Origin) error
+
+	// RemoveOriginAnnotations will remove the origin annotation
+	// from all resources in the ResMap
+	RemoveOriginAnnotations() error
+
+	// AddTransformerAnnotation will add the provided origin to
+	// all resources in the ResMap: as an origin annotation if a
+	// resource doesn't have one, and as a transformer annotation
+	// otherwise
+	AddTransformerAnnotation(origin *resource.Origin) error
+
+	// RemoveTransformerAnnotations will remove the transformer annotation
+	// from all resources in the ResMap
+	RemoveTransformerAnnotations() error
+
+	// AnnotateAll annotates all resources in the ResMap with
+	// the provided key-value pair.
+	AnnotateAll(key string, value string) error
+
+	// AsYaml returns the yaml form of resources.
+	AsYaml() ([]byte, error)
+
+	// GetByIndex returns a resource at the given index,
+	// nil if out of range.
+	GetByIndex(int) *resource.Resource
+
+	// GetIndexOfCurrentId returns the index of the resource
+	// with the given CurId.
+	// Returns error if there is more than one match.
+	// Returns (-1, nil) if there is no match.
+	GetIndexOfCurrentId(id resid.ResId) (int, error)
+
+	// GetMatchingResourcesByCurrentId returns the resources
+	// whose CurId is matched by the argument.
+	GetMatchingResourcesByCurrentId(matches IdMatcher) []*resource.Resource
+
+	// GetMatchingResourcesByAnyId returns the resources
+	// whose current or previous IDs are matched by the argument.
+	GetMatchingResourcesByAnyId(matches IdMatcher) []*resource.Resource
+
+	// GetByCurrentId is shorthand for calling
+	// GetMatchingResourcesByCurrentId with a matcher requiring
+	// an exact match, returning an error on multiple or no matches.
+	GetByCurrentId(resid.ResId) (*resource.Resource, error)
+
+	// GetById is shorthand for calling
+	// GetMatchingResourcesByAnyId with a matcher requiring
+	// an exact match, returning an error on multiple or no matches.
+	GetById(resid.ResId) (*resource.Resource, error)
+
+	// GroupedByCurrentNamespace returns a map of namespace
+	// to a slice of *Resource in that namespace.
+	// Cluster-scoped Resources are not included (see ClusterScoped).
+	// Resources with an empty namespace are placed
+	// in the resid.DefaultNamespace entry.
+	GroupedByCurrentNamespace() map[string][]*resource.Resource
+
+	// GroupedByOriginalNamespace performs as GroupedByCurrentNamespace
+	// but uses the original namespace instead of the current
+	// one to perform the grouping.
+	GroupedByOriginalNamespace() map[string][]*resource.Resource
+
+	// ClusterScoped returns a slice of resources that
+	// cannot be placed in a namespace, e.g.
+	// Node, ClusterRole, Namespace itself, etc.
+	ClusterScoped() []*resource.Resource
+
+	// AllIds returns all CurrentIds.
+ AllIds() []resid.ResId + + // Replace replaces the resource with the matching CurId. + // Error if there's no match or more than one match. + // Returns the index where the replacement happened. + Replace(*resource.Resource) (int, error) + + // Remove removes the resource whose CurId matches the argument. + // Error if not found. + Remove(resid.ResId) error + + // Clear removes all resources and Ids. + Clear() + + // DropEmpties drops empty resources from the ResMap. + DropEmpties() + + // SubsetThatCouldBeReferencedByResource returns a ResMap subset + // of self with resources that could be referenced by the + // resource argument. + // This is a filter; it excludes things that cannot be + // referenced by the resource, e.g. objects in other + // namespaces. Cluster wide objects are never excluded. + SubsetThatCouldBeReferencedByResource(*resource.Resource) (ResMap, error) + + // DeAnchor replaces YAML aliases with structured data copied from anchors. + // This cannot be undone; if desired, call DeepCopy first. + // Subsequent marshalling to YAML will no longer have anchor + // definitions ('&') or aliases ('*'). + // + // Anchors are not expected to work across YAML 'documents'. + // If three resources are loaded from one file containing three YAML docs: + // + // {resourceA} + // --- + // {resourceB} + // --- + // {resourceC} + // + // then anchors defined in A cannot be seen from B and C and vice versa. + // OTOH, cross-resource links (a field in B referencing fields in A) will + // work if the resources are gathered in a ResourceList: + // + // apiVersion: config.kubernetes.io/v1 + // kind: ResourceList + // metadata: + // name: someList + // items: + // - {resourceA} + // - {resourceB} + // - {resourceC} + // + DeAnchor() error + + // DeepCopy copies the ResMap and underlying resources. + DeepCopy() ResMap + + // ShallowCopy copies the ResMap but + // not the underlying resources. + ShallowCopy() ResMap + + // ErrorIfNotEqualSets returns an error if the + // argument doesn't have the same resources as self. + // Ordering is _not_ taken into account, + // as this function was solely used in tests written + // before internal resource order was maintained, + // and those tests are initialized with maps which + // by definition have random ordering, and will + // fail spuriously. + // TODO: modify tests to not use resmap.FromMap, + // TODO: - and replace this with a stricter equals. + ErrorIfNotEqualSets(ResMap) error + + // ErrorIfNotEqualLists returns an error if the + // argument doesn't have the resource objects + // data as self, in the same order. + // Meta information is ignored; this is similar + // to comparing the AsYaml() strings, but allows + // for more informed errors on not equals. + ErrorIfNotEqualLists(ResMap) error + + // Debug prints the ResMap. + Debug(title string) + + // Select returns a list of resources that + // are selected by a Selector + Select(types.Selector) ([]*resource.Resource, error) + + // ToRNodeSlice returns a copy of the resources as RNodes. + ToRNodeSlice() []*yaml.RNode + + // ApplySmPatch applies a strategic-merge patch to the + // selected set of resources. + ApplySmPatch( + selectedSet *resource.IdSet, patch *resource.Resource) error + + // RemoveBuildAnnotations removes annotations created by the build process. + RemoveBuildAnnotations() + + // ApplyFilter applies an RNode filter to all Resources in the ResMap. + // TODO: Send/recover ancillary Resource data to/from subprocesses. 
+ // Assure that the ancillary data in Resource (everything not in the RNode) + // is sent to and re-captured from transformer subprocess (as the process + // might edit that information). One way to do this would be to solely use + // RNode metadata annotation reading and writing instead of using Resource + // struct data members, i.e. the Resource struct is replaced by RNode + // and use of (slow) k8s metadata annotations inside the RNode. + ApplyFilter(f kio.Filter) error +} diff --git a/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go b/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go new file mode 100644 index 000000000..411e4e813 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go @@ -0,0 +1,764 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package resmap + +import ( + "bytes" + "fmt" + "reflect" + + "sigs.k8s.io/kustomize/api/filters/annotations" + "sigs.k8s.io/kustomize/api/resource" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/resid" + kyaml "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// resWrangler implements ResMap. +type resWrangler struct { + // Resource list maintained in load (append) order. + // This is important for transformers, which must + // be performed in a specific order, and for users + // who for whatever reasons wish the order they + // specify in kustomizations to be maintained and + // available as an option for final YAML rendering. + rList []*resource.Resource +} + +func newOne() *resWrangler { + result := &resWrangler{} + result.Clear() + return result +} + +// Clear implements ResMap. +func (m *resWrangler) Clear() { + m.rList = nil +} + +// DropEmpties quickly drops empty resources. +// It doesn't use Append, which checks for Id collisions. +func (m *resWrangler) DropEmpties() { + var rList []*resource.Resource + for _, r := range m.rList { + if !r.IsNilOrEmpty() { + rList = append(rList, r) + } + } + m.rList = rList +} + +// Size implements ResMap. +func (m *resWrangler) Size() int { + return len(m.rList) +} + +func (m *resWrangler) indexOfResource(other *resource.Resource) int { + for i, r := range m.rList { + if r == other { + return i + } + } + return -1 +} + +// Resources implements ResMap. +func (m *resWrangler) Resources() []*resource.Resource { + tmp := make([]*resource.Resource, len(m.rList)) + copy(tmp, m.rList) + return tmp +} + +// Append implements ResMap. +func (m *resWrangler) Append(res *resource.Resource) error { + id := res.CurId() + if r := m.GetMatchingResourcesByCurrentId(id.Equals); len(r) > 0 { + return fmt.Errorf( + "may not add resource with an already registered id: %s", id) + } + m.append(res) + return nil +} + +// append appends without performing an Id check +func (m *resWrangler) append(res *resource.Resource) { + m.rList = append(m.rList, res) +} + +// Remove implements ResMap. +func (m *resWrangler) Remove(adios resid.ResId) error { + var rList []*resource.Resource + for _, r := range m.rList { + if r.CurId() != adios { + rList = append(rList, r) + } + } + if len(rList) != m.Size()-1 { + return fmt.Errorf("id %s not found in removal", adios) + } + m.rList = rList + return nil +} + +// Replace implements ResMap. 
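Before the Replace implementation, a short fragment illustrating the CurId invariant that Append (above) and Replace both rely on; this is a hypothetical continuation of the earlier sketch (same `resF` and `rmF`), and the error text is the format string from Append above:

```go
res, err := resF.FromBytes([]byte(`
apiVersion: v1
kind: ConfigMap
metadata:
  name: cm-a
`))
if err != nil {
	panic(err)
}
m := rmF.FromResource(res) // ResMap with a single entry
// A deep copy carries the same CurId, so Append must reject it:
// err: "may not add resource with an already registered id: ..."
err = m.Append(res.DeepCopy())
fmt.Println(err != nil) // true
```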
+func (m *resWrangler) Replace(res *resource.Resource) (int, error) { + id := res.CurId() + i, err := m.GetIndexOfCurrentId(id) + if err != nil { + return -1, errors.WrapPrefixf(err, "in Replace") + } + if i < 0 { + return -1, fmt.Errorf("cannot find resource with id %s to replace", id) + } + m.rList[i] = res + return i, nil +} + +// AllIds implements ResMap. +func (m *resWrangler) AllIds() (ids []resid.ResId) { + ids = make([]resid.ResId, m.Size()) + for i, r := range m.rList { + ids[i] = r.CurId() + } + return +} + +// Debug implements ResMap. +func (m *resWrangler) Debug(title string) { + fmt.Println("--------------------------- " + title) + firstObj := true + for i, r := range m.rList { + if firstObj { + firstObj = false + } else { + fmt.Println("---") + } + fmt.Printf("# %d %s\n%s\n", i, r.OrgId(), r.String()) + } +} + +type IdMatcher func(resid.ResId) bool + +// GetByIndex implements ResMap. +func (m *resWrangler) GetByIndex(i int) *resource.Resource { + if i < 0 || i >= m.Size() { + return nil + } + return m.rList[i] +} + +// GetIndexOfCurrentId implements ResMap. +func (m *resWrangler) GetIndexOfCurrentId(id resid.ResId) (int, error) { + count := 0 + result := -1 + for i, r := range m.rList { + if id.Equals(r.CurId()) { + count++ + result = i + } + } + if count > 1 { + return -1, fmt.Errorf("id matched %d resources", count) + } + return result, nil +} + +type IdFromResource func(r *resource.Resource) resid.ResId + +func GetCurrentId(r *resource.Resource) resid.ResId { return r.CurId() } + +// GetMatchingResourcesByCurrentId implements ResMap. +func (m *resWrangler) GetMatchingResourcesByCurrentId( + matches IdMatcher) []*resource.Resource { + return m.filteredById(matches, GetCurrentId) +} + +// GetMatchingResourcesByAnyId implements ResMap. +func (m *resWrangler) GetMatchingResourcesByAnyId( + matches IdMatcher) []*resource.Resource { + var result []*resource.Resource + for _, r := range m.rList { + for _, id := range append(r.PrevIds(), r.CurId()) { + if matches(id) { + result = append(result, r) + break + } + } + } + return result +} + +func (m *resWrangler) filteredById( + matches IdMatcher, idGetter IdFromResource) []*resource.Resource { + var result []*resource.Resource + for _, r := range m.rList { + if matches(idGetter(r)) { + result = append(result, r) + } + } + return result +} + +// GetByCurrentId implements ResMap. +func (m *resWrangler) GetByCurrentId( + id resid.ResId) (*resource.Resource, error) { + return demandOneMatch(m.GetMatchingResourcesByCurrentId, id, "Current") +} + +// GetById implements ResMap. +func (m *resWrangler) GetById( + id resid.ResId) (*resource.Resource, error) { + r, err := demandOneMatch(m.GetMatchingResourcesByAnyId, id, "Id") + if err != nil { + return nil, fmt.Errorf( + "%s; failed to find unique target for patch %s", + err.Error(), id.String()) + } + return r, nil +} + +type resFinder func(IdMatcher) []*resource.Resource + +func demandOneMatch( + f resFinder, id resid.ResId, s string) (*resource.Resource, error) { + r := f(id.Equals) + if len(r) == 1 { + return r[0], nil + } + if len(r) > 1 { + return nil, fmt.Errorf("multiple matches for %s %s", s, id) + } + return nil, fmt.Errorf("no matches for %s %s", s, id) +} + +// GroupedByCurrentNamespace implements ResMap. +func (m *resWrangler) GroupedByCurrentNamespace() map[string][]*resource.Resource { + items := m.groupedByCurrentNamespace() + delete(items, resid.TotallyNotANamespace) + return items +} + +// ClusterScoped implements ResMap. 
+func (m *resWrangler) ClusterScoped() []*resource.Resource { + return m.groupedByCurrentNamespace()[resid.TotallyNotANamespace] +} + +func (m *resWrangler) groupedByCurrentNamespace() map[string][]*resource.Resource { + byNamespace := make(map[string][]*resource.Resource) + for _, res := range m.rList { + namespace := res.CurId().EffectiveNamespace() + if _, found := byNamespace[namespace]; !found { + byNamespace[namespace] = []*resource.Resource{} + } + byNamespace[namespace] = append(byNamespace[namespace], res) + } + return byNamespace +} + +// GroupedByOriginalNamespace implements ResMap. +func (m *resWrangler) GroupedByOriginalNamespace() map[string][]*resource.Resource { + items := m.groupedByOriginalNamespace() + delete(items, resid.TotallyNotANamespace) + return items +} + +func (m *resWrangler) groupedByOriginalNamespace() map[string][]*resource.Resource { + byNamespace := make(map[string][]*resource.Resource) + for _, res := range m.rList { + namespace := res.OrgId().EffectiveNamespace() + if _, found := byNamespace[namespace]; !found { + byNamespace[namespace] = []*resource.Resource{} + } + byNamespace[namespace] = append(byNamespace[namespace], res) + } + return byNamespace +} + +// AsYaml implements ResMap. +func (m *resWrangler) AsYaml() ([]byte, error) { + firstObj := true + var b []byte + buf := bytes.NewBuffer(b) + for _, res := range m.rList { + out, err := res.AsYAML() + if err != nil { + m, _ := res.Map() + return nil, errors.WrapPrefixf(err, "%#v", m) + } + if firstObj { + firstObj = false + } else { + if _, err = buf.WriteString("---\n"); err != nil { + return nil, err + } + } + if _, err = buf.Write(out); err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} + +// ErrorIfNotEqualSets implements ResMap. +func (m *resWrangler) ErrorIfNotEqualSets(other ResMap) error { + m2, ok := other.(*resWrangler) + if !ok { + return fmt.Errorf("bad cast to resWrangler 1") + } + if m.Size() != m2.Size() { + return fmt.Errorf( + "lists have different number of entries: %#v doesn't equal %#v", + m.rList, m2.rList) + } + seen := make(map[int]bool) + for _, r1 := range m.rList { + id := r1.CurId() + others := m2.GetMatchingResourcesByCurrentId(id.Equals) + if len(others) == 0 { + return fmt.Errorf( + "id in self missing from other; id: %s", id) + } + if len(others) > 1 { + return fmt.Errorf( + "id in self matches %d in other; id: %s", len(others), id) + } + r2 := others[0] + if !reflect.DeepEqual(r1.RNode, r2.RNode) { + return fmt.Errorf( + "nodes unequal: \n -- %s,\n -- %s\n\n--\n%#v\n------\n%#v\n", + r1, r2, r1, r2) + } + seen[m2.indexOfResource(r2)] = true + } + if len(seen) != m.Size() { + return fmt.Errorf("counting problem %d != %d", len(seen), m.Size()) + } + return nil +} + +// ErrorIfNotEqualLists implements ResMap. +func (m *resWrangler) ErrorIfNotEqualLists(other ResMap) error { + m2, ok := other.(*resWrangler) + if !ok { + return fmt.Errorf("bad cast to resWrangler 2") + } + if m.Size() != m2.Size() { + return fmt.Errorf( + "lists have different number of entries: %#v doesn't equal %#v", + m.rList, m2.rList) + } + for i, r1 := range m.rList { + r2 := m2.rList[i] + if err := r1.ErrIfNotEquals(r2); err != nil { + return err + } + } + return nil +} + +type resCopier func(r *resource.Resource) *resource.Resource + +// ShallowCopy implements ResMap. +func (m *resWrangler) ShallowCopy() ResMap { + return m.makeCopy( + func(r *resource.Resource) *resource.Resource { + return r + }) +} + +// DeepCopy implements ResMap. 
+func (m *resWrangler) DeepCopy() ResMap { + return m.makeCopy( + func(r *resource.Resource) *resource.Resource { + return r.DeepCopy() + }) +} + +// makeCopy copies the ResMap. +func (m *resWrangler) makeCopy(copier resCopier) ResMap { + result := &resWrangler{} + result.rList = make([]*resource.Resource, m.Size()) + for i, r := range m.rList { + result.rList[i] = copier(r) + } + return result +} + +// SubsetThatCouldBeReferencedByResource implements ResMap. +func (m *resWrangler) SubsetThatCouldBeReferencedByResource( + referrer *resource.Resource) (ResMap, error) { + referrerId := referrer.CurId() + if referrerId.IsClusterScoped() { + // A cluster scoped resource can refer to anything. + return m, nil + } + result := newOne() + roleBindingNamespaces, err := getNamespacesForRoleBinding(referrer) + if err != nil { + return nil, err + } + for _, possibleTarget := range m.rList { + id := possibleTarget.CurId() + if id.IsClusterScoped() { + // A cluster-scoped resource can be referred to by anything. + result.append(possibleTarget) + continue + } + if id.IsNsEquals(referrerId) { + // The two objects are in the same namespace. + result.append(possibleTarget) + continue + } + // The two objects are namespaced (not cluster-scoped), AND + // are in different namespaces. + // There's still a chance they can refer to each other. + if roleBindingNamespaces[possibleTarget.GetNamespace()] { + result.append(possibleTarget) + } + } + return result, nil +} + +// getNamespacesForRoleBinding returns referenced ServiceAccount namespaces +// if the resource is a RoleBinding +func getNamespacesForRoleBinding(r *resource.Resource) (map[string]bool, error) { + result := make(map[string]bool) + if r.GetKind() != "RoleBinding" { + return result, nil + } + subjects, err := r.GetSlice("subjects") + if err != nil || subjects == nil { + return result, nil + } + for _, s := range subjects { + subject := s.(map[string]interface{}) + if ns, ok1 := subject["namespace"]; ok1 { + if kind, ok2 := subject["kind"]; ok2 { + if kind.(string) == "ServiceAccount" { + if n, ok3 := ns.(string); ok3 { + result[n] = true + } else { + return nil, errors.Errorf("Invalid Input: namespace is blank for resource %q\n", r.CurId()) + } + } + } + } + } + return result, nil +} + +// AppendAll implements ResMap. +func (m *resWrangler) AppendAll(other ResMap) error { + if other == nil { + return nil + } + m2, ok := other.(*resWrangler) + if !ok { + return fmt.Errorf("bad cast to resWrangler 3") + } + return m.appendAll(m2.rList) +} + +// appendAll appends all the resources, error on Id collision. +func (m *resWrangler) appendAll(list []*resource.Resource) error { + for _, res := range list { + if err := m.Append(res); err != nil { + return err + } + } + return nil +} + +// AbsorbAll implements ResMap. +func (m *resWrangler) AbsorbAll(other ResMap) error { + if other == nil { + return nil + } + m2, ok := other.(*resWrangler) + if !ok { + return fmt.Errorf("bad cast to resWrangler 4") + } + for _, r := range m2.rList { + err := m.appendReplaceOrMerge(r) + if err != nil { + return err + } + } + return nil +} + +// AddOriginAnnotation implements ResMap. 
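The AbsorbAll contract can likewise be seen in a fragment (again reusing `rmF`; the YAML is illustrative). With the default (create) generation behavior, a colliding id is an error, per the appendReplaceOrMerge logic further below:

```go
cm := []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
  name: cm-a
`)
a, _ := rmF.NewResMapFromBytes(cm)
b, _ := rmF.NewResMapFromBytes(cm)
// Same CurId on both sides and no BehaviorMerge/BehaviorReplace:
// err: "id ... exists; behavior must be merge or replace"
err := a.AbsorbAll(b)
fmt.Println(err != nil) // true
```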
+func (m *resWrangler) AddOriginAnnotation(origin *resource.Origin) error { + if origin == nil { + return nil + } + for _, res := range m.rList { + or, err := res.GetOrigin() + if or != nil || err != nil { + // if any resources already have an origin annotation, + // skip it + continue + } + if err := res.SetOrigin(origin); err != nil { + return err + } + } + return nil +} + +// RemoveOriginAnnotation implements ResMap +func (m *resWrangler) RemoveOriginAnnotations() error { + for _, res := range m.rList { + if err := res.SetOrigin(nil); err != nil { + return err + } + } + return nil +} + +// AddTransformerAnnotation implements ResMap +func (m *resWrangler) AddTransformerAnnotation(origin *resource.Origin) error { + for _, res := range m.rList { + or, err := res.GetOrigin() + if err != nil { + return err + } + if or == nil { + // the resource does not have an origin annotation, so + // we assume that the transformer generated the resource + // rather than modifying it + err = res.SetOrigin(origin) + } else { + // the resource already has an origin annotation, so we + // record the provided origin as a transformation + err = res.AddTransformation(origin) + } + if err != nil { + return err + } + } + return nil +} + +// RemoveTransformerAnnotations implements ResMap +func (m *resWrangler) RemoveTransformerAnnotations() error { + for _, res := range m.rList { + if err := res.ClearTransformations(); err != nil { + return err + } + } + return nil +} + +func (m *resWrangler) appendReplaceOrMerge(res *resource.Resource) error { + id := res.CurId() + matches := m.GetMatchingResourcesByAnyId(id.Equals) + switch len(matches) { + case 0: + switch res.Behavior() { + case types.BehaviorMerge, types.BehaviorReplace: + return fmt.Errorf( + "id %#v does not exist; cannot merge or replace", id) + default: + // presumably types.BehaviorCreate + return m.Append(res) + } + case 1: + old := matches[0] + if old == nil { + return fmt.Errorf("id lookup failure") + } + index := m.indexOfResource(old) + if index < 0 { + return fmt.Errorf("indexing problem") + } + switch res.Behavior() { + case types.BehaviorReplace: + res.CopyMergeMetaDataFieldsFrom(old) + case types.BehaviorMerge: + // ensure the origin annotation doesn't get overwritten + orig, err := old.GetOrigin() + if err != nil { + return err + } + res.CopyMergeMetaDataFieldsFrom(old) + res.MergeDataMapFrom(old) + res.MergeBinaryDataMapFrom(old) + if orig != nil { + res.SetOrigin(orig) + } + + default: + return fmt.Errorf( + "id %#v exists; behavior must be merge or replace", id) + } + i, err := m.Replace(res) + if err != nil { + return err + } + if i != index { + return fmt.Errorf("unexpected target index in replacement") + } + return nil + default: + return fmt.Errorf( + "found multiple objects %v that could accept merge of %v", + matches, id) + } +} + +// AnnotateAll implements ResMap +func (m *resWrangler) AnnotateAll(key string, value string) error { + return m.ApplyFilter(annotations.Filter{ + Annotations: map[string]string{ + key: value, + }, + FsSlice: []types.FieldSpec{{ + Path: "metadata/annotations", + CreateIfNotPresent: true, + }}, + }) +} + +// Select returns a list of resources that +// are selected by a Selector +func (m *resWrangler) Select(s types.Selector) ([]*resource.Resource, error) { + var result []*resource.Resource + sr, err := types.NewSelectorRegex(&s) + if err != nil { + return nil, err + } + for _, r := range m.rList { + curId := r.CurId() + orgId := r.OrgId() + + // It first tries to match with the original namespace + // then 
matches with the current namespace
+		if !sr.MatchNamespace(orgId.EffectiveNamespace()) &&
+			!sr.MatchNamespace(curId.EffectiveNamespace()) {
+			continue
+		}
+
+		// It first tries to match with the original name
+		// then matches with the current name
+		if !sr.MatchName(orgId.Name) &&
+			!sr.MatchName(curId.Name) {
+			continue
+		}
+
+		// matches the GVK
+		if !sr.MatchGvk(r.GetGvk()) {
+			continue
+		}
+
+		// matches the label selector
+		matched, err := r.MatchesLabelSelector(s.LabelSelector)
+		if err != nil {
+			return nil, err
+		}
+		if !matched {
+			continue
+		}
+
+		// matches the annotation selector
+		matched, err = r.MatchesAnnotationSelector(s.AnnotationSelector)
+		if err != nil {
+			return nil, err
+		}
+		if !matched {
+			continue
+		}
+		result = append(result, r)
+	}
+	return result, nil
+}
+
+// ToRNodeSlice returns a copy of the resources as RNodes.
+func (m *resWrangler) ToRNodeSlice() []*kyaml.RNode {
+	result := make([]*kyaml.RNode, len(m.rList))
+	for i := range m.rList {
+		result[i] = m.rList[i].Copy()
+	}
+	return result
+}
+
+// DeAnchor implements ResMap.
+func (m *resWrangler) DeAnchor() (err error) {
+	for i := range m.rList {
+		if err = m.rList[i].DeAnchor(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ApplySmPatch applies the patch and errors on Id collisions.
+func (m *resWrangler) ApplySmPatch(
+	selectedSet *resource.IdSet, patch *resource.Resource) error {
+	var list []*resource.Resource
+	for _, res := range m.rList {
+		if selectedSet.Contains(res.CurId()) {
+			patchCopy := patch.DeepCopy()
+			patchCopy.CopyMergeMetaDataFieldsFrom(patch)
+			patchCopy.SetGvk(res.GetGvk())
+			patchCopy.SetKind(patch.GetKind())
+			if err := res.ApplySmPatch(patchCopy); err != nil {
+				return err
+			}
+		}
+		if !res.IsNilOrEmpty() {
+			list = append(list, res)
+		}
+	}
+	m.Clear()
+	return m.appendAll(list)
+}
+
+func (m *resWrangler) RemoveBuildAnnotations() {
+	for _, r := range m.rList {
+		r.RemoveBuildAnnotations()
+	}
+}
+
+// ApplyFilter implements ResMap.
+func (m *resWrangler) ApplyFilter(f kio.Filter) error {
+	reverseLookup := make(map[*kyaml.RNode]*resource.Resource, len(m.rList))
+	nodes := make([]*kyaml.RNode, len(m.rList))
+	for i, r := range m.rList {
+		ptr := &(r.RNode)
+		nodes[i] = ptr
+		reverseLookup[ptr] = r
+	}
+	// The filter can modify nodes, but also delete and create them.
+	// The filtered list might be smaller or larger than the nodes list.
+	filtered, err := f.Filter(nodes)
+	if err != nil {
+		return err
+	}
+	// Rebuild the resmap from the filtered RNodes.
+	var nRList []*resource.Resource
+	for _, rn := range filtered {
+		if rn.IsNilOrEmpty() {
+			// A node might make it through the filter as an object,
+			// but still be empty. Drop such entries.
+			continue
+		}
+		res, ok := reverseLookup[rn]
+		if !ok {
+			// A node was created; make a Resource to wrap it.
+			res = &resource.Resource{
+				RNode: *rn,
+				// Leave remaining fields empty.
+				// At time of writing, seeking to eliminate those fields.
+				// Alternatively, could just return error on creation attempt
+				// until remaining fields eliminated.
+			}
+		}
+		nRList = append(nRList, res)
+	}
+	m.rList = nRList
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/resource/doc.go b/vendor/sigs.k8s.io/kustomize/api/resource/doc.go
new file mode 100644
index 000000000..32d34b162
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/resource/doc.go
@@ -0,0 +1,5 @@
+// Copyright 2020 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resource implements representations of k8s API resources.
+package resource diff --git a/vendor/sigs.k8s.io/kustomize/api/resource/factory.go b/vendor/sigs.k8s.io/kustomize/api/resource/factory.go new file mode 100644 index 000000000..cbda87237 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/resource/factory.go @@ -0,0 +1,293 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package resource + +import ( + "encoding/json" + "fmt" + "log" + "strings" + + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/internal/generators" + "sigs.k8s.io/kustomize/api/internal/kusterr" + "sigs.k8s.io/kustomize/api/konfig" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/resid" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Factory makes instances of Resource. +type Factory struct { + hasher ifc.KustHasher + + // When set to true, IncludeLocalConfigs indicates + // that Factory should include resources with the + // annotation 'config.kubernetes.io/local-config'. + // By default these resources are ignored. + IncludeLocalConfigs bool +} + +// NewFactory makes an instance of Factory. +func NewFactory(h ifc.KustHasher) *Factory { + return &Factory{hasher: h} +} + +// Hasher returns an ifc.KustHasher +func (rf *Factory) Hasher() ifc.KustHasher { + return rf.hasher +} + +// FromMap returns a new instance of Resource. +func (rf *Factory) FromMap(m map[string]interface{}) *Resource { + return rf.FromMapAndOption(m, nil) +} + +// FromMapWithName returns a new instance with the given "original" name. +func (rf *Factory) FromMapWithName(n string, m map[string]interface{}) *Resource { + return rf.FromMapWithNamespaceAndName(resid.DefaultNamespace, n, m) +} + +// FromMapWithNamespaceAndName returns a new instance with the given "original" namespace. +func (rf *Factory) FromMapWithNamespaceAndName(ns string, n string, m map[string]interface{}) *Resource { + r := rf.FromMapAndOption(m, nil) + return r.setPreviousId(ns, n, r.GetKind()) +} + +// FromMapAndOption returns a new instance of Resource with given options. +func (rf *Factory) FromMapAndOption( + m map[string]interface{}, args *types.GeneratorArgs) *Resource { + n, err := yaml.FromMap(m) + if err != nil { + // TODO: return err instead of log. + log.Fatal(err) + } + return rf.makeOne(n, args) +} + +// makeOne returns a new instance of Resource. +func (rf *Factory) makeOne(rn *yaml.RNode, o *types.GeneratorArgs) *Resource { + if rn == nil { + log.Fatal("RNode must not be null") + } + resource := &Resource{RNode: *rn} + if o != nil { + if o.Options == nil || !o.Options.DisableNameSuffixHash { + resource.EnableHashSuffix() + } + resource.SetBehavior(types.NewGenerationBehavior(o.Behavior)) + } + + return resource +} + +// SliceFromPatches returns a slice of resources given a patch path +// slice from a kustomization file. +func (rf *Factory) SliceFromPatches( + ldr ifc.Loader, paths []types.PatchStrategicMerge) ([]*Resource, error) { + var result []*Resource + for _, path := range paths { + content, err := ldr.Load(string(path)) + if err != nil { + return nil, err + } + res, err := rf.SliceFromBytes(content) + if err != nil { + return nil, kusterr.Handler(err, string(path)) + } + result = append(result, res...) + } + return result, nil +} + +// FromBytes unmarshalls bytes into one Resource. 
+func (rf *Factory) FromBytes(in []byte) (*Resource, error) { + result, err := rf.SliceFromBytes(in) + if err != nil { + return nil, err + } + if len(result) != 1 { + return nil, fmt.Errorf( + "expected 1 resource, found %d in %v", len(result), in) + } + return result[0], nil +} + +// SliceFromBytes unmarshals bytes into a Resource slice. +func (rf *Factory) SliceFromBytes(in []byte) ([]*Resource, error) { + nodes, err := rf.RNodesFromBytes(in) + if err != nil { + return nil, err + } + return rf.resourcesFromRNodes(nodes), nil +} + +// DropLocalNodes removes the local nodes by default. Local nodes are detected via the annotation `config.kubernetes.io/local-config: "true"` +func (rf *Factory) DropLocalNodes(nodes []*yaml.RNode) ([]*Resource, error) { + var result []*yaml.RNode + for _, node := range nodes { + if node.IsNilOrEmpty() { + continue + } + md, err := node.GetValidatedMetadata() + if err != nil { + return nil, err + } + + if rf.IncludeLocalConfigs { + result = append(result, node) + continue + } + localConfig, exist := md.ObjectMeta.Annotations[konfig.IgnoredByKustomizeAnnotation] + if !exist || localConfig == "false" { + result = append(result, node) + } + } + return rf.resourcesFromRNodes(result), nil +} + +// ResourcesFromRNodes converts RNodes to Resources. +func (rf *Factory) ResourcesFromRNodes( + nodes []*yaml.RNode) (result []*Resource, err error) { + return rf.DropLocalNodes(nodes) +} + +// resourcesFromRNode assumes all nodes are good. +func (rf *Factory) resourcesFromRNodes( + nodes []*yaml.RNode) (result []*Resource) { + for _, n := range nodes { + result = append(result, rf.makeOne(n, nil)) + } + return +} + +func (rf *Factory) RNodesFromBytes(b []byte) ([]*yaml.RNode, error) { + nodes, err := kio.FromBytes(b) + if err != nil { + return nil, err + } + nodes, err = rf.dropBadNodes(nodes) + if err != nil { + return nil, err + } + return rf.inlineAnyEmbeddedLists(nodes) +} + +// inlineAnyEmbeddedLists scans the RNode slice for nodes named FooList. +// Such nodes are expected to be lists of resources, each of type Foo. +// These lists are replaced in the result by their inlined resources. +func (rf *Factory) inlineAnyEmbeddedLists( + nodes []*yaml.RNode) (result []*yaml.RNode, err error) { + var n0 *yaml.RNode + for len(nodes) > 0 { + n0, nodes = nodes[0], nodes[1:] + kind := n0.GetKind() + if !strings.HasSuffix(kind, "List") { + result = append(result, n0) + continue + } + // Convert a FooList into a slice of Foo. + var m map[string]interface{} + m, err = n0.Map() + if err != nil { + return nil, fmt.Errorf("trouble expanding list of %s; %w", kind, err) + } + items, ok := m["items"] + if !ok { + // treat as an empty list + continue + } + slice, ok := items.([]interface{}) + if !ok { + if items == nil { + // an empty list + continue + } + return nil, fmt.Errorf( + "expected array in %s/items, but found %T", kind, items) + } + innerNodes, err := rf.convertObjectSliceToNodeSlice(slice) + if err != nil { + return nil, err + } + nodes = append(nodes, innerNodes...) + } + return result, nil +} + +// convertObjectSlice converts a list of objects to a list of RNode. 
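The list-inlining behavior above (RNodesFromBytes plus inlineAnyEmbeddedLists) means a `FooList` wrapper disappears in favor of its items. A fragment, reusing the `resF` factory from the earlier sketch with illustrative input:

```go
// A v1 List with two ConfigMaps yields two resources,
// not one List object: SliceFromBytes inlines the items.
rs, err := resF.SliceFromBytes([]byte(`
apiVersion: v1
kind: List
items:
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: a
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: b
`))
if err != nil {
	panic(err)
}
fmt.Println(len(rs)) // 2
```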
+func (rf *Factory) convertObjectSliceToNodeSlice( + objects []interface{}) (result []*yaml.RNode, err error) { + var bytes []byte + var nodes []*yaml.RNode + for _, obj := range objects { + bytes, err = json.Marshal(obj) + if err != nil { + return + } + nodes, err = kio.FromBytes(bytes) + if err != nil { + return + } + nodes, err = rf.dropBadNodes(nodes) + if err != nil { + return + } + result = append(result, nodes...) + } + return +} + +// dropBadNodes may drop some nodes from its input argument. +func (rf *Factory) dropBadNodes(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + var result []*yaml.RNode + for _, n := range nodes { + if n.IsNilOrEmpty() { + continue + } + if _, err := n.GetValidatedMetadata(); err != nil { + return nil, err + } + if foundNil, path := n.HasNilEntryInList(); foundNil { + return nil, fmt.Errorf("empty item at %v in object %v", path, n) + } + result = append(result, n) + } + return result, nil +} + +// SliceFromBytesWithNames unmarshals bytes into a Resource slice with specified original +// name. +func (rf *Factory) SliceFromBytesWithNames(names []string, in []byte) ([]*Resource, error) { + result, err := rf.SliceFromBytes(in) + if err != nil { + return nil, err + } + if len(names) != len(result) { + return nil, fmt.Errorf("number of names doesn't match number of resources") + } + for i, res := range result { + res.setPreviousId(resid.DefaultNamespace, names[i], res.GetKind()) + } + return result, nil +} + +// MakeConfigMap makes an instance of Resource for ConfigMap +func (rf *Factory) MakeConfigMap(kvLdr ifc.KvLoader, args *types.ConfigMapArgs) (*Resource, error) { + rn, err := generators.MakeConfigMap(kvLdr, args) + if err != nil { + return nil, err + } + return rf.makeOne(rn, &args.GeneratorArgs), nil +} + +// MakeSecret makes an instance of Resource for Secret +func (rf *Factory) MakeSecret(kvLdr ifc.KvLoader, args *types.SecretArgs) (*Resource, error) { + rn, err := generators.MakeSecret(kvLdr, args) + if err != nil { + return nil, err + } + return rf.makeOne(rn, &args.GeneratorArgs), nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/resource/idset.go b/vendor/sigs.k8s.io/kustomize/api/resource/idset.go new file mode 100644 index 000000000..5d6bd63ed --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/resource/idset.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package resource + +import "sigs.k8s.io/kustomize/kyaml/resid" + +type IdSet struct { + ids map[resid.ResId]bool +} + +func MakeIdSet(slice []*Resource) *IdSet { + set := make(map[resid.ResId]bool) + for _, r := range slice { + id := r.CurId() + if _, ok := set[id]; !ok { + set[id] = true + } + } + return &IdSet{ids: set} +} + +func (s IdSet) Contains(id resid.ResId) bool { + _, ok := s.ids[id] + return ok +} + +func (s IdSet) Size() int { + return len(s.ids) +} diff --git a/vendor/sigs.k8s.io/kustomize/api/resource/origin.go b/vendor/sigs.k8s.io/kustomize/api/resource/origin.go new file mode 100644 index 000000000..f0a4ae75d --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/resource/origin.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package resource + +import ( + "path/filepath" + "strings" + + "sigs.k8s.io/kustomize/api/internal/git" + kyaml "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Origin retains information about the origin of resources and transformer configs +// that contributed to the output of `kustomize build` +type Origin struct { + // Path is the path to the resource. If a local resource, this path is + // rooted from the directory upon which `kustomize build` was invoked. If a + // remote resource, this path is rooted from the root of the remote repo. + Path string `json:"path,omitempty" yaml:"path,omitempty"` + + // Repo is the remote repository that the resource or transformer originated from if it is + // not from a local file + Repo string `json:"repo,omitempty" yaml:"repo,omitempty"` + + // Ref is the ref of the remote repository that the resource or transformer originated from + // if it is not from a local file + Ref string `json:"ref,omitempty" yaml:"ref,omitempty"` + + // The following fields only apply to resources that have been + // generated by fields other than the `resources` field, or to transformer + // configs. + + // ConfiguredIn is the file path to the generator or transformer config that created the + // resource + ConfiguredIn string `json:"configuredIn,omitempty" yaml:"configuredIn,omitempty"` + + // ConfiguredBy is the ObjectReference of the generator or transformer config + ConfiguredBy kyaml.ResourceIdentifier `json:"configuredBy,omitempty" yaml:"configuredBy,omitempty"` +} + +// Copy returns a copy of origin +func (origin *Origin) Copy() Origin { + if origin == nil { + return Origin{} + } + return *origin +} + +// Append returns a copy of origin with a path appended to it +func (origin *Origin) Append(path string) *Origin { + originCopy := origin.Copy() + repoSpec, err := git.NewRepoSpecFromURL(path) + if err == nil { + originCopy.Repo = repoSpec.CloneSpec() + absPath := repoSpec.AbsPath() + path = absPath[strings.Index(absPath[1:], "/")+1:][1:] + originCopy.Path = "" + originCopy.Ref = repoSpec.Ref + } + originCopy.Path = filepath.Join(originCopy.Path, path) + return &originCopy +} + +// String returns a string version of origin +func (origin *Origin) String() (string, error) { + anno, err := kyaml.Marshal(origin) + return string(anno), err +} + +// Transformations is a list of Origin +type Transformations []*Origin + +// String returns a string version of Transformations +func (transformations *Transformations) String() (string, error) { + anno, err := kyaml.Marshal(transformations) + return string(anno), err +} + +// OriginFromCustomPlugin takes a custom plugin defined as a resource +// and returns an origin object to describe it +func OriginFromCustomPlugin(res *Resource) (*Origin, error) { + origin, err := res.GetOrigin() + if err != nil { + return nil, err + } + var result *Origin + if origin != nil { + result = &Origin{ + Repo: origin.Repo, + Ref: origin.Ref, + ConfiguredIn: origin.Path, + ConfiguredBy: kyaml.ResourceIdentifier{ + TypeMeta: kyaml.TypeMeta{ + APIVersion: res.GetApiVersion(), + Kind: res.GetKind(), + }, + NameMeta: kyaml.NameMeta{ + Name: res.GetName(), + Namespace: res.GetNamespace(), + }, + }, + } + } + return result, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/resource/resource.go b/vendor/sigs.k8s.io/kustomize/api/resource/resource.go new file mode 100644 index 000000000..ae1a98be0 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/resource/resource.go @@ -0,0 +1,534 @@ +// Copyright 2019 The 
Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package resource + +import ( + "fmt" + "log" + "strings" + + "sigs.k8s.io/kustomize/api/filters/patchstrategicmerge" + "sigs.k8s.io/kustomize/api/ifc" + "sigs.k8s.io/kustomize/api/internal/utils" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/resid" + kyaml "sigs.k8s.io/kustomize/kyaml/yaml" + "sigs.k8s.io/yaml" +) + +// Resource is an RNode, representing a Kubernetes Resource Model object, +// paired with metadata used by kustomize. +type Resource struct { + kyaml.RNode + refVarNames []string +} + +var BuildAnnotations = []string{ + utils.BuildAnnotationPreviousKinds, + utils.BuildAnnotationPreviousNames, + utils.BuildAnnotationPrefixes, + utils.BuildAnnotationSuffixes, + utils.BuildAnnotationPreviousNamespaces, + utils.BuildAnnotationAllowNameChange, + utils.BuildAnnotationAllowKindChange, + utils.BuildAnnotationsRefBy, + utils.BuildAnnotationsGenBehavior, + utils.BuildAnnotationsGenAddHashSuffix, + + kioutil.PathAnnotation, + kioutil.IndexAnnotation, + kioutil.SeqIndentAnnotation, + kioutil.IdAnnotation, + kioutil.InternalAnnotationsMigrationResourceIDAnnotation, + + kioutil.LegacyPathAnnotation, + kioutil.LegacyIndexAnnotation, + kioutil.LegacyIdAnnotation, +} + +func (r *Resource) ResetRNode(incoming *Resource) { + r.RNode = *incoming.Copy() +} + +func (r *Resource) GetGvk() resid.Gvk { + return resid.GvkFromNode(&r.RNode) +} + +func (r *Resource) Hash(h ifc.KustHasher) (string, error) { + return h.Hash(&r.RNode) +} + +func (r *Resource) SetGvk(gvk resid.Gvk) { + r.SetKind(gvk.Kind) + r.SetApiVersion(gvk.ApiVersion()) +} + +func (r *Resource) GetOrigin() (*Origin, error) { + annotations := r.GetAnnotations() + originAnnotations, ok := annotations[utils.OriginAnnotationKey] + if !ok { + return nil, nil + } + var origin Origin + if err := yaml.Unmarshal([]byte(originAnnotations), &origin); err != nil { + return nil, err + } + return &origin, nil +} + +func (r *Resource) SetOrigin(origin *Origin) error { + annotations := r.GetAnnotations() + if origin == nil { + delete(annotations, utils.OriginAnnotationKey) + } else { + originStr, err := origin.String() + if err != nil { + return err + } + annotations[utils.OriginAnnotationKey] = originStr + } + return r.SetAnnotations(annotations) +} + +func (r *Resource) GetTransformations() (Transformations, error) { + annotations := r.GetAnnotations() + transformerAnnotations, ok := annotations[utils.TransformerAnnotationKey] + if !ok { + return nil, nil + } + var transformations Transformations + if err := yaml.Unmarshal([]byte(transformerAnnotations), &transformations); err != nil { + return nil, err + } + return transformations, nil +} + +func (r *Resource) AddTransformation(origin *Origin) error { + annotations := r.GetAnnotations() + transformations, err := r.GetTransformations() + if err != nil { + return err + } + if transformations == nil { + transformations = Transformations{} + } + transformations = append(transformations, origin) + transformationStr, err := transformations.String() + if err != nil { + return err + } + annotations[utils.TransformerAnnotationKey] = transformationStr + return r.SetAnnotations(annotations) +} + +func (r *Resource) ClearTransformations() error { + annotations := r.GetAnnotations() + delete(annotations, utils.TransformerAnnotationKey) + return r.SetAnnotations(annotations) +} + +// ResCtx is an interface describing the contextual added +// kept 
kustomize in the context of each Resource object. +// Currently mainly the name prefix and name suffix are added. +type ResCtx interface { + AddNamePrefix(p string) + AddNameSuffix(s string) + GetNamePrefixes() []string + GetNameSuffixes() []string +} + +// ResCtxMatcher returns true if two Resources are being +// modified in the same kustomize context. +type ResCtxMatcher func(ResCtx) bool + +// DeepCopy returns a new copy of resource +func (r *Resource) DeepCopy() *Resource { + rc := &Resource{ + RNode: *r.Copy(), + } + rc.copyKustomizeSpecificFields(r) + return rc +} + +// CopyMergeMetaDataFieldsFrom copies everything but the non-metadata in +// the resource. +// TODO: move to RNode, use GetMeta to improve performance. +// TODO: make a version of mergeStringMaps that is build-annotation aware +// to avoid repeatedly setting refby and genargs annotations +// Must remove the kustomize bit at the end. +func (r *Resource) CopyMergeMetaDataFieldsFrom(other *Resource) error { + if err := r.SetLabels( + mergeStringMaps(other.GetLabels(), r.GetLabels())); err != nil { + return fmt.Errorf("copyMerge cannot set labels - %w", err) + } + + ra := r.GetAnnotations() + _, enableNameSuffixHash := ra[utils.BuildAnnotationsGenAddHashSuffix] + merged := mergeStringMapsWithBuildAnnotations(other.GetAnnotations(), ra) + if !enableNameSuffixHash { + delete(merged, utils.BuildAnnotationsGenAddHashSuffix) + } + if err := r.SetAnnotations(merged); err != nil { + return fmt.Errorf("copyMerge cannot set annotations - %w", err) + } + + if err := r.SetName(other.GetName()); err != nil { + return fmt.Errorf("copyMerge cannot set name - %w", err) + } + if err := r.SetNamespace(other.GetNamespace()); err != nil { + return fmt.Errorf("copyMerge cannot set namespace - %w", err) + } + r.copyKustomizeSpecificFields(other) + return nil +} + +func (r *Resource) copyKustomizeSpecificFields(other *Resource) { + r.refVarNames = copyStringSlice(other.refVarNames) +} + +func (r *Resource) MergeDataMapFrom(o *Resource) { + r.SetDataMap(mergeStringMaps(o.GetDataMap(), r.GetDataMap())) +} + +func (r *Resource) MergeBinaryDataMapFrom(o *Resource) { + r.SetBinaryDataMap(mergeStringMaps(o.GetBinaryDataMap(), r.GetBinaryDataMap())) +} + +func (r *Resource) ErrIfNotEquals(o *Resource) error { + meYaml, err := r.AsYAML() + if err != nil { + return err + } + otherYaml, err := o.AsYAML() + if err != nil { + return err + } + if !r.ReferencesEqual(o) { + return fmt.Errorf( + `unequal references - self: +%sreferenced by: %s +--- other: +%sreferenced by: %s +`, meYaml, r.GetRefBy(), otherYaml, o.GetRefBy()) + } + if string(meYaml) != string(otherYaml) { + return fmt.Errorf(`--- self: +%s +--- other: +%s +`, meYaml, otherYaml) + } + return nil +} + +func (r *Resource) ReferencesEqual(other *Resource) bool { + setSelf := make(map[resid.ResId]bool) + setOther := make(map[resid.ResId]bool) + for _, ref := range other.GetRefBy() { + setOther[ref] = true + } + for _, ref := range r.GetRefBy() { + if _, ok := setOther[ref]; !ok { + return false + } + setSelf[ref] = true + } + return len(setSelf) == len(setOther) +} + +func copyStringSlice(s []string) []string { + if s == nil { + return nil + } + c := make([]string, len(s)) + copy(c, s) + return c +} + +// Implements ResCtx AddNamePrefix +func (r *Resource) AddNamePrefix(p string) { + r.appendCsvAnnotation(utils.BuildAnnotationPrefixes, p) +} + +// Implements ResCtx AddNameSuffix +func (r *Resource) AddNameSuffix(s string) { + r.appendCsvAnnotation(utils.BuildAnnotationSuffixes, s) +} + +func (r 
*Resource) appendCsvAnnotation(name, value string) { + if value == "" { + return + } + currentValue := r.getCsvAnnotation(name) + newValue := strings.Join(append(currentValue, value), ",") + if err := r.RNode.PipeE(kyaml.SetAnnotation(name, newValue)); err != nil { + panic(err) + } +} + +// Implements ResCtx GetNamePrefixes +func (r *Resource) GetNamePrefixes() []string { + return r.getCsvAnnotation(utils.BuildAnnotationPrefixes) +} + +// Implements ResCtx GetNameSuffixes +func (r *Resource) GetNameSuffixes() []string { + return r.getCsvAnnotation(utils.BuildAnnotationSuffixes) +} + +func (r *Resource) getCsvAnnotation(name string) []string { + annotations := r.GetAnnotations() + if _, ok := annotations[name]; !ok { + return nil + } + return strings.Split(annotations[name], ",") +} + +// PrefixesSuffixesEquals is conceptually doing the same task +// as OutermostPrefixSuffix but performs a deeper comparison +// of the suffix and prefix slices. +func (r *Resource) PrefixesSuffixesEquals(o ResCtx) bool { + return utils.SameEndingSubSlice(r.GetNamePrefixes(), o.GetNamePrefixes()) && + utils.SameEndingSubSlice(r.GetNameSuffixes(), o.GetNameSuffixes()) +} + +// RemoveBuildAnnotations removes annotations created by the build process. +// These are internal-only to kustomize, added to the data pipeline to +// track name changes so name references can be fixed. +func (r *Resource) RemoveBuildAnnotations() { + annotations := r.GetAnnotations() + if len(annotations) == 0 { + return + } + for _, a := range BuildAnnotations { + delete(annotations, a) + } + if err := r.SetAnnotations(annotations); err != nil { + panic(err) + } +} + +func (r *Resource) setPreviousId(ns string, n string, k string) *Resource { + r.appendCsvAnnotation(utils.BuildAnnotationPreviousNames, n) + r.appendCsvAnnotation(utils.BuildAnnotationPreviousNamespaces, ns) + r.appendCsvAnnotation(utils.BuildAnnotationPreviousKinds, k) + return r +} + +// AllowNameChange allows name changes to the resource. +func (r *Resource) AllowNameChange() { + r.enable(utils.BuildAnnotationAllowNameChange) +} + +// NameChangeAllowed checks if a patch resource is allowed to change another resource's name. +func (r *Resource) NameChangeAllowed() bool { + return r.isEnabled(utils.BuildAnnotationAllowNameChange) +} + +// AllowKindChange allows kind changes to the resource. +func (r *Resource) AllowKindChange() { + r.enable(utils.BuildAnnotationAllowKindChange) +} + +// KindChangeAllowed checks if a patch resource is allowed to change another resource's kind. +func (r *Resource) KindChangeAllowed() bool { + return r.isEnabled(utils.BuildAnnotationAllowKindChange) +} + +func (r *Resource) isEnabled(annoKey string) bool { + annotations := r.GetAnnotations() + v, ok := annotations[annoKey] + return ok && v == utils.Enabled +} + +func (r *Resource) enable(annoKey string) { + annotations := r.GetAnnotations() + annotations[annoKey] = utils.Enabled + if err := r.SetAnnotations(annotations); err != nil { + panic(err) + } +} + +// String returns resource as JSON. +func (r *Resource) String() string { + bs, err := r.MarshalJSON() + if err != nil { + return "<" + err.Error() + ">" + } + return strings.TrimSpace(string(bs)) +} + +// AsYAML returns the resource in Yaml form. +// Easier to read than JSON. +func (r *Resource) AsYAML() ([]byte, error) { + json, err := r.MarshalJSON() + if err != nil { + return nil, err + } + return yaml.JSONToYAML(json) +} + +// MustYaml returns YAML or panics. 
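The prefix/suffix bookkeeping above accumulates values in a single comma-separated build annotation, split back apart by getCsvAnnotation. A hypothetical fragment, with `res` obtained from `resF.FromBytes` as earlier:

```go
res.AddNamePrefix("dev-")
res.AddNamePrefix("eu-")
fmt.Println(res.GetNamePrefixes()) // [dev- eu-]
```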
+func (r *Resource) MustYaml() string { + yml, err := r.AsYAML() + if err != nil { + log.Fatal(err) + } + return string(yml) +} + +// Behavior returns the behavior for the resource. +func (r *Resource) Behavior() types.GenerationBehavior { + annotations := r.GetAnnotations() + if v, ok := annotations[utils.BuildAnnotationsGenBehavior]; ok { + return types.NewGenerationBehavior(v) + } + return types.NewGenerationBehavior("") +} + +// SetBehavior sets the behavior for the resource. +func (r *Resource) SetBehavior(behavior types.GenerationBehavior) { + annotations := r.GetAnnotations() + annotations[utils.BuildAnnotationsGenBehavior] = behavior.String() + if err := r.SetAnnotations(annotations); err != nil { + panic(err) + } +} + +// NeedHashSuffix returns true if a resource content +// hash should be appended to the name of the resource. +func (r *Resource) NeedHashSuffix() bool { + return r.isEnabled(utils.BuildAnnotationsGenAddHashSuffix) +} + +// EnableHashSuffix marks the resource as needing a content +// hash to be appended to the name of the resource. +func (r *Resource) EnableHashSuffix() { + r.enable(utils.BuildAnnotationsGenAddHashSuffix) +} + +// OrgId returns the original, immutable ResId for the resource. +// This doesn't have to be unique in a ResMap. +func (r *Resource) OrgId() resid.ResId { + ids := r.PrevIds() + if len(ids) > 0 { + return ids[0] + } + return r.CurId() +} + +// PrevIds returns a list of ResIds that includes every +// previous ResId the resource has had through all of its +// GVKN transformations, in the order that it had that ID. +// I.e. the oldest ID is first. +// The returned array does not include the resource's current +// ID. If there are no previous IDs, this will return nil. +func (r *Resource) PrevIds() []resid.ResId { + prevIds, err := utils.PrevIds(&r.RNode) + if err != nil { + // this should never happen + panic(err) + } + return prevIds +} + +// StorePreviousId stores the resource's current ID via build annotations. +func (r *Resource) StorePreviousId() { + id := r.CurId() + r.setPreviousId(id.EffectiveNamespace(), id.Name, id.Kind) +} + +// CurId returns a ResId for the resource using the +// mutable parts of the resource. +// This should be unique in any ResMap. +func (r *Resource) CurId() resid.ResId { + return resid.NewResIdWithNamespace( + r.GetGvk(), r.GetName(), r.GetNamespace()) +} + +// GetRefBy returns the ResIds that referred to current resource +func (r *Resource) GetRefBy() []resid.ResId { + var resIds []resid.ResId + asStrings := r.getCsvAnnotation(utils.BuildAnnotationsRefBy) + for _, s := range asStrings { + resIds = append(resIds, resid.FromString(s)) + } + return resIds +} + +// AppendRefBy appends a ResId into the refBy list +// Using any type except fmt.Stringer here results in a compilation error +func (r *Resource) AppendRefBy(id fmt.Stringer) { + r.appendCsvAnnotation(utils.BuildAnnotationsRefBy, id.String()) +} + +// GetRefVarNames returns vars that refer to current resource +func (r *Resource) GetRefVarNames() []string { + return r.refVarNames +} + +// AppendRefVarName appends a name of a var into the refVar list +func (r *Resource) AppendRefVarName(variable types.Var) { + r.refVarNames = append(r.refVarNames, variable.Name) +} + +// ApplySmPatch applies the provided strategic merge patch. 
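A sketch of the resource-level strategic-merge entry point defined just below, again assuming the `resF` factory from the first sketch; note how name and kind are restored after patching unless the patch carries the allow-name-change / allow-kind-change build annotations:

```go
base, err := resF.FromBytes([]byte(`
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 1
`))
if err != nil {
	panic(err)
}
patch, err := resF.FromBytes([]byte(`
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 3
`))
if err != nil {
	panic(err)
}
// base now carries replicas: 3; name, namespace and kind
// are reset to their pre-patch values by default.
if err := base.ApplySmPatch(patch); err != nil {
	panic(err)
}
```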
+func (r *Resource) ApplySmPatch(patch *Resource) error { + n, ns, k := r.GetName(), r.GetNamespace(), r.GetKind() + if patch.NameChangeAllowed() || patch.KindChangeAllowed() { + r.StorePreviousId() + } + if err := r.ApplyFilter(patchstrategicmerge.Filter{ + Patch: &patch.RNode, + }); err != nil { + return err + } + if r.IsNilOrEmpty() { + return nil + } + if !patch.KindChangeAllowed() { + r.SetKind(k) + } + if !patch.NameChangeAllowed() { + r.SetName(n) + } + r.SetNamespace(ns) + return nil +} + +func (r *Resource) ApplyFilter(f kio.Filter) error { + l, err := f.Filter([]*kyaml.RNode{&r.RNode}) + if len(l) == 0 { + // The node was deleted, which means the entire resource + // must be deleted. Signal that via the following: + r.SetYNode(nil) + } + return err +} + +func mergeStringMaps(maps ...map[string]string) map[string]string { + result := map[string]string{} + for _, m := range maps { + for key, value := range m { + result[key] = value + } + } + return result +} + +func mergeStringMapsWithBuildAnnotations(maps ...map[string]string) map[string]string { + result := mergeStringMaps(maps...) + for i := range BuildAnnotations { + if len(maps) > 0 { + if v, ok := maps[0][BuildAnnotations[i]]; ok { + result[BuildAnnotations[i]] = v + continue + } + } + delete(result, BuildAnnotations[i]) + } + return result +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/builtinpluginloadingoptions_string.go b/vendor/sigs.k8s.io/kustomize/api/types/builtinpluginloadingoptions_string.go new file mode 100644 index 000000000..033a45123 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/builtinpluginloadingoptions_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=BuiltinPluginLoadingOptions"; DO NOT EDIT. + +package types + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[BploUndefined-0] + _ = x[BploUseStaticallyLinked-1] + _ = x[BploLoadFromFileSys-2] +} + +const _BuiltinPluginLoadingOptions_name = "BploUndefinedBploUseStaticallyLinkedBploLoadFromFileSys" + +var _BuiltinPluginLoadingOptions_index = [...]uint8{0, 13, 36, 55} + +func (i BuiltinPluginLoadingOptions) String() string { + if i < 0 || i >= BuiltinPluginLoadingOptions(len(_BuiltinPluginLoadingOptions_index)-1) { + return "BuiltinPluginLoadingOptions(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BuiltinPluginLoadingOptions_name[_BuiltinPluginLoadingOptions_index[i]:_BuiltinPluginLoadingOptions_index[i+1]] +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/configmapargs.go b/vendor/sigs.k8s.io/kustomize/api/types/configmapargs.go new file mode 100644 index 000000000..69877769f --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/configmapargs.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// ConfigMapArgs contains the metadata of how to generate a configmap. +type ConfigMapArgs struct { + // GeneratorArgs for the configmap. + GeneratorArgs `json:",inline,omitempty" yaml:",inline,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/doc.go b/vendor/sigs.k8s.io/kustomize/api/types/doc.go new file mode 100644 index 000000000..22c38a651 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/doc.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +// Package types holds the definition of the kustomization struct and +// supporting structs. It's the k8s API conformant object that describes +// a set of generation and transformation operations to create and/or +// modify k8s resources. +// A kustomization file is a serialization of this struct. +package types diff --git a/vendor/sigs.k8s.io/kustomize/api/types/erronlybuiltinpluginsallowed.go b/vendor/sigs.k8s.io/kustomize/api/types/erronlybuiltinpluginsallowed.go new file mode 100644 index 000000000..9e6ffacff --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/erronlybuiltinpluginsallowed.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "fmt" + + "sigs.k8s.io/kustomize/kyaml/errors" +) + +type errOnlyBuiltinPluginsAllowed struct { + name string +} + +func (e *errOnlyBuiltinPluginsAllowed) Error() string { + return fmt.Sprintf( + "external plugins disabled; unable to load external plugin '%s'", + e.name) +} + +func NewErrOnlyBuiltinPluginsAllowed(n string) *errOnlyBuiltinPluginsAllowed { + return &errOnlyBuiltinPluginsAllowed{name: n} +} + +func IsErrOnlyBuiltinPluginsAllowed(err error) bool { + e := &errOnlyBuiltinPluginsAllowed{} + return errors.As(err, &e) +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/errunabletofind.go b/vendor/sigs.k8s.io/kustomize/api/types/errunabletofind.go new file mode 100644 index 000000000..b91bb68ff --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/errunabletofind.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "fmt" + "strings" + + "sigs.k8s.io/kustomize/kyaml/errors" +) + +type errUnableToFind struct { + // What are we unable to find? + what string + // What things did we try? + attempts []Pair +} + +func (e *errUnableToFind) Error() string { + var m []string + for _, p := range e.attempts { + m = append(m, "('"+p.Value+"'; "+p.Key+")") + } + return fmt.Sprintf( + "unable to find %s - tried: %s", e.what, strings.Join(m, ", ")) +} + +func NewErrUnableToFind(w string, a []Pair) *errUnableToFind { + return &errUnableToFind{what: w, attempts: a} +} + +func IsErrUnableToFind(err error) bool { + e := &errUnableToFind{} + return errors.As(err, &e) +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go b/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go new file mode 100644 index 000000000..8d3579544 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "fmt" + + "sigs.k8s.io/kustomize/kyaml/resid" +) + +// FieldSpec completely specifies a kustomizable field in a k8s API object. +// It helps define the operands of transformations. +// +// For example, a directive to add a common label to objects +// will need to know that a 'Deployment' object (in API group +// 'apps', any version) can have labels at field path +// 'spec/template/metadata/labels', and further that it is OK +// (or not OK) to add that field path to the object if the +// field path doesn't exist already. 
+// +// This would look like +// { +// group: apps +// kind: Deployment +// path: spec/template/metadata/labels +// create: true +// } +type FieldSpec struct { + resid.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"` + Path string `json:"path,omitempty" yaml:"path,omitempty"` + CreateIfNotPresent bool `json:"create,omitempty" yaml:"create,omitempty"` +} + +func (fs FieldSpec) String() string { + return fmt.Sprintf( + "%s:%v:%s", fs.Gvk.String(), fs.CreateIfNotPresent, fs.Path) +} + +// effectivelyEquals returns true when the primary key matches; the remaining fields might not. +func (fs FieldSpec) effectivelyEquals(other FieldSpec) bool { + return fs.IsSelected(&other.Gvk) && fs.Path == other.Path +} + +type FsSlice []FieldSpec + +func (s FsSlice) Len() int { return len(s) } +func (s FsSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s FsSlice) Less(i, j int) bool { + return s[i].Gvk.IsLessThan(s[j].Gvk) +} + +// MergeAll merges the argument into this, returning the result. +// Items already present are ignored. +// Items that conflict (primary key matches, but remaining data differs) +// result in an error. +func (s FsSlice) MergeAll(incoming FsSlice) (result FsSlice, err error) { + result = s + for _, x := range incoming { + result, err = result.MergeOne(x) + if err != nil { + return nil, err + } + } + return result, nil +} + +// MergeOne merges the argument into this, returning the result. +// If the item's primary key is already present, and there are no +// conflicts, it is ignored (we don't want duplicates). +// If there is a conflict, the merge fails. +func (s FsSlice) MergeOne(x FieldSpec) (FsSlice, error) { + i := s.index(x) + if i > -1 { + // It's already there. + if s[i].CreateIfNotPresent != x.CreateIfNotPresent { + return nil, fmt.Errorf("conflicting fieldspecs") + } + return s, nil + } + return append(s, x), nil +} + +func (s FsSlice) index(fs FieldSpec) int { + for i, x := range s { + if x.effectivelyEquals(fs) { + return i + } + } + return -1 +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/generationbehavior.go b/vendor/sigs.k8s.io/kustomize/api/types/generationbehavior.go new file mode 100644 index 000000000..f8f362780 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/generationbehavior.go @@ -0,0 +1,46 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// GenerationBehavior specifies generation behavior of configmaps, secrets and maybe other resources. +type GenerationBehavior int + +const ( + // BehaviorUnspecified is an Unspecified behavior; typically treated as a Create. + BehaviorUnspecified GenerationBehavior = iota + // BehaviorCreate makes a new resource. + BehaviorCreate + // BehaviorReplace replaces a resource. + BehaviorReplace + // BehaviorMerge attempts to merge a new resource with an existing resource. + BehaviorMerge +) + +// String converts a GenerationBehavior to a string. +func (b GenerationBehavior) String() string { + switch b { + case BehaviorReplace: + return "replace" + case BehaviorMerge: + return "merge" + case BehaviorCreate: + return "create" + default: + return "unspecified" + } +} + +// NewGenerationBehavior converts a string to a GenerationBehavior.
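+//
+// For example (illustrative):
+//
+//	NewGenerationBehavior("merge") // BehaviorMerge
+//	NewGenerationBehavior("bogus") // BehaviorUnspecified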
+func NewGenerationBehavior(s string) GenerationBehavior { + switch s { + case "replace": + return BehaviorReplace + case "merge": + return BehaviorMerge + case "create": + return BehaviorCreate + default: + return BehaviorUnspecified + } +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go b/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go new file mode 100644 index 000000000..a4145db3d --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go @@ -0,0 +1,27 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// GeneratorArgs contains arguments common to ConfigMap and Secret generators. +type GeneratorArgs struct { + // Namespace for the configmap, optional + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + + // Name - actually the partial name - of the generated resource. + // The full name ends up being something like + // NamePrefix + this.Name + hash(content of generated resource). + Name string `json:"name,omitempty" yaml:"name,omitempty"` + + // Behavior of generated resource, must be one of: + // 'create': create a new one + // 'replace': replace the existing one + // 'merge': merge with the existing one + Behavior string `json:"behavior,omitempty" yaml:"behavior,omitempty"` + + // KvPairSources for the generator. + KvPairSources `json:",inline,omitempty" yaml:",inline,omitempty"` + + // Local overrides to global generatorOptions field. + Options *GeneratorOptions `json:"options,omitempty" yaml:"options,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/generatoroptions.go b/vendor/sigs.k8s.io/kustomize/api/types/generatoroptions.go new file mode 100644 index 000000000..683d89bfd --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/generatoroptions.go @@ -0,0 +1,76 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// GeneratorOptions modify behavior of all ConfigMap and Secret generators. +type GeneratorOptions struct { + // Labels to add to all generated resources. + Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` + + // Annotations to add to all generated resources. + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` + + // DisableNameSuffixHash if true disables the default behavior of adding a + // suffix to the names of generated resources that is a hash of the + // resource contents. + DisableNameSuffixHash bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty"` + + // Immutable, if true, marks all generated resources as immutable. + Immutable bool `json:"immutable,omitempty" yaml:"immutable,omitempty"` +} + +// MergeGlobalOptionsIntoLocal merges two instances of GeneratorOptions. +// Values in the first 'local' argument cannot be overridden by the second +// 'global' argument, except in the case of booleans. +// +// With booleans, there's no way to distinguish an 'intentional' +// false from 'default' false. So the rule is, if the global value +// of a boolean is true, i.e. disable, it trumps the +// local value. If the global value is false, then the local value is +// respected. Bottom line: a local false cannot override a global true. +// +// Boolean fields are always a bad idea; one should always use enums instead.
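+//
+// Illustrative sketch (editor's example, hypothetical values):
+//
+//	local := &GeneratorOptions{Labels: map[string]string{"app": "demo"}}
+//	global := &GeneratorOptions{DisableNameSuffixHash: true}
+//	merged := MergeGlobalOptionsIntoLocal(local, global)
+//	// merged keeps the local "app" label, and
+//	// merged.DisableNameSuffixHash is true: the global true wins.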
+func MergeGlobalOptionsIntoLocal( + localOpts *GeneratorOptions, + globalOpts *GeneratorOptions) *GeneratorOptions { + if globalOpts == nil { + return localOpts + } + if localOpts == nil { + localOpts = &GeneratorOptions{} + } + overrideMap(&localOpts.Labels, globalOpts.Labels) + overrideMap(&localOpts.Annotations, globalOpts.Annotations) + if globalOpts.DisableNameSuffixHash { + localOpts.DisableNameSuffixHash = true + } + if globalOpts.Immutable { + localOpts.Immutable = true + } + return localOpts +} + +func overrideMap(localMap *map[string]string, globalMap map[string]string) { + if *localMap == nil { + if globalMap != nil { + *localMap = CopyMap(globalMap) + } + return + } + for k, v := range globalMap { + _, ok := (*localMap)[k] + if !ok { + (*localMap)[k] = v + } + } +} + +// CopyMap copies a map. +func CopyMap(in map[string]string) map[string]string { + out := make(map[string]string) + for k, v := range in { + out[k] = v + } + return out +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go b/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go new file mode 100644 index 000000000..15ea7178f --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go @@ -0,0 +1,185 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import "path/filepath" + +const HelmDefaultHome = "charts" + +type HelmGlobals struct { + // ChartHome is a file path, relative to the kustomization root, + // to a directory containing a subdirectory for each chart to be + // included in the kustomization. + // The default value of this field is "charts". + // So, for example, kustomize looks for the minecraft chart + // at {kustomizationRoot}/{ChartHome}/minecraft. + // If the chart is there at build time, kustomize will use it as found, + // and not check version numbers or dates. + // If the chart is not there, kustomize will attempt to pull it + // using the version number specified in the kustomization file, + // and put it there. To suppress the pull attempt, simply assure + // that the chart is already there. + ChartHome string `json:"chartHome,omitempty" yaml:"chartHome,omitempty"` + + // ConfigHome defines a value that kustomize should pass to helm via + // the HELM_CONFIG_HOME environment variable. kustomize doesn't attempt + // to read or write this directory. + // If omitted, {tmpDir}/helm is used, where {tmpDir} is some temporary + // directory created by kustomize for the benefit of helm. + // Likewise, kustomize sets + // HELM_CACHE_HOME={ConfigHome}/.cache + // HELM_DATA_HOME={ConfigHome}/.data + // for the helm subprocess. + ConfigHome string `json:"configHome,omitempty" yaml:"configHome,omitempty"` +} + +type HelmChart struct { + // Name is the name of the chart, e.g. 'minecraft'. + Name string `json:"name,omitempty" yaml:"name,omitempty"` + + // Version is the version of the chart, e.g. '3.1.3' + Version string `json:"version,omitempty" yaml:"version,omitempty"` + + // Repo is a URL locating the chart on the internet. + // This is the argument to helm's `--repo` flag, e.g. + // `https://itzg.github.io/minecraft-server-charts`. + Repo string `json:"repo,omitempty" yaml:"repo,omitempty"` + + // ReleaseName replaces RELEASE-NAME in chart template output, + // making a particular inflation of a chart unique with respect to + // other inflations of the same chart in a cluster. It's the first + // argument to the helm `install` and `template` commands, i.e. 
+ // helm install {RELEASE-NAME} {chartName} + // helm template {RELEASE-NAME} {chartName} + // If omitted, the flag --generate-name is passed to 'helm template'. + ReleaseName string `json:"releaseName,omitempty" yaml:"releaseName,omitempty"` + + // Namespace sets the target namespace for a release. It is .Release.Namespace + // in the helm template. + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + + // AdditionalValuesFiles are local file paths to values files to be used in + // addition to either the default values file or the values specified in ValuesFile. + AdditionalValuesFiles []string `json:"additionalValuesFiles,omitempty" yaml:"additionalValuesFiles,omitempty"` + + // ValuesFile is a local file path to a values file to use _instead of_ + // the default values that accompanied the chart. + // The default values are in '{ChartHome}/{Name}/values.yaml'. + ValuesFile string `json:"valuesFile,omitempty" yaml:"valuesFile,omitempty"` + + // ValuesInline holds value mappings specified directly, + // rather than in a separate file. + ValuesInline map[string]interface{} `json:"valuesInline,omitempty" yaml:"valuesInline,omitempty"` + + // ValuesMerge specifies how to treat ValuesInline with respect to Values. + // Legal values: 'merge', 'override', 'replace'. + // Defaults to 'override'. + ValuesMerge string `json:"valuesMerge,omitempty" yaml:"valuesMerge,omitempty"` + + // IncludeCRDs specifies if Helm should also generate CustomResourceDefinitions. + // Defaults to 'false'. + IncludeCRDs bool `json:"includeCRDs,omitempty" yaml:"includeCRDs,omitempty"` //nolint: tagliatelle + + // SkipHooks sets the --no-hooks flag when calling helm template. This prevents + // helm from erroneously rendering test templates. + SkipHooks bool `json:"skipHooks,omitempty" yaml:"skipHooks,omitempty"` + + // ApiVersions is the kubernetes apiversions used for Capabilities.APIVersions + ApiVersions []string `json:"apiVersions,omitempty" yaml:"apiVersions,omitempty"` + + // NameTemplate is for specifying the name template used to name the release. + NameTemplate string `json:"nameTemplate,omitempty" yaml:"nameTemplate,omitempty"` + + // SkipTests skips tests from templated output. + SkipTests bool `json:"skipTests,omitempty" yaml:"skipTests,omitempty"` +} + +// HelmChartArgs contains arguments to helm. +// Deprecated. Use HelmGlobals and HelmChart instead.
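+//
+// Migration sketch (editor's note, hypothetical values): a deprecated
+//
+//	HelmChartArgs{ChartName: "minecraft", ChartVersion: "3.1.3", HelmHome: "/tmp/helm"}
+//
+// is split by SplitHelmParameters (below) into
+//
+//	HelmChart{Name: "minecraft", Version: "3.1.3"}
+//
+// plus HelmGlobals{ConfigHome: "/tmp/helm"}.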
+type HelmChartArgs struct { + ChartName string `json:"chartName,omitempty" yaml:"chartName,omitempty"` + ChartVersion string `json:"chartVersion,omitempty" yaml:"chartVersion,omitempty"` + ChartRepoURL string `json:"chartRepoUrl,omitempty" yaml:"chartRepoUrl,omitempty"` + ChartHome string `json:"chartHome,omitempty" yaml:"chartHome,omitempty"` + ChartRepoName string `json:"chartRepoName,omitempty" yaml:"chartRepoName,omitempty"` + HelmBin string `json:"helmBin,omitempty" yaml:"helmBin,omitempty"` + HelmHome string `json:"helmHome,omitempty" yaml:"helmHome,omitempty"` + Values string `json:"values,omitempty" yaml:"values,omitempty"` + ValuesLocal map[string]interface{} `json:"valuesLocal,omitempty" yaml:"valuesLocal,omitempty"` + ValuesMerge string `json:"valuesMerge,omitempty" yaml:"valuesMerge,omitempty"` + ReleaseName string `json:"releaseName,omitempty" yaml:"releaseName,omitempty"` + ReleaseNamespace string `json:"releaseNamespace,omitempty" yaml:"releaseNamespace,omitempty"` + ExtraArgs []string `json:"extraArgs,omitempty" yaml:"extraArgs,omitempty"` +} + +// SplitHelmParameters splits helm parameters into +// per-chart params and global chart-independent parameters. +func SplitHelmParameters( + oldArgs []HelmChartArgs) (charts []HelmChart, globals HelmGlobals) { + for i, old := range oldArgs { + charts = append(charts, makeHelmChartFromHca(&oldArgs[i])) + if old.HelmHome != "" { + // last non-empty wins + globals.ConfigHome = old.HelmHome + } + if old.ChartHome != "" { + // last non-empty wins + globals.ChartHome = old.ChartHome + } + } + return charts, globals +} + +func makeHelmChartFromHca(old *HelmChartArgs) (c HelmChart) { + c.Name = old.ChartName + c.Version = old.ChartVersion + c.Repo = old.ChartRepoURL + c.ValuesFile = old.Values + c.ValuesInline = old.ValuesLocal + c.ValuesMerge = old.ValuesMerge + c.ReleaseName = old.ReleaseName + return +} + +func (h HelmChart) AsHelmArgs(absChartHome string) []string { + args := []string{"template"} + if h.ReleaseName != "" { + args = append(args, h.ReleaseName) + } else { + // AFAICT, this doesn't work as intended due to a bug in helm. + // See https://github.com/helm/helm/issues/6019 + // I've tried placing the flag before and after the name argument. + args = append(args, "--generate-name") + } + if h.Name != "" { + args = append(args, filepath.Join(absChartHome, h.Name)) + } + if h.Namespace != "" { + args = append(args, "--namespace", h.Namespace) + } + if h.NameTemplate != "" { + args = append(args, "--name-template", h.NameTemplate) + } + + if h.ValuesFile != "" { + args = append(args, "-f", h.ValuesFile) + } + for _, valuesFile := range h.AdditionalValuesFiles { + args = append(args, "-f", valuesFile) + } + + for _, apiVer := range h.ApiVersions { + args = append(args, "--api-versions", apiVer) + } + if h.IncludeCRDs { + args = append(args, "--include-crds") + } + if h.SkipTests { + args = append(args, "--skip-tests") + } + if h.SkipHooks { + args = append(args, "--no-hooks") + } + return args +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/iampolicygenerator.go b/vendor/sigs.k8s.io/kustomize/api/types/iampolicygenerator.go new file mode 100644 index 000000000..f1d27ba7b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/iampolicygenerator.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +type Cloud string + +const GKE Cloud = "gke" + +// IAMPolicyGeneratorArgs contains arguments to generate a GKE service account resource. 
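+//
+// In a kustomization file this might look like (illustrative; the names and
+// project are hypothetical):
+//
+//	cloud: gke
+//	kubernetesService:
+//	  name: my-ksa
+//	  namespace: default
+//	serviceAccount:
+//	  name: my-gsa
+//	  projectId: my-project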
+type IAMPolicyGeneratorArgs struct { + // which cloud provider to generate for (e.g. "gke") + Cloud `json:"cloud" yaml:"cloud"` + + // information about the kubernetes cluster for this object + KubernetesService `json:"kubernetesService" yaml:"kubernetesService"` + + // information about the service account and project + ServiceAccount `json:"serviceAccount" yaml:"serviceAccount"` +} + +type KubernetesService struct { + // the name used for the Kubernetes service account + Name string `json:"name" yaml:"name"` + + // the name of the Kubernetes namespace for this object + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` +} + +type ServiceAccount struct { + // the name of the new cloud provider service account + Name string `json:"name" yaml:"name"` + + // The ID of the project + ProjectId string `json:"projectId" yaml:"projectId"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/image.go b/vendor/sigs.k8s.io/kustomize/api/types/image.go new file mode 100644 index 000000000..e40ed324d --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/image.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// Image contains an image name, a new name, a new tag or digest, +// which will replace the original name and tag. +type Image struct { + // Name is a tag-less image name. + Name string `json:"name,omitempty" yaml:"name,omitempty"` + + // NewName is the value used to replace the original name. + NewName string `json:"newName,omitempty" yaml:"newName,omitempty"` + + // TagSuffix is the value used to suffix the original tag. + // If Digest and NewTag are present, an error is thrown. + TagSuffix string `json:"tagSuffix,omitempty" yaml:"tagSuffix,omitempty"` + + // NewTag is the value used to replace the original tag. + NewTag string `json:"newTag,omitempty" yaml:"newTag,omitempty"` + + // Digest is the value used to replace the original image tag. + // If digest is present, the NewTag value is ignored. + Digest string `json:"digest,omitempty" yaml:"digest,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go b/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go new file mode 100644 index 000000000..41376bedd --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go @@ -0,0 +1,351 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/filesys" + "sigs.k8s.io/yaml" +) + +const ( + KustomizationVersion = "kustomize.config.k8s.io/v1beta1" + KustomizationKind = "Kustomization" + ComponentVersion = "kustomize.config.k8s.io/v1alpha1" + ComponentKind = "Component" + MetadataNamespacePath = "metadata/namespace" + MetadataNamespaceApiVersion = "v1" + MetadataNamePath = "metadata/name" + + OriginAnnotations = "originAnnotations" + TransformerAnnotations = "transformerAnnotations" + ManagedByLabelOption = "managedByLabel" +) + +var BuildMetadataOptions = []string{OriginAnnotations, TransformerAnnotations, ManagedByLabelOption} + +// Kustomization holds the information needed to generate customized k8s api resources.
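+//
+// A minimal kustomization.yaml serializing this struct might look like
+// (illustrative; the file paths are hypothetical):
+//
+//	apiVersion: kustomize.config.k8s.io/v1beta1
+//	kind: Kustomization
+//	namePrefix: dev-
+//	resources:
+//	- deployment.yaml
+//	- service.yaml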
+type Kustomization struct { + TypeMeta `json:",inline" yaml:",inline"` + + // MetaData is a pointer to avoid marshalling empty struct + MetaData *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` + + // OpenAPI contains information about what kubernetes schema to use. + OpenAPI map[string]string `json:"openapi,omitempty" yaml:"openapi,omitempty"` + + // + // Operators - what kustomize can do. + // + + // NamePrefix will prefix the names of all resources mentioned in the kustomization + // file including generated configmaps and secrets. + NamePrefix string `json:"namePrefix,omitempty" yaml:"namePrefix,omitempty"` + + // NameSuffix will suffix the names of all resources mentioned in the kustomization + // file including generated configmaps and secrets. + NameSuffix string `json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"` + + // Namespace to add to all objects. + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + + // CommonLabels to add to all objects and selectors. + CommonLabels map[string]string `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"` + + // Labels to add to all objects but not selectors. + Labels []Label `json:"labels,omitempty" yaml:"labels,omitempty"` + + // CommonAnnotations to add to all objects. + CommonAnnotations map[string]string `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"` + + // Deprecated: Use the Patches field instead, which provides a superset of the functionality of PatchesStrategicMerge. + // PatchesStrategicMerge specifies the relative path to a file + // containing a strategic merge patch. Format documented at + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md + // URLs and globs are not supported. + PatchesStrategicMerge []PatchStrategicMerge `json:"patchesStrategicMerge,omitempty" yaml:"patchesStrategicMerge,omitempty"` + + // Deprecated: Use the Patches field instead, which provides a superset of the functionality of JSONPatches. + // JSONPatches is a list of JSONPatch for applying JSON patch. + // Format documented at https://tools.ietf.org/html/rfc6902 + // and http://jsonpatch.com + PatchesJson6902 []Patch `json:"patchesJson6902,omitempty" yaml:"patchesJson6902,omitempty"` + + // Patches is a list of patches, where each one can be either a + // Strategic Merge Patch or a JSON patch. + // Each patch can be applied to multiple target objects. + Patches []Patch `json:"patches,omitempty" yaml:"patches,omitempty"` + + // Images is a list of (image name, new name, new tag or digest) + // for changing image names, tags or digests. This can also be achieved with a + // patch, but this operator is simpler to specify. + Images []Image `json:"images,omitempty" yaml:"images,omitempty"` + + // Deprecated: Use the Images field instead. + ImageTags []Image `json:"imageTags,omitempty" yaml:"imageTags,omitempty"` + + // Replacements is a list of replacements, which will copy nodes from a + // specified source to N specified targets. + Replacements []ReplacementField `json:"replacements,omitempty" yaml:"replacements,omitempty"` + + // Replicas is a list of {resourcename, count} that allows for simpler replica + // specification. This can also be done with a patch. + Replicas []Replica `json:"replicas,omitempty" yaml:"replicas,omitempty"` + + // Deprecated: Vars will be removed in future release. Migrate to Replacements instead. 
+ // Vars allow things modified by kustomize to be injected into a + // kubernetes object specification. A var is a name (e.g. FOO) associated + // with a field in a specific resource instance. The field must + // contain a value of type string/bool/int/float, and defaults to the name field + // of the instance. Any appearance of "$(FOO)" in the object + // spec will be replaced at kustomize build time, after the final + // value of the specified field has been determined. + Vars []Var `json:"vars,omitempty" yaml:"vars,omitempty"` + + // SortOptions change the order that kustomize outputs resources. + SortOptions *SortOptions `json:"sortOptions,omitempty" yaml:"sortOptions,omitempty"` + + // + // Operands - what kustomize operates on. + // + + // Resources specifies relative paths to files holding YAML representations + // of kubernetes API objects, or specifications of other kustomizations + // via relative paths, absolute paths, or URLs. + Resources []string `json:"resources,omitempty" yaml:"resources,omitempty"` + + // Components specifies relative paths to specifications of other Components + // via relative paths, absolute paths, or URLs. + Components []string `json:"components,omitempty" yaml:"components,omitempty"` + + // Crds specifies relative paths to Custom Resource Definition files. + // This allows custom resources to be recognized as operands, making + // it possible to add them to the Resources list. + // CRDs themselves are not modified. + Crds []string `json:"crds,omitempty" yaml:"crds,omitempty"` + + // Deprecated: Anything that would have been specified here should be specified in the Resources field instead. + Bases []string `json:"bases,omitempty" yaml:"bases,omitempty"` + + // + // Generators (operators that create operands) + // + + // ConfigMapGenerator is a list of configmaps to generate from + // local data (one configMap per list item). + // The resulting resource is a normal operand, subject to + // name prefixing, patching, etc. By default, the name of + // the map will have a suffix hash generated from its contents. + ConfigMapGenerator []ConfigMapArgs `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty"` + + // SecretGenerator is a list of secrets to generate from + // local data (one secret per list item). + // The resulting resource is a normal operand, subject to + // name prefixing, patching, etc. By default, the name of + // the map will have a suffix hash generated from its contents. + SecretGenerator []SecretArgs `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty"` + + // HelmGlobals contains helm configuration that isn't chart specific. + HelmGlobals *HelmGlobals `json:"helmGlobals,omitempty" yaml:"helmGlobals,omitempty"` + + // HelmCharts is a list of helm chart configuration instances. + HelmCharts []HelmChart `json:"helmCharts,omitempty" yaml:"helmCharts,omitempty"` + + // HelmChartInflationGenerator is a list of helm chart configurations. + // Deprecated. Auto-converted to HelmGlobals and HelmCharts. + HelmChartInflationGenerator []HelmChartArgs `json:"helmChartInflationGenerator,omitempty" yaml:"helmChartInflationGenerator,omitempty"` + + // GeneratorOptions modify behavior of all ConfigMap and Secret generators. 
+ GeneratorOptions *GeneratorOptions `json:"generatorOptions,omitempty" yaml:"generatorOptions,omitempty"` + + // Configurations is a list of transformer configuration files + Configurations []string `json:"configurations,omitempty" yaml:"configurations,omitempty"` + + // Generators is a list of files containing custom generators + Generators []string `json:"generators,omitempty" yaml:"generators,omitempty"` + + // Transformers is a list of files containing transformers + Transformers []string `json:"transformers,omitempty" yaml:"transformers,omitempty"` + + // Validators is a list of files containing validators + Validators []string `json:"validators,omitempty" yaml:"validators,omitempty"` + + // BuildMetadata is a list of strings used to toggle different build options + BuildMetadata []string `json:"buildMetadata,omitempty" yaml:"buildMetadata,omitempty"` +} + +const ( + deprecatedWarningToRunEditFix = "Run 'kustomize edit fix' to update your Kustomization automatically." + deprecatedWarningToRunEditFixExperimential = "[EXPERIMENTAL] Run 'kustomize edit fix' to update your Kustomization automatically." + deprecatedBaseWarningMessage = "# Warning: 'bases' is deprecated. Please use 'resources' instead." + " " + deprecatedWarningToRunEditFix + deprecatedImageTagsWarningMessage = "# Warning: 'imageTags' is deprecated. Please use 'images' instead." + " " + deprecatedWarningToRunEditFix + deprecatedPatchesJson6902Message = "# Warning: 'patchesJson6902' is deprecated. Please use 'patches' instead." + " " + deprecatedWarningToRunEditFix + deprecatedPatchesStrategicMergeMessage = "# Warning: 'patchesStrategicMerge' is deprecated. Please use 'patches' instead." + " " + deprecatedWarningToRunEditFix + deprecatedVarsMessage = "# Warning: 'vars' is deprecated. Please use 'replacements' instead." + " " + deprecatedWarningToRunEditFixExperimential +) + +// CheckDeprecatedFields checks whether any deprecated fields are used. +func (k *Kustomization) CheckDeprecatedFields() *[]string { + var warningMessages []string + if k.Bases != nil { + warningMessages = append(warningMessages, deprecatedBaseWarningMessage) + } + if k.ImageTags != nil { + warningMessages = append(warningMessages, deprecatedImageTagsWarningMessage) + } + if k.PatchesJson6902 != nil { + warningMessages = append(warningMessages, deprecatedPatchesJson6902Message) + } + if k.PatchesStrategicMerge != nil { + warningMessages = append(warningMessages, deprecatedPatchesStrategicMergeMessage) + } + if k.Vars != nil { + warningMessages = append(warningMessages, deprecatedVarsMessage) + } + return &warningMessages +} + +// FixKustomization fixes things +// like empty fields that should not be empty, or +// moving content of deprecated fields to newer +// fields. +func (k *Kustomization) FixKustomization() { + if k.Kind == "" { + k.Kind = KustomizationKind + } + if k.APIVersion == "" { + if k.Kind == ComponentKind { + k.APIVersion = ComponentVersion + } else { + k.APIVersion = KustomizationVersion + } + } + + // 'bases' field was deprecated in favor of the 'resources' field. + k.Resources = append(k.Resources, k.Bases...) + k.Bases = nil + + // 'imageTags' field was deprecated in favor of the 'images' field. + k.Images = append(k.Images, k.ImageTags...)
+ k.ImageTags = nil + + for i, g := range k.ConfigMapGenerator { + if g.EnvSource != "" { + k.ConfigMapGenerator[i].EnvSources = + append(g.EnvSources, g.EnvSource) //nolint:gocritic + k.ConfigMapGenerator[i].EnvSource = "" + } + } + for i, g := range k.SecretGenerator { + if g.EnvSource != "" { + k.SecretGenerator[i].EnvSources = + append(g.EnvSources, g.EnvSource) //nolint:gocritic + k.SecretGenerator[i].EnvSource = "" + } + } + charts, globals := SplitHelmParameters(k.HelmChartInflationGenerator) + if k.HelmGlobals == nil { + if globals.ChartHome != "" || globals.ConfigHome != "" { + k.HelmGlobals = &globals + } + } + k.HelmCharts = append(k.HelmCharts, charts...) + // Wipe it for the fix command. + k.HelmChartInflationGenerator = nil +} + +// FixKustomizationPreMarshalling fixes things +// that should occur after the kustomization file +// has been processed. +func (k *Kustomization) FixKustomizationPreMarshalling(fSys filesys.FileSystem) error { + // PatchesJson6902 should be under the Patches field. + k.Patches = append(k.Patches, k.PatchesJson6902...) + k.PatchesJson6902 = nil + + if k.PatchesStrategicMerge != nil { + for _, patchStrategicMerge := range k.PatchesStrategicMerge { + // check whether this patch is a file path or an inline patch. + if _, err := fSys.ReadFile(string(patchStrategicMerge)); err == nil { + // path patch + k.Patches = append(k.Patches, Patch{Path: string(patchStrategicMerge)}) + } else { + // inline string patch + k.Patches = append(k.Patches, Patch{Patch: string(patchStrategicMerge)}) + } + } + k.PatchesStrategicMerge = nil + } + + // this fix is not in FixKustomizationPostUnmarshalling because + // it will break some commands like `create` and `add`. Those + // commands depend on 'commonLabels' field + if cl := labelFromCommonLabels(k.CommonLabels); cl != nil { + // check conflicts between commonLabels and labels + for _, l := range k.Labels { + for k := range l.Pairs { + if _, exist := cl.Pairs[k]; exist { + return fmt.Errorf("label name '%s' exists in both commonLabels and labels", k) + } + } + } + k.Labels = append(k.Labels, *cl) + k.CommonLabels = nil + } + + return nil +} + +func (k *Kustomization) CheckEmpty() error { + // generate empty Kustomization + emptyKustomization := &Kustomization{} + + // k.TypeMeta is metadata. It isn't relevant to whether the kustomization is empty. + emptyKustomization.TypeMeta = k.TypeMeta + + if reflect.DeepEqual(k, emptyKustomization) { + return fmt.Errorf("kustomization.yaml is empty") + } + + return nil +} + +func (k *Kustomization) EnforceFields() []string { + var errs []string + if k.Kind != "" && k.Kind != KustomizationKind && k.Kind != ComponentKind { + errs = append(errs, "kind should be "+KustomizationKind+" or "+ComponentKind) + } + requiredVersion := KustomizationVersion + if k.Kind == ComponentKind { + requiredVersion = ComponentVersion + } + if k.APIVersion != "" && k.APIVersion != requiredVersion { + errs = append(errs, "apiVersion for "+k.Kind+" should be "+requiredVersion) + } + return errs +} + +// Unmarshal replaces k with the content of the YAML input y. +func (k *Kustomization) Unmarshal(y []byte) error { + // TODO: switch to strict decoding to catch duplicate keys. + // We can't do so until there is a yaml decoder that supports anchors AND case-insensitive keys.
+ // See https://github.com/kubernetes-sigs/kustomize/issues/5061 + j, err := yaml.YAMLToJSON(y) + if err != nil { + return errors.WrapPrefixf(err, "invalid Kustomization") + } + dec := json.NewDecoder(bytes.NewReader(j)) + dec.DisallowUnknownFields() + var nk Kustomization + err = dec.Decode(&nk) + if err != nil { + return errors.WrapPrefixf(err, "invalid Kustomization") + } + *k = nk + return nil +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/kvpairsources.go b/vendor/sigs.k8s.io/kustomize/api/types/kvpairsources.go new file mode 100644 index 000000000..9898defad --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/kvpairsources.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// KvPairSources defines places to obtain key value pairs. +type KvPairSources struct { + // LiteralSources is a list of literal + // pair sources. Each literal source should + // be a key and literal value, e.g. `key=value` + LiteralSources []string `json:"literals,omitempty" yaml:"literals,omitempty"` + + // FileSources is a list of file "sources" to + // use in creating a list of key, value pairs. + // A source takes the form: [{key}=]{path} + // If the "key=" part is missing, the key is the + // path's basename. If the "key=" part is present, + // it becomes the key (replacing the basename). + // In either case, the value is the file contents. + // Specifying a directory will iterate each named + // file in the directory whose basename is a + // valid configmap key. + FileSources []string `json:"files,omitempty" yaml:"files,omitempty"` + + // EnvSources is a list of file paths. + // The contents of each file should be one + // key=value pair per line, e.g. a Docker + // or npm ".env" file or a ".ini" file + // (wikipedia.org/wiki/INI_file) + EnvSources []string `json:"envs,omitempty" yaml:"envs,omitempty"` + + // Older, singular form of EnvSources. + // On edits (e.g. `kustomize fix`) this is merged into the plural form + // for consistency with LiteralSources and FileSources. + EnvSource string `json:"env,omitempty" yaml:"env,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/labels.go b/vendor/sigs.k8s.io/kustomize/api/types/labels.go new file mode 100644 index 000000000..05ba890f9 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/labels.go @@ -0,0 +1,30 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +type Label struct { + // Pairs contains the key-value pairs for labels to add + Pairs map[string]string `json:"pairs,omitempty" yaml:"pairs,omitempty"` + // IncludeSelectors indicates whether the transformer should include the + // fieldSpecs for selectors. Custom fieldSpecs specified by + // FieldSpecs will be merged with builtin fieldSpecs if this + // is true. + IncludeSelectors bool `json:"includeSelectors,omitempty" yaml:"includeSelectors,omitempty"` + // IncludeTemplates indicates whether the transformer should include the + // spec/template/metadata fieldSpec. Custom fieldSpecs specified by + // FieldSpecs will be merged with spec/template/metadata fieldSpec if this + // is true. If IncludeSelectors is true, IncludeTemplates is not needed.
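+ //
+ // For example (illustrative), the following kustomization snippet adds
+ // the label to object metadata, selectors and pod templates:
+ //
+ //	labels:
+ //	- pairs:
+ //	    app: demo
+ //	  includeSelectors: true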
+ IncludeTemplates bool `json:"includeTemplates,omitempty" yaml:"includeTemplates,omitempty"` + FieldSpecs []FieldSpec `json:"fields,omitempty" yaml:"fields,omitempty"` +} + +func labelFromCommonLabels(commonLabels map[string]string) *Label { + if len(commonLabels) == 0 { + return nil + } + return &Label{ + Pairs: commonLabels, + IncludeSelectors: true, + } +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions.go b/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions.go new file mode 100644 index 000000000..6617abdac --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions.go @@ -0,0 +1,24 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// Restrictions on what things can be referred to +// in a kustomization file. +// +//go:generate stringer -type=LoadRestrictions +type LoadRestrictions int + +const ( + LoadRestrictionsUnknown LoadRestrictions = iota + + // Files referenced by a kustomization file must be in + // or under the directory holding the kustomization + // file itself. + LoadRestrictionsRootOnly + + // The kustomization file may specify absolute or + // relative paths to patch or resources files outside + // its own tree. + LoadRestrictionsNone +) diff --git a/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions_string.go b/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions_string.go new file mode 100644 index 000000000..d2355950b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=LoadRestrictions"; DO NOT EDIT. + +package types + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[LoadRestrictionsUnknown-0] + _ = x[LoadRestrictionsRootOnly-1] + _ = x[LoadRestrictionsNone-2] +} + +const _LoadRestrictions_name = "LoadRestrictionsUnknownLoadRestrictionsRootOnlyLoadRestrictionsNone" + +var _LoadRestrictions_index = [...]uint8{0, 23, 47, 67} + +func (i LoadRestrictions) String() string { + if i < 0 || i >= LoadRestrictions(len(_LoadRestrictions_index)-1) { + return "LoadRestrictions(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _LoadRestrictions_name[_LoadRestrictions_index[i]:_LoadRestrictions_index[i+1]] +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/objectmeta.go b/vendor/sigs.k8s.io/kustomize/api/types/objectmeta.go new file mode 100644 index 000000000..4f5d41f4a --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/objectmeta.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// ObjectMeta partially copies apimachinery/pkg/apis/meta/v1.ObjectMeta +// No need for a direct dependence; the fields are stable. +type ObjectMeta struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/pair.go b/vendor/sigs.k8s.io/kustomize/api/types/pair.go new file mode 100644 index 000000000..63cfb776e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/pair.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package types + +// Pair is a key value pair. +type Pair struct { + Key string + Value string +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/patch.go b/vendor/sigs.k8s.io/kustomize/api/types/patch.go new file mode 100644 index 000000000..5310a6e66 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/patch.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import "reflect" + +// Patch represents either a Strategic Merge Patch or a JSON patch +// and its targets. +// The content of the patch can either be from a file +// or from an inline string. +type Patch struct { + // Path is a relative file path to the patch file. + Path string `json:"path,omitempty" yaml:"path,omitempty"` + + // Patch is the content of a patch. + Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` + + // Target points to the resources that the patch is applied to + Target *Selector `json:"target,omitempty" yaml:"target,omitempty"` + + // Options is a list of options for the patch + Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"` +} + +// Equals returns true if p equals o. +func (p *Patch) Equals(o Patch) bool { + targetEqual := (p.Target == o.Target) || + (p.Target != nil && o.Target != nil && *p.Target == *o.Target) + return p.Path == o.Path && + p.Patch == o.Patch && + targetEqual && + reflect.DeepEqual(p.Options, o.Options) +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/patchstrategicmerge.go b/vendor/sigs.k8s.io/kustomize/api/types/patchstrategicmerge.go new file mode 100644 index 000000000..81a5ba456 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/patchstrategicmerge.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// PatchStrategicMerge represents a relative path to a +// strategic merge patch with the format +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md +type PatchStrategicMerge string diff --git a/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go b/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go new file mode 100644 index 000000000..741e5debc --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go @@ -0,0 +1,47 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +type HelmConfig struct { + Enabled bool + Command string +} + +// PluginConfig holds plugin configuration. +type PluginConfig struct { + // PluginRestrictions distinguishes plugin restrictions. + PluginRestrictions PluginRestrictions + + // BpLoadingOptions distinguishes builtin plugin behaviors. + BpLoadingOptions BuiltinPluginLoadingOptions + + // FnpLoadingOptions controls the behavior of function-based plugins. + FnpLoadingOptions FnPluginLoadingOptions + + // HelmConfig contains metadata needed for allowing and running helm. + HelmConfig HelmConfig +} + +func EnabledPluginConfig(b BuiltinPluginLoadingOptions) (pc *PluginConfig) { + pc = MakePluginConfig(PluginRestrictionsNone, b) + pc.FnpLoadingOptions.EnableStar = true + pc.HelmConfig.Enabled = true + // If this command is not on PATH, tests needing it should skip.
+ pc.HelmConfig.Command = "helmV3" + return +} + +func DisabledPluginConfig() *PluginConfig { + return MakePluginConfig( + PluginRestrictionsBuiltinsOnly, + BploUseStaticallyLinked) +} + +func MakePluginConfig(pr PluginRestrictions, + b BuiltinPluginLoadingOptions) *PluginConfig { + return &PluginConfig{ + PluginRestrictions: pr, + BpLoadingOptions: b, + } +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions.go b/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions.go new file mode 100644 index 000000000..88b03b3f5 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// Some plugin classes +// - builtin: plugins defined in the kustomize repo. +// May be freely used and re-configured. +// - local: plugins that aren't builtin but are +// locally defined (presumably by the user), meaning +// the kustomization refers to them via a relative +// file path, not a URL. +// - remote: require a build-time download to obtain. +// Unadvised, unless one controls the +// serving site. +// +//go:generate stringer -type=PluginRestrictions +type PluginRestrictions int + +const ( + PluginRestrictionsUnknown PluginRestrictions = iota + + // Non-builtin plugins completely disabled. + PluginRestrictionsBuiltinsOnly + + // No restrictions, do whatever you want. + PluginRestrictionsNone +) + +// BuiltinPluginLoadingOptions distinguish ways in which builtin plugins are used. +//go:generate stringer -type=BuiltinPluginLoadingOptions +type BuiltinPluginLoadingOptions int + +const ( + BploUndefined BuiltinPluginLoadingOptions = iota + + // Desired in production use for performance. + BploUseStaticallyLinked + + // Desired in testing and development cycles where it's undesirable + // to generate static code. + BploLoadFromFileSys +) + +// FnPluginLoadingOptions set way functions-based plugins are restricted +type FnPluginLoadingOptions struct { + // Allow to run executables + EnableExec bool + // Allow to run starlark + EnableStar bool + // Allow container access to network + Network bool + NetworkName string + // list of mounts + Mounts []string + // list of env variables to pass to fn + Env []string + // Run as uid and gid of the command executor + AsCurrentUser bool + // Run in this working directory + WorkingDir string +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions_string.go b/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions_string.go new file mode 100644 index 000000000..b9dba7dfc --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=PluginRestrictions"; DO NOT EDIT. + +package types + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[PluginRestrictionsUnknown-0] + _ = x[PluginRestrictionsBuiltinsOnly-1] + _ = x[PluginRestrictionsNone-2] +} + +const _PluginRestrictions_name = "PluginRestrictionsUnknownPluginRestrictionsBuiltinsOnlyPluginRestrictionsNone" + +var _PluginRestrictions_index = [...]uint8{0, 25, 55, 77} + +func (i PluginRestrictions) String() string { + if i < 0 || i >= PluginRestrictions(len(_PluginRestrictions_index)-1) { + return "PluginRestrictions(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _PluginRestrictions_name[_PluginRestrictions_index[i]:_PluginRestrictions_index[i+1]] +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/replacement.go b/vendor/sigs.k8s.io/kustomize/api/types/replacement.go new file mode 100644 index 000000000..cb4163429 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/replacement.go @@ -0,0 +1,87 @@ +// Copyright 2021 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "fmt" + "strings" + + "sigs.k8s.io/kustomize/kyaml/resid" +) + +const DefaultReplacementFieldPath = "metadata.name" + +// Replacement defines how to perform a substitution: +// where the value comes from and where it goes. +type Replacement struct { + // The source of the value. + Source *SourceSelector `json:"source,omitempty" yaml:"source,omitempty"` + + // The N fields to write the value to. + Targets []*TargetSelector `json:"targets,omitempty" yaml:"targets,omitempty"` +} + +// SourceSelector is the source of the replacement transformer. +type SourceSelector struct { + // A specific object to read it from. + resid.ResId `json:",inline,omitempty" yaml:",inline,omitempty"` + + // Structured field path expected in the allowed object. + FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"` + + // Used to refine the interpretation of the field. + Options *FieldOptions `json:"options,omitempty" yaml:"options,omitempty"` +} + +func (s *SourceSelector) String() string { + if s == nil { + return "" + } + result := []string{s.ResId.String()} + if s.FieldPath != "" { + result = append(result, s.FieldPath) + } + if opts := s.Options.String(); opts != "" { + result = append(result, opts) + } + return strings.Join(result, ":") +} + +// TargetSelector specifies fields in one or more objects. +type TargetSelector struct { + // Include objects that match this. + Select *Selector `json:"select" yaml:"select"` + + // From the allowed set, remove objects that match this. + Reject []*Selector `json:"reject,omitempty" yaml:"reject,omitempty"` + + // Structured field paths expected in each allowed object. + FieldPaths []string `json:"fieldPaths,omitempty" yaml:"fieldPaths,omitempty"` + + // Used to refine the interpretation of the field. + Options *FieldOptions `json:"options,omitempty" yaml:"options,omitempty"` +} + +// FieldOptions refine the interpretation of FieldPaths. +type FieldOptions struct { + // Used to split/join the field. + Delimiter string `json:"delimiter,omitempty" yaml:"delimiter,omitempty"` + + // Which position in the split to consider. + Index int `json:"index,omitempty" yaml:"index,omitempty"` + + // TODO (#3492): Implement use of this option + // None, Base64, URL, Hex, etc + Encoding string `json:"encoding,omitempty" yaml:"encoding,omitempty"` + + // If field missing, add it.
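+ //
+ // Illustrative sketch (editor's example, hypothetical values): with
+ //
+ //	options:
+ //	  delimiter: "/"
+ //	  index: 1
+ //
+ // a field value like "docker.io/nginx" is split on "/" and only the
+ // "nginx" element is read or written.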
+ Create bool `json:"create,omitempty" yaml:"create,omitempty"` +} + +func (fo *FieldOptions) String() string { + if fo == nil || (fo.Delimiter == "" && !fo.Create) { + return "" + } + return fmt.Sprintf("%s(%d), create=%t", fo.Delimiter, fo.Index, fo.Create) +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/replacementfield.go b/vendor/sigs.k8s.io/kustomize/api/types/replacementfield.go new file mode 100644 index 000000000..303e5c9e2 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/replacementfield.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +type ReplacementField struct { + Replacement `json:",inline,omitempty" yaml:",inline,omitempty"` + Path string `json:"path,omitempty" yaml:"path,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/replica.go b/vendor/sigs.k8s.io/kustomize/api/types/replica.go new file mode 100644 index 000000000..8267366b5 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/replica.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// Replica specifies a modification to a replica config. +// The number of replicas of a resource whose name matches will be set to count. +// This struct is used by the ReplicaCountTransform, and is meant to supplement +// the existing patch functionality with a simpler syntax for replica configuration. +type Replica struct { + // The name of the resource to change the replica count + Name string `json:"name,omitempty" yaml:"name,omitempty"` + + // The number of replicas required. + Count int64 `json:"count" yaml:"count"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/secretargs.go b/vendor/sigs.k8s.io/kustomize/api/types/secretargs.go new file mode 100644 index 000000000..62dbe26a7 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/secretargs.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// SecretArgs contains the metadata of how to generate a secret. +type SecretArgs struct { + // GeneratorArgs for the secret. + GeneratorArgs `json:",inline,omitempty" yaml:",inline,omitempty"` + + // Type of the secret. + // + // This is the same field as the secret type field in v1/Secret: + // It can be "Opaque" (default), or "kubernetes.io/tls". + // + // If type is "kubernetes.io/tls", then "literals" or "files" must have exactly two + // keys: "tls.key" and "tls.crt" + Type string `json:"type,omitempty" yaml:"type,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/selector.go b/vendor/sigs.k8s.io/kustomize/api/types/selector.go new file mode 100644 index 000000000..2c07f0b01 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/selector.go @@ -0,0 +1,124 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "fmt" + "regexp" + + "sigs.k8s.io/kustomize/kyaml/resid" +) + +// Selector specifies a set of resources. +// Any resource that matches intersection of all conditions +// is included in this set. +type Selector struct { + // ResId refers to a GVKN/Ns of a resource. + resid.ResId `json:",inline,omitempty" yaml:",inline,omitempty"` + + // AnnotationSelector is a string that follows the label selection expression + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api + // It matches with the resource annotations. 
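+ //
+ // For example (illustrative, hypothetical annotation):
+ //
+ //	annotationSelector: "kustomize.example.com/tier=backend"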
+	AnnotationSelector string `json:"annotationSelector,omitempty" yaml:"annotationSelector,omitempty"`
+
+	// LabelSelector is a string that follows the label selection expression
+	// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api
+	// It matches with the resource labels.
+	LabelSelector string `json:"labelSelector,omitempty" yaml:"labelSelector,omitempty"`
+}
+
+func (s *Selector) Copy() Selector {
+	return *s
+}
+
+func (s *Selector) String() string {
+	return fmt.Sprintf(
+		"%s:a=%s:l=%s", s.ResId, s.AnnotationSelector, s.LabelSelector)
+}
+
+// SelectorRegex is a Selector with regex in GVK
+// Any resource that matches intersection of all conditions
+// is included in this set.
+type SelectorRegex struct {
+	selector       *Selector
+	groupRegex     *regexp.Regexp
+	versionRegex   *regexp.Regexp
+	kindRegex      *regexp.Regexp
+	nameRegex      *regexp.Regexp
+	namespaceRegex *regexp.Regexp
+}
+
+// NewSelectorRegex returns a pointer to a new SelectorRegex
+// which uses the same condition as s.
+func NewSelectorRegex(s *Selector) (*SelectorRegex, error) {
+	sr := new(SelectorRegex)
+	var err error
+	sr.selector = s
+	sr.groupRegex, err = regexp.Compile(anchorRegex(s.Gvk.Group))
+	if err != nil {
+		return nil, err
+	}
+	sr.versionRegex, err = regexp.Compile(anchorRegex(s.Gvk.Version))
+	if err != nil {
+		return nil, err
+	}
+	sr.kindRegex, err = regexp.Compile(anchorRegex(s.Gvk.Kind))
+	if err != nil {
+		return nil, err
+	}
+	sr.nameRegex, err = regexp.Compile(anchorRegex(s.Name))
+	if err != nil {
+		return nil, err
+	}
+	sr.namespaceRegex, err = regexp.Compile(anchorRegex(s.Namespace))
+	if err != nil {
+		return nil, err
+	}
+	return sr, nil
+}
+
+func anchorRegex(pattern string) string {
+	if pattern == "" {
+		return pattern
+	}
+	return "^(?:" + pattern + ")$"
+}
+
+// MatchGvk returns true if gvk can be matched by s.
+func (s *SelectorRegex) MatchGvk(gvk resid.Gvk) bool {
+	if len(s.selector.Gvk.Group) > 0 {
+		if !s.groupRegex.MatchString(gvk.Group) {
+			return false
+		}
+	}
+	if len(s.selector.Gvk.Version) > 0 {
+		if !s.versionRegex.MatchString(gvk.Version) {
+			return false
+		}
+	}
+	if len(s.selector.Gvk.Kind) > 0 {
+		if !s.kindRegex.MatchString(gvk.Kind) {
+			return false
+		}
+	}
+	return true
+}
+
+// MatchName returns true if the name in the selector is
+// empty or n can be matched by the name in the selector.
+func (s *SelectorRegex) MatchName(n string) bool {
+	if s.selector.Name == "" {
+		return true
+	}
+	return s.nameRegex.MatchString(n)
+}
+
+// MatchNamespace returns true if the namespace in the selector is
+// empty or ns can be matched by the namespace in the selector.
+func (s *SelectorRegex) MatchNamespace(ns string) bool {
+	if s.selector.Namespace == "" {
+		return true
+	}
+	return s.namespaceRegex.MatchString(ns)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/types/sortoptions.go b/vendor/sigs.k8s.io/kustomize/api/types/sortoptions.go
new file mode 100644
index 000000000..64dd48808
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/api/types/sortoptions.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package types
+
+// SortOptions defines the order that kustomize outputs resources.
+type SortOptions struct {
+	// Order selects the ordering strategy.
+	Order SortOrder `json:"order,omitempty" yaml:"order,omitempty"`
+	// LegacySortOptions tweaks the sorting for the "legacy" sort ordering
+	// strategy.
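+	// A sketch of the assumed kustomization.yaml shape (illustrative; the
+	// kind names are hypothetical):
+	//
+	//	sortOptions:
+	//	  order: legacy
+	//	  legacySortOptions:
+	//	    orderFirst: ["Namespace"]
+	//	    orderLast: ["Deployment"]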
+ LegacySortOptions *LegacySortOptions `json:"legacySortOptions,omitempty" yaml:"legacySortOptions,omitempty"` +} + +// SortOrder defines different ordering strategies. +type SortOrder string + +const LegacySortOrder SortOrder = "legacy" +const FIFOSortOrder SortOrder = "fifo" + +// LegacySortOptions define various options for tweaking the "legacy" ordering +// strategy. +type LegacySortOptions struct { + // OrderFirst selects the resource kinds to order first. + OrderFirst []string `json:"orderFirst" yaml:"orderFirst"` + // OrderLast selects the resource kinds to order last. + OrderLast []string `json:"orderLast" yaml:"orderLast"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/typemeta.go b/vendor/sigs.k8s.io/kustomize/api/types/typemeta.go new file mode 100644 index 000000000..0ddafd3d8 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/typemeta.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta +// No need for a direct dependence; the fields are stable. +type TypeMeta struct { + Kind string `json:"kind,omitempty" yaml:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kustomize/api/types/var.go b/vendor/sigs.k8s.io/kustomize/api/types/var.go new file mode 100644 index 000000000..0ca5579c0 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/api/types/var.go @@ -0,0 +1,211 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "sigs.k8s.io/kustomize/kyaml/resid" +) + +// Var represents a variable whose value will be sourced +// from a field in a Kubernetes object. +type Var struct { + // Value of identifier name e.g. FOO used in container args, annotations + // Appears in pod template as $(FOO) + Name string `json:"name" yaml:"name"` + + // ObjRef must refer to a Kubernetes resource under the + // purview of this kustomization. ObjRef should use the + // raw name of the object (the name specified in its YAML, + // before addition of a namePrefix and a nameSuffix). + ObjRef Target `json:"objref" yaml:"objref"` + + // FieldRef refers to the field of the object referred to by + // ObjRef whose value will be extracted for use in + // replacing $(FOO). + // If unspecified, this defaults to fieldPath: $defaultFieldPath + FieldRef FieldSelector `json:"fieldref,omitempty" yaml:"fieldref,omitempty"` +} + +// Target refers to a kubernetes object by Group, Version, Kind and Name +// gvk.Gvk contains Group, Version and Kind +// APIVersion is added to keep the backward compatibility of using ObjectReference +// for Var.ObjRef +type Target struct { + APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` + resid.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"` + Name string `json:"name" yaml:"name"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` +} + +// GVK returns the Gvk object in Target +func (t *Target) GVK() resid.Gvk { + if t.APIVersion == "" { + return t.Gvk + } + versions := strings.Split(t.APIVersion, "/") + if len(versions) == 2 { + t.Group = versions[0] + t.Version = versions[1] + } + if len(versions) == 1 { + t.Version = versions[0] + } + return t.Gvk +} + +// FieldSelector contains the fieldPath to an object field. 
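+// (For example, the default used by Var.Defaulting below is
+// fieldPath: metadata.name, i.e. DefaultReplacementFieldPath.)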
+// This struct is added to keep the backward compatibility of using ObjectFieldSelector
+// for Var.FieldRef
+type FieldSelector struct {
+	FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"`
+}
+
+// Defaulting sets the field reference used by default.
+func (v *Var) Defaulting() {
+	if v.FieldRef.FieldPath == "" {
+		v.FieldRef.FieldPath = DefaultReplacementFieldPath
+	}
+	v.ObjRef.GVK()
+}
+
+// DeepEqual returns true if the two vars are equal.
+// Note 1: The objects are unchanged by the comparison.
+// Note 2: The FieldPath should be normalized before doing
+// the DeepEqual; spec.a[b] is supposed to be the same
+// as spec.a.b
+func (v Var) DeepEqual(other Var) bool {
+	v.Defaulting()
+	other.Defaulting()
+	return reflect.DeepEqual(v, other)
+}
+
+// VarSet is a set of Vars where no var.Name is repeated.
+type VarSet struct {
+	set map[string]Var
+}
+
+// NewVarSet returns an initialized VarSet
+func NewVarSet() VarSet {
+	return VarSet{set: map[string]Var{}}
+}
+
+// AsSlice returns the vars as a slice.
+func (vs *VarSet) AsSlice() []Var {
+	s := make([]Var, len(vs.set))
+	i := 0
+	for _, v := range vs.set {
+		s[i] = v
+		i++
+	}
+	sort.Sort(byName(s))
+	return s
+}
+
+// Copy returns a copy of the var set.
+func (vs *VarSet) Copy() VarSet {
+	newSet := make(map[string]Var, len(vs.set))
+	for k, v := range vs.set {
+		newSet[k] = v
+	}
+	return VarSet{set: newSet}
+}
+
+// MergeSet absorbs other vars with error on name collision.
+func (vs *VarSet) MergeSet(incoming VarSet) error {
+	for _, incomingVar := range incoming.set {
+		if err := vs.Merge(incomingVar); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// MergeSlice absorbs a Var slice with error on name collision.
+// Empty fields in incoming vars are defaulted.
+func (vs *VarSet) MergeSlice(incoming []Var) error {
+	for _, v := range incoming {
+		if err := vs.Merge(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Merge absorbs another Var with error on name collision.
+// Empty fields in the incoming Var are defaulted.
+func (vs *VarSet) Merge(v Var) error {
+	if vs.Contains(v) {
+		return fmt.Errorf(
+			"var '%s' already encountered", v.Name)
+	}
+	v.Defaulting()
+	vs.set[v.Name] = v
+	return nil
+}
+
+// AbsorbSet absorbs other vars with error on (name,value) collision.
+func (vs *VarSet) AbsorbSet(incoming VarSet) error {
+	for _, v := range incoming.set {
+		if err := vs.Absorb(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AbsorbSlice absorbs a Var slice with error on (name,value) collision.
+// Empty fields in incoming vars are defaulted.
+func (vs *VarSet) AbsorbSlice(incoming []Var) error {
+	for _, v := range incoming {
+		if err := vs.Absorb(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Absorb absorbs another Var with error on (name,value) collision.
+// Empty fields in the incoming Var are defaulted.
+func (vs *VarSet) Absorb(v Var) error {
+	conflicting := vs.Get(v.Name)
+	if conflicting == nil {
+		// no conflict. The var is valid.
+		v.Defaulting()
+		vs.set[v.Name] = v
+		return nil
+	}
+
+	if !reflect.DeepEqual(v, *conflicting) {
+		// two vars with the same name are pointing at two
+		// different resources.
+		return fmt.Errorf(
+			"var '%s' already encountered", v.Name)
+	}
+	return nil
+}
+
+// Contains is true if the set has the other var.
+func (vs *VarSet) Contains(other Var) bool {
+	return vs.Get(other.Name) != nil
+}
+
+// Get returns the var with the given name, else nil.
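+// Note that the returned *Var points at a copy of the stored value, so
+// mutating it does not change the entry held in the set.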
+func (vs *VarSet) Get(name string) *Var { + if v, found := vs.set[name]; found { + return &v + } + return nil +} + +// byName is a sort interface which sorts Vars by name alphabetically +type byName []Var + +func (v byName) Len() int { return len(v) } +func (v byName) Swap(i, j int) { v[i], v[j] = v[j], v[i] } +func (v byName) Less(i, j int) bool { return v[i].Name < v[j].Name } diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/LICENSE b/vendor/sigs.k8s.io/kustomize/kyaml/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/comments/comments.go b/vendor/sigs.k8s.io/kustomize/kyaml/comments/comments.go new file mode 100644 index 000000000..97334d0f0 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/comments/comments.go @@ -0,0 +1,83 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package comments + +import ( + "sigs.k8s.io/kustomize/kyaml/openapi" + "sigs.k8s.io/kustomize/kyaml/yaml" + "sigs.k8s.io/kustomize/kyaml/yaml/walk" +) + +// CopyComments recursively copies the comments on fields in from to fields in to +func CopyComments(from, to *yaml.RNode) error { + // from node should not be modified, it should be just used as a reference + fromCopy := from.Copy() + copyFieldComments(fromCopy, to) + // walk the fields copying comments + _, err := walk.Walker{ + Sources: []*yaml.RNode{fromCopy, to}, + Visitor: &copier{}, + VisitKeysAsScalars: true}.Walk() + return err +} + +// copier implements walk.Visitor, and copies comments to fields shared between 2 instances +// of a resource +type copier struct{} + +func (c *copier) VisitMap(s walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { + copyFieldComments(s.Dest(), s.Origin()) + return s.Dest(), nil +} + +func (c *copier) VisitScalar(s walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { + to := s.Origin() + // TODO: File a bug with upstream yaml to handle comments for FoldedStyle scalar nodes + // Hack: convert FoldedStyle scalar node to DoubleQuotedStyle as the line comments are + // being serialized without space + // https://github.com/GoogleContainerTools/kpt/issues/766 + if to != nil && to.Document().Style == yaml.FoldedStyle { + to.Document().Style = yaml.DoubleQuotedStyle + } + + copyFieldComments(s.Dest(), to) + return s.Dest(), nil +} + +func (c *copier) VisitList(s walk.Sources, _ *openapi.ResourceSchema, _ walk.ListKind) ( + *yaml.RNode, error) { + copyFieldComments(s.Dest(), s.Origin()) + destItems := s.Dest().Content() + originItems := s.Origin().Content() + + for i := 0; i < len(destItems) && i < len(originItems); i++ { + dest := destItems[i] + origin := originItems[i] + + if dest.Value == origin.Value { + // We copy the comments recursively on each node in the list. 
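+			// (Items are matched by position and only when their scalar values
+			// agree, so comments only transfer between entries that sit at the
+			// same index with the same value.)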
+ if err := CopyComments(yaml.NewRNode(dest), yaml.NewRNode(origin)); err != nil { + return nil, err + } + } + } + + return s.Dest(), nil +} + +// copyFieldComments copies the comment from one field to another +func copyFieldComments(from, to *yaml.RNode) { + if from == nil || to == nil { + return + } + if to.Document().LineComment == "" { + to.Document().LineComment = from.Document().LineComment + } + if to.Document().HeadComment == "" { + to.Document().HeadComment = from.Document().HeadComment + } + if to.Document().FootComment == "" { + to.Document().FootComment = from.Document().FootComment + } +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/errors/errors.go b/vendor/sigs.k8s.io/kustomize/kyaml/errors/errors.go new file mode 100644 index 000000000..c292d7c68 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/errors/errors.go @@ -0,0 +1,50 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package errors provides libraries for working with the go-errors/errors library. +package errors + +import ( + "fmt" + + goerrors "github.com/go-errors/errors" +) + +// Wrap returns err wrapped in a go-error. If err is nil, returns nil. +func Wrap(err interface{}) error { + if err == nil { + return nil + } + return goerrors.Wrap(err, 1) +} + +// WrapPrefixf returns err wrapped in a go-error with a message prefix. If err is nil, returns nil. +func WrapPrefixf(err interface{}, msg string, args ...interface{}) error { + if err == nil { + return nil + } + return goerrors.WrapPrefix(err, fmt.Sprintf(msg, args...), 1) +} + +// Errorf returns a new go-error. +func Errorf(msg string, args ...interface{}) error { + return goerrors.Wrap(fmt.Errorf(msg, args...), 1) +} + +// As finds the targeted error in any wrapped error. +func As(err error, target interface{}) bool { + return goerrors.As(err, target) +} + +// Is detects whether the error is equal to a given error. +func Is(err error, target error) bool { + return goerrors.Is(err, target) +} + +// GetStack returns a stack trace for the error if it has one +func GetStack(err error) string { + if e, ok := err.(*goerrors.Error); ok { + return string(e.Stack()) + } + return "" +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go b/vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go new file mode 100644 index 000000000..c946577cc --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package ext + +// IgnoreFileName returns the name for ignore files in +// packages. It can be overridden by tools using this library. +var IgnoreFileName = func() string { + return ".krmignore" +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fieldmeta/fieldmeta.go b/vendor/sigs.k8s.io/kustomize/kyaml/fieldmeta/fieldmeta.go new file mode 100644 index 000000000..c537c3372 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fieldmeta/fieldmeta.go @@ -0,0 +1,275 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package fieldmeta + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/openapi" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// FieldMeta contains metadata that may be attached to fields as comments +type FieldMeta struct { + Schema spec.Schema + + Extensions XKustomize + + SettersSchema *spec.Schema +} + +type XKustomize struct { + SetBy string `yaml:"setBy,omitempty" json:"setBy,omitempty"` + PartialFieldSetters []PartialFieldSetter `yaml:"partialSetters,omitempty" json:"partialSetters,omitempty"` + FieldSetter *PartialFieldSetter `yaml:"setter,omitempty" json:"setter,omitempty"` +} + +// PartialFieldSetter defines how to set part of a field rather than the full field +// value. e.g. the tag part of an image field +type PartialFieldSetter struct { + // Name is the name of this setter. + Name string `yaml:"name" json:"name"` + + // Value is the current value that has been set. + Value string `yaml:"value" json:"value"` +} + +// IsEmpty returns true if the FieldMeta has any empty Schema +func (fm *FieldMeta) IsEmpty() bool { + if fm == nil { + return true + } + return reflect.DeepEqual(fm.Schema, spec.Schema{}) +} + +// Read reads the FieldMeta from a node +func (fm *FieldMeta) Read(n *yaml.RNode) error { + // check for metadata on head and line comments + comments := []string{n.YNode().LineComment, n.YNode().HeadComment} + for _, c := range comments { + if c == "" { + continue + } + c := strings.TrimLeft(c, "#") + + // check for new short hand notation or fall back to openAPI ref format + if !fm.processShortHand(c) { + // if it doesn't Unmarshal that is fine, it means there is no metadata + // other comments are valid, they just don't parse + // TODO: consider more sophisticated parsing techniques similar to what is used + // for go struct tags. 
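+			// (For example, a line comment of the form
+			//   # {"$openapi":"replicas"}
+			// is consumed by processShortHand above, while a full OpenAPI
+			// schema comment falls through to the UnmarshalJSON below.)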
+			if err := fm.Schema.UnmarshalJSON([]byte(c)); err != nil {
+				// note: don't return an error if the comment isn't a fieldmeta struct
+				return nil
+			}
+		}
+		fe := fm.Schema.VendorExtensible.Extensions["x-kustomize"]
+		if fe == nil {
+			return nil
+		}
+		b, err := json.Marshal(fe)
+		if err != nil {
+			return errors.Wrap(err)
+		}
+		return json.Unmarshal(b, &fm.Extensions)
+	}
+	return nil
+}
+
+// processShortHand parses the comment for a shorthand ref and loads its
+// schema into fm. It returns true on success; in every other case it returns
+// false rather than an error, since the comment might not be a setter ref.
+func (fm *FieldMeta) processShortHand(comment string) bool {
+	input := map[string]string{}
+	err := json.Unmarshal([]byte(comment), &input)
+	if err != nil {
+		return false
+	}
+	name := input[shortHandRef]
+	if name == "" {
+		return false
+	}
+
+	// check if a setter with the name exists, else check for a substitution;
+	// a setter and a substitution can't share a name in shorthand
+
+	setterRef, err := spec.NewRef(DefinitionsPrefix + SetterDefinitionPrefix + name)
+	if err != nil {
+		return false
+	}
+
+	setterRefBytes, err := setterRef.MarshalJSON()
+	if err != nil {
+		return false
+	}
+
+	if _, err := openapi.Resolve(&setterRef, fm.SettersSchema); err == nil {
+		setterErr := fm.Schema.UnmarshalJSON(setterRefBytes)
+		return setterErr == nil
+	}
+
+	substRef, err := spec.NewRef(DefinitionsPrefix + SubstitutionDefinitionPrefix + name)
+	if err != nil {
+		return false
+	}
+
+	substRefBytes, err := substRef.MarshalJSON()
+	if err != nil {
+		return false
+	}
+
+	if _, err := openapi.Resolve(&substRef, fm.SettersSchema); err == nil {
+		substErr := fm.Schema.UnmarshalJSON(substRefBytes)
+		return substErr == nil
+	}
+	return false
+}
+
+func isExtensionEmpty(x XKustomize) bool {
+	if x.FieldSetter != nil {
+		return false
+	}
+	if x.SetBy != "" {
+		return false
+	}
+	if len(x.PartialFieldSetters) > 0 {
+		return false
+	}
+	return true
+}
+
+// Write writes the FieldMeta to a node
+func (fm *FieldMeta) Write(n *yaml.RNode) error {
+	if !isExtensionEmpty(fm.Extensions) {
+		return fm.WriteV1Setters(n)
+	}
+
+	// Ref is removed when a setter is deleted, so the Ref string could be empty.
+	if fm.Schema.Ref.String() != "" {
+		// Ex: {"$ref":"#/definitions/io.k8s.cli.setters.replicas"} should be converted to
+		// {"$openAPI":"replicas"} and added to the line comment
+		ref := fm.Schema.Ref.String()
+		var shortHandRefValue string
+		switch {
+		case strings.HasPrefix(ref, DefinitionsPrefix+SetterDefinitionPrefix):
+			shortHandRefValue = strings.TrimPrefix(ref, DefinitionsPrefix+SetterDefinitionPrefix)
+		case strings.HasPrefix(ref, DefinitionsPrefix+SubstitutionDefinitionPrefix):
+			shortHandRefValue = strings.TrimPrefix(ref, DefinitionsPrefix+SubstitutionDefinitionPrefix)
+		default:
+			return fmt.Errorf("unexpected ref format: %s", ref)
+		}
+		n.YNode().LineComment = fmt.Sprintf(`{"%s":"%s"}`, shortHandRef,
+			shortHandRefValue)
+	} else {
+		n.YNode().LineComment = ""
+	}
+
+	return nil
+}
+
+// WriteV1Setters is the v1 setters way of writing setter definitions
+// TODO: pmarupaka - remove this method after migration
+func (fm *FieldMeta) WriteV1Setters(n *yaml.RNode) error {
+	fm.Schema.VendorExtensible.AddExtension("x-kustomize", fm.Extensions)
+	b, err := json.Marshal(fm.Schema)
+	if err != nil {
+		return errors.Wrap(err)
+	}
+	n.YNode().LineComment = string(b)
+	return nil
+}
+
+// FieldValueType defines the type of input to register
+type FieldValueType string
+
+const (
+	// String defines a string flag
+	String FieldValueType = "string"
+	// Bool defines a bool flag
+	Bool = "boolean"
+	// Int defines an int flag
+	Int = "integer"
+)
+
+func (it FieldValueType) String() string {
+	if it == "" {
+		return "string"
+	}
+	return string(it)
+}
+
+func (it FieldValueType) Validate(value string) error {
+	switch it {
+	case Int:
+		if _, err := strconv.Atoi(value); err != nil {
+			return errors.WrapPrefixf(err, "value must be an int")
+		}
+	case Bool:
+		if _, err := strconv.ParseBool(value); err != nil {
+			return errors.WrapPrefixf(err, "value must be a bool")
+		}
+	}
+	return nil
+}
+
+func (it FieldValueType) Tag() string {
+	switch it {
+	case String:
+		return yaml.NodeTagString
+	case Bool:
+		return yaml.NodeTagBool
+	case Int:
+		return yaml.NodeTagInt
+	}
+	return ""
+}
+
+// TagForValue returns the node tag for it when value parses as that type,
+// else the empty string.
+func (it FieldValueType) TagForValue(value string) string {
+	switch it {
+	case String:
+		return yaml.NodeTagString
+	case Bool:
+		// parse the value (not the type name) to verify it fits the tag
+		if _, err := strconv.ParseBool(value); err != nil {
+			return ""
+		}
+		return yaml.NodeTagBool
+	case Int:
+		// parse the value (not the type name) to verify it fits the tag
+		if _, err := strconv.ParseInt(value, 0, 32); err != nil {
+			return ""
+		}
+		return yaml.NodeTagInt
+	}
+	return ""
+}
+
+const (
+	// CLIDefinitionsPrefix is the prefix for cli definition keys.
+	CLIDefinitionsPrefix = "io.k8s.cli."
+
+	// SetterDefinitionPrefix is the prefix for setter definition keys.
+	SetterDefinitionPrefix = CLIDefinitionsPrefix + "setters."
+
+	// SubstitutionDefinitionPrefix is the prefix for substitution definition keys.
+	SubstitutionDefinitionPrefix = CLIDefinitionsPrefix + "substitutions."
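+
+	// (Illustrative example: these prefixes compose to keys such as
+	// "io.k8s.cli.setters.replicas", referenced in a schema as
+	// "#/definitions/io.k8s.cli.setters.replicas".)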
+ + // DefinitionsPrefix is the prefix used to reference definitions in the OpenAPI + DefinitionsPrefix = "#/definitions/" +) + +// shortHandRef is the shorthand reference to setters and substitutions +var shortHandRef = "$openapi" + +func SetShortHandRef(ref string) { + shortHandRef = ref +} + +func ShortHandRef() string { + return shortHandRef +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/confirmeddir.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/confirmeddir.go new file mode 100644 index 000000000..ea6eff90b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/confirmeddir.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filesys + +import ( + "os" + "path/filepath" + "strings" +) + +// ConfirmedDir is a clean, absolute, delinkified path +// that was confirmed to point to an existing directory. +type ConfirmedDir string + +// NewTmpConfirmedDir returns a temporary dir, else error. +// The directory is cleaned, no symlinks, etc. so it's +// returned as a ConfirmedDir. +func NewTmpConfirmedDir() (ConfirmedDir, error) { + n, err := os.MkdirTemp("", "kustomize-") + if err != nil { + return "", err + } + + // In MacOs `os.MkdirTemp` creates a directory + // with root in the `/var` folder, which is in turn + // a symlinked path to `/private/var`. + // Function `filepath.EvalSymlinks`is used to + // resolve the real absolute path. + deLinked, err := filepath.EvalSymlinks(n) + return ConfirmedDir(deLinked), err +} + +// HasPrefix returns true if the directory argument +// is a prefix of self (d) from the point of view of +// a file system. +// +// I.e., it's true if the argument equals or contains +// self (d) in a file path sense. +// +// HasPrefix emulates the semantics of strings.HasPrefix +// such that the following are true: +// +// strings.HasPrefix("foobar", "foobar") +// strings.HasPrefix("foobar", "foo") +// strings.HasPrefix("foobar", "") +// +// d := fSys.ConfirmDir("/foo/bar") +// d.HasPrefix("/foo/bar") +// d.HasPrefix("/foo") +// d.HasPrefix("/") +// +// Not contacting a file system here to check for +// actual path existence. +// +// This is tested on linux, but will have trouble +// on other operating systems. +// TODO(monopole) Refactor when #golang/go/18358 closes. +// See also: +// https://github.com/golang/go/issues/18358 +// https://github.com/golang/dep/issues/296 +// https://github.com/golang/dep/blob/master/internal/fs/fs.go#L33 +// https://codereview.appspot.com/5712045 +func (d ConfirmedDir) HasPrefix(path ConfirmedDir) bool { + if path.String() == string(filepath.Separator) || path == d { + return true + } + return strings.HasPrefix( + string(d), + string(path)+string(filepath.Separator)) +} + +func (d ConfirmedDir) Join(path string) string { + return filepath.Join(string(d), path) +} + +func (d ConfirmedDir) String() string { + return string(d) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/doc.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/doc.go new file mode 100644 index 000000000..bd3963441 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package filesys provides a file system abstraction, +// a subset of that provided by golang.org/pkg/os, +// with an on-disk and in-memory representation. 
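+//
+// A minimal usage sketch (illustrative; the path is hypothetical, the
+// functions are the ones defined in this package):
+//
+//	fSys := filesys.MakeFsInMemory()
+//	_ = fSys.WriteFile("/app/config.yaml", []byte("replicas: 3"))
+//	data, _ := fSys.ReadFile("/app/config.yaml") // "replicas: 3"
+//	_ = data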
+package filesys
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/file.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/file.go
new file mode 100644
index 000000000..5044c653e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/file.go
@@ -0,0 +1,15 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package filesys
+
+import (
+	"io"
+	"os"
+)
+
+// File groups the basic os.File methods.
+type File interface {
+	io.ReadWriteCloser
+	Stat() (os.FileInfo, error)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileinfo.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileinfo.go
new file mode 100644
index 000000000..57646d244
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileinfo.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package filesys
+
+import (
+	"os"
+	"time"
+)
+
+var _ os.FileInfo = fileInfo{}
+
+// fileInfo implements os.FileInfo for an fsNode instance.
+type fileInfo struct {
+	node *fsNode
+}
+
+// Name returns the name of the file
+func (fi fileInfo) Name() string { return fi.node.Name() }
+
+// Size returns the size of the file
+func (fi fileInfo) Size() int64 { return fi.node.Size() }
+
+// Mode returns the file mode
+func (fi fileInfo) Mode() os.FileMode { return 0777 }
+
+// ModTime returns a bogus time
+func (fi fileInfo) ModTime() time.Time { return time.Time{} }
+
+// IsDir returns true if it is a directory
+func (fi fileInfo) IsDir() bool { return fi.node.isNodeADir() }
+
+// Sys should return the underlying data source, but for now it returns nil
+func (fi fileInfo) Sys() interface{} { return nil }
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileondisk.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileondisk.go
new file mode 100644
index 000000000..8ed92d90e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileondisk.go
@@ -0,0 +1,27 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package filesys
+
+import (
+	"os"
+)
+
+var _ File = &fileOnDisk{}
+
+// fileOnDisk implements File using the local filesystem.
+type fileOnDisk struct {
+	file *os.File
+}
+
+// Close closes a file.
+func (f *fileOnDisk) Close() error { return f.file.Close() }
+
+// Read reads a file's content.
+func (f *fileOnDisk) Read(p []byte) (n int, err error) { return f.file.Read(p) }
+
+// Write writes bytes to a file
+func (f *fileOnDisk) Write(p []byte) (n int, err error) { return f.file.Write(p) }
+
+// Stat returns an interface which has all the information regarding the file.
+func (f *fileOnDisk) Stat() (os.FileInfo, error) { return f.file.Stat() }
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go
new file mode 100644
index 000000000..79dfc53bf
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go
@@ -0,0 +1,153 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package filesys
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"sigs.k8s.io/kustomize/kyaml/errors"
+)
+
+const (
+	Separator = string(filepath.Separator)
+	SelfDir   = "."
+	ParentDir = ".."
+)
+
+// FileSystem groups basic os filesystem methods.
+// It's supposed to be a functional subset of https://golang.org/pkg/os
+type FileSystem interface {
+
+	// Create a file.
+	Create(path string) (File, error)
+
+	// Mkdir makes a directory.
+ Mkdir(path string) error + + // MkDirAll makes a directory path, creating intervening directories. + MkdirAll(path string) error + + // RemoveAll removes path and any children it contains. + RemoveAll(path string) error + + // Open opens the named file for reading. + Open(path string) (File, error) + + // IsDir returns true if the path is a directory. + IsDir(path string) bool + + // ReadDir returns a list of files and directories within a directory. + ReadDir(path string) ([]string, error) + + // CleanedAbs converts the given path into a + // directory and a file name, where the directory + // is represented as a ConfirmedDir and all that implies. + // If the entire path is a directory, the file component + // is an empty string. + CleanedAbs(path string) (ConfirmedDir, string, error) + + // Exists is true if the path exists in the file system. + Exists(path string) bool + + // Glob returns the list of matching files, + // emulating https://golang.org/pkg/path/filepath/#Glob + Glob(pattern string) ([]string, error) + + // ReadFile returns the contents of the file at the given path. + ReadFile(path string) ([]byte, error) + + // WriteFile writes the data to a file at the given path, + // overwriting anything that's already there. + WriteFile(path string, data []byte) error + + // Walk walks the file system with the given WalkFunc. + Walk(path string, walkFn filepath.WalkFunc) error +} + +// ConfirmDir returns an error if the user-specified path is not an existing directory on fSys. +// Otherwise, ConfirmDir returns path, which can be relative, as a ConfirmedDir and all that implies. +func ConfirmDir(fSys FileSystem, path string) (ConfirmedDir, error) { + if path == "" { + return "", errors.Errorf("directory path cannot be empty") + } + + d, f, err := fSys.CleanedAbs(path) + if err != nil { + return "", errors.WrapPrefixf(err, "not a valid directory") + } + if f != "" { + return "", errors.WrapPrefixf(errors.Errorf("file is not directory"), fmt.Sprintf("'%s'", path)) + } + return d, nil +} + +// FileSystemOrOnDisk satisfies the FileSystem interface by forwarding +// all of its method calls to the given FileSystem whenever it's not nil. +// If it's nil, the call is forwarded to the OS's underlying file system. +type FileSystemOrOnDisk struct { + FileSystem FileSystem +} + +// Set sets the given FileSystem as the target for all the FileSystem method calls. 
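+// Passing nil restores the default of forwarding to the on-disk file
+// system.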
+func (fs *FileSystemOrOnDisk) Set(f FileSystem) { fs.FileSystem = f } + +func (fs FileSystemOrOnDisk) fs() FileSystem { + if fs.FileSystem != nil { + return fs.FileSystem + } + return MakeFsOnDisk() +} + +func (fs FileSystemOrOnDisk) Create(path string) (File, error) { + return fs.fs().Create(path) +} + +func (fs FileSystemOrOnDisk) Mkdir(path string) error { + return fs.fs().Mkdir(path) +} + +func (fs FileSystemOrOnDisk) MkdirAll(path string) error { + return fs.fs().MkdirAll(path) +} + +func (fs FileSystemOrOnDisk) RemoveAll(path string) error { + return fs.fs().RemoveAll(path) +} + +func (fs FileSystemOrOnDisk) Open(path string) (File, error) { + return fs.fs().Open(path) +} + +func (fs FileSystemOrOnDisk) IsDir(path string) bool { + return fs.fs().IsDir(path) +} + +func (fs FileSystemOrOnDisk) ReadDir(path string) ([]string, error) { + return fs.fs().ReadDir(path) +} + +func (fs FileSystemOrOnDisk) CleanedAbs(path string) (ConfirmedDir, string, error) { + return fs.fs().CleanedAbs(path) +} + +func (fs FileSystemOrOnDisk) Exists(path string) bool { + return fs.fs().Exists(path) +} + +func (fs FileSystemOrOnDisk) Glob(pattern string) ([]string, error) { + return fs.fs().Glob(pattern) +} + +func (fs FileSystemOrOnDisk) ReadFile(path string) ([]byte, error) { + return fs.fs().ReadFile(path) +} + +func (fs FileSystemOrOnDisk) WriteFile(path string, data []byte) error { + return fs.fs().WriteFile(path, data) +} + +func (fs FileSystemOrOnDisk) Walk(path string, walkFn filepath.WalkFunc) error { + return fs.fs().Walk(path, walkFn) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsnode.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsnode.go new file mode 100644 index 000000000..608b8e38a --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsnode.go @@ -0,0 +1,647 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filesys + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + + "sigs.k8s.io/kustomize/kyaml/errors" +) + +var _ File = &fsNode{} +var _ FileSystem = &fsNode{} + +// fsNode is either a file or a directory. +type fsNode struct { + // What node owns me? + parent *fsNode + + // Value to return as the Name() when the + // parent is nil. + nilParentName string + + // A directory mapping names to nodes. + // If dir is nil, then self node is a file. + // If dir is non-nil, then self node is a directory, + // albeit possibly an empty directory. + dir map[string]*fsNode + + // if this node is a file, this is the content. + content []byte + + // if offset is not nil the file is open and it tracks + // the current file offset. + offset *int +} + +// MakeEmptyDirInMemory returns an empty directory. +// The paths of nodes in this object will never +// report a leading Separator, meaning they +// aren't "absolute" in the sense defined by +// https://golang.org/pkg/path/filepath/#IsAbs. +func MakeEmptyDirInMemory() *fsNode { + return &fsNode{ + dir: make(map[string]*fsNode), + } +} + +// MakeFsInMemory returns an empty 'file system'. +// The paths of nodes in this object will always +// report a leading Separator, meaning they +// are "absolute" in the sense defined by +// https://golang.org/pkg/path/filepath/#IsAbs. +// This is a relevant difference when using Walk, +// Glob, Match, etc. +func MakeFsInMemory() FileSystem { + return &fsNode{ + nilParentName: Separator, + dir: make(map[string]*fsNode), + } +} + +// Name returns the name of the node. 
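+// For a root node this is nilParentName, e.g. Separator for a file system
+// created with MakeFsInMemory.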
+func (n *fsNode) Name() string { + if n.parent == nil { + // Unable to lookup name in parent. + return n.nilParentName + } + if !n.parent.isNodeADir() { + log.Fatal("parent not a dir") + } + for key, value := range n.parent.dir { + if value == n { + return key + } + } + log.Fatal("unable to find fsNode name") + return "" +} + +// Path returns the full path to the node. +func (n *fsNode) Path() string { + if n.parent == nil { + return n.nilParentName + } + if !n.parent.isNodeADir() { + log.Fatal("parent not a dir, structural error") + } + return filepath.Join(n.parent.Path(), n.Name()) +} + +// mySplit trims trailing separators from the directory +// result of filepath.Split. +func mySplit(s string) (string, string) { + dName, fName := filepath.Split(s) + return StripTrailingSeps(dName), fName +} + +func (n *fsNode) addFile(name string, c []byte) (result *fsNode, err error) { + parent := n + dName, fileName := mySplit(name) + if dName != "" { + parent, err = parent.addDir(dName) + if err != nil { + return nil, err + } + } + if !isLegalFileNameForCreation(fileName) { + return nil, fmt.Errorf( + "illegal name '%s' in file creation", fileName) + } + result, ok := parent.dir[fileName] + if ok { + // File already exists; overwrite it. + if result.offset != nil { + return nil, fmt.Errorf("cannot add already opened file '%s'", n.Path()) + } + result.content = append(result.content[:0], c...) + return result, nil + } + result = &fsNode{ + content: append([]byte(nil), c...), + parent: parent, + } + parent.dir[fileName] = result + return result, nil +} + +// Create implements FileSystem. +// Create makes an empty file. +func (n *fsNode) Create(path string) (result File, err error) { + f, err := n.AddFile(path, nil) + if err != nil { + return f, err + } + f.offset = new(int) + return f, nil +} + +// WriteFile implements FileSystem. +func (n *fsNode) WriteFile(path string, d []byte) error { + _, err := n.AddFile(path, d) + return err +} + +// AddFile adds a file and any necessary containing +// directories to the node. +func (n *fsNode) AddFile( + name string, c []byte) (result *fsNode, err error) { + if n.dir == nil { + return nil, fmt.Errorf( + "cannot add a file to a non-directory '%s'", n.Name()) + } + return n.addFile(cleanQueryPath(name), c) +} + +func (n *fsNode) addDir(path string) (result *fsNode, err error) { + parent := n + dName, subDirName := mySplit(path) + if dName != "" { + parent, err = n.addDir(dName) + if err != nil { + return nil, err + } + } + switch subDirName { + case "", SelfDir: + return n, nil + case ParentDir: + if n.parent == nil { + return nil, fmt.Errorf( + "cannot add a directory above '%s'", n.Path()) + } + return n.parent, nil + default: + if !isLegalFileNameForCreation(subDirName) { + return nil, fmt.Errorf( + "illegal name '%s' in directory creation", subDirName) + } + result, ok := parent.dir[subDirName] + if ok { + if result.isNodeADir() { + // it's already there. + return result, nil + } + return nil, fmt.Errorf( + "cannot make dir '%s'; a file of that name already exists in '%s'", + subDirName, parent.Name()) + } + result = &fsNode{ + dir: make(map[string]*fsNode), + parent: parent, + } + parent.dir[subDirName] = result + return result, nil + } +} + +// Mkdir implements FileSystem. +// Mkdir creates a directory. +func (n *fsNode) Mkdir(path string) error { + _, err := n.AddDir(path) + return err +} + +// MkdirAll implements FileSystem. +// MkdirAll creates a directory. 
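+// In this in-memory implementation it is equivalent to Mkdir: both delegate
+// to AddDir, which creates any intervening directories.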
+func (n *fsNode) MkdirAll(path string) error {
+	_, err := n.AddDir(path)
+	return err
+}
+
+// AddDir adds a directory to the node, not complaining
+// if it is already there.
+func (n *fsNode) AddDir(path string) (result *fsNode, err error) {
+	if n.dir == nil {
+		return nil, fmt.Errorf(
+			"cannot add a directory to file node '%s'", n.Name())
+	}
+	return n.addDir(cleanQueryPath(path))
+}
+
+// CleanedAbs implements FileSystem.
+func (n *fsNode) CleanedAbs(path string) (ConfirmedDir, string, error) {
+	node, err := n.Find(path)
+	if err != nil {
+		return "", "", errors.WrapPrefixf(err, "unable to clean")
+	}
+	if node == nil {
+		return "", "", notExistError(path)
+	}
+	if node.isNodeADir() {
+		return ConfirmedDir(node.Path()), "", nil
+	}
+	return ConfirmedDir(node.parent.Path()), node.Name(), nil
+}
+
+// Exists implements FileSystem.
+// Exists returns true if the path exists.
+func (n *fsNode) Exists(path string) bool {
+	if !n.isNodeADir() {
+		return n.Name() == path
+	}
+	result, err := n.Find(path)
+	if err != nil {
+		return false
+	}
+	return result != nil
+}
+
+func cleanQueryPath(path string) string {
+	// Always ignore leading separator?
+	// Remember that filepath.Clean returns "." if
+	// given an empty string argument.
+	return filepath.Clean(StripLeadingSeps(path))
+}
+
+// Find finds the given node, returning nil if not found.
+// Return error on structural/argument errors.
+func (n *fsNode) Find(path string) (*fsNode, error) {
+	if !n.isNodeADir() {
+		return nil, fmt.Errorf("can only find inside a dir")
+	}
+	if path == "" {
+		// Special case; check *before* cleaning and *before*
+		// comparison to nilParentName.
+		return nil, nil
+	}
+	if (n.parent == nil && path == n.nilParentName) || path == SelfDir {
+		// Special case
+		return n, nil
+	}
+	return n.findIt(cleanQueryPath(path))
+}
+
+func (n *fsNode) findIt(path string) (result *fsNode, err error) {
+	parent := n
+	dName, item := mySplit(path)
+	if dName != "" {
+		parent, err = n.findIt(dName)
+		if err != nil {
+			return nil, err
+		}
+		if parent == nil {
+			// all done, target doesn't exist.
+			return nil, nil
+		}
+	}
+	if !parent.isNodeADir() {
+		return nil, fmt.Errorf("'%s' is not a directory", parent.Path())
+	}
+	return parent.dir[item], nil
+}
+
+// RemoveAll implements FileSystem.
+// RemoveAll removes an item and everything it contains.
+func (n *fsNode) RemoveAll(path string) error {
+	result, err := n.Find(path)
+	if err != nil {
+		return err
+	}
+	if result == nil {
+		// If the path doesn't exist, no need to remove anything.
+		return nil
+	}
+	return result.Remove()
+}
+
+// Remove drops the node, and everything it contains, from its parent.
+func (n *fsNode) Remove() error {
+	if n.parent == nil {
+		return fmt.Errorf("cannot remove a root node")
+	}
+	if !n.parent.isNodeADir() {
+		log.Fatal("parent not a dir")
+	}
+	for key, value := range n.parent.dir {
+		if value == n {
+			delete(n.parent.dir, key)
+			return nil
+		}
+	}
+	log.Fatal("unable to find self in parent")
+	return nil
+}
+
+// isNodeADir returns true if the node is a directory.
+// Cannot collide with the poorly named "IsDir".
+func (n *fsNode) isNodeADir() bool {
+	return n.dir != nil
+}
+
+// IsDir implements FileSystem.
+// IsDir returns true if the argument resolves
+// to a directory rooted at the node.
+func (n *fsNode) IsDir(path string) bool {
+	result, err := n.Find(path)
+	if err != nil || result == nil {
+		return false
+	}
+	return result.isNodeADir()
+}
+
+// ReadDir implements FileSystem.
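+// Unlike os.ReadDir, the names are returned in map-iteration order rather
+// than sorted; callers needing a stable order must sort the result.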
+func (n *fsNode) ReadDir(path string) ([]string, error) {
+	if !n.Exists(path) {
+		return nil, notExistError(path)
+	}
+	if !n.IsDir(path) {
+		return nil, fmt.Errorf("%s is not a directory", path)
+	}
+
+	dir, err := n.Find(path)
+	if err != nil {
+		return nil, err
+	}
+	if dir == nil {
+		return nil, fmt.Errorf("could not find directory %s", path)
+	}
+
+	keys := make([]string, len(dir.dir))
+	i := 0
+	for k := range dir.dir {
+		keys[i] = k
+		i++
+	}
+	return keys, nil
+}
+
+// Size returns the size of the node.
+func (n *fsNode) Size() int64 {
+	if n.isNodeADir() {
+		return int64(len(n.dir))
+	}
+	return int64(len(n.content))
+}
+
+// Open implements FileSystem.
+// Open opens the node in read-write mode and sets the offset to its start.
+// Writing right after opening the file will replace the original content
+// and move the offset forward, as with a file opened with O_RDWR | O_CREATE.
+//
+// As an example, let's consider a file with content "content":
+// - open: sets offset to start, content is "content"
+// - write "@": offset increases by one, the content is now "@ontent"
+// - read the rest: since offset is 1, the read operation returns "ontent"
+// - write "$": offset is at EOF, so "$" is appended and content is now "@ontent$"
+// - read the rest: returns 0 bytes and EOF
+// - close: the content is still "@ontent$"
+func (n *fsNode) Open(path string) (File, error) {
+	result, err := n.Find(path)
+	if err != nil {
+		return nil, err
+	}
+	if result == nil {
+		return nil, notExistError(path)
+	}
+	if result.offset != nil {
+		return nil, fmt.Errorf("cannot open previously opened file '%s'", path)
+	}
+	result.offset = new(int)
+	return result, nil
+}
+
+// Close marks the node closed.
+func (n *fsNode) Close() error {
+	if n.offset == nil {
+		return fmt.Errorf("cannot close already closed file '%s'", n.Path())
+	}
+	n.offset = nil
+	return nil
+}
+
+// ReadFile implements FileSystem.
+func (n *fsNode) ReadFile(path string) (c []byte, err error) {
+	result, err := n.Find(path)
+	if err != nil {
+		return nil, err
+	}
+	if result == nil {
+		return nil, notExistError(path)
+	}
+	if result.isNodeADir() {
+		return nil, fmt.Errorf("cannot read content from non-file '%s'", n.Path())
+	}
+	c = make([]byte, len(result.content))
+	copy(c, result.content)
+	return c, nil
+}
+
+// Read returns the content of the file node.
+func (n *fsNode) Read(d []byte) (c int, err error) {
+	if n.isNodeADir() {
+		return 0, fmt.Errorf(
+			"cannot read content from non-file '%s'", n.Path())
+	}
+	if n.offset == nil {
+		return 0, fmt.Errorf("cannot read from closed file '%s'", n.Path())
+	}
+
+	rest := n.content[*n.offset:]
+	if len(d) < len(rest) {
+		rest = rest[:len(d)]
+	} else {
+		err = io.EOF
+	}
+	copy(d, rest)
+	*n.offset += len(rest)
+	return len(rest), err
+}
+
+// Write saves the contents of the argument to the file node.
+func (n *fsNode) Write(p []byte) (c int, err error) {
+	if n.isNodeADir() {
+		return 0, fmt.Errorf(
+			"cannot write content to non-file '%s'", n.Path())
+	}
+	if n.offset == nil {
+		return 0, fmt.Errorf("cannot write to closed file '%s'", n.Path())
+	}
+	n.content = append(n.content[:*n.offset], p...)
+	*n.offset = len(n.content)
+	return len(p), nil
+}
+
+// ContentMatches returns true if v matches fake file's content.
+func (n *fsNode) ContentMatches(v []byte) bool {
+	return bytes.Equal(v, n.content)
+}
+
+// GetContent returns the content of a fake file.
+func (n *fsNode) GetContent() []byte {
+	return n.content
+}
+
+// Stat returns an instance of FileInfo.
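+// Its Size is the entry count for a directory and the content length in
+// bytes for a file (see Size above).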
+
+// Stat returns an instance of FileInfo.
+func (n *fsNode) Stat() (os.FileInfo, error) {
+ return fileInfo{node: n}, nil
+}
+
+// Walk implements FileSystem.
+func (n *fsNode) Walk(path string, walkFn filepath.WalkFunc) error {
+ result, err := n.Find(path)
+ if err != nil {
+ return err
+ }
+ if result == nil {
+ return notExistError(path)
+ }
+ return result.WalkMe(walkFn)
+}
+
+// WalkMe runs the given walkFn on each node.
+func (n *fsNode) WalkMe(walkFn filepath.WalkFunc) error {
+ fi, err := n.Stat()
+ // always visit self first
+ err = walkFn(n.Path(), fi, err)
+ if !n.isNodeADir() {
+ // it's a file, so nothing more to do
+ return err
+ }
+ // process self as a directory
+ if err == filepath.SkipDir {
+ return nil
+ }
+ // Walk is supposed to visit in lexical order.
+ for _, k := range n.sortedDirEntries() {
+ if err := n.dir[k].WalkMe(walkFn); err != nil {
+ if err == filepath.SkipDir {
+ // stop processing this directory
+ break
+ }
+ // bail out completely
+ return err
+ }
+ }
+ return nil
+}
+
+func (n *fsNode) sortedDirEntries() []string {
+ keys := make([]string, len(n.dir))
+ i := 0
+ for k := range n.dir {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// FileCount returns a count of files.
+// Directories, empty or otherwise, are not counted.
+func (n *fsNode) FileCount() int {
+ count := 0
+ n.WalkMe(func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() {
+ count++
+ }
+ return nil
+ })
+ return count
+}
+
+func (n *fsNode) DebugPrint() {
+ n.WalkMe(func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ fmt.Printf("err '%v' at path %q\n", err, path)
+ return nil
+ }
+ if info.IsDir() {
+ if info.Size() == 0 {
+ fmt.Println("empty dir: " + path)
+ }
+ } else {
+ fmt.Println(" file: " + path)
+ }
+ return nil
+ })
+}
+
+var legalFileNamePattern = regexp.MustCompile("^[a-zA-Z0-9-_.:]+$")
+
+// The rules enforced here should be simpler and tighter
+// than what's allowed on a real OS.
+// Should be fine for testing or in-memory purposes.
+func isLegalFileNameForCreation(n string) bool {
+ if n == "" || n == SelfDir || !legalFileNamePattern.MatchString(n) {
+ return false
+ }
+ return !strings.Contains(n, ParentDir)
+}
+
+// RegExpGlob returns a list of file paths matching the regexp.
+// Excludes directories.
+func (n *fsNode) RegExpGlob(pattern string) ([]string, error) {
+ var result []string
+ var expression = regexp.MustCompile(pattern)
+ err := n.WalkMe(func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.IsDir() {
+ if expression.MatchString(path) {
+ result = append(result, path)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(result)
+ return result, nil
+}
+
+// Glob implements FileSystem.
+// Glob returns the list of file paths matching
+// per filepath.Match semantics, i.e. unlike RegExpGlob,
+// Match("foo/a*") will not match sub-sub directories of foo.
+// This is how /bin/ls behaves.
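WalkMe mirrors the filepath.Walk contract: each node visits itself first, directories then recurse into children in lexical order, and filepath.SkipDir prunes a subtree. A hedged sketch of driving it through the public Walk method, used by the Glob implementation that follows (file names invented for illustration):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"sigs.k8s.io/kustomize/kyaml/filesys"
)

func main() {
	fs := filesys.MakeFsInMemory()
	_ = fs.WriteFile("/b/two.yaml", []byte("b"))
	_ = fs.WriteFile("/a/one.yaml", []byte("a"))

	// Prints "/", "/a", "/a/one.yaml"; the "/b" subtree is pruned,
	// and ordering is lexical regardless of insertion order.
	_ = fs.Walk("/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() && info.Name() == "b" {
			return filepath.SkipDir
		}
		fmt.Println(path)
		return nil
	})
}
```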
+func (n *fsNode) Glob(pattern string) ([]string, error) { + var result []string + var allFiles []string + err := n.WalkMe(func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + match, err := filepath.Match(pattern, path) + if err != nil { + return err + } + if match { + allFiles = append(allFiles, path) + } + } + return nil + }) + if err != nil { + return nil, err + } + if IsHiddenFilePath(pattern) { + result = allFiles + } else { + result = RemoveHiddenFiles(allFiles) + } + sort.Strings(result) + return result, nil +} + +// notExistError indicates that a file or directory does not exist. +// Unwrapping returns os.ErrNotExist so errors.Is(err, os.ErrNotExist) works correctly. +type notExistError string + +func (err notExistError) Error() string { return fmt.Sprintf("'%s' doesn't exist", string(err)) } +func (err notExistError) Unwrap() error { return os.ErrNotExist } diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk.go new file mode 100644 index 000000000..10ce83f12 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk.go @@ -0,0 +1,141 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filesys + +import ( + "fmt" + "log" + "os" + "path/filepath" + + "sigs.k8s.io/kustomize/kyaml/errors" +) + +var _ FileSystem = fsOnDisk{} + +// fsOnDisk implements FileSystem using the local filesystem. +type fsOnDisk struct{} + +// MakeFsOnDisk makes an instance of fsOnDisk. +func MakeFsOnDisk() FileSystem { + return fsOnDisk{} +} + +// Create delegates to os.Create. +func (fsOnDisk) Create(name string) (File, error) { return os.Create(name) } + +// Mkdir delegates to os.Mkdir. +func (fsOnDisk) Mkdir(name string) error { + return os.Mkdir(name, 0777|os.ModeDir) +} + +// MkdirAll delegates to os.MkdirAll. +func (fsOnDisk) MkdirAll(name string) error { + return os.MkdirAll(name, 0777|os.ModeDir) +} + +// RemoveAll delegates to os.RemoveAll. +func (fsOnDisk) RemoveAll(name string) error { + return os.RemoveAll(name) +} + +// Open delegates to os.Open. +func (fsOnDisk) Open(name string) (File, error) { return os.Open(name) } + +// CleanedAbs converts the given path into a +// directory and a file name, where the directory +// is represented as a ConfirmedDir and all that implies. +// If the entire path is a directory, the file component +// is an empty string. +func (x fsOnDisk) CleanedAbs( + path string) (ConfirmedDir, string, error) { + absRoot, err := filepath.Abs(path) + if err != nil { + return "", "", fmt.Errorf( + "abs path error on '%s' : %v", path, err) + } + deLinked, err := filepath.EvalSymlinks(absRoot) + if err != nil { + return "", "", fmt.Errorf( + "evalsymlink failure on '%s' : %w", path, err) + } + if x.IsDir(deLinked) { + return ConfirmedDir(deLinked), "", nil + } + d := filepath.Dir(deLinked) + if !x.IsDir(d) { + // Programmer/assumption error. + log.Fatalf("first part of '%s' not a directory", deLinked) + } + if d == deLinked { + // Programmer/assumption error. + log.Fatalf("d '%s' should be a subset of deLinked", d) + } + f := filepath.Base(deLinked) + if filepath.Join(d, f) != deLinked { + // Programmer/assumption error. + log.Fatalf("these should be equal: '%s', '%s'", + filepath.Join(d, f), deLinked) + } + return ConfirmedDir(d), f, nil +} + +// Exists returns true if os.Stat succeeds. 
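Because notExistError unwraps to os.ErrNotExist, callers can rely on the standard library's error matching instead of string comparison. A quick check against the in-memory implementation:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"sigs.k8s.io/kustomize/kyaml/filesys"
)

func main() {
	fs := filesys.MakeFsInMemory()
	_, err := fs.ReadFile("/missing.yaml")

	// true: notExistError.Unwrap() returns os.ErrNotExist,
	// so errors.Is resolves the chain.
	fmt.Println(errors.Is(err, os.ErrNotExist))
}
```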
+func (fsOnDisk) Exists(name string) bool { + _, err := os.Stat(name) + return err == nil +} + +// Glob returns the list of matching files +func (fsOnDisk) Glob(pattern string) ([]string, error) { + var result []string + allFilePaths, err := filepath.Glob(pattern) + if err != nil { + return nil, err + } + if IsHiddenFilePath(pattern) { + result = allFilePaths + } else { + result = RemoveHiddenFiles(allFilePaths) + } + return result, nil +} + +// IsDir delegates to os.Stat and FileInfo.IsDir +func (fsOnDisk) IsDir(name string) bool { + info, err := os.Stat(name) + if err != nil { + return false + } + return info.IsDir() +} + +// ReadDir delegates to os.ReadDir +func (fsOnDisk) ReadDir(name string) ([]string, error) { + dirEntries, err := os.ReadDir(name) + if err != nil { + return nil, err + } + result := make([]string, len(dirEntries)) + for i := range dirEntries { + result[i] = dirEntries[i].Name() + } + return result, nil +} + +// ReadFile delegates to os.ReadFile. +func (fsOnDisk) ReadFile(name string) ([]byte, error) { + content, err := os.ReadFile(name) + return content, errors.Wrap(err) +} + +// WriteFile delegates to os.WriteFile with read/write permissions. +func (fsOnDisk) WriteFile(name string, c []byte) error { + return errors.Wrap(os.WriteFile(name, c, 0666)) //nolint:gosec +} + +// Walk delegates to filepath.Walk. +func (fsOnDisk) Walk(path string, walkFn filepath.WalkFunc) error { + return filepath.Walk(path, walkFn) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_unix.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_unix.go new file mode 100644 index 000000000..15935a462 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_unix.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +//go:build !windows +// +build !windows + +package filesys + +import ( + "path/filepath" +) + +func getOSRoot() (string, error) { + return string(filepath.Separator), nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_windows.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_windows.go new file mode 100644 index 000000000..8c8a33c4d --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_windows.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filesys + +import ( + "path/filepath" + + "golang.org/x/sys/windows" +) + +func getOSRoot() (string, error) { + sysDir, err := windows.GetSystemDirectory() + if err != nil { + return "", err + } + return filepath.VolumeName(sysDir) + `\`, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/filesys/util.go b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/util.go new file mode 100644 index 000000000..0713ffe2c --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/filesys/util.go @@ -0,0 +1,143 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filesys + +import ( + "os" + "path/filepath" + "strings" +) + +// RootedPath returns a rooted path, e.g. "/foo/bar" as +// opposed to "foo/bar". +func RootedPath(elem ...string) string { + return Separator + filepath.Join(elem...) +} + +// StripTrailingSeps trims trailing filepath separators from input. +func StripTrailingSeps(s string) string { + k := len(s) + for k > 0 && s[k-1] == filepath.Separator { + k-- + } + return s[:k] +} + +// StripLeadingSeps trims leading filepath separators from input. 
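RootedPath and the separator-stripping helpers are small but load-bearing: the node code above keys its maps on separator-free names, and query paths are normalized with these functions before lookup. A couple of illustrative calls (output assumes a Unix separator):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/filesys"
)

func main() {
	fmt.Println(filesys.RootedPath("a", "b"))        // /a/b
	fmt.Println(filesys.StripTrailingSeps("a/b///")) // a/b
	fmt.Println(filesys.StripTrailingSeps("////"))   // (empty string)
}
```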
+func StripLeadingSeps(s string) string { + k := 0 + for k < len(s) && s[k] == filepath.Separator { + k++ + } + return s[k:] +} + +// PathSplit converts a file path to a slice of string. +// If the path is absolute (if the path has a leading slash), +// then the first entry in the result is an empty string. +// Desired: path == PathJoin(PathSplit(path)) +func PathSplit(incoming string) []string { + if incoming == "" { + return []string{} + } + dir, path := filepath.Split(incoming) + if dir == string(os.PathSeparator) { + if path == "" { + return []string{""} + } + return []string{"", path} + } + dir = strings.TrimSuffix(dir, string(os.PathSeparator)) + if dir == "" { + return []string{path} + } + return append(PathSplit(dir), path) +} + +// PathJoin converts a slice of string to a file path. +// If the first entry is an empty string, then the returned +// path is absolute (it has a leading slash). +// Desired: path == PathJoin(PathSplit(path)) +func PathJoin(incoming []string) string { + if len(incoming) == 0 { + return "" + } + if incoming[0] == "" { + return string(os.PathSeparator) + filepath.Join(incoming[1:]...) + } + return filepath.Join(incoming...) +} + +// InsertPathPart inserts 'part' at position 'pos' in the given filepath. +// The first position is 0. +// +// E.g. if part == 'PEACH' +// +// OLD : NEW : POS +// -------------------------------------------------------- +// {empty} : PEACH : irrelevant +// / : /PEACH : irrelevant +// pie : PEACH/pie : 0 (or negative) +// /pie : /PEACH/pie : 0 (or negative) +// raw : raw/PEACH : 1 (or larger) +// /raw : /raw/PEACH : 1 (or larger) +// a/nice/warm/pie : a/nice/warm/PEACH/pie : 3 +// /a/nice/warm/pie : /a/nice/warm/PEACH/pie : 3 +// +// * An empty part results in no change. +// +// - Absolute paths get their leading '/' stripped, treated like +// relative paths, and the leading '/' is re-added on output. +// The meaning of pos is intentionally the same in either absolute or +// relative paths; if it weren't, this function could convert absolute +// paths to relative paths, which is not desirable. +// +// - For robustness (liberal input, conservative output) Pos values +// that are too small (large) to index the split filepath result in a +// prefix (postfix) rather than an error. Use extreme position values +// to assure a prefix or postfix (e.g. 0 will always prefix, and +// 9999 will presumably always postfix). +func InsertPathPart(path string, pos int, part string) string { + if part == "" { + return path + } + parts := PathSplit(path) + if pos < 0 { + pos = 0 + } else if pos > len(parts) { + pos = len(parts) + } + if len(parts) > 0 && parts[0] == "" && pos < len(parts) { + // An empty string at 0 indicates an absolute path, and means + // we must increment pos. This change means that a position + // specification has the same meaning in relative and absolute paths. + // E.g. in either the path 'a/b/c' or the path '/a/b/c', + // 'a' is at 0, 'b' is at 1 and 'c' is at 2, and inserting at + // zero means a new first field _without_ changing an absolute + // path to a relative path. 
+ pos++ + } + result := make([]string, len(parts)+1) + copy(result, parts[0:pos]) + result[pos] = part + return PathJoin(append(result, parts[pos:]...)) //nolint: makezero +} + +func IsHiddenFilePath(pattern string) bool { + return strings.HasPrefix(filepath.Base(pattern), ".") +} + +// Removes paths containing hidden files/folders from a list of paths +func RemoveHiddenFiles(paths []string) []string { + if len(paths) == 0 { + return paths + } + var result []string + for _, path := range paths { + if !IsHiddenFilePath(path) { + result = append(result, path) + } + } + return result +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go new file mode 100644 index 000000000..76b51a8c5 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go @@ -0,0 +1,208 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package container + +import ( + "fmt" + "os" + "path/filepath" + + "sigs.k8s.io/kustomize/kyaml/errors" + runtimeexec "sigs.k8s.io/kustomize/kyaml/fn/runtime/exec" + "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Filter filters Resources using a container image. +// The container must start a process that reads the list of +// input Resources from stdin, reads the Configuration from the env +// API_CONFIG, and writes the filtered Resources to stdout. +// If there is a error or validation failure, the process must exit +// non-zero. +// The full set of environment variables from the parent process +// are passed to the container. +// +// Function Scoping: +// Filter applies the function only to Resources to which it is scoped. +// +// Resources are scoped to a function if any of the following are true: +// - the Resource were read from the same directory as the function config +// - the Resource were read from a subdirectory of the function config directory +// - the function config is in a directory named "functions" and +// they were read from a subdirectory of "functions" parent +// - the function config doesn't have a path annotation (considered globally scoped) +// - the Filter has GlobalScope == true +// +// In Scope Examples: +// +// Example 1: deployment.yaml and service.yaml in function.yaml scope +// same directory as the function config directory +// . +// ├── function.yaml +// ├── deployment.yaml +// └── service.yaml +// +// Example 2: apps/deployment.yaml and apps/service.yaml in function.yaml scope +// subdirectory of the function config directory +// . +// ├── function.yaml +// └── apps +//    ├── deployment.yaml +//    └── service.yaml +// +// Example 3: apps/deployment.yaml and apps/service.yaml in functions/function.yaml scope +// function config is in a directory named "functions" +// . +// ├── functions +// │   └── function.yaml +// └── apps +//    ├── deployment.yaml +//    └── service.yaml +// +// Out of Scope Examples: +// +// Example 1: apps/deployment.yaml and apps/service.yaml NOT in stuff/function.yaml scope +// . +// ├── stuff +// │   └── function.yaml +// └── apps +//    ├── deployment.yaml +//    └── service.yaml +// +// Example 2: apps/deployment.yaml and apps/service.yaml NOT in stuff/functions/function.yaml scope +// . 
+// ├── stuff +// │   └── functions +// │    └── function.yaml +// └── apps +//    ├── deployment.yaml +//    └── service.yaml +// +// Default Paths: +// Resources emitted by functions will have default path applied as annotations +// if none is present. +// The default path will be the function-dir/ (or parent directory in the case of "functions") +// + function-file-name/ + namespace/ + kind_name.yaml +// +// Example 1: Given a function in fn.yaml that produces a Deployment name foo and a Service named bar +// dir +// └── fn.yaml +// +// Would default newly generated Resources to: +// +// dir +// ├── fn.yaml +// └── fn +//    ├── deployment_foo.yaml +//    └── service_bar.yaml +// +// Example 2: Given a function in functions/fn.yaml that produces a Deployment name foo and a Service named bar +// dir +// └── fn.yaml +// +// Would default newly generated Resources to: +// +// dir +// ├── functions +// │   └── fn.yaml +// └── fn +//    ├── deployment_foo.yaml +//    └── service_bar.yaml +// +// Example 3: Given a function in fn.yaml that produces a Deployment name foo, namespace baz and a Service named bar namespace baz +// dir +// └── fn.yaml +// +// Would default newly generated Resources to: +// +// dir +// ├── fn.yaml +// └── fn +// └── baz +//    ├── deployment_foo.yaml +//    └── service_bar.yaml +type Filter struct { + runtimeutil.ContainerSpec `json:",inline" yaml:",inline"` + + Exec runtimeexec.Filter + + UIDGID string +} + +func (c Filter) String() string { + if c.Exec.DeferFailure { + return fmt.Sprintf("%s deferFailure: %v", c.Image, c.Exec.DeferFailure) + } + return c.Image +} +func (c Filter) GetExit() error { + return c.Exec.GetExit() +} + +func (c *Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + if err := c.setupExec(); err != nil { + return nil, err + } + return c.Exec.Filter(nodes) +} + +func (c *Filter) setupExec() error { + // don't init 2x + if c.Exec.Path != "" { + return nil + } + + if c.Exec.WorkingDir == "" { + wd, err := os.Getwd() + if err != nil { + return errors.Wrap(err) + } + c.Exec.WorkingDir = wd + } + + path, args := c.getCommand() + c.Exec.Path = path + c.Exec.Args = args + return nil +} + +// getArgs returns the command + args to run to spawn the container +func (c *Filter) getCommand() (string, []string) { + network := runtimeutil.NetworkNameNone + if c.ContainerSpec.Network { + network = runtimeutil.NetworkNameHost + } + // run the container using docker. this is simpler than using the docker + // libraries, and ensures things like auth work the same as if the container + // was run from the cli. + args := []string{"run", + "--rm", // delete the container afterward + "-i", "-a", "STDIN", "-a", "STDOUT", "-a", "STDERR", // attach stdin, stdout, stderr + "--network", string(network), + + // added security options + "--user", c.UIDGID, + "--security-opt=no-new-privileges", // don't allow the user to escalate privileges + // note: don't make fs readonly because things like heredoc rely on writing tmp files + } + + for _, storageMount := range c.StorageMounts { + // convert declarative relative paths to absolute (otherwise docker will throw an error) + if !filepath.IsAbs(storageMount.Src) { + storageMount.Src = filepath.Join(c.Exec.WorkingDir, storageMount.Src) + } + args = append(args, "--mount", storageMount.String()) + } + + args = append(args, runtimeutil.NewContainerEnvFromStringSlice(c.Env).GetDockerFlags()...) 
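+ // At this point args holds the complete invocation except the image name,
+ // which is appended last below. For a hypothetical image gcr.io/example/fn:v1
+ // run with UIDGID "1000:1000" and no extra env or mounts, the assembled
+ // command is roughly:
+ //
+ // docker run --rm -i -a STDIN -a STDOUT -a STDERR --network none \
+ // --user 1000:1000 --security-opt=no-new-privileges \
+ // -e LOG_TO_STDERR=true -e STRUCTURED_RESULTS=true gcr.io/example/fn:v1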
+ a := append(args, c.Image) //nolint:gocritic + return "docker", a +} + +// NewContainer returns a new container filter +func NewContainer(spec runtimeutil.ContainerSpec, uidgid string) Filter { + f := Filter{ContainerSpec: spec, UIDGID: uidgid} + + return f +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/doc.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/doc.go new file mode 100644 index 000000000..2747a96fb --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/doc.go @@ -0,0 +1,5 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package exec contains the exec function implementation. +package exec diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/exec.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/exec.go new file mode 100644 index 000000000..8bb3fe12e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/exec.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package exec + +import ( + "io" + "os" + "os/exec" + "path/filepath" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +type Filter struct { + // Path is the path to the executable to run + Path string `yaml:"path,omitempty"` + + // Args are the arguments to the executable + Args []string `yaml:"args,omitempty"` + + // WorkingDir is the working directory that the executable + // should run in + WorkingDir string + + runtimeutil.FunctionFilter +} + +func (c *Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + c.FunctionFilter.Run = c.Run + return c.FunctionFilter.Filter(nodes) +} + +func (c *Filter) Run(reader io.Reader, writer io.Writer) error { + cmd := exec.Command(c.Path, c.Args...) //nolint:gosec + cmd.Stdin = reader + cmd.Stdout = writer + cmd.Stderr = os.Stderr + if c.WorkingDir == "" { + return errors.Errorf("no working directory set for exec function") + } + if !filepath.IsAbs(c.WorkingDir) { + return errors.Errorf( + "relative working directory %s not allowed", c.WorkingDir) + } + if c.WorkingDir == "/" { + return errors.Errorf( + "root working directory '/' not allowed") + } + cmd.Dir = c.WorkingDir + return cmd.Run() +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/doc.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/doc.go new file mode 100644 index 000000000..89f9036a4 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/doc.go @@ -0,0 +1,5 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package runtimeutil contains libraries for implementing function runtimes. +package runtimeutil diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/functiontypes.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/functiontypes.go new file mode 100644 index 000000000..f0448ea28 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/functiontypes.go @@ -0,0 +1,312 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package runtimeutil + +import ( + "fmt" + "os" + "sort" + "strings" + + "sigs.k8s.io/kustomize/kyaml/yaml" + k8syaml "sigs.k8s.io/yaml" +) + +const ( + FunctionAnnotationKey = "config.kubernetes.io/function" + oldFunctionAnnotationKey = "config.k8s.io/function" +) + +var functionAnnotationKeys = []string{FunctionAnnotationKey, oldFunctionAnnotationKey} + +// ContainerNetworkName is a type for network name used in container +type ContainerNetworkName string + +const ( + NetworkNameNone ContainerNetworkName = "none" + NetworkNameHost ContainerNetworkName = "host" +) +const defaultEnvValue string = "true" + +// ContainerEnv defines the environment present in a container. +type ContainerEnv struct { + // EnvVars is a key-value map that will be set as env in container + EnvVars map[string]string + + // VarsToExport are only env key. Value will be the value in the host system + VarsToExport []string +} + +// GetDockerFlags returns docker run style env flags +func (ce *ContainerEnv) GetDockerFlags() []string { + envs := ce.EnvVars + if envs == nil { + envs = make(map[string]string) + } + + flags := []string{} + // return in order to keep consistent among different runs + keys := []string{} + for k := range envs { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + flags = append(flags, "-e", key+"="+envs[key]) + } + + for _, key := range ce.VarsToExport { + flags = append(flags, "-e", key) + } + + return flags +} + +// AddKeyValue adds a key-value pair into the envs +func (ce *ContainerEnv) AddKeyValue(key, value string) { + if ce.EnvVars == nil { + ce.EnvVars = make(map[string]string) + } + ce.EnvVars[key] = value +} + +// HasExportedKey returns true if the key is a exported key +func (ce *ContainerEnv) HasExportedKey(key string) bool { + for _, k := range ce.VarsToExport { + if k == key { + return true + } + } + return false +} + +// AddKey adds a key into the envs +func (ce *ContainerEnv) AddKey(key string) { + if !ce.HasExportedKey(key) { + ce.VarsToExport = append(ce.VarsToExport, key) + } +} + +// Raw returns a slice of string which represents the envs. +// Example: [foo=bar, baz] +func (ce *ContainerEnv) Raw() []string { + var ret []string + for k, v := range ce.EnvVars { + ret = append(ret, k+"="+v) + } + + ret = append(ret, ce.VarsToExport...) + return ret +} + +// NewContainerEnv returns a pointer to a new ContainerEnv +func NewContainerEnv() *ContainerEnv { + var ce ContainerEnv + ce.EnvVars = make(map[string]string) + // default envs + ce.EnvVars["LOG_TO_STDERR"] = defaultEnvValue + ce.EnvVars["STRUCTURED_RESULTS"] = defaultEnvValue + return &ce +} + +// NewContainerEnvFromStringSlice returns a new ContainerEnv pointer with parsing +// input envStr. 
envStr example: ["foo=bar", "baz"] +func NewContainerEnvFromStringSlice(envStr []string) *ContainerEnv { + ce := NewContainerEnv() + for _, e := range envStr { + parts := strings.SplitN(e, "=", 2) + if len(parts) == 1 { + ce.AddKey(e) + } else { + ce.AddKeyValue(parts[0], parts[1]) + } + } + return ce +} + +// FunctionSpec defines a spec for running a function +type FunctionSpec struct { + DeferFailure bool `json:"deferFailure,omitempty" yaml:"deferFailure,omitempty"` + + // Container is the spec for running a function as a container + Container ContainerSpec `json:"container,omitempty" yaml:"container,omitempty"` + + // Starlark is the spec for running a function as a starlark script + Starlark StarlarkSpec `json:"starlark,omitempty" yaml:"starlark,omitempty"` + + // ExecSpec is the spec for running a function as an executable + Exec ExecSpec `json:"exec,omitempty" yaml:"exec,omitempty"` +} + +type ExecSpec struct { + Path string `json:"path,omitempty" yaml:"path,omitempty"` +} + +// ContainerSpec defines a spec for running a function as a container +type ContainerSpec struct { + // Image is the container image to run + Image string `json:"image,omitempty" yaml:"image,omitempty"` + + // Network defines network specific configuration + Network bool `json:"network,omitempty" yaml:"network,omitempty"` + + // Mounts are the storage or directories to mount into the container + StorageMounts []StorageMount `json:"mounts,omitempty" yaml:"mounts,omitempty"` + + // Env is a slice of env string that will be exposed to container + Env []string `json:"envs,omitempty" yaml:"envs,omitempty"` +} + +// StarlarkSpec defines how to run a function as a starlark program +type StarlarkSpec struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + + // Path specifies a path to a starlark script + Path string `json:"path,omitempty" yaml:"path,omitempty"` + + // URL specifies a url containing a starlark script + URL string `json:"url,omitempty" yaml:"url,omitempty"` +} + +// StorageMount represents a container's mounted storage option(s) +type StorageMount struct { + // Type of mount e.g. bind mount, local volume, etc. + MountType string `json:"type,omitempty" yaml:"type,omitempty"` + + // Source for the storage to be mounted. + // For named volumes, this is the name of the volume. + // For anonymous volumes, this field is omitted (empty string). + // For bind mounts, this is the path to the file or directory on the host. + Src string `json:"src,omitempty" yaml:"src,omitempty"` + + // The path where the file or directory is mounted in the container. + DstPath string `json:"dst,omitempty" yaml:"dst,omitempty"` + + // Mount in ReadWrite mode if it's explicitly configured + // See https://docs.docker.com/storage/bind-mounts/#use-a-read-only-bind-mount + ReadWriteMode bool `json:"rw,omitempty" yaml:"rw,omitempty"` +} + +func (s *StorageMount) String() string { + mode := "" + if !s.ReadWriteMode { + mode = ",readonly" + } + return fmt.Sprintf("type=%s,source=%s,target=%s%s", s.MountType, s.Src, s.DstPath, mode) +} + +// GetFunctionSpec returns the FunctionSpec for a resource. Returns +// nil if the resource does not have a FunctionSpec. 
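The ContainerEnv helpers above guarantee a stable flag ordering: key/value pairs come out sorted by key, and bare exported names follow. That keeps `docker run` invocations reproducible across runs. A short sketch of the round trip from `envs:` strings to docker flags:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
)

func main() {
	// "FOO=bar" becomes a key/value pair; bare "BAZ" is exported
	// from the host environment at run time.
	env := runtimeutil.NewContainerEnvFromStringSlice([]string{"FOO=bar", "BAZ"})

	// NewContainerEnv seeds LOG_TO_STDERR=true and STRUCTURED_RESULTS=true.
	fmt.Println(env.GetDockerFlags())
	// [-e FOO=bar -e LOG_TO_STDERR=true -e STRUCTURED_RESULTS=true -e BAZ]
}
```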
+// +// The FunctionSpec is read from the resource metadata.annotation +// "config.kubernetes.io/function" +func GetFunctionSpec(n *yaml.RNode) (*FunctionSpec, error) { + meta, err := n.GetMeta() + if err != nil { + return nil, fmt.Errorf("failed to get ResourceMeta: %w", err) + } + + fn, err := getFunctionSpecFromAnnotation(n, meta) + if err != nil { + return nil, err + } + if fn != nil { + return fn, nil + } + + // legacy function specification for backwards compatibility + container := meta.Annotations["config.kubernetes.io/container"] + if container != "" { + return &FunctionSpec{Container: ContainerSpec{Image: container}}, nil + } + return nil, nil +} + +// getFunctionSpecFromAnnotation parses the config function from an annotation +// if it is found +func getFunctionSpecFromAnnotation(n *yaml.RNode, meta yaml.ResourceMeta) (*FunctionSpec, error) { + var fs FunctionSpec + for _, s := range functionAnnotationKeys { + fn := meta.Annotations[s] + if fn != "" { + if err := k8syaml.UnmarshalStrict([]byte(fn), &fs); err != nil { + return nil, fmt.Errorf("%s unmarshal error: %w", s, err) + } + return &fs, nil + } + } + n, err := n.Pipe(yaml.Lookup("metadata", "configFn")) + if err != nil { + return nil, fmt.Errorf("failed to look up metadata.configFn: %w", err) + } + if yaml.IsMissingOrNull(n) { + return nil, nil + } + s, err := n.String() + if err != nil { + fmt.Fprintf(os.Stderr, "configFn parse error: %v\n", err) + return nil, fmt.Errorf("configFn parse error: %w", err) + } + if err := k8syaml.UnmarshalStrict([]byte(s), &fs); err != nil { + return nil, fmt.Errorf("%s unmarshal error: %w", "configFn", err) + } + return &fs, nil +} + +func StringToStorageMount(s string) StorageMount { + m := make(map[string]string) + options := strings.Split(s, ",") + for _, option := range options { + keyVal := strings.SplitN(option, "=", 2) + if len(keyVal) == 2 { + m[keyVal[0]] = keyVal[1] + } + } + var sm StorageMount + for key, value := range m { + switch { + case key == "type": + sm.MountType = value + case key == "src" || key == "source": + sm.Src = value + case key == "dst" || key == "target": + sm.DstPath = value + case key == "rw" && value == "true": + sm.ReadWriteMode = true + } + } + return sm +} + +// IsReconcilerFilter filters Resources based on whether or not they are Reconciler Resource. +// Resources with an apiVersion starting with '*.gcr.io', 'gcr.io' or 'docker.io' are considered +// Reconciler Resources. +type IsReconcilerFilter struct { + // ExcludeReconcilers if set to true, then Reconcilers will be excluded -- e.g. + // Resources with a reconcile container through the apiVersion (gcr.io prefix) or + // through the annotations + ExcludeReconcilers bool `yaml:"excludeReconcilers,omitempty"` + + // IncludeNonReconcilers if set to true, the NonReconciler will be included. 
+ IncludeNonReconcilers bool `yaml:"includeNonReconcilers,omitempty"` +} + +// Filter implements kio.Filter +func (c *IsReconcilerFilter) Filter(inputs []*yaml.RNode) ([]*yaml.RNode, error) { + var out []*yaml.RNode + for i := range inputs { + functionSpec, err := GetFunctionSpec(inputs[i]) + if err != nil { + return nil, err + } + isFnResource := functionSpec != nil + if isFnResource && !c.ExcludeReconcilers { + out = append(out, inputs[i]) + } + if !isFnResource && c.IncludeNonReconcilers { + out = append(out, inputs[i]) + } + } + return out, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/runtimeutil.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/runtimeutil.go new file mode 100644 index 000000000..6a06b81c3 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/runtimeutil.go @@ -0,0 +1,281 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package runtimeutil + +import ( + "bytes" + "fmt" + "io" + "os" + "path" + "strings" + + "sigs.k8s.io/kustomize/kyaml/comments" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/order" + + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// FunctionFilter wraps another filter to be invoked in the context of a function. +// FunctionFilter manages scoping the function, deferring failures, and saving results +// to files. +type FunctionFilter struct { + // Run implements the function. + Run func(reader io.Reader, writer io.Writer) error + + // FunctionConfig is passed to the function through ResourceList.functionConfig. + FunctionConfig *yaml.RNode `yaml:"functionConfig,omitempty"` + + // GlobalScope explicitly scopes the function to all input resources rather than only those + // resources scoped to it by path. + GlobalScope bool + + // ResultsFile is the file to write function ResourceList.results to. + // If unset, results will not be written. + ResultsFile string + + // DeferFailure will cause the Filter to return a nil error even if Run returns an error. + // The Run error will be available through GetExit(). + DeferFailure bool + + // results saves the results emitted from Run + Results *yaml.RNode + + // exit saves the error returned from Run + exit error + + ids map[string]*yaml.RNode +} + +// GetExit returns the error from Run +func (c FunctionFilter) GetExit() error { + return c.exit +} + +// functionsDirectoryName is keyword directory name for functions scoped 1 directory higher +const functionsDirectoryName = "functions" + +// getFunctionScope returns the path of the directory containing the function config, +// or its parent directory if the base directory is named "functions" +func (c *FunctionFilter) getFunctionScope() (string, error) { + m, err := c.FunctionConfig.GetMeta() + if err != nil { + return "", errors.Wrap(err) + } + var p string + var found bool + p, found = m.Annotations[kioutil.PathAnnotation] + if !found { + p, found = m.Annotations[kioutil.LegacyPathAnnotation] + if !found { + return "", nil + } + } + + functionDir := path.Clean(path.Dir(p)) + + if path.Base(functionDir) == functionsDirectoryName { + // the scope of functions in a directory called "functions" is 1 level higher + // this is similar to how the golang "internal" directory scoping works + functionDir = path.Dir(functionDir) + } + return functionDir, nil +} + +// scope partitions the input nodes into 2 slices. 
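Returning to the mount plumbing defined above: StringToStorageMount accepts the same comma-separated key=value syntax as docker's --mount flag (with src/source and dst/target as aliases), and StorageMount.String() re-serializes it, appending ",readonly" unless rw=true was given. A quick round trip:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
)

func main() {
	rw := runtimeutil.StringToStorageMount("type=bind,src=/tmp/fn,dst=/data,rw=true")
	fmt.Println(rw.String()) // type=bind,source=/tmp/fn,target=/data

	ro := runtimeutil.StringToStorageMount("type=bind,source=/tmp/fn,target=/data")
	fmt.Println(ro.String()) // type=bind,source=/tmp/fn,target=/data,readonly
}
```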
The first slice contains only Resources +// which are scoped under dir, and the second slice contains the Resources which are not. +func (c *FunctionFilter) scope(dir string, nodes []*yaml.RNode) ([]*yaml.RNode, []*yaml.RNode, error) { + // scope container filtered Resources to Resources under that directory + var input, saved []*yaml.RNode + if c.GlobalScope { + return nodes, nil, nil + } + + // global function + if dir == "" || dir == "." { + return nodes, nil, nil + } + + // identify Resources read from directories under the function configuration + for i := range nodes { + m, err := nodes[i].GetMeta() + if err != nil { + return nil, nil, err + } + var p string + var found bool + p, found = m.Annotations[kioutil.PathAnnotation] + if !found { + p, found = m.Annotations[kioutil.LegacyPathAnnotation] + if !found { + // this Resource isn't scoped under the function -- don't know where it came from + // consider it out of scope + saved = append(saved, nodes[i]) + continue + } + } + + resourceDir := path.Clean(path.Dir(p)) + if path.Base(resourceDir) == functionsDirectoryName { + // Functions in the `functions` directory are scoped to + // themselves, and should see themselves as input + resourceDir = path.Dir(resourceDir) + } + if !strings.HasPrefix(resourceDir, dir) { + // this Resource doesn't fall under the function scope if it + // isn't in a subdirectory of where the function lives + saved = append(saved, nodes[i]) + continue + } + + // this input is scoped under the function + input = append(input, nodes[i]) + } + + return input, saved, nil +} + +func (c *FunctionFilter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + in := &bytes.Buffer{} + out := &bytes.Buffer{} + + // only process Resources scoped to this function, save the others + functionDir, err := c.getFunctionScope() + if err != nil { + return nil, err + } + input, saved, err := c.scope(functionDir, nodes) + if err != nil { + return nil, err + } + + // set ids on each input so it is possible to copy comments from inputs back to outputs + if err := c.setIds(input); err != nil { + return nil, err + } + + // write the input + err = kio.ByteWriter{ + WrappingAPIVersion: kio.ResourceListAPIVersion, + WrappingKind: kio.ResourceListKind, + Writer: in, + KeepReaderAnnotations: true, + FunctionConfig: c.FunctionConfig}.Write(input) + if err != nil { + return nil, err + } + + // capture the command stdout for the return value + r := &kio.ByteReader{Reader: out} + + // don't exit immediately if the function fails -- write out the validation + c.exit = c.Run(in, out) + + output, err := r.Read() + if err != nil { + return nil, err + } + + // copy the comments and sync the order of fields from the inputs to the outputs + if err := c.copyCommentsAndSyncOrder(output); err != nil { + return nil, err + } + + if err := c.doResults(r); err != nil { + return nil, err + } + + if c.exit != nil && !c.DeferFailure { + return append(output, saved...), c.exit + } + + // annotate any generated Resources with a path and index if they don't already have one + if err := kioutil.DefaultPathAnnotation(functionDir, output); err != nil { + return nil, err + } + + // emit both the Resources output from the function, and the out-of-scope Resources + // which were not provided to the function + return append(output, saved...), nil +} + +func (c *FunctionFilter) setIds(nodes []*yaml.RNode) error { + // set the id on each node to map inputs to outputs + var id int + c.ids = map[string]*yaml.RNode{} + for i := range nodes { + id++ + idStr := fmt.Sprintf("%v", 
id) + err := nodes[i].PipeE(yaml.SetAnnotation(kioutil.IdAnnotation, idStr)) + if err != nil { + return errors.Wrap(err) + } + err = nodes[i].PipeE(yaml.SetAnnotation(kioutil.LegacyIdAnnotation, idStr)) + if err != nil { + return errors.Wrap(err) + } + c.ids[idStr] = nodes[i] + } + return nil +} + +func (c *FunctionFilter) copyCommentsAndSyncOrder(nodes []*yaml.RNode) error { + for i := range nodes { + node := nodes[i] + anID, err := node.Pipe(yaml.GetAnnotation(kioutil.IdAnnotation)) + if err != nil { + return errors.Wrap(err) + } + if anID == nil { + anID, err = node.Pipe(yaml.GetAnnotation(kioutil.LegacyIdAnnotation)) + if err != nil { + return errors.Wrap(err) + } + if anID == nil { + continue + } + } + + var in *yaml.RNode + var found bool + if in, found = c.ids[anID.YNode().Value]; !found { + continue + } + if err := comments.CopyComments(in, node); err != nil { + return errors.Wrap(err) + } + if err := order.SyncOrder(in, node); err != nil { + return errors.Wrap(err) + } + if err := node.PipeE(yaml.ClearAnnotation(kioutil.IdAnnotation)); err != nil { + return errors.Wrap(err) + } + if err := node.PipeE(yaml.ClearAnnotation(kioutil.LegacyIdAnnotation)); err != nil { + return errors.Wrap(err) + } + } + return nil +} + +func (c *FunctionFilter) doResults(r *kio.ByteReader) error { + // Write the results to a file if configured to do so + if c.ResultsFile != "" && r.Results != nil { + results, err := r.Results.String() + if err != nil { + return err + } + err = os.WriteFile(c.ResultsFile, []byte(results), 0600) + if err != nil { + return err + } + } + + if r.Results != nil { + c.Results = r.Results + } + return nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/types.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/types.go new file mode 100644 index 000000000..5edc4ebc3 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/types.go @@ -0,0 +1,8 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package runtimeutil + +type DeferFailureFunction interface { + GetExit() error +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/context.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/context.go new file mode 100644 index 000000000..bbf07f66c --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/context.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package starlark + +import ( + "encoding/json" + "os" + "strings" + + "go.starlark.net/starlark" + "go.starlark.net/starlarkstruct" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util" + "sigs.k8s.io/kustomize/kyaml/openapi" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +type Context struct { + resourceList starlark.Value +} + +func (c *Context) predeclared() (starlark.StringDict, error) { + e, err := env() + if err != nil { + return nil, err + } + oa, err := oa() + if err != nil { + return nil, err + } + dict := starlark.StringDict{ + "resource_list": c.resourceList, + "open_api": oa, + "environment": e, + } + + return starlark.StringDict{ + "ctx": starlarkstruct.FromStringDict(starlarkstruct.Default, dict), + }, nil +} + +func oa() (starlark.Value, error) { + return interfaceToValue(openapi.Schema()) +} + +func env() (starlark.Value, error) { + env := map[string]interface{}{} + for _, e := range os.Environ() { + pair := strings.SplitN(e, "=", 2) + if len(pair) < 2 { + continue + } + env[pair[0]] = pair[1] + } + value, err := util.Marshal(env) + if err != nil { + return nil, errors.Wrap(err) + } + return value, nil +} + +func interfaceToValue(i interface{}) (starlark.Value, error) { + b, err := json.Marshal(i) + if err != nil { + return nil, err + } + + var in map[string]interface{} + if err := yaml.Unmarshal(b, &in); err != nil { + return nil, errors.Wrap(err) + } + + value, err := util.Marshal(in) + if err != nil { + return nil, errors.Wrap(err) + } + return value, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/doc.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/doc.go new file mode 100644 index 000000000..70f0a2c7b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/doc.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package starlark contains a kio.Filter which can be applied to resources to transform +// them through starlark program. +// +// Starlark has become a popular runtime embedding in go programs, especially for Kubernetes +// and data processing. +// Examples: https://github.com/cruise-automation/isopod, https://qri.io/docs/starlark/starlib, +// https://github.com/stripe/skycfg, https://github.com/k14s/ytt +// +// The resources are provided to the starlark program through the global variable "resourceList". +// "resourceList" is a dictionary containing an "items" field with a list of resources. +// The starlark modified "resourceList" is the Filter output. +// +// After being run through the starlark program, the filter will copy the comments from the input +// resources to restore them -- due to them being dropped as a result of serializing the resources +// as starlark values. +// +// "resourceList" may also contain a "functionConfig" entry to configure the starlark script itself. +// Changes made by the starlark program to the "functionConfig" will be reflected in the +// Filter.FunctionConfig value. +// +// The Filter will also format the output so that output has the preferred field ordering +// rather than an alphabetical field ordering. 
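To make that contract concrete, here is a hedged sketch of invoking this package's Filter from Go; the program and resource are invented, and it assumes the `ctx.resource_list` binding set up in context.go above (unverified against this exact vendored snapshot):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	f := &starlark.Filter{
		Name: "annotate",
		// Starlark only allows for-loops inside functions, hence the def/run pair.
		Program: `
def run(items):
    for r in items:
        r["metadata"]["annotations"] = {"touched": "true"}

run(ctx.resource_list["items"])
`,
	}
	out, err := f.Filter([]*yaml.RNode{yaml.MustParse(
		"apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cm\n")})
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].MustString()) // the resource, now carrying the annotation
}
```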
+//
+// The resourceList variable adheres to the kustomize function spec as specified by:
+// https://github.com/kubernetes-sigs/kustomize/blob/master/cmd/config/docs/api-conventions/functions-spec.md
+//
+// All items in the resourceList are resources represented as starlark dictionaries.
+// The items in the resourceList respect the io spec specified by:
+// https://github.com/kubernetes-sigs/kustomize/blob/master/cmd/config/docs/api-conventions/config-io.md
+//
+// The starlark language spec can be found here:
+// https://github.com/google/starlark-go/blob/master/doc/spec.md
+package starlark
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/starlark.go b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/starlark.go
new file mode 100644
index 000000000..aeeeb81ed
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/starlark.go
@@ -0,0 +1,180 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package starlark
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+
+ "go.starlark.net/starlark"
+ "sigs.k8s.io/kustomize/kyaml/errors"
+ "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
+ "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util"
+ "sigs.k8s.io/kustomize/kyaml/kio/filters"
+ "sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Filter transforms a set of resources through the provided program
+type Filter struct {
+ Name string
+
+ // Program is a starlark script which will be run against the resources
+ Program string
+
+ // URL is the url of a starlark program to fetch and run
+ URL string
+
+ // Path is the path to a starlark program to read and run
+ Path string
+
+ runtimeutil.FunctionFilter
+}
+
+func (sf *Filter) String() string {
+ return fmt.Sprintf(
+ "name: %v path: %v url: %v program: %v", sf.Name, sf.Path, sf.URL, sf.Program)
+}
+
+func (sf *Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
+ if err := sf.setup(); err != nil {
+ return nil, err
+ }
+ sf.FunctionFilter.Run = sf.Run
+
+ return sf.FunctionFilter.Filter(nodes)
+}
+
+func (sf *Filter) setup() error {
+ if (sf.URL != "" && sf.Path != "") ||
+ (sf.URL != "" && sf.Program != "") ||
+ (sf.Path != "" && sf.Program != "") {
+ return errors.Errorf("Filter Path, Program and URL are mutually exclusive")
+ }
+
+ // read the program from a file
+ if sf.Path != "" {
+ b, err := os.ReadFile(sf.Path)
+ if err != nil {
+ return err
+ }
+ sf.Program = string(b)
+ }
+
+ // read the program from a URL
+ if sf.URL != "" {
+ err := func() error {
+ resp, err := http.Get(sf.URL)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ sf.Program = string(b)
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (sf *Filter) Run(reader io.Reader, writer io.Writer) error {
+ // retain map of inputs to outputs by id so if the name is changed by the
+ // starlark program, we are able to match the same resources
+ value, err := sf.readResourceList(reader)
+ if err != nil {
+ return errors.Wrap(err)
+ }
+
+ // run the starlark program as a transformation function
+ thread := &starlark.Thread{Name: sf.Name}
+
+ ctx := &Context{resourceList: value}
+ pd, err := ctx.predeclared()
+ if err != nil {
+ return errors.Wrap(err)
+ }
+ _, err = starlark.ExecFile(thread, sf.Name, sf.Program, pd)
+ if err != nil {
+ return errors.Wrap(err)
+ }
+
+ return sf.writeResourceList(value, writer)
+}
+
+// readResourceList transforms
input into a starlark.Value +func (sf *Filter) readResourceList(reader io.Reader) (starlark.Value, error) { + // read and parse the inputs + rl := bytes.Buffer{} + _, err := rl.ReadFrom(reader) + if err != nil { + return nil, errors.Wrap(err) + } + rn, err := yaml.Parse(rl.String()) + if err != nil { + return nil, errors.Wrap(err) + } + + // convert to a starlark value + b, err := yaml.Marshal(rn.Document()) // convert to bytes + if err != nil { + return nil, errors.Wrap(err) + } + var in map[string]interface{} + err = yaml.Unmarshal(b, &in) // convert to map[string]interface{} + if err != nil { + return nil, errors.Wrap(err) + } + return util.Marshal(in) // convert to starlark value +} + +// resourceListToOutput converts the output of the starlark program to the filter output +func (sf *Filter) writeResourceList(value starlark.Value, writer io.Writer) error { + // convert the modified resourceList back into a slice of RNodes + // by first converting to a map[string]interface{} + out, err := util.Unmarshal(value) + if err != nil { + return errors.Wrap(err) + } + b, err := yaml.Marshal(out) + if err != nil { + return errors.Wrap(err) + } + + rl, err := yaml.Parse(string(b)) + if err != nil { + return errors.Wrap(err) + } + + // preserve the comments from the input + items, err := rl.Pipe(yaml.Lookup("items")) + if err != nil { + return errors.Wrap(err) + } + err = items.VisitElements(func(node *yaml.RNode) error { + // starlark will serialize the resources sorting the fields alphabetically, + // format them to have a better ordering + _, err := filters.FormatFilter{}.Filter([]*yaml.RNode{node}) + return err + }) + if err != nil { + return errors.Wrap(err) + } + + s, err := rl.String() + if err != nil { + return errors.Wrap(err) + } + + _, err = writer.Write([]byte(s)) + return err +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE new file mode 100644 index 000000000..2683e4bb1 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/k8s.io/helm/pkg/version/doc.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE similarity index 77% rename from vendor/k8s.io/helm/pkg/version/doc.go rename to vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE index 3b61dd50e..866d74a7a 100644 --- a/vendor/k8s.io/helm/pkg/version/doc.go +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE @@ -1,5 +1,4 @@ -/* -Copyright The Helm Authors. +Copyright 2011-2016 Canonical Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,7 +11,3 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/ - -// Package version represents the current version of the project. -package version // import "k8s.io/helm/pkg/version" diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md new file mode 100644 index 000000000..08eb1babd --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md @@ -0,0 +1,150 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.2, but preserves some behavior +from 1.1 for backwards compatibility. + +Specifically, as of v3 of the yaml package: + + - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being + decoded into a typed bool value. Otherwise they behave as a string. Booleans + in YAML 1.2 are _true/false_ only. + - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ + as specified in YAML 1.2, because most parsers still use the old format. + Octals in the _0o777_ format are supported though, so new files work. + - Does not support base-60 floats. These are gone from YAML 1.2, and were + actually never supported by this package as it's clearly a poor choice. 
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go
new file mode 100644
index 000000000..ae7d049f1
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go
@@ -0,0 +1,747 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. 
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. 
+// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go new file mode 100644 index 000000000..0173b6982 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go @@ -0,0 +1,1000 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + 
} + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. + if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) 
+ return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
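+		// For example, at the midpoint decodeCount = 2,200,000 this
+		// evaluates to 0.99 - 0.89*(1,800,000/3,600,000) = 0.545, i.e.
+		// roughly half of the decode operations may still be driven by
+		// alias expansion at that document size.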
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. + failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
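+		// Concretely: a plain YAML integer no longer decodes into a
+		// time.Duration here; only a string accepted by time.ParseDuration
+		// (handled in the string case below) will succeed.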
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
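+		// Decode into a fresh []interface{} first; the result is copied
+		// back into the interface value via iface.Set further below.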
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + 
ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go new file mode 100644 index 000000000..f0f3d1867 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go @@ -0,0 +1,2028 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
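+// Incoming events are queued until yaml_emitter_need_more_events reports
+// that enough lookahead has accumulated, then each queued event is
+// analyzed and fed through the emitter state machine.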
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + } + if compact_seq { + emitter.indent = emitter.indent - 2 + } + } + return true +} + +// State dispatcher. 
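+// Every yaml_EMIT_* state is handled by exactly one emit function below,
+// which writes its output and selects the follow-up state via
+// emitter.state or the emitter.states stack.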
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false 
+ } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter, false) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
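+// The first call writes the opening '[' and pushes a new flow indent; later
+// calls separate items with ','. When an item carries line or foot comments
+// the comma is written eagerly after the node, and the next call is entered
+// with trail=true so the separator is not duplicated.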
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter, false) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter, false) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
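+// Mirrors the flow sequence logic with '{' and '}' delimiters. A key that
+// passes the simple-key check is emitted directly (unless canonical output is
+// requested); otherwise it is introduced with the explicit '?' indicator.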
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter, false) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter, false) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
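+// Each item is introduced by a '-' indicator at the current indentation. On
+// the first item the indent is increased; it stays compact when the sequence
+// sits directly under a mapping key and compact_sequence_indent is set (an
+// option added in this forked copy).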
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+ emitter.compact_sequence_indent
+ if !yaml_emitter_increase_indent(emitter, false, false, seq) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter, false) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if len(emitter.line_comment) > 0 {
+ // [Go] A line comment was provided for the key. That's unusual as the
+ // scanner associates line comments with the value. Either way,
+ // save the line comment and render it appropriately later.
+ emitter.key_line_comment = emitter.line_comment
+ emitter.line_comment = nil
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ if len(emitter.key_line_comment) > 0 {
+ // [Go] Line comments are generally associated with the value, but when there's
+ // no value on the same line as a mapping key they end up attached to the
+ // key itself.
+ if event.typ == yaml_SCALAR_EVENT {
+ if len(emitter.line_comment) == 0 {
+ // A scalar is coming and it has no line comments by itself yet,
+ // so just let it handle the line comment as usual. 
If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter, false) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter, false) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
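+// Chooses the emitter state for the upcoming items: flow style when already
+// inside a flow collection, in canonical mode, when the event asks for flow
+// style, or when the sequence is empty; block style otherwise.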
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
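+// Starts from the style requested by the event and downgrades it whenever the
+// analyzed scalar data rules it out: plain falls back to single-quoted, while
+// single-quoted, literal, and folded all fall back to double-quoted.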
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
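+// Dispatches on the style chosen earlier by yaml_emitter_select_scalar_style.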
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t, linebreak bool) bool {
+ if len(emitter.line_comment) == 0 {
+ if linebreak && !put_break(emitter) {
+ return false
+ }
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' 
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
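+// Also caches the event's comments and the analysis results for its anchor,
+// tag, and scalar value in the emitter's anchor_data, tag_data, and
+// scalar_data fields, which the emit functions above consume.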
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go new file mode 100644 index 000000000..de9e72a3e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
+// parsing. 
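+// (YAML 1.2 resolves only "true" and "false" as booleans; the forms listed
+// below are the additional YAML 1.1 spellings.)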
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
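+ // An uninitialized Node (zero Kind and all fields zero) is encoded as null.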
+ if node.Kind == 0 && node.IsZero() {
+ e.nilv()
+ return
+ }
+
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ := resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ var rtag string
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+ // The tail logic below moves the foot comment of prior keys to the following key,
+ // since the value for each key may be a nested structure and the foot needs to be
+ // processed only after the entirety of the value is streamed. The last tail is
+ // processed with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if stag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if stag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", stag)
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go new file mode 100644 index 000000000..268558a0d --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go @@ -0,0 +1,1258 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? 
+// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
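Taken together, peek_token and skip_token give the parser single-token lookahead over the scanner's queue, with comment unfolding happening as a side effect of the peek. A rough standalone sketch of that peek/skip pattern (types and names invented for the sketch):

    package main

    import "fmt"

    type token struct{ typ string }

    type tokenQueue struct {
        toks []token
        head int
    }

    // peek returns the next token without consuming it, or nil when exhausted.
    func (q *tokenQueue) peek() *token {
        if q.head < len(q.toks) {
            return &q.toks[q.head]
        }
        return nil
    }

    // skip consumes the token returned by the last peek.
    func (q *tokenQueue) skip() { q.head++ }

    func main() {
        q := &tokenQueue{toks: []token{{"STREAM-START"}, {"SCALAR"}, {"STREAM-END"}}}
        for t := q.peek(); t != nil; t = q.peek() {
            fmt.Println(t.typ)
            q.skip()
        }
    }

The state dispatcher that follows drives the grammar by consuming tokens through exactly this interface.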
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+ // [Go] Scan the header comment backwards, and if an empty line is found, break
+ // the header so the part before the last empty line goes into the
+ // document header, while the bottom of it goes into a follow up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
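At the API level, the implicit/explicit distinction handled above is what makes multi-document streams work: each '---' starts a fresh document event sequence. A small consumer sketch against upstream gopkg.in/yaml.v3 (assumed equivalent to this fork for this purpose):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "gopkg.in/yaml.v3"
    )

    func main() {
        // Three documents in one stream: one implicit, two started with '---'.
        src := "a: 1\n---\nb: 2\n---\nc: 3\n"
        dec := yaml.NewDecoder(strings.NewReader(src))
        for {
            var doc map[string]int
            if err := dec.Decode(&doc); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            fmt.Println(doc)
        }
    }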
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
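A %TAG directive simply maps a handle to a prefix, and default_tag_directives above supplies the two mappings every document gets for free. A hedged sketch of the handle expansion the function below performs (helper names invented for illustration):

    package main

    import "fmt"

    // expandTag resolves a tag handle plus suffix against a directive table,
    // mirroring what the parser does with default_tag_directives (sketch only).
    func expandTag(directives map[string]string, handle, suffix string) (string, bool) {
        prefix, ok := directives[handle]
        if !ok {
            return "", false // undefined tag handle
        }
        return prefix + suffix, true
    }

    func main() {
        directives := map[string]string{
            "!":  "!",
            "!!": "tag:yaml.org,2002:",
        }
        tag, _ := expandTag(directives, "!!", "str")
        fmt.Println(tag) // tag:yaml.org,2002:str
    }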
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go new file mode 100644 index 000000000..b7de0a89c --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
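The BOM sniffing above is self-contained enough to restate on a plain byte slice; this sketch mirrors yaml_parser_determine_encoding's checks (illustrative only, not part of the vendored file):

    package main

    import (
        "bytes"
        "fmt"
    )

    // detectEncoding reports the encoding implied by a leading byte order mark,
    // defaulting to UTF-8 when no BOM is present, along with the BOM length.
    func detectEncoding(b []byte) (enc string, bomLen int) {
        switch {
        case bytes.HasPrefix(b, []byte{0xef, 0xbb, 0xbf}):
            return "UTF-8", 3
        case bytes.HasPrefix(b, []byte{0xff, 0xfe}):
            return "UTF-16LE", 2
        case bytes.HasPrefix(b, []byte{0xfe, 0xff}):
            return "UTF-16BE", 2
        default:
            return "UTF-8", 0
        }
    }

    func main() {
        enc, n := detectEncoding([]byte{0xff, 0xfe, 'a', 0x00})
        fmt.Println(enc, n) // UTF-16LE 2
    }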
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
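The leading-octet switch above is the whole UTF-8 length table; restated as a standalone helper (sketch, not part of the vendored file):

    package main

    import "fmt"

    // utf8Width returns the sequence length implied by a UTF-8 leading octet,
    // or 0 for an invalid leading byte, matching the switch above.
    func utf8Width(octet byte) int {
        switch {
        case octet&0x80 == 0x00:
            return 1
        case octet&0xE0 == 0xC0:
            return 2
        case octet&0xF0 == 0xE0:
            return 3
        case octet&0xF8 == 0xF0:
            return 4
        default:
            return 0
        }
    }

    func main() {
        for _, b := range []byte{'a', 0xC3, 0xE2, 0xF0, 0x80} {
            fmt.Printf("%#x -> %d\n", b, utf8Width(b))
        }
    }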
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
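The surrogate-pair arithmetic described above condenses to a few lines; a standalone sketch of the same W1/W2 decoding (illustrative only):

    package main

    import "fmt"

    // decodeSurrogate combines a UTF-16 high/low surrogate pair into a rune
    // using U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF), the formula
    // quoted in the comment above (sketch, not part of the vendored file).
    func decodeSurrogate(w1, w2 uint16) (rune, bool) {
        if w1&0xFC00 != 0xD800 || w2&0xFC00 != 0xDC00 {
            return 0, false
        }
        return 0x10000 + ((rune(w1) & 0x3FF) << 10) + (rune(w2) & 0x3FF), true
    }

    func main() {
        // U+1F600 is encoded as the surrogate pair D83D DE00.
        r, ok := decodeSurrogate(0xD83D, 0xDE00)
        fmt.Printf("%U %v\n", r, ok) // U+1F600 true
    }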
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go new file mode 100644 index 000000000..64ae88805 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. 
They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go new file mode 100644 index 000000000..ca0070108 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go @@ -0,0 +1,3038 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. 
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3.
Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntactic peculiarities
+// that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1.
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
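+//
+// For example, while scanning the block mapping line "foo: bar", the scalar
+// "foo" is recorded below as a *possible* simple key; only when the following
+// ':' indicator is reached does yaml_parser_fetch_value insert the KEY token
+// retroactively at the recorded token number.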
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
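+// For example, the unquoted input "foo" ends up here and is emitted as
+// SCALAR("foo",plain), as illustrated in the introduction above.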
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
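+		// (The two indicators may appear in either order: '|-2' gives the
+		// chomping indicator first, '|2-' gives the indentation digit first.)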
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
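+		// (Everything from here to the closing quote is scalar content; the
+		// handling below differs for single- and double-quoted styles.)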
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexdecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
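+			// (Standard YAML folding: a single break between content lines
+			// becomes one space, while additional breaks are kept verbatim.)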
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
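+	// (A line break was consumed after the scalar, so a simple key may start
+	// at the beginning of the following line.)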
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
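+						// Anchor the comment to its own start position instead.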
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go new file mode 100644 index 000000000..9210ece7e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
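+
+// sorter.go defines the ordering applied to mapping keys when encoding:
+// numeric keys (and bools) compare by value, and string keys compare in
+// natural order, so embedded digit runs sort numerically.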
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go new file mode 100644 index 000000000..b8a116bf9 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go new file mode 100644 index 000000000..bb6418dba --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go @@ -0,0 +1,708 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
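+//
+// A minimal usage sketch (the field and input here are illustrative):
+//
+//     var cfg struct{ Port int }
+//     dec := yaml.NewDecoder(strings.NewReader("port: 8080"))
+//     err := dec.Decode(&cfg) // cfg.Port == 8080 on success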
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
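+//
+// A typical (illustrative) sequence:
+//
+//     e := yaml.NewEncoder(&buf)
+//     err := e.Encode(v)
+//     e.Close()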
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+	e.encoder.emitter.compact_sequence_indent = false
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//     var person struct {
+//         Name    string
+//         Address yaml.Node
+//     }
+//     err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//     var person Node
+//     err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line on which the node appears.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
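+				// The inlined value implements Unmarshaler itself, so record
+				// its field index for the decoder to invoke it directly.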
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go new file mode 100644 index 000000000..40c74de49 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go @@ -0,0 +1,809 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: 
+// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. 
+const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. 
+ yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. 
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? 
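+	// Editorial note (not upstream text): a "simple key" is a mapping key that
+	// is not introduced by the explicit '?' indicator. The scanner only knows a
+	// token sequence was a simple key once it sees the following ':', so each
+	// candidate is tracked below by token number and position until resolved.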
token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
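+	// Editorial note (an assumption based on the libyaml design this port
+	// follows): raw_buffer accumulates undecoded bytes delivered by
+	// read_handler, and they are transcoded according to `encoding` into the
+	// UTF-8 working buffer above before the scanner consumes them.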
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + compact_sequence_indent bool // Is '- ' is considered part of the indentation for sequence elements? + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. 
+ tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + key_line_comment []byte + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go new file mode 100644 index 000000000..e88f9c54a --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go @@ -0,0 +1,198 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. 
+func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. 
+func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE new file mode 100644 index 000000000..31f292dce --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 QRI, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/doc.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/doc.go new file mode 100644 index 000000000..035b47921 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/doc.go @@ -0,0 +1,25 @@ +// The MIT License (MIT) + +// Copyright (c) 2018 QRI, Inc. 
+ +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package util is forked from https://github.com/qri-io/starlib in order to prune +// excessive transitive dependencies from pulling in that library. +package util diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/util.go b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/util.go new file mode 100644 index 000000000..6bc4fba0f --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/util.go @@ -0,0 +1,275 @@ +// The MIT License (MIT) + +// Copyright (c) 2018 QRI, Inc. + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package util + +import ( + "fmt" + + "go.starlark.net/starlark" + "go.starlark.net/starlarkstruct" + "sigs.k8s.io/kustomize/kyaml/errors" +) + +// // asString unquotes a starlark string value +// func asString(x starlark.Value) (string, error) { +// return strconv.Unquote(x.String()) +// } + +// IsEmptyString checks is a starlark string is empty ("" for a go string) +// starlark.String.String performs repr-style quotation, which is necessary +// for the starlark.Value contract but a frequent source of errors in API +// clients. 
This helper method makes sure it'll work properly +func IsEmptyString(s starlark.String) bool { + return s.String() == `""` +} + +// Unmarshal decodes a starlark.Value into it's golang counterpart +// +//nolint:nakedret +func Unmarshal(x starlark.Value) (val interface{}, err error) { + switch v := x.(type) { + case starlark.NoneType: + val = nil + case starlark.Bool: + val = v.Truth() == starlark.True + case starlark.Int: + val, err = starlark.AsInt32(x) + case starlark.Float: + if f, ok := starlark.AsFloat(x); !ok { + err = fmt.Errorf("couldn't parse float") + } else { + val = f + } + case starlark.String: + val = v.GoString() + // case starlibtime.Time: + // val = time.Time(v) + case *starlark.Dict: + var ( + dictVal starlark.Value + pval interface{} + kval interface{} + keys []interface{} + vals []interface{} + // key as interface if found one key is not a string + ki bool + ) + + for _, k := range v.Keys() { + dictVal, _, err = v.Get(k) + if err != nil { + return + } + + pval, err = Unmarshal(dictVal) + if err != nil { + err = fmt.Errorf("unmarshaling starlark value: %w", err) + return + } + + kval, err = Unmarshal(k) + if err != nil { + err = fmt.Errorf("unmarshaling starlark key: %w", err) + return + } + + if _, ok := kval.(string); !ok { + // found key as not a string + ki = true + } + + keys = append(keys, kval) + vals = append(vals, pval) + } + + // prepare result + + rs := map[string]interface{}{} + ri := map[interface{}]interface{}{} + + for i, key := range keys { + // key as interface + if ki { + ri[key] = vals[i] + } else { + rs[key.(string)] = vals[i] + } + } + + if ki { + val = ri // map[interface{}]interface{} + } else { + val = rs // map[string]interface{} + } + case *starlark.List: + var ( + i int + listVal starlark.Value + iter = v.Iterate() + value = make([]interface{}, v.Len()) + ) + + defer iter.Done() + for iter.Next(&listVal) { + value[i], err = Unmarshal(listVal) + if err != nil { + return + } + i++ + } + val = value + case starlark.Tuple: + var ( + i int + tupleVal starlark.Value + iter = v.Iterate() + value = make([]interface{}, v.Len()) + ) + + defer iter.Done() + for iter.Next(&tupleVal) { + value[i], err = Unmarshal(tupleVal) + if err != nil { + return + } + i++ + } + val = value + case *starlark.Set: + fmt.Println("errnotdone: SET") + err = fmt.Errorf("sets aren't yet supported") + case *starlarkstruct.Struct: + if _var, ok := v.Constructor().(Unmarshaler); ok { + err = _var.UnmarshalStarlark(x) + if err != nil { + err = errors.WrapPrefixf(err, "failed marshal %q to Starlark object", v.Constructor().Type()) + return + } + val = _var + } else { + err = fmt.Errorf("constructor object from *starlarkstruct.Struct not supported Marshaler to starlark object: %s", v.Constructor().Type()) + } + default: + fmt.Println("errbadtype:", x.Type()) + err = fmt.Errorf("unrecognized starlark type: %s", x.Type()) + } + return +} + +// Marshal turns go values into starlark types +// +//nolint:nakedret +func Marshal(data interface{}) (v starlark.Value, err error) { + switch x := data.(type) { + case nil: + v = starlark.None + case bool: + v = starlark.Bool(x) + case string: + v = starlark.String(x) + case int: + v = starlark.MakeInt(x) + case int8: + v = starlark.MakeInt(int(x)) + case int16: + v = starlark.MakeInt(int(x)) + case int32: + v = starlark.MakeInt(int(x)) + case int64: + v = starlark.MakeInt64(x) + case uint: + v = starlark.MakeUint(x) + case uint8: + v = starlark.MakeUint(uint(x)) + case uint16: + v = starlark.MakeUint(uint(x)) + case uint32: + v = 
starlark.MakeUint(uint(x)) + case uint64: + v = starlark.MakeUint64(x) + case float32: + v = starlark.Float(float64(x)) + case float64: + v = starlark.Float(x) + // case time.Time: + // v = starlibtime.Time(x) + case []interface{}: + var elems = make([]starlark.Value, len(x)) + for i, val := range x { + elems[i], err = Marshal(val) + if err != nil { + return + } + } + v = starlark.NewList(elems) + case map[interface{}]interface{}: + dict := &starlark.Dict{} + var elem starlark.Value + for ki, val := range x { + var key starlark.Value + key, err = Marshal(ki) + if err != nil { + return + } + + elem, err = Marshal(val) + if err != nil { + return + } + if err = dict.SetKey(key, elem); err != nil { + return + } + } + v = dict + case map[string]interface{}: + dict := &starlark.Dict{} + var elem starlark.Value + for key, val := range x { + elem, err = Marshal(val) + if err != nil { + return + } + if err = dict.SetKey(starlark.String(key), elem); err != nil { + return + } + } + v = dict + case Marshaler: + v, err = x.MarshalStarlark() + default: + return starlark.None, fmt.Errorf("unrecognized type: %#v", x) + } + return +} + +// Unmarshaler is the interface use to unmarshal starlark custom types. +type Unmarshaler interface { + // UnmarshalStarlark unmarshal a starlark object to custom type. + UnmarshalStarlark(starlark.Value) error +} + +// Marshaler is the interface use to marshal starlark custom types. +type Marshaler interface { + // MarshalStarlark marshal a custom type to starlark object. + MarshalStarlark() (starlark.Value, error) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_reader.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_reader.go new file mode 100644 index 000000000..230ab891b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_reader.go @@ -0,0 +1,349 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package kio + +import ( + "bytes" + "fmt" + "io" + "regexp" + "sort" + "strings" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +const ( + ResourceListKind = "ResourceList" + ResourceListAPIVersion = "config.kubernetes.io/v1" +) + +// ByteReadWriter reads from an input and writes to an output. +type ByteReadWriter struct { + // Reader is where ResourceNodes are decoded from. + Reader io.Reader + + // Writer is where ResourceNodes are encoded. + Writer io.Writer + + // OmitReaderAnnotations will configures Read to skip setting the config.kubernetes.io/index + // annotation on Resources as they are Read. + OmitReaderAnnotations bool + + // KeepReaderAnnotations if set will keep the Reader specific annotations when writing + // the Resources, otherwise they will be cleared. + KeepReaderAnnotations bool + + // PreserveSeqIndent if true adds kioutil.SeqIndentAnnotation to each resource + PreserveSeqIndent bool + + // Style is a style that is set on the Resource Node Document. 
+ Style yaml.Style + + // WrapBareSeqNode wraps the bare sequence node document with map node, + // kyaml uses reader annotations to track resources, it is not possible to + // add them to bare sequence nodes, this option enables wrapping such bare + // sequence nodes into map node with key yaml.BareSeqNodeWrappingKey + // note that this wrapping is different and not related to ResourceList wrapping + WrapBareSeqNode bool + + FunctionConfig *yaml.RNode + + Results *yaml.RNode + + NoWrap bool + WrappingAPIVersion string + WrappingKind string +} + +func (rw *ByteReadWriter) Read() ([]*yaml.RNode, error) { + b := &ByteReader{ + Reader: rw.Reader, + OmitReaderAnnotations: rw.OmitReaderAnnotations, + PreserveSeqIndent: rw.PreserveSeqIndent, + WrapBareSeqNode: rw.WrapBareSeqNode, + } + val, err := b.Read() + rw.Results = b.Results + + if rw.FunctionConfig == nil { + rw.FunctionConfig = b.FunctionConfig + } + if !rw.NoWrap && rw.WrappingKind == "" { + rw.WrappingAPIVersion = b.WrappingAPIVersion + rw.WrappingKind = b.WrappingKind + } + return val, errors.Wrap(err) +} + +func (rw *ByteReadWriter) Write(nodes []*yaml.RNode) error { + w := ByteWriter{ + Writer: rw.Writer, + KeepReaderAnnotations: rw.KeepReaderAnnotations, + Style: rw.Style, + FunctionConfig: rw.FunctionConfig, + Results: rw.Results, + } + if !rw.NoWrap { + w.WrappingAPIVersion = rw.WrappingAPIVersion + w.WrappingKind = rw.WrappingKind + } + return w.Write(nodes) +} + +// ParseAll reads all of the inputs into resources +func ParseAll(inputs ...string) ([]*yaml.RNode, error) { + return (&ByteReader{ + Reader: bytes.NewBufferString(strings.Join(inputs, "\n---\n")), + }).Read() +} + +// FromBytes reads from a byte slice. +func FromBytes(bs []byte) ([]*yaml.RNode, error) { + return (&ByteReader{ + OmitReaderAnnotations: true, + AnchorsAweigh: true, + Reader: bytes.NewBuffer(bs), + }).Read() +} + +// StringAll writes all of the resources to a string +func StringAll(resources []*yaml.RNode) (string, error) { + var b bytes.Buffer + err := (&ByteWriter{Writer: &b}).Write(resources) + return b.String(), err +} + +// ByteReader decodes ResourceNodes from bytes. +// By default, Read will set the config.kubernetes.io/index annotation on each RNode as it +// is read so they can be written back in the same order. +type ByteReader struct { + // Reader is where ResourceNodes are decoded from. + Reader io.Reader + + // OmitReaderAnnotations will configures Read to skip setting the config.kubernetes.io/index + // and internal.config.kubernetes.io/seqindent annotations on Resources as they are Read. + OmitReaderAnnotations bool + + // PreserveSeqIndent if true adds kioutil.SeqIndentAnnotation to each resource + PreserveSeqIndent bool + + // SetAnnotations is a map of caller specified annotations to set on resources as they are read + // These are independent of the annotations controlled by OmitReaderAnnotations + SetAnnotations map[string]string + + FunctionConfig *yaml.RNode + + Results *yaml.RNode + + // DisableUnwrapping prevents Resources in Lists and ResourceLists from being unwrapped + DisableUnwrapping bool + + // WrappingAPIVersion is set by Read(), and is the apiVersion of the object that + // the read objects were originally wrapped in. + WrappingAPIVersion string + + // WrappingKind is set by Read(), and is the kind of the object that + // the read objects were originally wrapped in. 
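+	// (Editorial note: ByteReadWriter above copies these captured values back
+	// into its ByteWriter so that a Read followed by a Write restores the
+	// original wrapping; see ByteReadWriter.Read and Write.)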
+ WrappingKind string + + // WrapBareSeqNode wraps the bare sequence node document with map node, + // kyaml uses reader annotations to track resources, it is not possible to + // add them to bare sequence nodes, this option enables wrapping such bare + // sequence nodes into map node with key yaml.BareSeqNodeWrappingKey + // note that this wrapping is different and not related to ResourceList wrapping + WrapBareSeqNode bool + + // AnchorsAweigh set to true attempts to replace all YAML anchor aliases + // with their definitions (anchor values) immediately after the read. + AnchorsAweigh bool +} + +var _ Reader = &ByteReader{} + +// splitDocuments returns a slice of all documents contained in a YAML string. Multiple documents can be divided by the +// YAML document separator (---). It allows for white space and comments to be after the separator on the same line, +// but will return an error if anything else is on the line. +func splitDocuments(s string) ([]string, error) { + docs := make([]string, 0) + if len(s) > 0 { + // The YAML document separator is any line that starts with --- + yamlSeparatorRegexp := regexp.MustCompile(`\n---.*\n`) + + // Find all separators, check them for invalid content, and append each document to docs + separatorLocations := yamlSeparatorRegexp.FindAllStringIndex(s, -1) + prev := 0 + for i := range separatorLocations { + loc := separatorLocations[i] + separator := s[loc[0]:loc[1]] + + // If the next non-whitespace character on the line following the separator is not a comment, return an error + trimmedContentAfterSeparator := strings.TrimSpace(separator[4:]) + if len(trimmedContentAfterSeparator) > 0 && trimmedContentAfterSeparator[0] != '#' { + return nil, errors.Errorf("invalid document separator: %s", strings.TrimSpace(separator)) + } + + docs = append(docs, s[prev:loc[0]]) + prev = loc[1] + } + docs = append(docs, s[prev:]) + } + + return docs, nil +} + +func (r *ByteReader) Read() ([]*yaml.RNode, error) { + if r.PreserveSeqIndent && r.OmitReaderAnnotations { + return nil, errors.Errorf(`"PreserveSeqIndent" option adds a reader annotation, please set "OmitReaderAnnotations" to false`) + } + + output := ResourceNodeSlice{} + + // by manually splitting resources -- otherwise the decoder will get the Resource + // boundaries wrong for header comments. + input := &bytes.Buffer{} + _, err := io.Copy(input, r.Reader) + if err != nil { + return nil, errors.Wrap(err) + } + + // Replace the ending \r\n (line ending used in windows) with \n and then split it into multiple YAML documents + // if it contains document separators (---) + values, err := splitDocuments(strings.ReplaceAll(input.String(), "\r\n", "\n")) + if err != nil { + return nil, errors.Wrap(err) + } + + index := 0 + for i := range values { + // the Split used above will eat the tail '\n' from each resource. This may affect the + // literal string value since '\n' is meaningful in it. 
+ if i != len(values)-1 { + values[i] += "\n" + } + decoder := yaml.NewDecoder(bytes.NewBufferString(values[i])) + node, err := r.decode(values[i], index, decoder) + if err == io.EOF { + continue + } + + if err != nil { + return nil, errors.Wrap(err) + } + if yaml.IsMissingOrNull(node) { + // empty value + continue + } + + // ok if no metadata -- assume not an InputList + meta, err := node.GetMeta() + if err != yaml.ErrMissingMetadata && err != nil { + return nil, errors.WrapPrefixf(err, "[%d]", i) + } + + // the elements are wrapped in an InputList, unwrap them + // don't check apiVersion, we haven't standardized on the domain + if !r.DisableUnwrapping && + len(values) == 1 && // Only unwrap if there is only 1 value + (meta.Kind == ResourceListKind || meta.Kind == "List") && + (node.Field("items") != nil || node.Field("functionConfig") != nil) { + r.WrappingKind = meta.Kind + r.WrappingAPIVersion = meta.APIVersion + + // unwrap the list + if fc := node.Field("functionConfig"); fc != nil { + r.FunctionConfig = fc.Value + } + if res := node.Field("results"); res != nil { + r.Results = res.Value + } + + items := node.Field("items") + if items != nil { + for i := range items.Value.Content() { + // add items + output = append(output, yaml.NewRNode(items.Value.Content()[i])) + } + } + continue + } + + // add the node to the list + output = append(output, node) + + // increment the index annotation value + index++ + } + if r.AnchorsAweigh { + for _, n := range output { + if err = n.DeAnchor(); err != nil { + return nil, err + } + } + } + return output, nil +} + +func (r *ByteReader) decode(originalYAML string, index int, decoder *yaml.Decoder) (*yaml.RNode, error) { + node := &yaml.Node{} + err := decoder.Decode(node) + if err == io.EOF { + return nil, io.EOF + } + if err != nil { + return nil, errors.WrapPrefixf(err, "MalformedYAMLError") + } + + if yaml.IsYNodeEmptyDoc(node) { + return nil, nil + } + + // set annotations on the read Resources + // sort the annotations by key so the output Resources is consistent (otherwise the + // annotations will be in a random order) + n := yaml.NewRNode(node) + // check if it is a bare sequence node and wrap it with a yaml.BareSeqNodeWrappingKey + if r.WrapBareSeqNode && node.Kind == yaml.DocumentNode && len(node.Content) > 0 && + node.Content[0] != nil && node.Content[0].Kind == yaml.SequenceNode { + wrappedNode := yaml.NewRNode(&yaml.Node{ + Kind: yaml.MappingNode, + }) + wrappedNode.PipeE(yaml.SetField(yaml.BareSeqNodeWrappingKey, n)) + n = wrappedNode + } + + if r.SetAnnotations == nil { + r.SetAnnotations = map[string]string{} + } + if !r.OmitReaderAnnotations { + err := kioutil.CopyLegacyAnnotations(n) + if err != nil { + return nil, err + } + r.SetAnnotations[kioutil.IndexAnnotation] = fmt.Sprintf("%d", index) + r.SetAnnotations[kioutil.LegacyIndexAnnotation] = fmt.Sprintf("%d", index) + + if r.PreserveSeqIndent { + // derive and add the seqindent annotation + seqIndentStyle := yaml.DeriveSeqIndentStyle(originalYAML) + if seqIndentStyle != "" { + r.SetAnnotations[kioutil.SeqIndentAnnotation] = seqIndentStyle + } + } + } + var keys []string + for k := range r.SetAnnotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + _, err = n.Pipe(yaml.SetAnnotation(k, r.SetAnnotations[k])) + if err != nil { + return nil, errors.Wrap(err) + } + } + return n, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_writer.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_writer.go new file mode 100644 index 
000000000..ec4cafe30
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_writer.go
@@ -0,0 +1,198 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package kio
+
+import (
+	"encoding/json"
+	"io"
+	"path/filepath"
+
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/kio/kioutil"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// ByteWriter writes ResourceNodes to bytes. Generally YAML encoding will be used, but in the special
+// case of writing a single, bare yaml.RNode that has a kioutil.PathAnnotation indicating that the
+// target is a JSON file, JSON encoding is used. See shouldJSONEncodeSingleBareNode below for more
+// information.
+type ByteWriter struct {
+	// Writer is where ResourceNodes are encoded.
+	Writer io.Writer
+
+	// KeepReaderAnnotations if set will keep the Reader specific annotations when writing
+	// the Resources, otherwise they will be cleared.
+	KeepReaderAnnotations bool
+
+	// ClearAnnotations is a list of annotations to clear when writing the Resources.
+	ClearAnnotations []string
+
+	// Style is a style that is set on the Resource Node Document.
+	Style yaml.Style
+
+	// FunctionConfig is the function config for a ResourceList. If non-nil,
+	// wrap the results in a ResourceList.
+	FunctionConfig *yaml.RNode
+
+	Results *yaml.RNode
+
+	// WrappingKind if set will cause ByteWriter to wrap the Resources in
+	// an 'items' field in this kind. e.g. if WrappingKind is 'List',
+	// ByteWriter will wrap the Resources in a List .items field.
+	WrappingKind string
+
+	// WrappingAPIVersion is the apiVersion for WrappingKind.
+	WrappingAPIVersion string
+
+	// Sort, if set, will cause ByteWriter to sort the nodes before writing them.
+	Sort bool
+}
+
+var _ Writer = ByteWriter{}
+
+func (w ByteWriter) Write(inputNodes []*yaml.RNode) error {
+	// Copy the nodes to prevent writer from mutating the original nodes.
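+	// (Editorial note: the steps below clear reader annotations and may set a
+	// YAML style on each node, so operating on copies keeps the caller's nodes
+	// intact.)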
+ nodes := copyRNodes(inputNodes) + if w.Sort { + if err := kioutil.SortNodes(nodes); err != nil { + return errors.Wrap(err) + } + } + + // Even though we use the this value further down we must check this before removing annotations + jsonEncodeSingleBareNode := w.shouldJSONEncodeSingleBareNode(nodes) + + // store seqindent annotation value for each node in order to set the encoder indentation + var seqIndentsForNodes []string + for i := range nodes { + seqIndentsForNodes = append(seqIndentsForNodes, nodes[i].GetAnnotations()[kioutil.SeqIndentAnnotation]) + } + + for i := range nodes { + // clean resources by removing annotations set by the Reader + if !w.KeepReaderAnnotations { + _, err := nodes[i].Pipe(yaml.ClearAnnotation(kioutil.IndexAnnotation)) + if err != nil { + return errors.Wrap(err) + } + _, err = nodes[i].Pipe(yaml.ClearAnnotation(kioutil.LegacyIndexAnnotation)) + if err != nil { + return errors.Wrap(err) + } + + _, err = nodes[i].Pipe(yaml.ClearAnnotation(kioutil.SeqIndentAnnotation)) + if err != nil { + return errors.Wrap(err) + } + } + for _, a := range w.ClearAnnotations { + _, err := nodes[i].Pipe(yaml.ClearAnnotation(a)) + if err != nil { + return errors.Wrap(err) + } + } + + if err := yaml.ClearEmptyAnnotations(nodes[i]); err != nil { + return err + } + + if w.Style != 0 { + nodes[i].YNode().Style = w.Style + } + } + + if jsonEncodeSingleBareNode { + encoder := json.NewEncoder(w.Writer) + encoder.SetIndent("", " ") + return errors.Wrap(encoder.Encode(nodes[0])) + } + + encoder := yaml.NewEncoder(w.Writer) + defer encoder.Close() + // don't wrap the elements + if w.WrappingKind == "" { + for i := range nodes { + if seqIndentsForNodes[i] == string(yaml.WideSequenceStyle) { + encoder.DefaultSeqIndent() + } else { + encoder.CompactSeqIndent() + } + if err := encoder.Encode(upWrapBareSequenceNode(nodes[i].Document())); err != nil { + return errors.Wrap(err) + } + } + return nil + } + // wrap the elements in a list + items := &yaml.Node{Kind: yaml.SequenceNode} + list := &yaml.Node{ + Kind: yaml.MappingNode, + Style: w.Style, + Content: []*yaml.Node{ + {Kind: yaml.ScalarNode, Value: "apiVersion"}, + {Kind: yaml.ScalarNode, Value: w.WrappingAPIVersion}, + {Kind: yaml.ScalarNode, Value: "kind"}, + {Kind: yaml.ScalarNode, Value: w.WrappingKind}, + {Kind: yaml.ScalarNode, Value: "items"}, items, + }} + if w.FunctionConfig != nil { + list.Content = append(list.Content, + &yaml.Node{Kind: yaml.ScalarNode, Value: "functionConfig"}, + w.FunctionConfig.YNode()) + } + if w.Results != nil { + list.Content = append(list.Content, + &yaml.Node{Kind: yaml.ScalarNode, Value: "results"}, + w.Results.YNode()) + } + doc := &yaml.Node{ + Kind: yaml.DocumentNode, + Content: []*yaml.Node{list}} + for i := range nodes { + items.Content = append(items.Content, nodes[i].YNode()) + } + return encoder.Encode(doc) +} + +func copyRNodes(in []*yaml.RNode) []*yaml.RNode { + out := make([]*yaml.RNode, len(in)) + for i := range in { + out[i] = in[i].Copy() + } + return out +} + +// shouldJSONEncodeSingleBareNode determines if nodes contain a single node that should not be +// wrapped and has a JSON file extension, which in turn means that the node should be JSON encoded. +// Note 1: this must be checked before any annotations to avoid losing information about the target +// filename extension. +// Note 2: JSON encoding should only be used for single, unwrapped nodes because multiple unwrapped +// nodes cannot be represented in JSON (no multi doc support). 
+// Furthermore, the typical use cases for wrapping nodes would likely not include later
+// writing the whole wrapper to a .json file, i.e. there is no point risking edge-case
+// information loss (e.g. comments disappearing) that could come from JSON encoding the
+// whole wrapper just to ensure that one (or all) nodes can be read as JSON.
+func (w ByteWriter) shouldJSONEncodeSingleBareNode(nodes []*yaml.RNode) bool {
+	if w.WrappingKind == "" && len(nodes) == 1 {
+		if path, _, _ := kioutil.GetFileAnnotations(nodes[0]); path != "" {
+			filename := filepath.Base(path)
+			for _, glob := range JSONMatch {
+				if match, _ := filepath.Match(glob, filename); match {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// upWrapBareSequenceNode unwraps the bare sequence nodes wrapped by yaml.BareSeqNodeWrappingKey
+func upWrapBareSequenceNode(node *yaml.Node) *yaml.Node {
+	rNode := yaml.NewRNode(node)
+	seqNode, err := rNode.Pipe(yaml.Lookup(yaml.BareSeqNodeWrappingKey))
+	if err == nil && !seqNode.IsNilOrEmpty() {
+		return seqNode.YNode()
+	}
+	return node
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go
new file mode 100644
index 000000000..9c11a1463
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package kio contains libraries for reading and writing collections of Resources.
+//
+// Reading Resources
+//
+// Resources are Read using a kio.Reader function. Examples:
+//	[kio.LocalPackageReader{}, kio.ByteReader{}]
+//
+// Resources read using a LocalPackageReader will have annotations applied so they can be
+// written back to the files they were read from.
+//
+// Modifying Resources
+//
+// Resources are modified using a kio.Filter. The kio.Filter accepts a collection of
+// Resources as input, and returns a new collection as output.
+// It is recommended to use the yaml package for manipulating individual Resources in
+// the collection.
+//
+// Writing Resources
+//
+// Resources are Written using a kio.Writer function. Examples:
+//	[kio.LocalPackageWriter{}, kio.ByteWriter{}]
+//
+// ReadWriters
+//
+// It is preferred to use a ReadWriter when reading and writing from / to the same source.
+//
+// Building Pipelines
+//
+// The preferred way to transform a collection of Resources is to use kio.Pipeline to Read,
+// Modify and Write the collection of Resources. Pipeline will automatically and sequentially
+// invoke the Read, Modify, Write steps, returning an error immediately on any failure.
+package kio
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/filters.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/filters.go
new file mode 100644
index 000000000..8d7968b3c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/filters.go
@@ -0,0 +1,210 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package filters
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/kio/kioutil"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Filters is the list of known filters for unmarshalling a filter into a concrete
+// implementation.
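+// Editor's note: KFilter below selects from this map by the yaml `kind`
+// field, so a config such as the following (illustrative values) unmarshals
+// into a GrepFilter:
+//
+//	kind: GrepFilter
+//	path: [metadata, name]
+//	value: my-app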
+var Filters = map[string]func() kio.Filter{
+	"FileSetter":    func() kio.Filter { return &FileSetter{} },
+	"FormatFilter":  func() kio.Filter { return &FormatFilter{} },
+	"GrepFilter":    func() kio.Filter { return GrepFilter{} },
+	"MatchModifier": func() kio.Filter { return &MatchModifyFilter{} },
+	"Modifier":      func() kio.Filter { return &Modifier{} },
+}
+
+// KFilter wraps a kio.Filter so that it can be unmarshalled from yaml.
+type KFilter struct {
+	kio.Filter
+}
+
+func (t KFilter) MarshalYAML() (interface{}, error) {
+	return t.Filter, nil
+}
+
+func (t *KFilter) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	i := map[string]interface{}{}
+	if err := unmarshal(i); err != nil {
+		return err
+	}
+	meta := &yaml.ResourceMeta{}
+	if err := unmarshal(meta); err != nil {
+		return err
+	}
+	filter, found := Filters[meta.Kind]
+	if !found {
+		var knownFilters []string
+		for k := range Filters {
+			knownFilters = append(knownFilters, k)
+		}
+		sort.Strings(knownFilters)
+		return fmt.Errorf("unsupported filter Kind %v: may be one of: [%s]",
+			meta, strings.Join(knownFilters, ","))
+	}
+	t.Filter = filter()
+
+	return unmarshal(t.Filter)
+}
+
+// Modifier modifies the input Resources by invoking the provided pipeline.
+// Modifier will return any Resources for which the pipeline does not return an error.
+type Modifier struct {
+	Kind string `yaml:"kind,omitempty"`
+
+	Filters yaml.YFilters `yaml:"pipeline,omitempty"`
+}
+
+var _ kio.Filter = &Modifier{}
+
+func (f Modifier) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) {
+	for i := range input {
+		if _, err := input[i].Pipe(f.Filters.Filters()...); err != nil {
+			return nil, err
+		}
+	}
+	return input, nil
+}
+
+type MatchModifyFilter struct {
+	Kind string `yaml:"kind,omitempty"`
+
+	MatchFilters []yaml.YFilters `yaml:"match,omitempty"`
+
+	ModifyFilters yaml.YFilters `yaml:"modify,omitempty"`
+}
+
+var _ kio.Filter = &MatchModifyFilter{}
+
+func (f MatchModifyFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) {
+	var matches = input
+	var err error
+	for _, filter := range f.MatchFilters {
+		matches, err = MatchFilter{Filters: filter}.Filter(matches)
+		if err != nil {
+			return nil, err
+		}
+	}
+	_, err = Modifier{Filters: f.ModifyFilters}.Filter(matches)
+	if err != nil {
+		return nil, err
+	}
+	return input, nil
+}
+
+type MatchFilter struct {
+	Kind string `yaml:"kind,omitempty"`
+
+	Filters yaml.YFilters `yaml:"pipeline,omitempty"`
+}
+
+var _ kio.Filter = &MatchFilter{}
+
+func (f MatchFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) {
+	var output []*yaml.RNode
+	for i := range input {
+		if v, err := input[i].Pipe(f.Filters.Filters()...); err != nil {
+			return nil, err
+		} else if v == nil {
+			continue
+		}
+		output = append(output, input[i])
+	}
+	return output, nil
+}
+
+type FilenameFmtVerb string
+
+const (
+	// KindFmt substitutes kind
+	KindFmt FilenameFmtVerb = "%k"
+
+	// NameFmt substitutes metadata.name
+	NameFmt FilenameFmtVerb = "%n"
+
+	// NamespaceFmt substitutes metadata.namespace
+	NamespaceFmt FilenameFmtVerb = "%s"
+)
+
+// FileSetter sets the file name and mode annotations on Resources.
+type FileSetter struct {
+	Kind string `yaml:"kind,omitempty"`
+
+	// FilenamePattern is the pattern to use for generating filenames.
+	// FilenameFmtVerbs may be specified to substitute Resource metadata into the filename.
+	FilenamePattern string `yaml:"filenamePattern,omitempty"`
+
+	// Mode is the filemode to write.
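+	// Editor's note: if left empty, Filter below defaults this to
+	// fmt.Sprintf("%d", 0600), i.e. the decimal string "384" for octal 0600.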
+	Mode string `yaml:"mode,omitempty"`
+
+	// Override will override the existing filename if it is set on the pattern.
+	// Otherwise the existing filename is kept.
+	Override bool `yaml:"override,omitempty"`
+}
+
+var _ kio.Filter = &FileSetter{}
+
+const DefaultFilenamePattern = "%n_%k.yaml"
+
+func (f *FileSetter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) {
+	if f.Mode == "" {
+		f.Mode = fmt.Sprintf("%d", 0600)
+	}
+	if f.FilenamePattern == "" {
+		f.FilenamePattern = DefaultFilenamePattern
+	}
+
+	resources := map[string][]*yaml.RNode{}
+	for i := range input {
+		if err := kioutil.CopyLegacyAnnotations(input[i]); err != nil {
+			return nil, err
+		}
+
+		m, err := input[i].GetMeta()
+		if err != nil {
+			return nil, err
+		}
+		file := f.FilenamePattern
+		file = strings.ReplaceAll(file, string(KindFmt), strings.ToLower(m.Kind))
+		file = strings.ReplaceAll(file, string(NameFmt), strings.ToLower(m.Name))
+		file = strings.ReplaceAll(file, string(NamespaceFmt), strings.ToLower(m.Namespace))
+
+		if _, found := m.Annotations[kioutil.PathAnnotation]; !found || f.Override {
+			if _, err := input[i].Pipe(yaml.SetAnnotation(kioutil.PathAnnotation, file)); err != nil {
+				return nil, err
+			}
+			if _, err := input[i].Pipe(yaml.SetAnnotation(kioutil.LegacyPathAnnotation, file)); err != nil {
+				return nil, err
+			}
+		}
+		resources[file] = append(resources[file], input[i])
+	}
+
+	var output []*yaml.RNode
+	for i := range resources {
+		if err := kioutil.SortNodes(resources[i]); err != nil {
+			return nil, err
+		}
+		for j := range resources[i] {
+			if _, err := resources[i][j].Pipe(
+				yaml.SetAnnotation(kioutil.IndexAnnotation, fmt.Sprintf("%d", j))); err != nil {
+				return nil, err
+			}
+			if _, err := resources[i][j].Pipe(
+				yaml.SetAnnotation(kioutil.LegacyIndexAnnotation, fmt.Sprintf("%d", j))); err != nil {
+				return nil, err
+			}
+			output = append(output, resources[i][j])
+		}
+	}
+	return output, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/fmtr.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/fmtr.go
new file mode 100644
index 000000000..7f2acbda4
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/fmtr.go
@@ -0,0 +1,314 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// This file contains libraries for formatting yaml files containing
+// Kubernetes Resource configuration.
+//
+// Yaml files are formatted by:
+// - Sorting fields and map values
+// - Sorting unordered lists for whitelisted types
+// - Applying a canonical yaml Style
+//
+// Fields are ordered using a relative ordering applied to commonly
+// encountered Resource fields. All Resources, including non-builtin
+// Resources such as CRDs, share the same field precedence.
+//
+// Fields that do not appear in the explicit ordering are ordered
+// lexicographically.
+//
+// A subset of well known unordered lists are sorted by element field
+// values.
+package filters
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"sort"
+
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+type FormattingStrategy = string
+
+const (
+	// FmtAnnotation is the annotation key used to determine if and how the
+	// resource should be formatted.
+	FmtAnnotation string = "config.kubernetes.io/formatting"
+
+	// FmtStrategyStandard means the resource will be formatted according
+	// to the default rules.
+	FmtStrategyStandard FormattingStrategy = "standard"
+
+	// FmtStrategyNone means the resource will not be formatted.
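+	// Editor's note: e.g. a resource opts out of formatting with
+	//
+	//	metadata:
+	//	  annotations:
+	//	    config.kubernetes.io/formatting: none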
+ FmtStrategyNone FormattingStrategy = "none" +) + +// FormatInput returns the formatted input. +func FormatInput(input io.Reader) (*bytes.Buffer, error) { + buff := &bytes.Buffer{} + err := kio.Pipeline{ + Inputs: []kio.Reader{&kio.ByteReader{Reader: input}}, + Filters: []kio.Filter{FormatFilter{}}, + Outputs: []kio.Writer{kio.ByteWriter{Writer: buff}}, + }.Execute() + + return buff, err +} + +// FormatFileOrDirectory reads the file or directory and formats each file's +// contents by writing it back to the file. +func FormatFileOrDirectory(path string) error { + return kio.Pipeline{ + Inputs: []kio.Reader{kio.LocalPackageReader{ + PackagePath: path, + }}, + Filters: []kio.Filter{FormatFilter{}}, + Outputs: []kio.Writer{kio.LocalPackageWriter{PackagePath: path}}, + }.Execute() +} + +type FormatFilter struct { + Process func(n *yaml.Node) error + UseSchema bool +} + +var _ kio.Filter = FormatFilter{} + +func (f FormatFilter) Filter(slice []*yaml.RNode) ([]*yaml.RNode, error) { + for i := range slice { + fmtStrategy, err := getFormattingStrategy(slice[i]) + if err != nil { + return nil, err + } + + if fmtStrategy == FmtStrategyNone { + continue + } + + kindNode, err := slice[i].Pipe(yaml.Get("kind")) + if err != nil { + return nil, err + } + if kindNode == nil { + continue + } + apiVersionNode, err := slice[i].Pipe(yaml.Get("apiVersion")) + if err != nil { + return nil, err + } + if apiVersionNode == nil { + continue + } + kind, apiVersion := kindNode.YNode().Value, apiVersionNode.YNode().Value + var s *openapi.ResourceSchema + if f.UseSchema { + s = openapi.SchemaForResourceType(yaml.TypeMeta{APIVersion: apiVersion, Kind: kind}) + } else { + s = nil + } + err = (&formatter{apiVersion: apiVersion, kind: kind, process: f.Process}). + fmtNode(slice[i].YNode(), "", s) + if err != nil { + return nil, err + } + } + return slice, nil +} + +// getFormattingStrategy looks for the formatting annotation to determine +// which strategy should be used for formatting. The default is standard +// if no annotation is found. +func getFormattingStrategy(node *yaml.RNode) (FormattingStrategy, error) { + value, err := node.Pipe(yaml.GetAnnotation(FmtAnnotation)) + if err != nil || value == nil { + return FmtStrategyStandard, err + } + + fmtStrategy := value.YNode().Value + + switch fmtStrategy { + case FmtStrategyStandard: + return FmtStrategyStandard, nil + case FmtStrategyNone: + return FmtStrategyNone, nil + default: + return "", fmt.Errorf( + "formatting annotation has illegal value %s", fmtStrategy) + } +} + +type formatter struct { + apiVersion string + kind string + process func(n *yaml.Node) error +} + +// fmtNode recursively formats the Document Contents. +// See: https://godoc.org/gopkg.in/yaml.v3#Node +func (f *formatter) fmtNode(n *yaml.Node, path string, schema *openapi.ResourceSchema) error { + if n.Kind == yaml.ScalarNode && schema != nil && schema.Schema != nil { + // ensure values that are interpreted as non-string values (e.g. 
"true") + // are properly quoted + yaml.FormatNonStringStyle(n, *schema.Schema) + } + + // sort the order of mapping fields + if n.Kind == yaml.MappingNode { + sort.Sort(sortedMapContents(*n)) + } + + // sort the order of sequence elements if it is whitelisted + if n.Kind == yaml.SequenceNode { + if yaml.WhitelistedListSortKinds.Has(f.kind) && + yaml.WhitelistedListSortApis.Has(f.apiVersion) { + if sortField, found := yaml.WhitelistedListSortFields[path]; found { + sort.Sort(sortedSeqContents{Node: *n, sortField: sortField}) + } + } + } + + // format the Content + for i := range n.Content { + // MappingNode are structured as having their fields as Content, + // with the field-key and field-value alternating. e.g. Even elements + // are the keys and odd elements are the values + isFieldKey := n.Kind == yaml.MappingNode && i%2 == 0 + isFieldValue := n.Kind == yaml.MappingNode && i%2 == 1 + isElement := n.Kind == yaml.SequenceNode + + // run the process callback on the node if it has been set + // don't process keys: their format should be fixed + if f.process != nil && !isFieldKey { + if err := f.process(n.Content[i]); err != nil { + return err + } + } + + // get the schema for this Node + p := path + var s *openapi.ResourceSchema + switch { + case isFieldValue: + // if the node is a field, lookup the schema using the field name + p = fmt.Sprintf("%s.%s", path, n.Content[i-1].Value) + if schema != nil { + s = schema.Field(n.Content[i-1].Value) + } + case isElement: + // if the node is a list element, lookup the schema for the array items + if schema != nil { + s = schema.Elements() + } + } + // format the node using the schema + err := f.fmtNode(n.Content[i], p, s) + if err != nil { + return err + } + } + return nil +} + +// sortedMapContents sorts the Contents field of a MappingNode by the field names using a statically +// defined field precedence, and falling back on lexicographical sorting +type sortedMapContents yaml.Node + +func (s sortedMapContents) Len() int { + return len(s.Content) / 2 +} +func (s sortedMapContents) Swap(i, j int) { + // yaml MappingNode Contents are a list of field names followed by + // field values, rather than a list of field pairs. + // increment. + // + // e.g. ["field1Name", "field1Value", "field2Name", "field2Value"] + iFieldNameIndex := i * 2 + jFieldNameIndex := j * 2 + iFieldValueIndex := iFieldNameIndex + 1 + jFieldValueIndex := jFieldNameIndex + 1 + + // swap field names + s.Content[iFieldNameIndex], s.Content[jFieldNameIndex] = + s.Content[jFieldNameIndex], s.Content[iFieldNameIndex] + + // swap field values + s.Content[iFieldValueIndex], s.Content[jFieldValueIndex] = s. + Content[jFieldValueIndex], s.Content[iFieldValueIndex] +} + +func (s sortedMapContents) Less(i, j int) bool { + iFieldNameIndex := i * 2 + jFieldNameIndex := j * 2 + iFieldName := s.Content[iFieldNameIndex].Value + jFieldName := s.Content[jFieldNameIndex].Value + + // order by their precedence values looked up from the index + iOrder, foundI := yaml.FieldOrder[iFieldName] + jOrder, foundJ := yaml.FieldOrder[jFieldName] + if foundI && foundJ { + return iOrder < jOrder + } + + // known fields come before unknown fields + if foundI { + return true + } + if foundJ { + return false + } + + // neither field is known, sort them lexicographically + return iFieldName < jFieldName +} + +// sortedSeqContents sorts the Contents field of a SequenceNode by the value of +// the elements sortField. +// e.g. 
it will sort spec.template.spec.containers by the value of the container `name` field +type sortedSeqContents struct { + yaml.Node + sortField string +} + +func (s sortedSeqContents) Len() int { + return len(s.Content) +} +func (s sortedSeqContents) Swap(i, j int) { + s.Content[i], s.Content[j] = s.Content[j], s.Content[i] +} +func (s sortedSeqContents) Less(i, j int) bool { + // primitive lists -- sort by the element's primitive values + if s.sortField == "" { + iValue := s.Content[i].Value + jValue := s.Content[j].Value + return iValue < jValue + } + + // map lists -- sort by the element's sortField values + var iValue, jValue string + for a := range s.Content[i].Content { + if a%2 != 0 { + continue // not a fieldNameIndex + } + // locate the index of the sortField field + if s.Content[i].Content[a].Value == s.sortField { + // a is the yaml node for the field key, a+1 is the node for the field value + iValue = s.Content[i].Content[a+1].Value + } + } + for a := range s.Content[j].Content { + if a%2 != 0 { + continue // not a fieldNameIndex + } + + // locate the index of the sortField field + if s.Content[j].Content[a].Value == s.sortField { + // a is the yaml node for the field key, a+1 is the node for the field value + jValue = s.Content[j].Content[a+1].Value + } + } + + // compare the field values + return iValue < jValue +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/grep.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/grep.go new file mode 100644 index 000000000..2eb8a8886 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/grep.go @@ -0,0 +1,117 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "regexp" + "strings" + + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +type GrepType int + +const ( + Regexp GrepType = 1 << iota + GreaterThanEq + GreaterThan + LessThan + LessThanEq +) + +// GrepFilter filters RNodes with a matching field +type GrepFilter struct { + Path []string `yaml:"path,omitempty"` + Value string `yaml:"value,omitempty"` + MatchType GrepType `yaml:"matchType,omitempty"` + InvertMatch bool `yaml:"invertMatch,omitempty"` + Compare func(a, b string) (int, error) +} + +var _ kio.Filter = GrepFilter{} + +func (f GrepFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) { + // compile the regular expression 1 time if we are matching using regex + var reg *regexp.Regexp + var err error + if f.MatchType == Regexp || f.MatchType == 0 { + reg, err = regexp.Compile(f.Value) + if err != nil { + return nil, err + } + } + + var output kio.ResourceNodeSlice + for i := range input { + node := input[i] + val, err := node.Pipe(&yaml.PathMatcher{Path: f.Path}) + if err != nil { + return nil, err + } + if val == nil || len(val.Content()) == 0 { + if f.InvertMatch { + output = append(output, input[i]) + } + continue + } + found := false + err = val.VisitElements(func(elem *yaml.RNode) error { + // get the value + var str string + if f.MatchType == Regexp { + style := elem.YNode().Style + defer func() { elem.YNode().Style = style }() + elem.YNode().Style = yaml.FlowStyle + str, err = elem.String() + if err != nil { + return err + } + str = strings.TrimSpace(strings.ReplaceAll(str, `"`, "")) + } else { + // if not regexp, then it needs to parse into a quantity and comments will + // break that + str = elem.YNode().Value + if str == "" { + return nil + } + } + + if f.MatchType == Regexp || f.MatchType == 0 { + if reg.MatchString(str) { + found = true + } + 
return nil + } + + comp, err := f.Compare(str, f.Value) + if err != nil { + return err + } + + if f.MatchType == GreaterThan && comp > 0 { + found = true + } + if f.MatchType == GreaterThanEq && comp >= 0 { + found = true + } + if f.MatchType == LessThan && comp < 0 { + found = true + } + if f.MatchType == LessThanEq && comp <= 0 { + found = true + } + return nil + }) + if err != nil { + return nil, err + } + if found == f.InvertMatch { + continue + } + + output = append(output, input[i]) + } + return output, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/local.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/local.go new file mode 100644 index 000000000..bdac1a28f --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/local.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +const LocalConfigAnnotation = "config.kubernetes.io/local-config" + +// IsLocalConfig filters Resources using the config.kubernetes.io/local-config annotation +type IsLocalConfig struct { + // IncludeLocalConfig will include local-config if set to true + IncludeLocalConfig bool `yaml:"includeLocalConfig,omitempty"` + + // ExcludeNonLocalConfig will exclude non local-config if set to true + ExcludeNonLocalConfig bool `yaml:"excludeNonLocalConfig,omitempty"` +} + +// Filter implements kio.Filter +func (c *IsLocalConfig) Filter(inputs []*yaml.RNode) ([]*yaml.RNode, error) { + var out []*yaml.RNode + for i := range inputs { + meta, err := inputs[i].GetMeta() + if err != nil { + return nil, err + } + _, local := meta.Annotations[LocalConfigAnnotation] + + if local && c.IncludeLocalConfig { + out = append(out, inputs[i]) + } else if !local && !c.ExcludeNonLocalConfig { + out = append(out, inputs[i]) + } + } + return out, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge.go new file mode 100644 index 000000000..5159052cc --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge.go @@ -0,0 +1,86 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package merge contains libraries for merging Resources and Patches +package filters + +import ( + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/yaml" + "sigs.k8s.io/kustomize/kyaml/yaml/merge2" +) + +// MergeFilter merges Resources with the Group/Version/Kind/Namespace/Name together using +// a 2-way merge strategy. +// +// - Fields set to null in the source will be cleared from the destination +// - Fields with matching keys will be merged recursively +// - Lists with an associative key (e.g. 
name) will have their elements merged using the key +// - List without an associative key will have the dest list replaced by the source list +type MergeFilter struct { + Reverse bool +} + +var _ kio.Filter = MergeFilter{} + +type mergeKey struct { + apiVersion string + kind string + namespace string + name string +} + +// MergeFilter implements kio.Filter by merging Resources with the same G/V/K/NS/N +func (c MergeFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) { + // invert the merge precedence + if c.Reverse { + for i, j := 0, len(input)-1; i < j; i, j = i+1, j-1 { + input[i], input[j] = input[j], input[i] + } + } + + // index the Resources by G/V/K/NS/N + index := map[mergeKey][]*yaml.RNode{} + // retain the original ordering + var order []mergeKey + for i := range input { + meta, err := input[i].GetMeta() + if err != nil { + return nil, err + } + key := mergeKey{ + apiVersion: meta.APIVersion, + kind: meta.Kind, + namespace: meta.Namespace, + name: meta.Name, + } + if _, found := index[key]; !found { + order = append(order, key) + } + index[key] = append(index[key], input[i]) + } + + // merge each of the G/V/K/NS/N lists + var output []*yaml.RNode + var err error + for _, k := range order { + var merged *yaml.RNode + resources := index[k] + for i := range resources { + patch := resources[i] + if merged == nil { + // first resources, don't merge it + merged = resources[i] + } else { + merged, err = merge2.Merge(patch, merged, yaml.MergeOptions{ + ListIncreaseDirection: yaml.MergeOptionsListPrepend, + }) + if err != nil { + return nil, err + } + } + } + output = append(output, merged) + } + return output, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge3.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge3.go new file mode 100644 index 000000000..de8bf6f67 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge3.go @@ -0,0 +1,317 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "fmt" + + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/yaml" + "sigs.k8s.io/kustomize/kyaml/yaml/merge3" +) + +const ( + mergeSourceAnnotation = "config.kubernetes.io/merge-source" + mergeSourceOriginal = "original" + mergeSourceUpdated = "updated" + mergeSourceDest = "dest" +) + +// ResourceMatcher interface is used to match two resources based on IsSameResource implementation +// This is the way to group same logical resources in upstream, local and origin for merge +// The default way to group them is using GVKNN similar to how kubernetes server identifies resources +// Users of this library might have their own interpretation of grouping similar resources +// for e.g. if consumer adds a name-prefix to local resource, it should not be treated as new resource +// for updates etc. +// Hence, the callers of this library may pass different implementation for IsSameResource +type ResourceMatcher interface { + IsSameResource(node1, node2 *yaml.RNode) bool +} + +// ResourceMergeStrategy is the return type from the Handle function in the +// ResourceHandler interface. It determines which version of a resource should +// be included in the output (if any). +type ResourceMergeStrategy int + +const ( + // Merge means the output to dest should be the 3-way merge of original, + // updated and dest. + Merge ResourceMergeStrategy = iota + // KeepDest means the version of the resource in dest should be the output. 
+ KeepDest + // KeepUpdated means the version of the resource in updated should be the + // output. + KeepUpdated + // KeepOriginal means the version of the resource in original should be the + // output. + KeepOriginal + // Skip means the resource should not be included in the output. + Skip +) + +// ResourceHandler interface is used to determine what should be done for a +// resource once the versions in original, updated and dest has been +// identified based on the ResourceMatcher. This allows users to customize +// what should be the result in dest if a resource has been deleted from +// upstream. +type ResourceHandler interface { + Handle(original, updated, dest *yaml.RNode) (ResourceMergeStrategy, error) +} + +// Merge3 performs a 3-way merge on the original, updated, and destination packages. +type Merge3 struct { + OriginalPath string + UpdatedPath string + DestPath string + MatchFilesGlob []string + Matcher ResourceMatcher + Handler ResourceHandler +} + +func (m Merge3) Merge() error { + // Read the destination package. The ReadWriter will take take of deleting files + // for removed resources. + var inputs []kio.Reader + dest := &kio.LocalPackageReadWriter{ + PackagePath: m.DestPath, + MatchFilesGlob: m.MatchFilesGlob, + SetAnnotations: map[string]string{mergeSourceAnnotation: mergeSourceDest}, + } + inputs = append(inputs, dest) + + // Read the original package + inputs = append(inputs, kio.LocalPackageReader{ + PackagePath: m.OriginalPath, + MatchFilesGlob: m.MatchFilesGlob, + SetAnnotations: map[string]string{mergeSourceAnnotation: mergeSourceOriginal}, + }) + + // Read the updated package + inputs = append(inputs, kio.LocalPackageReader{ + PackagePath: m.UpdatedPath, + MatchFilesGlob: m.MatchFilesGlob, + SetAnnotations: map[string]string{mergeSourceAnnotation: mergeSourceUpdated}, + }) + + return kio.Pipeline{ + Inputs: inputs, + Filters: []kio.Filter{m}, + Outputs: []kio.Writer{dest}, + }.Execute() +} + +// Filter combines Resources with the same GVK + N + NS into tuples, and then merges them +func (m Merge3) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + // index the nodes by their identity + matcher := m.Matcher + if matcher == nil { + matcher = &DefaultGVKNNMatcher{MergeOnPath: true} + } + handler := m.Handler + if handler == nil { + handler = &DefaultResourceHandler{} + } + + tl := tuples{matcher: matcher} + for i := range nodes { + if err := tl.add(nodes[i]); err != nil { + return nil, err + } + } + + // iterate over the inputs, merging as needed + var output []*yaml.RNode + for i := range tl.list { + t := tl.list[i] + strategy, err := handler.Handle(t.original, t.updated, t.dest) + if err != nil { + return nil, err + } + switch strategy { + case Merge: + node, err := t.merge() + if err != nil { + return nil, err + } + if node != nil { + output = append(output, node) + } + case KeepDest: + output = append(output, t.dest) + case KeepUpdated: + output = append(output, t.updated) + case KeepOriginal: + output = append(output, t.original) + case Skip: + // do nothing + } + } + return output, nil +} + +// tuples combines nodes with the same GVK + N + NS +type tuples struct { + list []*tuple + + // matcher matches the resources for merge + matcher ResourceMatcher +} + +// DefaultGVKNNMatcher holds the default matching of resources implementation based on +// Group, Version, Kind, Name and Namespace of the resource +type DefaultGVKNNMatcher struct { + // MergeOnPath will use the relative filepath as part of the merge key. 
+ // This may be necessary if the directory contains multiple copies of + // the same resource, or resources patches. + MergeOnPath bool +} + +// IsSameResource returns true if metadata of node1 and metadata of node2 belongs to same logical resource +func (dm *DefaultGVKNNMatcher) IsSameResource(node1, node2 *yaml.RNode) bool { + if node1 == nil || node2 == nil { + return false + } + if err := kioutil.CopyLegacyAnnotations(node1); err != nil { + return false + } + if err := kioutil.CopyLegacyAnnotations(node2); err != nil { + return false + } + + meta1, err := node1.GetMeta() + if err != nil { + return false + } + + meta2, err := node2.GetMeta() + if err != nil { + return false + } + + if meta1.Name != meta2.Name { + return false + } + if meta1.Namespace != meta2.Namespace { + return false + } + if meta1.APIVersion != meta2.APIVersion { + return false + } + if meta1.Kind != meta2.Kind { + return false + } + if dm.MergeOnPath { + // directories may contain multiple copies of a resource with the same + // name, namespace, apiVersion and kind -- e.g. kustomize patches, or + // multiple environments + // mergeOnPath configures the merge logic to use the path as part of the + // resource key + if meta1.Annotations[kioutil.PathAnnotation] != meta2.Annotations[kioutil.PathAnnotation] { + return false + } + } + return true +} + +// add adds a node to the list, combining it with an existing matching Resource if found +func (ts *tuples) add(node *yaml.RNode) error { + for i := range ts.list { + t := ts.list[i] + if ts.matcher.IsSameResource(addedNode(t), node) { + return t.add(node) + } + } + t := &tuple{} + if err := t.add(node); err != nil { + return err + } + ts.list = append(ts.list, t) + return nil +} + +// addedNode returns one on the existing added nodes in the tuple +func addedNode(t *tuple) *yaml.RNode { + if t.updated != nil { + return t.updated + } + if t.original != nil { + return t.original + } + return t.dest +} + +// tuple wraps an original, updated, and dest tuple for a given Resource +type tuple struct { + original *yaml.RNode + updated *yaml.RNode + dest *yaml.RNode +} + +// add sets the corresponding tuple field for the node +func (t *tuple) add(node *yaml.RNode) error { + meta, err := node.GetMeta() + if err != nil { + return err + } + switch meta.Annotations[mergeSourceAnnotation] { + case mergeSourceDest: + if t.dest != nil { + return duplicateError("local", meta.Annotations[kioutil.PathAnnotation]) + } + t.dest = node + case mergeSourceOriginal: + if t.original != nil { + return duplicateError("original upstream", meta.Annotations[kioutil.PathAnnotation]) + } + t.original = node + case mergeSourceUpdated: + if t.updated != nil { + return duplicateError("updated upstream", meta.Annotations[kioutil.PathAnnotation]) + } + t.updated = node + default: + return fmt.Errorf("no source annotation for Resource") + } + return nil +} + +// merge performs a 3-way merge on the tuple +func (t *tuple) merge() (*yaml.RNode, error) { + return merge3.Merge(t.dest, t.original, t.updated) +} + +// duplicateError returns duplicate resources error +func duplicateError(source, filePath string) error { + return fmt.Errorf(`found duplicate %q resources in file %q, please refer to "update" documentation for the fix`, source, filePath) +} + +// DefaultResourceHandler is the default implementation of the ResourceHandler +// interface. It uses the following rules: +// * Keep dest if resource only exists in dest. +// * Keep updated if resource added in updated. +// * Delete dest if updated has been deleted. 
+// * Don't add the resource back if removed from dest. +// * Otherwise merge. +type DefaultResourceHandler struct{} + +func (*DefaultResourceHandler) Handle(original, updated, dest *yaml.RNode) (ResourceMergeStrategy, error) { + switch { + case original == nil && updated == nil && dest != nil: + // added locally -- keep dest + return KeepDest, nil + case updated != nil && dest == nil: + // added in the update -- add update + return KeepUpdated, nil + case original != nil && updated == nil: + // deleted in the update + return Skip, nil + case original != nil && dest == nil: + // deleted locally + return Skip, nil + default: + // dest and updated are non-nil -- merge them + return Merge, nil + } +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/modify.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/modify.go new file mode 100644 index 000000000..b1090302a --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/modify.go @@ -0,0 +1,4 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filters diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/stripcomments.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/stripcomments.go new file mode 100644 index 000000000..8e9ecb3f2 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/stripcomments.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +type StripCommentsFilter struct{} + +var _ kio.Filter = StripCommentsFilter{} + +func (f StripCommentsFilter) Filter(slice []*yaml.RNode) ([]*yaml.RNode, error) { + for i := range slice { + stripComments(slice[i].YNode()) + } + return slice, nil +} + +func stripComments(node *yaml.Node) { + if node == nil { + return + } + node.HeadComment = "" + node.LineComment = "" + node.FootComment = "" + for i := range node.Content { + stripComments(node.Content[i]) + } +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go new file mode 100644 index 000000000..0ba3d8382 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go @@ -0,0 +1,105 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package kio + +import ( + "errors" + "os" + "path/filepath" + "strings" + + gitignore "github.com/monochromegane/go-gitignore" + "sigs.k8s.io/kustomize/kyaml/ext" + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +// ignoreFilesMatcher handles `.krmignore` files, which allows for ignoring +// files or folders in a package. The format of this file is a subset of the +// gitignore format, with recursive patterns (like a/**/c) not supported. If a +// file or folder matches any of the patterns in the .krmignore file for the +// package, it will be excluded. +// +// It works as follows: +// +// * It will look for .krmignore file in the top folder and on the top level +// of any subpackages. Subpackages are defined by the presence of a Krmfile +// in the folder. +// * `.krmignore` files only cover files and folders for the package in which +// it is defined. So ignore patterns defined in a parent package does not +// affect which files are ignored from a subpackage. +// * An ignore pattern can not ignore a subpackage. 
So even if the parent +// package contains a pattern that ignores the directory foo, if foo is a +// subpackage, it will still be included if the IncludeSubpackages property +// is set to true +type ignoreFilesMatcher struct { + matchers []matcher + fs filesys.FileSystemOrOnDisk +} + +// readIgnoreFile checks whether there is a .krmignore file in the path, and +// if it is, reads it in and turns it into a matcher. If we can't find a file, +// we just add a matcher that match nothing. +func (i *ignoreFilesMatcher) readIgnoreFile(path string) error { + i.verifyPath(path) + f, err := i.fs.Open(filepath.Join(path, ext.IgnoreFileName())) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + i.matchers = append(i.matchers, matcher{ + matcher: gitignore.DummyIgnoreMatcher(false), + basePath: path, + }) + return nil + } + return err + } + defer f.Close() + + i.matchers = append(i.matchers, matcher{ + matcher: gitignore.NewGitIgnoreFromReader(path, f), + basePath: path, + }) + return nil +} + +// verifyPath checks whether the top matcher on the stack +// is correct for the provided filepath. Matchers are removed once +// we encounter a filepath that is not a subpath of the basepath for +// the matcher. +func (i *ignoreFilesMatcher) verifyPath(path string) { + for j := len(i.matchers) - 1; j >= 0; j-- { + matcher := i.matchers[j] + if strings.HasPrefix(path, matcher.basePath) || path == matcher.basePath { + i.matchers = i.matchers[:j+1] + return + } + } +} + +// matchFile checks whether the file given by the provided path matches +// any of the patterns in the .krmignore file for the package. +func (i *ignoreFilesMatcher) matchFile(path string) bool { + if len(i.matchers) == 0 { + return false + } + i.verifyPath(filepath.Dir(path)) + return i.matchers[len(i.matchers)-1].matcher.Match(path, false) +} + +// matchFile checks whether the directory given by the provided path matches +// any of the patterns in the .krmignore file for the package. +func (i *ignoreFilesMatcher) matchDir(path string) bool { + if len(i.matchers) == 0 { + return false + } + i.verifyPath(path) + return i.matchers[len(i.matchers)-1].matcher.Match(path, true) +} + +// matcher wraps the gitignore matcher and the path to the folder +// where the file was found. +type matcher struct { + matcher gitignore.IgnoreMatcher + + basePath string +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go new file mode 100644 index 000000000..9e00509eb --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go @@ -0,0 +1,447 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package kio contains low-level libraries for reading, modifying and writing +// Resource Configuration and packages. +package kio + +import ( + "fmt" + "strconv" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// Reader reads ResourceNodes. Analogous to io.Reader. +type Reader interface { + Read() ([]*yaml.RNode, error) +} + +// ResourceNodeSlice is a collection of ResourceNodes. +// While ResourceNodeSlice has no inherent constraints on ordering or uniqueness, specific +// Readers, Filters or Writers may have constraints. +type ResourceNodeSlice []*yaml.RNode + +var _ Reader = ResourceNodeSlice{} + +func (o ResourceNodeSlice) Read() ([]*yaml.RNode, error) { + return o, nil +} + +// Writer writes ResourceNodes. Analogous to io.Writer. 
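+// Editor's note: WriterFunc below lets an ordinary function satisfy Writer,
+// e.g. (an illustrative sketch, not upstream code):
+//
+//	w := kio.WriterFunc(func(nodes []*yaml.RNode) error {
+//		fmt.Printf("writing %d resources\n", len(nodes))
+//		return nil
+//	})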
+type Writer interface { + Write([]*yaml.RNode) error +} + +// WriterFunc implements a Writer as a function. +type WriterFunc func([]*yaml.RNode) error + +func (fn WriterFunc) Write(o []*yaml.RNode) error { + return fn(o) +} + +// ReaderWriter implements both Reader and Writer interfaces +type ReaderWriter interface { + Reader + Writer +} + +// Filter modifies a collection of Resource Configuration by returning the modified slice. +// When possible, Filters should be serializable to yaml so that they can be described +// as either data or code. +// +// Analogous to http://www.linfo.org/filters.html +type Filter interface { + Filter([]*yaml.RNode) ([]*yaml.RNode, error) +} + +// TrackableFilter is an extension of Filter which is also capable of tracking +// which fields were mutated by the filter. +type TrackableFilter interface { + Filter + WithMutationTracker(func(key, value, tag string, node *yaml.RNode)) +} + +// FilterFunc implements a Filter as a function. +type FilterFunc func([]*yaml.RNode) ([]*yaml.RNode, error) + +func (fn FilterFunc) Filter(o []*yaml.RNode) ([]*yaml.RNode, error) { + return fn(o) +} + +// Pipeline reads Resource Configuration from a set of Inputs, applies some +// transformation filters, and writes the results to a set of Outputs. +// +// Analogous to http://www.linfo.org/pipes.html +type Pipeline struct { + // Inputs provide sources for Resource Configuration to be read. + Inputs []Reader `yaml:"inputs,omitempty"` + + // Filters are transformations applied to the Resource Configuration. + // They are applied in the order they are specified. + // Analogous to http://www.linfo.org/filters.html + Filters []Filter `yaml:"filters,omitempty"` + + // Outputs are where the transformed Resource Configuration is written. + Outputs []Writer `yaml:"outputs,omitempty"` + + // ContinueOnEmptyResult configures what happens when a filter in the pipeline + // returns an empty result. + // If it is false (default), subsequent filters will be skipped and the result + // will be returned immediately. This is useful as an optimization when you + // know that subsequent filters will not alter the empty result. + // If it is true, the empty result will be provided as input to the next + // filter in the list. This is useful when subsequent functions in the + // pipeline may generate new resources. + ContinueOnEmptyResult bool `yaml:"continueOnEmptyResult,omitempty"` +} + +// Execute executes each step in the sequence, returning immediately after encountering +// any error as part of the Pipeline. +func (p Pipeline) Execute() error { + return p.ExecuteWithCallback(nil) +} + +// PipelineExecuteCallbackFunc defines a callback function that will be called each time a step in the pipeline succeeds. +type PipelineExecuteCallbackFunc = func(op Filter) + +// ExecuteWithCallback executes each step in the sequence, returning immediately after encountering +// any error as part of the Pipeline. The callback will be called each time a step succeeds. +func (p Pipeline) ExecuteWithCallback(callback PipelineExecuteCallbackFunc) error { + var result []*yaml.RNode + + // read from the inputs + for _, i := range p.Inputs { + nodes, err := i.Read() + if err != nil { + return errors.Wrap(err) + } + result = append(result, nodes...) + } + + // apply operations + for i := range p.Filters { + // Not all RNodes passed through kio.Pipeline have metadata nor should + // they all be required to. 
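+		// Editor's note: "internal" annotations use the
+		// internal.config.kubernetes.io/ prefix (kioutil.PathAnnotation and
+		// friends), while the deprecated legacy annotations use the older
+		// config.kubernetes.io/path, config.kubernetes.io/index and
+		// config.k8s.io/id keys; the pre- and post-processing around each
+		// filter keeps the two families in sync.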
+ nodeAnnos, err := PreprocessResourcesForInternalAnnotationMigration(result) + if err != nil { + return err + } + + op := p.Filters[i] + if callback != nil { + callback(op) + } + result, err = op.Filter(result) + // TODO (issue 2872): This len(result) == 0 should be removed and empty result list should be + // handled by outputs. However currently some writer like LocalPackageReadWriter + // will clear the output directory and which will cause unpredictable results + if len(result) == 0 && !p.ContinueOnEmptyResult || err != nil { + return errors.Wrap(err) + } + + // If either the internal annotations for path, index, and id OR the legacy + // annotations for path, index, and id are changed, we have to update the other. + err = ReconcileInternalAnnotations(result, nodeAnnos) + if err != nil { + return err + } + } + + // write to the outputs + for _, o := range p.Outputs { + if err := o.Write(result); err != nil { + return errors.Wrap(err) + } + } + return nil +} + +// FilterAll runs the yaml.Filter against all inputs +func FilterAll(filter yaml.Filter) Filter { + return FilterFunc(func(nodes []*yaml.RNode) ([]*yaml.RNode, error) { + for i := range nodes { + _, err := filter.Filter(nodes[i]) + if err != nil { + return nil, errors.Wrap(err) + } + } + return nodes, nil + }) +} + +// PreprocessResourcesForInternalAnnotationMigration returns a mapping from id to all +// internal annotations, so that we can use it to reconcile the annotations +// later. This is necessary because currently both internal-prefixed annotations +// and legacy annotations are currently supported, and a change to one must be +// reflected in the other if needed. +func PreprocessResourcesForInternalAnnotationMigration(result []*yaml.RNode) (map[string]map[string]string, error) { + idToAnnosMap := make(map[string]map[string]string) + for i := range result { + idStr := strconv.Itoa(i) + err := result[i].PipeE(yaml.SetAnnotation(kioutil.InternalAnnotationsMigrationResourceIDAnnotation, idStr)) + if err != nil { + return nil, err + } + idToAnnosMap[idStr] = kioutil.GetInternalAnnotations(result[i]) + if err = kioutil.CopyLegacyAnnotations(result[i]); err != nil { + return nil, err + } + meta, _ := result[i].GetMeta() + if err = checkMismatchedAnnos(meta.Annotations); err != nil { + return nil, err + } + } + return idToAnnosMap, nil +} + +func checkMismatchedAnnos(annotations map[string]string) error { + path := annotations[kioutil.PathAnnotation] + index := annotations[kioutil.IndexAnnotation] + id := annotations[kioutil.IdAnnotation] + + legacyPath := annotations[kioutil.LegacyPathAnnotation] + legacyIndex := annotations[kioutil.LegacyIndexAnnotation] + legacyId := annotations[kioutil.LegacyIdAnnotation] + + // if prior to running the functions, the legacy and internal annotations differ, + // throw an error as we cannot infer the user's intent. + if path != "" && legacyPath != "" && path != legacyPath { + return fmt.Errorf("resource input to function has mismatched legacy and internal path annotations") + } + if index != "" && legacyIndex != "" && index != legacyIndex { + return fmt.Errorf("resource input to function has mismatched legacy and internal index annotations") + } + if id != "" && legacyId != "" && id != legacyId { + return fmt.Errorf("resource input to function has mismatched legacy and internal id annotations") + } + return nil +} + +type nodeAnnotations struct { + path string + index string + id string +} + +// ReconcileInternalAnnotations reconciles the annotation format for path, index and id annotations. 
+// It will ensure the output annotation format matches the format in the input. e.g. if the input +// format uses the legacy format and the output will be converted to the legacy format if it's not. +func ReconcileInternalAnnotations(result []*yaml.RNode, nodeAnnosMap map[string]map[string]string) error { + useInternal, useLegacy, err := determineAnnotationsFormat(nodeAnnosMap) + if err != nil { + return err + } + + for i := range result { + // if only one annotation is set, set the other. + err = missingInternalOrLegacyAnnotations(result[i]) + if err != nil { + return err + } + // we must check to see if the function changed either the new internal annotations + // or the old legacy annotations. If one is changed, the change must be reflected + // in the other. + err = checkAnnotationsAltered(result[i], nodeAnnosMap) + if err != nil { + return err + } + // We invoke determineAnnotationsFormat to find out if the original annotations + // use the internal or (and) the legacy format. We format the resources to + // make them consistent with original format. + err = formatInternalAnnotations(result[i], useInternal, useLegacy) + if err != nil { + return err + } + // if the annotations are still somehow out of sync, throw an error + meta, _ := result[i].GetMeta() + err = checkMismatchedAnnos(meta.Annotations) + if err != nil { + return err + } + + if _, err = result[i].Pipe(yaml.ClearAnnotation(kioutil.InternalAnnotationsMigrationResourceIDAnnotation)); err != nil { + return err + } + } + return nil +} + +// determineAnnotationsFormat determines if the resources are using one of the internal and legacy annotation format or both of them. +func determineAnnotationsFormat(nodeAnnosMap map[string]map[string]string) (bool, bool, error) { + var useInternal, useLegacy bool + var err error + + if len(nodeAnnosMap) == 0 { + return true, true, nil + } + + var internal, legacy *bool + for _, annos := range nodeAnnosMap { + _, foundPath := annos[kioutil.PathAnnotation] + _, foundIndex := annos[kioutil.IndexAnnotation] + _, foundId := annos[kioutil.IdAnnotation] + _, foundLegacyPath := annos[kioutil.LegacyPathAnnotation] + _, foundLegacyIndex := annos[kioutil.LegacyIndexAnnotation] + _, foundLegacyId := annos[kioutil.LegacyIdAnnotation] + + if !(foundPath || foundIndex || foundId || foundLegacyPath || foundLegacyIndex || foundLegacyId) { + continue + } + + foundOneOf := foundPath || foundIndex || foundId + if internal == nil { + f := foundOneOf + internal = &f + } + if (foundOneOf && !*internal) || (!foundOneOf && *internal) { + err = fmt.Errorf("the annotation formatting in the input resources is not consistent") + return useInternal, useLegacy, err + } + + foundOneOf = foundLegacyPath || foundLegacyIndex || foundLegacyId + if legacy == nil { + f := foundOneOf + legacy = &f + } + if (foundOneOf && !*legacy) || (!foundOneOf && *legacy) { + err = fmt.Errorf("the annotation formatting in the input resources is not consistent") + return useInternal, useLegacy, err + } + } + if internal != nil { + useInternal = *internal + } + if legacy != nil { + useLegacy = *legacy + } + return useInternal, useLegacy, err +} + +func missingInternalOrLegacyAnnotations(rn *yaml.RNode) error { + if err := missingInternalOrLegacyAnnotation(rn, kioutil.PathAnnotation, kioutil.LegacyPathAnnotation); err != nil { + return err + } + if err := missingInternalOrLegacyAnnotation(rn, kioutil.IndexAnnotation, kioutil.LegacyIndexAnnotation); err != nil { + return err + } + if err := missingInternalOrLegacyAnnotation(rn, 
kioutil.IdAnnotation, kioutil.LegacyIdAnnotation); err != nil { + return err + } + return nil +} + +func missingInternalOrLegacyAnnotation(rn *yaml.RNode, newKey string, legacyKey string) error { + meta, _ := rn.GetMeta() + annotations := meta.Annotations + value := annotations[newKey] + legacyValue := annotations[legacyKey] + + if value == "" && legacyValue == "" { + // do nothing + return nil + } + + if value == "" { + // new key is not set, copy from legacy key + if err := rn.PipeE(yaml.SetAnnotation(newKey, legacyValue)); err != nil { + return err + } + } else if legacyValue == "" { + // legacy key is not set, copy from new key + if err := rn.PipeE(yaml.SetAnnotation(legacyKey, value)); err != nil { + return err + } + } + return nil +} + +func checkAnnotationsAltered(rn *yaml.RNode, nodeAnnosMap map[string]map[string]string) error { + meta, _ := rn.GetMeta() + annotations := meta.Annotations + // get the resource's current path, index, and ids from the new annotations + internal := nodeAnnotations{ + path: annotations[kioutil.PathAnnotation], + index: annotations[kioutil.IndexAnnotation], + id: annotations[kioutil.IdAnnotation], + } + + // get the resource's current path, index, and ids from the legacy annotations + legacy := nodeAnnotations{ + path: annotations[kioutil.LegacyPathAnnotation], + index: annotations[kioutil.LegacyIndexAnnotation], + id: annotations[kioutil.LegacyIdAnnotation], + } + + rid := annotations[kioutil.InternalAnnotationsMigrationResourceIDAnnotation] + originalAnnotations, found := nodeAnnosMap[rid] + if !found { + return nil + } + originalPath, found := originalAnnotations[kioutil.PathAnnotation] + if !found { + originalPath = originalAnnotations[kioutil.LegacyPathAnnotation] + } + if originalPath != "" { + switch { + case originalPath != internal.path && originalPath != legacy.path && internal.path != legacy.path: + return fmt.Errorf("resource input to function has mismatched legacy and internal path annotations") + case originalPath != internal.path: + if _, err := rn.Pipe(yaml.SetAnnotation(kioutil.LegacyPathAnnotation, internal.path)); err != nil { + return err + } + case originalPath != legacy.path: + if _, err := rn.Pipe(yaml.SetAnnotation(kioutil.PathAnnotation, legacy.path)); err != nil { + return err + } + } + } + + originalIndex, found := originalAnnotations[kioutil.IndexAnnotation] + if !found { + originalIndex = originalAnnotations[kioutil.LegacyIndexAnnotation] + } + if originalIndex != "" { + switch { + case originalIndex != internal.index && originalIndex != legacy.index && internal.index != legacy.index: + return fmt.Errorf("resource input to function has mismatched legacy and internal index annotations") + case originalIndex != internal.index: + if _, err := rn.Pipe(yaml.SetAnnotation(kioutil.LegacyIndexAnnotation, internal.index)); err != nil { + return err + } + case originalIndex != legacy.index: + if _, err := rn.Pipe(yaml.SetAnnotation(kioutil.IndexAnnotation, legacy.index)); err != nil { + return err + } + } + } + return nil +} + +func formatInternalAnnotations(rn *yaml.RNode, useInternal, useLegacy bool) error { + if !useInternal { + if err := rn.PipeE(yaml.ClearAnnotation(kioutil.IdAnnotation)); err != nil { + return err + } + if err := rn.PipeE(yaml.ClearAnnotation(kioutil.PathAnnotation)); err != nil { + return err + } + if err := rn.PipeE(yaml.ClearAnnotation(kioutil.IndexAnnotation)); err != nil { + return err + } + } + if !useLegacy { + if err := rn.PipeE(yaml.ClearAnnotation(kioutil.LegacyIdAnnotation)); err != nil { + return err 
+ } + if err := rn.PipeE(yaml.ClearAnnotation(kioutil.LegacyPathAnnotation)); err != nil { + return err + } + if err := rn.PipeE(yaml.ClearAnnotation(kioutil.LegacyIndexAnnotation)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/kioutil/kioutil.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/kioutil/kioutil.go new file mode 100644 index 000000000..510ecae18 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/kioutil/kioutil.go @@ -0,0 +1,420 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package kioutil + +import ( + "fmt" + "path" + "sort" + "strconv" + "strings" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +type AnnotationKey = string + +const ( + // internalPrefix is the prefix given to internal annotations that are used + // internally by the orchestrator + internalPrefix string = "internal.config.kubernetes.io/" + + // IndexAnnotation records the index of a specific resource in a file or input stream. + IndexAnnotation AnnotationKey = internalPrefix + "index" + + // PathAnnotation records the path to the file the Resource was read from + PathAnnotation AnnotationKey = internalPrefix + "path" + + // SeqIndentAnnotation records the sequence nodes indentation of the input resource + SeqIndentAnnotation AnnotationKey = internalPrefix + "seqindent" + + // IdAnnotation records the id of the resource to map inputs to outputs + IdAnnotation AnnotationKey = internalPrefix + "id" + + // Deprecated: Use IndexAnnotation instead. + LegacyIndexAnnotation AnnotationKey = "config.kubernetes.io/index" + + // Deprecated: use PathAnnotation instead. + LegacyPathAnnotation AnnotationKey = "config.kubernetes.io/path" + + // Deprecated: use IdAnnotation instead. + LegacyIdAnnotation = "config.k8s.io/id" + + // InternalAnnotationsMigrationResourceIDAnnotation is used to uniquely identify + // resources during round trip to and from a function execution. We will use it + // to track the internal annotations and reconcile them if needed. 
+ InternalAnnotationsMigrationResourceIDAnnotation = internalPrefix + "annotations-migration-resource-id" +) + +func GetFileAnnotations(rn *yaml.RNode) (string, string, error) { + rm, _ := rn.GetMeta() + annotations := rm.Annotations + path, found := annotations[PathAnnotation] + if !found { + path = annotations[LegacyPathAnnotation] + } + index, found := annotations[IndexAnnotation] + if !found { + index = annotations[LegacyIndexAnnotation] + } + return path, index, nil +} + +func GetIdAnnotation(rn *yaml.RNode) string { + rm, _ := rn.GetMeta() + annotations := rm.Annotations + id, found := annotations[IdAnnotation] + if !found { + id = annotations[LegacyIdAnnotation] + } + return id +} + +func CopyLegacyAnnotations(rn *yaml.RNode) error { + meta, err := rn.GetMeta() + if err != nil { + if err == yaml.ErrMissingMetadata { + // resource has no metadata, this should be a no-op + return nil + } + return err + } + if err := copyAnnotations(meta, rn, LegacyPathAnnotation, PathAnnotation); err != nil { + return err + } + if err := copyAnnotations(meta, rn, LegacyIndexAnnotation, IndexAnnotation); err != nil { + return err + } + if err := copyAnnotations(meta, rn, LegacyIdAnnotation, IdAnnotation); err != nil { + return err + } + return nil +} + +func copyAnnotations(meta yaml.ResourceMeta, rn *yaml.RNode, legacyKey string, newKey string) error { + newValue := meta.Annotations[newKey] + legacyValue := meta.Annotations[legacyKey] + if newValue != "" { + if legacyValue == "" { + if err := rn.PipeE(yaml.SetAnnotation(legacyKey, newValue)); err != nil { + return err + } + } + } else { + if legacyValue != "" { + if err := rn.PipeE(yaml.SetAnnotation(newKey, legacyValue)); err != nil { + return err + } + } + } + return nil +} + +// ErrorIfMissingAnnotation validates the provided annotations are present on the given resources +func ErrorIfMissingAnnotation(nodes []*yaml.RNode, keys ...AnnotationKey) error { + for _, key := range keys { + for _, node := range nodes { + val, err := node.Pipe(yaml.GetAnnotation(key)) + if err != nil { + return errors.Wrap(err) + } + if val == nil { + return errors.Errorf("missing annotation %s", key) + } + } + } + return nil +} + +// CreatePathAnnotationValue creates a default path annotation value for a Resource. +// The path prefix will be dir. 
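+// Editor's note: e.g. dir "pkg" and a Deployment named "foo" in namespace
+// "bar" yield "pkg/bar/deployment_foo.yaml".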
+func CreatePathAnnotationValue(dir string, m yaml.ResourceMeta) string {
+    filename := fmt.Sprintf("%s_%s.yaml", strings.ToLower(m.Kind), m.Name)
+    return path.Join(dir, m.Namespace, filename)
+}
+
+// DefaultPathAndIndexAnnotation sets a default path or index value on any nodes missing the
+// annotation
+func DefaultPathAndIndexAnnotation(dir string, nodes []*yaml.RNode) error {
+    counts := map[string]int{}
+
+    // check each node for the path annotation
+    for i := range nodes {
+        if err := CopyLegacyAnnotations(nodes[i]); err != nil {
+            return err
+        }
+        m, err := nodes[i].GetMeta()
+        if err != nil {
+            return err
+        }
+
+        // calculate the max index in each file in case we are appending
+        if p, found := m.Annotations[PathAnnotation]; found {
+            // record the max indexes into each file
+            if i, found := m.Annotations[IndexAnnotation]; found {
+                index, _ := strconv.Atoi(i)
+                if index > counts[p] {
+                    counts[p] = index
+                }
+            }
+
+            // has the path annotation already -- do nothing
+            continue
+        }
+
+        // set a path annotation on the Resource
+        path := CreatePathAnnotationValue(dir, m)
+        if err := nodes[i].PipeE(yaml.SetAnnotation(PathAnnotation, path)); err != nil {
+            return err
+        }
+        if err := nodes[i].PipeE(yaml.SetAnnotation(LegacyPathAnnotation, path)); err != nil {
+            return err
+        }
+    }
+
+    // set the index annotations
+    for i := range nodes {
+        m, err := nodes[i].GetMeta()
+        if err != nil {
+            return err
+        }
+
+        if _, found := m.Annotations[IndexAnnotation]; found {
+            continue
+        }
+
+        p := m.Annotations[PathAnnotation]
+
+        // set an index annotation on the Resource
+        c := counts[p]
+        counts[p] = c + 1
+        if err := nodes[i].PipeE(
+            yaml.SetAnnotation(IndexAnnotation, fmt.Sprintf("%d", c))); err != nil {
+            return err
+        }
+        if err := nodes[i].PipeE(
+            yaml.SetAnnotation(LegacyIndexAnnotation, fmt.Sprintf("%d", c))); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// DefaultPathAnnotation sets a default path annotation on any Resources
+// missing it.
+func DefaultPathAnnotation(dir string, nodes []*yaml.RNode) error {
+    // check each node for the path annotation
+    for i := range nodes {
+        if err := CopyLegacyAnnotations(nodes[i]); err != nil {
+            return err
+        }
+        m, err := nodes[i].GetMeta()
+        if err != nil {
+            return err
+        }
+
+        if _, found := m.Annotations[PathAnnotation]; found {
+            // has the path annotation already -- do nothing
+            continue
+        }
+
+        // set a path annotation on the Resource
+        path := CreatePathAnnotationValue(dir, m)
+        if err := nodes[i].PipeE(yaml.SetAnnotation(PathAnnotation, path)); err != nil {
+            return err
+        }
+        if err := nodes[i].PipeE(yaml.SetAnnotation(LegacyPathAnnotation, path)); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// Map invokes fn for each element in nodes.
+func Map(nodes []*yaml.RNode, fn func(*yaml.RNode) (*yaml.RNode, error)) ([]*yaml.RNode, error) { + var returnNodes []*yaml.RNode + for i := range nodes { + n, err := fn(nodes[i]) + if err != nil { + return nil, errors.Wrap(err) + } + if n != nil { + returnNodes = append(returnNodes, n) + } + } + return returnNodes, nil +} + +func MapMeta(nodes []*yaml.RNode, fn func(*yaml.RNode, yaml.ResourceMeta) (*yaml.RNode, error)) ( + []*yaml.RNode, error) { + var returnNodes []*yaml.RNode + for i := range nodes { + meta, err := nodes[i].GetMeta() + if err != nil { + return nil, errors.Wrap(err) + } + n, err := fn(nodes[i], meta) + if err != nil { + return nil, errors.Wrap(err) + } + if n != nil { + returnNodes = append(returnNodes, n) + } + } + return returnNodes, nil +} + +// SortNodes sorts nodes in place: +// - by PathAnnotation annotation +// - by IndexAnnotation annotation +func SortNodes(nodes []*yaml.RNode) error { + var err error + // use stable sort to keep ordering of equal elements + sort.SliceStable(nodes, func(i, j int) bool { + if err != nil { + return false + } + if err := CopyLegacyAnnotations(nodes[i]); err != nil { + return false + } + if err := CopyLegacyAnnotations(nodes[j]); err != nil { + return false + } + var iMeta, jMeta yaml.ResourceMeta + if iMeta, _ = nodes[i].GetMeta(); err != nil { + return false + } + if jMeta, _ = nodes[j].GetMeta(); err != nil { + return false + } + + iValue := iMeta.Annotations[PathAnnotation] + jValue := jMeta.Annotations[PathAnnotation] + if iValue != jValue { + return iValue < jValue + } + + iValue = iMeta.Annotations[IndexAnnotation] + jValue = jMeta.Annotations[IndexAnnotation] + + // put resource config without an index first + if iValue == jValue { + return false + } + if iValue == "" { + return true + } + if jValue == "" { + return false + } + + // sort by index + var iIndex, jIndex int + iIndex, err = strconv.Atoi(iValue) + if err != nil { + err = fmt.Errorf("unable to parse config.kubernetes.io/index %s :%v", iValue, err) + return false + } + jIndex, err = strconv.Atoi(jValue) + if err != nil { + err = fmt.Errorf("unable to parse config.kubernetes.io/index %s :%v", jValue, err) + return false + } + if iIndex != jIndex { + return iIndex < jIndex + } + + // elements are equal + return false + }) + return errors.Wrap(err) +} + +// CopyInternalAnnotations copies the annotations that begin with the prefix +// `internal.config.kubernetes.io` from the source RNode to the destination RNode. +// It takes a parameter exclusions, which is a list of annotation keys to ignore. +func CopyInternalAnnotations(src *yaml.RNode, dst *yaml.RNode, exclusions ...AnnotationKey) error { + srcAnnotations := GetInternalAnnotations(src) + for k, v := range srcAnnotations { + if stringSliceContains(exclusions, k) { + continue + } + if err := dst.PipeE(yaml.SetAnnotation(k, v)); err != nil { + return err + } + } + return nil +} + +// ConfirmInternalAnnotationUnchanged compares the annotations of the RNodes that begin with the prefix +// `internal.config.kubernetes.io`, throwing an error if they differ. It takes a parameter exclusions, +// which is a list of annotation keys to ignore. 
+func ConfirmInternalAnnotationUnchanged(r1 *yaml.RNode, r2 *yaml.RNode, exclusions ...AnnotationKey) error { + r1Annotations := GetInternalAnnotations(r1) + r2Annotations := GetInternalAnnotations(r2) + + // this is a map to prevent duplicates + diffAnnos := make(map[string]bool) + + for k, v1 := range r1Annotations { + if stringSliceContains(exclusions, k) { + continue + } + if v2, ok := r2Annotations[k]; !ok || v1 != v2 { + diffAnnos[k] = true + } + } + + for k, v2 := range r2Annotations { + if stringSliceContains(exclusions, k) { + continue + } + if v1, ok := r1Annotations[k]; !ok || v2 != v1 { + diffAnnos[k] = true + } + } + + if len(diffAnnos) > 0 { + keys := make([]string, 0, len(diffAnnos)) + for k := range diffAnnos { + keys = append(keys, k) + } + sort.Strings(keys) + + errorString := "internal annotations differ: " + for _, key := range keys { + errorString = errorString + key + ", " + } + return errors.Errorf(errorString[0 : len(errorString)-2]) + } + + return nil +} + +// GetInternalAnnotations returns a map of all the annotations of the provided +// RNode that satisfies one of the following: 1) begin with the prefix +// `internal.config.kubernetes.io` 2) is one of `config.kubernetes.io/path`, +// `config.kubernetes.io/index` and `config.k8s.io/id`. +func GetInternalAnnotations(rn *yaml.RNode) map[string]string { + meta, _ := rn.GetMeta() + annotations := meta.Annotations + result := make(map[string]string) + for k, v := range annotations { + if strings.HasPrefix(k, internalPrefix) || k == LegacyPathAnnotation || k == LegacyIndexAnnotation || k == LegacyIdAnnotation { + result[k] = v + } + } + return result +} + +// stringSliceContains returns true if the slice has the string. +func stringSliceContains(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_reader.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_reader.go new file mode 100644 index 000000000..609a791f3 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_reader.go @@ -0,0 +1,360 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package kio + +import ( + "fmt" + "os" + "path/filepath" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/filesys" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/sets" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// requiredResourcePackageAnnotations are annotations that are required to write resources back to +// files. +var requiredResourcePackageAnnotations = []string{kioutil.IndexAnnotation, kioutil.PathAnnotation} + +// PackageBuffer implements Reader and Writer, storing Resources in a local field. +type PackageBuffer struct { + Nodes []*yaml.RNode +} + +func (r *PackageBuffer) Read() ([]*yaml.RNode, error) { + return r.Nodes, nil +} + +func (r *PackageBuffer) Write(nodes []*yaml.RNode) error { + r.Nodes = nodes + return nil +} + +// LocalPackageReadWriter reads and writes Resources from / to a local directory. +// When writing, LocalPackageReadWriter will delete files if all of the Resources from +// that file have been removed from the output. +type LocalPackageReadWriter struct { + Kind string `yaml:"kind,omitempty"` + + KeepReaderAnnotations bool `yaml:"keepReaderAnnotations,omitempty"` + + // PreserveSeqIndent if true adds kioutil.SeqIndentAnnotation to each resource + PreserveSeqIndent bool + + // PackagePath is the path to the package directory. 
+    PackagePath string `yaml:"path,omitempty"`
+
+    // PackageFileName is the name of the file containing package metadata.
+    // It will be used to identify the package.
+    PackageFileName string `yaml:"packageFileName,omitempty"`
+
+    // MatchFilesGlob configures Read to only read Resources from files matching any of the
+    // provided patterns.
+    // Defaults to ["*.yaml", "*.yml"] if empty. To match all files specify ["*"].
+    MatchFilesGlob []string `yaml:"matchFilesGlob,omitempty"`
+
+    // IncludeSubpackages will configure Read to read Resources from subpackages.
+    // Subpackages are identified by presence of PackageFileName.
+    IncludeSubpackages bool `yaml:"includeSubpackages,omitempty"`
+
+    // ErrorIfNonResources will configure Read to throw an error if yaml missing
+    // apiVersion or kind is read.
+    ErrorIfNonResources bool `yaml:"errorIfNonResources,omitempty"`
+
+    // OmitReaderAnnotations will cause the reader to skip annotating Resources with the file
+    // path and mode.
+    OmitReaderAnnotations bool `yaml:"omitReaderAnnotations,omitempty"`
+
+    // SetAnnotations are annotations to set on the Resources as they are read.
+    SetAnnotations map[string]string `yaml:"setAnnotations,omitempty"`
+
+    // NoDeleteFiles if set to true, LocalPackageReadWriter won't delete any files.
+    NoDeleteFiles bool `yaml:"noDeleteFiles,omitempty"`
+
+    files sets.String
+
+    // FileSkipFunc is a function which returns true if the reader should ignore
+    // the file.
+    FileSkipFunc LocalPackageSkipFileFunc
+
+    // FileSystem can be used to mock the disk file system.
+    FileSystem filesys.FileSystemOrOnDisk
+
+    // WrapBareSeqNode wraps a bare sequence node document with a map node.
+    // kyaml uses reader annotations to track resources; since it is not possible to
+    // add them to bare sequence nodes, this option enables wrapping such bare
+    // sequence nodes into a map node with key yaml.BareSeqNodeWrappingKey.
+    // Note that this wrapping is different from and not related to ResourceList wrapping.
+    WrapBareSeqNode bool
+}
+
+func (r *LocalPackageReadWriter) Read() ([]*yaml.RNode, error) {
+    nodes, err := LocalPackageReader{
+        PackagePath:         r.PackagePath,
+        MatchFilesGlob:      r.MatchFilesGlob,
+        IncludeSubpackages:  r.IncludeSubpackages,
+        ErrorIfNonResources: r.ErrorIfNonResources,
+        SetAnnotations:      r.SetAnnotations,
+        PackageFileName:     r.PackageFileName,
+        FileSkipFunc:        r.FileSkipFunc,
+        PreserveSeqIndent:   r.PreserveSeqIndent,
+        FileSystem:          r.FileSystem,
+        WrapBareSeqNode:     r.WrapBareSeqNode,
+    }.Read()
+    if err != nil {
+        return nil, errors.Wrap(err)
+    }
+    // keep track of all the files
+    if !r.NoDeleteFiles {
+        r.files, err = r.getFiles(nodes)
+        if err != nil {
+            return nil, errors.Wrap(err)
+        }
+    }
+    return nodes, nil
+}
+
+func (r *LocalPackageReadWriter) Write(nodes []*yaml.RNode) error {
+    newFiles, err := r.getFiles(nodes)
+    if err != nil {
+        return errors.Wrap(err)
+    }
+    var clear []string
+    for k := range r.SetAnnotations {
+        clear = append(clear, k)
+    }
+    err = LocalPackageWriter{
+        PackagePath:           r.PackagePath,
+        ClearAnnotations:      clear,
+        KeepReaderAnnotations: r.KeepReaderAnnotations,
+        FileSystem:            r.FileSystem,
+    }.Write(nodes)
+    if err != nil {
+        return errors.Wrap(err)
+    }
+    deleteFiles := r.files.Difference(newFiles)
+    for f := range deleteFiles {
+        if err = r.FileSystem.RemoveAll(filepath.Join(r.PackagePath, f)); err != nil {
+            return errors.Wrap(err)
+        }
+    }
+    return nil
+}
+
+func (r *LocalPackageReadWriter) getFiles(nodes []*yaml.RNode) (sets.String, error) {
+    val := sets.String{}
+    for _, n := range nodes {
+        path, _, err := kioutil.GetFileAnnotations(n)
+        if err != nil {
+            return nil, errors.Wrap(err)
+        }
+        val.Insert(path)
+    }
+    return val, nil
+}
+
+// LocalPackageSkipFileFunc is a function which returns true if the file
+// in the package should be ignored by the reader.
+// relPath is an OS specific relative path
+type LocalPackageSkipFileFunc func(relPath string) bool
+
+// LocalPackageReader reads ResourceNodes from a local package.
+type LocalPackageReader struct {
+    Kind string `yaml:"kind,omitempty"`
+
+    // PackagePath is the path to the package directory.
+    PackagePath string `yaml:"path,omitempty"`
+
+    // PackageFileName is the name of the file containing package metadata.
+    // It will be used to identify the package.
+    PackageFileName string `yaml:"packageFileName,omitempty"`
+
+    // MatchFilesGlob configures Read to only read Resources from files matching any of the
+    // provided patterns.
+    // Defaults to ["*.yaml", "*.yml"] if empty. To match all files specify ["*"].
+    MatchFilesGlob []string `yaml:"matchFilesGlob,omitempty"`
+
+    // IncludeSubpackages will configure Read to read Resources from subpackages.
+    // Subpackages are identified by presence of PackageFileName.
+    IncludeSubpackages bool `yaml:"includeSubpackages,omitempty"`
+
+    // ErrorIfNonResources will configure Read to throw an error if yaml missing
+    // apiVersion or kind is read.
+    ErrorIfNonResources bool `yaml:"errorIfNonResources,omitempty"`
+
+    // OmitReaderAnnotations will cause the reader to skip annotating Resources with the file
+    // path and mode.
+    OmitReaderAnnotations bool `yaml:"omitReaderAnnotations,omitempty"`
+
+    // SetAnnotations are annotations to set on the Resources as they are read.
+    SetAnnotations map[string]string `yaml:"setAnnotations,omitempty"`
+
+    // FileSkipFunc is a function which returns true if the reader should ignore
+    // the file.
+    FileSkipFunc LocalPackageSkipFileFunc
+
+    // PreserveSeqIndent if true adds kioutil.SeqIndentAnnotation to each resource
+    PreserveSeqIndent bool
+
+    // FileSystem can be used to mock the disk file system.
+    FileSystem filesys.FileSystemOrOnDisk
+
+    // WrapBareSeqNode wraps a bare sequence node document with a map node.
+    // kyaml uses reader annotations to track resources; since it is not possible to
+    // add them to bare sequence nodes, this option enables wrapping such bare
+    // sequence nodes into a map node with key yaml.BareSeqNodeWrappingKey.
+    // Note that this wrapping is different from and not related to ResourceList wrapping.
+    WrapBareSeqNode bool
+}
+
+var _ Reader = LocalPackageReader{}
+
+var DefaultMatch = []string{"*.yaml", "*.yml"}
+var JSONMatch = []string{"*.json"}
+var MatchAll = append(DefaultMatch, JSONMatch...)
+
+// Read reads the Resources.
+func (r LocalPackageReader) Read() ([]*yaml.RNode, error) {
+    if r.PackagePath == "" {
+        return nil, fmt.Errorf("must specify package path")
+    }
+
+    // use slash for path
+    r.PackagePath = filepath.ToSlash(r.PackagePath)
+    if len(r.MatchFilesGlob) == 0 {
+        r.MatchFilesGlob = DefaultMatch
+    }
+
+    var operand ResourceNodeSlice
+    var pathRelativeTo string
+    var err error
+    ignoreFilesMatcher := &ignoreFilesMatcher{
+        fs: r.FileSystem,
+    }
+    dir, file, err := r.FileSystem.CleanedAbs(r.PackagePath)
+    if err != nil {
+        return nil, errors.Wrap(err)
+    }
+    r.PackagePath = filepath.Join(string(dir), file)
+    err = r.FileSystem.Walk(r.PackagePath, func(
+        path string, info os.FileInfo, err error) error {
+        if err != nil {
+            return errors.Wrap(err)
+        }
+
+        // is this the user specified path?
+        if path == r.PackagePath {
+            if info.IsDir() {
+                // skip the root package directory, but check for a
+                // .krmignore file first.
+                pathRelativeTo = r.PackagePath
+                return ignoreFilesMatcher.readIgnoreFile(path)
+            }
+
+            // user specified path is a file rather than a directory.
+            // make its path relative to its parent so it can be written to another file.
+            pathRelativeTo = filepath.Dir(r.PackagePath)
+        }
+
+        // check if we should skip the directory or file
+        if info.IsDir() {
+            return r.shouldSkipDir(path, ignoreFilesMatcher)
+        }
+
+        // get the relative path to file within the package so we can write the files back out
+        // to another location.
+        relPath, err := filepath.Rel(pathRelativeTo, path)
+        if err != nil {
+            return errors.WrapPrefixf(err, pathRelativeTo)
+        }
+        if match, err := r.shouldSkipFile(path, relPath, ignoreFilesMatcher); err != nil {
+            return err
+        } else if match {
+            // skip this file
+            return nil
+        }
+
+        r.initReaderAnnotations(relPath, info)
+        nodes, err := r.readFile(path, info)
+        if err != nil {
+            return errors.WrapPrefixf(err, path)
+        }
+        operand = append(operand, nodes...)
+        return nil
+    })
+    return operand, err
+}
+
+// readFile reads the ResourceNodes from a file
+func (r *LocalPackageReader) readFile(path string, _ os.FileInfo) ([]*yaml.RNode, error) {
+    f, err := r.FileSystem.Open(path)
+    if err != nil {
+        return nil, err
+    }
+    defer f.Close()
+
+    rr := &ByteReader{
+        DisableUnwrapping:     true,
+        Reader:                f,
+        OmitReaderAnnotations: r.OmitReaderAnnotations,
+        SetAnnotations:        r.SetAnnotations,
+        PreserveSeqIndent:     r.PreserveSeqIndent,
+        WrapBareSeqNode:       r.WrapBareSeqNode,
+    }
+    return rr.Read()
+}
+
+// shouldSkipFile returns true if the file should be skipped
+func (r *LocalPackageReader) shouldSkipFile(path, relPath string, matcher *ignoreFilesMatcher) (bool, error) {
+    // check if the file is covered by a .krmignore file.
+    if matcher.matchFile(path) {
+        return true, nil
+    }
+
+    if r.FileSkipFunc != nil && r.FileSkipFunc(relPath) {
+        return true, nil
+    }
+
+    // check if the files are in scope
+    for _, g := range r.MatchFilesGlob {
+        if match, err := filepath.Match(g, filepath.Base(path)); err != nil {
+            return true, errors.Wrap(err)
+        } else if match {
+            return false, nil
+        }
+    }
+    return true, nil
+}
+
+// initReaderAnnotations adds the LocalPackageReader Annotations to r.SetAnnotations
+func (r *LocalPackageReader) initReaderAnnotations(path string, _ os.FileInfo) {
+    if r.SetAnnotations == nil {
+        r.SetAnnotations = map[string]string{}
+    }
+    if !r.OmitReaderAnnotations {
+        r.SetAnnotations[kioutil.PathAnnotation] = path
+        r.SetAnnotations[kioutil.LegacyPathAnnotation] = path
+    }
+}
+
+// shouldSkipDir returns a filepath.SkipDir if the directory should be skipped
+func (r *LocalPackageReader) shouldSkipDir(path string, matcher *ignoreFilesMatcher) error {
+    if matcher.matchDir(path) {
+        return filepath.SkipDir
+    }
+
+    if r.PackageFileName == "" {
+        return nil
+    }
+    // check if this is a subpackage
+    if !r.FileSystem.Exists(filepath.Join(path, r.PackageFileName)) {
+        return nil
+    }
+    if !r.IncludeSubpackages {
+        return filepath.SkipDir
+    }
+    return matcher.readIgnoreFile(path)
+}
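Taken together, LocalPackageReader (above) and LocalPackageWriter (defined in pkgio_writer.go, next) form a read-modify-write loop over a package directory. A minimal usage sketch; the "./config" path and log-based error handling are illustrative assumptions, not part of the vendored code:

```go
package main

import (
	"log"

	"sigs.k8s.io/kustomize/kyaml/kio"
)

func main() {
	// Read all *.yaml/*.yml files under a package directory (hypothetical
	// path "./config"); each resource is annotated with its source path
	// and its index within that file.
	nodes, err := kio.LocalPackageReader{PackagePath: "./config"}.Read()
	if err != nil {
		log.Fatal(err)
	}

	// ... inspect or mutate nodes here ...

	// Write the resources back; the path/index annotations determine the
	// file and position each resource is written to.
	if err := (kio.LocalPackageWriter{PackagePath: "./config"}).Write(nodes); err != nil {
		log.Fatal(err)
	}
}
```

diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_writer.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_writer.go
new file mode 100644
index 000000000..ce6fa45a5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_writer.go
@@ -0,0 +1,150 @@
+// Copyright 2019 The Kubernetes Authors.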
+// SPDX-License-Identifier: Apache-2.0 + +package kio + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/filesys" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// LocalPackageWriter writes ResourceNodes to a filesystem +type LocalPackageWriter struct { + Kind string `yaml:"kind,omitempty"` + + // PackagePath is the path to the package directory. + PackagePath string `yaml:"path,omitempty"` + + // KeepReaderAnnotations if set will retain the annotations set by LocalPackageReader + KeepReaderAnnotations bool `yaml:"keepReaderAnnotations,omitempty"` + + // ClearAnnotations will clear annotations before writing the resources + ClearAnnotations []string `yaml:"clearAnnotations,omitempty"` + + // FileSystem can be used to mock the disk file system. + FileSystem filesys.FileSystemOrOnDisk +} + +var _ Writer = LocalPackageWriter{} + +func (r LocalPackageWriter) Write(nodes []*yaml.RNode) error { + // set the path and index annotations if they are missing + if err := kioutil.DefaultPathAndIndexAnnotation("", nodes); err != nil { + return err + } + + if !r.FileSystem.Exists(r.PackagePath) { + return errors.WrapPrefixf(os.ErrNotExist, "could not write to %q", r.PackagePath) + } + if !r.FileSystem.IsDir(r.PackagePath) { + // if the user specified input isn't a directory, the package is the directory of the + // target + r.PackagePath = filepath.Dir(r.PackagePath) + } + + // setup indexes for writing Resources back to files + if err := r.errorIfMissingRequiredAnnotation(nodes); err != nil { + return err + } + outputFiles, err := r.indexByFilePath(nodes) + if err != nil { + return err + } + for k := range outputFiles { + if err = kioutil.SortNodes(outputFiles[k]); err != nil { + return errors.Wrap(err) + } + } + + if !r.KeepReaderAnnotations { + r.ClearAnnotations = append(r.ClearAnnotations, kioutil.PathAnnotation) + r.ClearAnnotations = append(r.ClearAnnotations, kioutil.LegacyPathAnnotation) + } + + // validate outputs before writing any + for path := range outputFiles { + outputPath := filepath.Join(r.PackagePath, path) + if r.FileSystem.IsDir(outputPath) { + return fmt.Errorf("config.kubernetes.io/path cannot be a directory: %s", path) + } + + err = r.FileSystem.MkdirAll(filepath.Dir(outputPath)) + if err != nil { + return errors.Wrap(err) + } + } + + // write files + buf := bytes.NewBuffer(nil) + for path := range outputFiles { + outputPath := filepath.Join(r.PackagePath, path) + err = r.FileSystem.MkdirAll(filepath.Dir(filepath.Join(r.PackagePath, path))) + if err != nil { + return errors.Wrap(err) + } + + buf.Reset() + w := ByteWriter{ + Writer: buf, + KeepReaderAnnotations: r.KeepReaderAnnotations, + ClearAnnotations: r.ClearAnnotations, + } + if err = w.Write(outputFiles[path]); err != nil { + return errors.Wrap(err) + } + + if err := r.FileSystem.WriteFile(outputPath, buf.Bytes()); err != nil { + return errors.Wrap(err) + } + } + + return nil +} + +func (r LocalPackageWriter) errorIfMissingRequiredAnnotation(nodes []*yaml.RNode) error { + for i := range nodes { + for _, s := range requiredResourcePackageAnnotations { + key, err := nodes[i].Pipe(yaml.GetAnnotation(s)) + if err != nil { + return errors.Wrap(err) + } + if key == nil || key.YNode() == nil || key.YNode().Value == "" { + return errors.Errorf( + "resources must be annotated with %s to be written to files", s) + } + } + } + return nil +} + +func (r LocalPackageWriter) indexByFilePath(nodes []*yaml.RNode) 
(map[string][]*yaml.RNode, error) {
+    outputFiles := map[string][]*yaml.RNode{}
+    for i := range nodes {
+        // parse the file write path
+        node := nodes[i]
+        value, err := node.Pipe(yaml.GetAnnotation(kioutil.PathAnnotation))
+        if err != nil {
+            // this should never happen if errorIfMissingRequiredAnnotation was run
+            return nil, errors.Wrap(err)
+        }
+        path := value.YNode().Value
+        outputFiles[path] = append(outputFiles[path], node)
+
+        if filepath.IsAbs(path) {
+            return nil, errors.Errorf("package paths may not be absolute paths")
+        }
+        if strings.Contains(filepath.Clean(path), "..") {
+            return nil, fmt.Errorf("resource must be written under package %s: %s",
+                r.PackagePath, filepath.Clean(path))
+        }
+    }
+    return outputFiles, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/kio/tree.go b/vendor/sigs.k8s.io/kustomize/kyaml/kio/tree.go
new file mode 100644
index 000000000..a14181578
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/kio/tree.go
@@ -0,0 +1,519 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package kio
+
+import (
+    "fmt"
+    "io"
+    "os"
+    "path/filepath"
+    "sort"
+    "strings"
+
+    "github.com/xlab/treeprint"
+    "sigs.k8s.io/kustomize/kyaml/kio/kioutil"
+    "sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+type TreeStructure string
+
+const (
+    // TreeStructurePackage configures TreeWriter to generate the tree structure off of the
+    // Resources packages.
+    TreeStructurePackage TreeStructure = "directory"
+
+    // TreeStructureGraph configures TreeWriter to generate the tree structure off of the
+    // Resource owners.
+    TreeStructureGraph TreeStructure = "owners"
+)
+
+var GraphStructures = []string{string(TreeStructureGraph), string(TreeStructurePackage)}
+
+// TreeWriter prints the package structured as a tree.
+// TODO(pwittrock): test this package better. It is lower-risk since it is only
+// used for printing rather than updating or editing.
+type TreeWriter struct {
+    Writer          io.Writer
+    Root            string
+    Fields          []TreeWriterField
+    Structure       TreeStructure
+    OpenAPIFileName string
+}
+
+// TreeWriterField configures a Resource field to be included in the tree
+type TreeWriterField struct {
+    yaml.PathMatcher
+    Name    string
+    SubName string
+}
+
+func (p TreeWriter) packageStructure(nodes []*yaml.RNode) error {
+    for i := range nodes {
+        if err := kioutil.CopyLegacyAnnotations(nodes[i]); err != nil {
+            return err
+        }
+    }
+    indexByPackage := p.index(nodes)
+
+    // create the new tree
+    tree := treeprint.New()
+    tree.SetValue(p.Root)
+
+    // add each package to the tree
+    treeIndex := map[string]treeprint.Tree{}
+    keys := p.sort(indexByPackage)
+    for _, pkg := range keys {
+        // create a branch for this package -- search for the parent package and create
+        // the branch under it -- requires that the keys are sorted
+        branch := tree
+        for parent, subTree := range treeIndex {
+            if strings.HasPrefix(pkg, parent) {
+                // found a package whose path is a prefix to our own, use this
+                // package if a closer one isn't found
+                branch = subTree
+                // don't break, continue searching for more closely related ancestors
+            }
+        }
+
+        // create a new branch for the package
+        createOk := pkg != "."
// special edge case logic for tree on current working dir + if createOk { + branch = branch.AddBranch(branchName(p.Root, pkg, p.OpenAPIFileName)) + } + + // cache the branch for this package + treeIndex[pkg] = branch + + // print each resource in the package + for i := range indexByPackage[pkg] { + var err error + if _, err = p.doResource(indexByPackage[pkg][i], "", branch); err != nil { + return err + } + } + } + + _, err := io.WriteString(p.Writer, tree.String()) + return err +} + +// branchName takes the root directory and relative path to the directory +// and returns the branch name +func branchName(root, dirRelPath, openAPIFileName string) string { + name := filepath.Base(dirRelPath) + _, err := os.Stat(filepath.Join(root, dirRelPath, openAPIFileName)) + if !os.IsNotExist(err) { + // add Pkg: prefix indicating that it is a separate package as it has + // openAPIFile + return fmt.Sprintf("Pkg: %s", name) + } + return name +} + +// Write writes the ascii tree to p.Writer +func (p TreeWriter) Write(nodes []*yaml.RNode) error { + switch p.Structure { + case TreeStructurePackage: + return p.packageStructure(nodes) + case TreeStructureGraph: + return p.graphStructure(nodes) + } + + // If any resource has an owner reference, default to the graph structure. Otherwise, use package structure. + for _, node := range nodes { + if owners, _ := node.Pipe(yaml.Lookup("metadata", "ownerReferences")); owners != nil { + return p.graphStructure(nodes) + } + } + return p.packageStructure(nodes) +} + +// node wraps a tree node, and any children nodes +type node struct { + p TreeWriter + *yaml.RNode + children []*node +} + +func (a node) Len() int { return len(a.children) } +func (a node) Swap(i, j int) { a.children[i], a.children[j] = a.children[j], a.children[i] } +func (a node) Less(i, j int) bool { + return compareNodes(a.children[i].RNode, a.children[j].RNode) +} + +// Tree adds this node to the root +func (a node) Tree(root treeprint.Tree) error { + sort.Sort(a) + branch := root + var err error + + // generate a node for the Resource + if a.RNode != nil { + branch, err = a.p.doResource(a.RNode, "Resource", root) + if err != nil { + return err + } + } + + // attach children to the branch + for _, n := range a.children { + if err := n.Tree(branch); err != nil { + return err + } + } + return nil +} + +// graphStructure writes the tree using owners for structure +func (p TreeWriter) graphStructure(nodes []*yaml.RNode) error { + resourceToOwner := map[string]*node{} + root := &node{} + // index each of the nodes by their owner + for _, n := range nodes { + ownerVal, err := ownerToString(n) + if err != nil { + return err + } + var owner *node + if ownerVal == "" { + // no owner -- attach to the root + owner = root + } else { + // owner found -- attach to the owner + var found bool + owner, found = resourceToOwner[ownerVal] + if !found { + // initialize the owner if not found + resourceToOwner[ownerVal] = &node{p: p} + owner = resourceToOwner[ownerVal] + } + } + + nodeVal, err := nodeToString(n) + if err != nil { + return err + } + val, found := resourceToOwner[nodeVal] + if !found { + // initialize the node if not found -- may have already been initialized if it + // is the owner of another node + resourceToOwner[nodeVal] = &node{p: p} + val = resourceToOwner[nodeVal] + } + val.RNode = n + owner.children = append(owner.children, val) + } + + for k, v := range resourceToOwner { + if v.RNode == nil { + return fmt.Errorf( + "owner '%s' not found in input, but found as an owner of input objects", k) + } + } 
+ + // print the tree + tree := treeprint.New() + if err := root.Tree(tree); err != nil { + return err + } + + _, err := io.WriteString(p.Writer, tree.String()) + return err +} + +// nodeToString generates a string to identify the node -- matches ownerToString format +func nodeToString(node *yaml.RNode) (string, error) { + meta, err := node.GetMeta() + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s/%s", meta.Kind, meta.Namespace, meta.Name), nil +} + +// ownerToString generate a string to identify the owner -- matches nodeToString format +func ownerToString(node *yaml.RNode) (string, error) { + meta, err := node.GetMeta() + if err != nil { + return "", err + } + namespace := meta.Namespace + + owners, err := node.Pipe(yaml.Lookup("metadata", "ownerReferences")) + if err != nil { + return "", err + } + if owners == nil { + return "", nil + } + + elements, err := owners.Elements() + if err != nil { + return "", err + } + if len(elements) == 0 { + return "", err + } + owner := elements[0] + var kind, name string + + if value := owner.Field("kind"); !value.IsNilOrEmpty() { + kind = value.Value.YNode().Value + } + if value := owner.Field("name"); !value.IsNilOrEmpty() { + name = value.Value.YNode().Value + } + + return fmt.Sprintf("%s %s/%s", kind, namespace, name), nil +} + +// index indexes the Resources by their package +func (p TreeWriter) index(nodes []*yaml.RNode) map[string][]*yaml.RNode { + // index the ResourceNodes by package + indexByPackage := map[string][]*yaml.RNode{} + for i := range nodes { + meta, err := nodes[i].GetMeta() + if err != nil || meta.Kind == "" { + // not a resource + continue + } + pkg := filepath.Dir(meta.Annotations[kioutil.PathAnnotation]) + indexByPackage[pkg] = append(indexByPackage[pkg], nodes[i]) + } + return indexByPackage +} + +func compareNodes(i, j *yaml.RNode) bool { + metai, _ := i.GetMeta() + metaj, _ := j.GetMeta() + pi := metai.Annotations[kioutil.PathAnnotation] + pj := metaj.Annotations[kioutil.PathAnnotation] + + // compare file names + if filepath.Base(pi) != filepath.Base(pj) { + return filepath.Base(pi) < filepath.Base(pj) + } + + // compare namespace + if metai.Namespace != metaj.Namespace { + return metai.Namespace < metaj.Namespace + } + + // compare name + if metai.Name != metaj.Name { + return metai.Name < metaj.Name + } + + // compare kind + if metai.Kind != metaj.Kind { + return metai.Kind < metaj.Kind + } + + // compare apiVersion + if metai.APIVersion != metaj.APIVersion { + return metai.APIVersion < metaj.APIVersion + } + return true +} + +// sort sorts the Resources in the index in display order and returns the ordered +// keys for the index +// +// Packages are sorted by package name +// Resources within a package are sorted by: [filename, namespace, name, kind, apiVersion] +func (p TreeWriter) sort(indexByPackage map[string][]*yaml.RNode) []string { + var keys []string + for k := range indexByPackage { + pkgNodes := indexByPackage[k] + sort.Slice(pkgNodes, func(i, j int) bool { return compareNodes(pkgNodes[i], pkgNodes[j]) }) + keys = append(keys, k) + } + + // return the package names sorted lexicographically + sort.Strings(keys) + return keys +} + +func (p TreeWriter) doResource(leaf *yaml.RNode, metaString string, branch treeprint.Tree) (treeprint.Tree, error) { + meta, _ := leaf.GetMeta() + if metaString == "" { + path := meta.Annotations[kioutil.PathAnnotation] + path = filepath.Base(path) + metaString = path + } + + value := fmt.Sprintf("%s %s", meta.Kind, meta.Name) + if len(meta.Namespace) > 0 { + 
value = fmt.Sprintf("%s %s/%s", meta.Kind, meta.Namespace, meta.Name) + } + + fields, err := p.getFields(leaf) + if err != nil { + return nil, err + } + + n := branch.AddMetaBranch(metaString, value) + for i := range fields { + field := fields[i] + + // do leaf node + if len(field.matchingElementsAndFields) == 0 { + n.AddNode(fmt.Sprintf("%s: %s", field.name, field.value)) + continue + } + + // do nested nodes + b := n.AddBranch(field.name) + for j := range field.matchingElementsAndFields { + elem := field.matchingElementsAndFields[j] + b := b.AddBranch(elem.name) + for k := range elem.matchingElementsAndFields { + field := elem.matchingElementsAndFields[k] + b.AddNode(fmt.Sprintf("%s: %s", field.name, field.value)) + } + } + } + + return n, nil +} + +// getFields looks up p.Fields from leaf and structures them into treeFields. +// TODO(pwittrock): simplify this function +func (p TreeWriter) getFields(leaf *yaml.RNode) (treeFields, error) { + fieldsByName := map[string]*treeField{} + + // index nested and non-nested fields + for i := range p.Fields { + f := p.Fields[i] + seq, err := leaf.Pipe(&f) + if err != nil { + return nil, err + } + if seq == nil { + continue + } + + if fieldsByName[f.Name] == nil { + fieldsByName[f.Name] = &treeField{name: f.Name} + } + + // non-nested field -- add directly to the treeFields list + if f.SubName == "" { + // non-nested field -- only 1 element + val, err := yaml.String(seq.Content()[0], yaml.Trim, yaml.Flow) + if err != nil { + return nil, err + } + fieldsByName[f.Name].value = val + continue + } + + // nested-field -- create a parent elem, and index by the 'match' value + if fieldsByName[f.Name].subFieldByMatch == nil { + fieldsByName[f.Name].subFieldByMatch = map[string]treeFields{} + } + index := fieldsByName[f.Name].subFieldByMatch + for j := range seq.Content() { + elem := seq.Content()[j] + matches := f.Matches[elem] + str, err := yaml.String(elem, yaml.Trim, yaml.Flow) + if err != nil { + return nil, err + } + + // map the field by the name of the element + // index the subfields by the matching element so we can put all the fields for the + // same element under the same branch + matchKey := strings.Join(matches, "/") + index[matchKey] = append(index[matchKey], &treeField{name: f.SubName, value: str}) + } + } + + // iterate over collection of all queried fields in the Resource + for _, field := range fieldsByName { + // iterate over collection of elements under the field -- indexed by element name + for match, subFields := range field.subFieldByMatch { + // create a new element for this collection of fields + // note: we will convert name to an index later, but keep the match for sorting + elem := &treeField{name: match} + field.matchingElementsAndFields = append(field.matchingElementsAndFields, elem) + + // iterate over collection of queried fields for the element + for i := range subFields { + // add to the list of fields for this element + elem.matchingElementsAndFields = append(elem.matchingElementsAndFields, subFields[i]) + } + } + // clear this cached data + field.subFieldByMatch = nil + } + + // put the fields in a list so they are ordered + fieldList := treeFields{} + for _, v := range fieldsByName { + fieldList = append(fieldList, v) + } + + // sort the fields + sort.Sort(fieldList) + for i := range fieldList { + field := fieldList[i] + // sort the elements under this field + sort.Sort(field.matchingElementsAndFields) + + for i := range field.matchingElementsAndFields { + element := field.matchingElementsAndFields[i] + // sort the 
elements under a list field by their name + sort.Sort(element.matchingElementsAndFields) + // set the name of the element to its index + element.name = fmt.Sprintf("%d", i) + } + } + + return fieldList, nil +} + +// treeField wraps a field node +type treeField struct { + // name is the name of the node + name string + + // value is the value of the node -- may be empty + value string + + // matchingElementsAndFields is a slice of fields that go under this as a branch + matchingElementsAndFields treeFields + + // subFieldByMatch caches matchingElementsAndFields indexed by the name of the matching elem + subFieldByMatch map[string]treeFields +} + +// treeFields wraps a slice of treeField so they can be sorted +type treeFields []*treeField + +func (nodes treeFields) Len() int { return len(nodes) } + +func (nodes treeFields) Less(i, j int) bool { + iIndex, iFound := yaml.FieldOrder[nodes[i].name] + jIndex, jFound := yaml.FieldOrder[nodes[j].name] + if iFound && jFound { + return iIndex < jIndex + } + if iFound { + return true + } + if jFound { + return false + } + + if nodes[i].name != nodes[j].name { + return nodes[i].name < nodes[j].name + } + if nodes[i].value != nodes[j].value { + return nodes[i].value < nodes[j].value + } + return false +} + +func (nodes treeFields) Swap(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] } diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/Makefile b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/Makefile new file mode 100644 index 000000000..c616a6c6d --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/Makefile @@ -0,0 +1,62 @@ +# Copyright 2020 The Kubernetes Authors. +# SPDX-License-Identifier: Apache-2.0 + +MYGOBIN = $(shell go env GOBIN) +ifeq ($(MYGOBIN),) +MYGOBIN = $(shell go env GOPATH)/bin +endif +KIND_VERSION := "v0.11.1" +API_VERSION ?= "v1.21.2" + +.PHONY: all +all: \ + kustomizationapi/swagger.go \ + kubernetesapi/swagger.go \ + kubernetesapi/openapiinfo.go + +.PHONY: clean +clean: + rm kustomizationapi/swagger.go + rm kubernetesapi/openapiinfo.go + +# This will remove all currently built-in schema, +# so think twice before deleting. +# To replace what this will delete typically requires the ability +# to contact a live kubernetes API server. +.PHONY: nuke +nuke: clean + rm -r kubernetesapi/* + +$(MYGOBIN)/go-bindata: + go install github.com/go-bindata/go-bindata/v3/go-bindata@latest + +$(MYGOBIN)/kind: + ( \ + set -e; \ + d=$(shell mktemp -d); cd $$d; \ + wget -O ./kind https://github.com/kubernetes-sigs/kind/releases/download/$(KIND_VERSION)/kind-$(shell uname)-amd64; \ + chmod +x ./kind; \ + mv ./kind $(MYGOBIN); \ + rm -rf $$d; \ + ) + +.PHONY: kubernetesapi/openapiinfo.go +kubernetesapi/openapiinfo.go: + ./scripts/makeOpenApiInfoDotGo.sh + +kustomizationapi/swagger.go: $(MYGOBIN)/go-bindata kustomizationapi/swagger.json + $(MYGOBIN)/go-bindata \ + --pkg kustomizationapi \ + -o kustomizationapi/swagger.go \ + kustomizationapi/swagger.json + +.PHONY: kubernetesapi/swagger.pb +kubernetesapi/swagger.pb: $(MYGOBIN)/kind $(MYGOBIN)/kustomize + ./scripts/fetchSchemaFromCluster.sh $(API_VERSION) + +.PHONY: kubernetesapi/swagger.go +kubernetesapi/swagger.go: $(MYGOBIN)/go-bindata kubernetesapi/swagger.pb + ./scripts/generateSwaggerDotGo.sh $(API_VERSION) + +$(MYGOBIN)/kustomize: + $(shell cd ../.. 
&& MYGOBIN=$(MYGOBIN) make $(MYGOBIN)/kustomize)
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/README.md b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/README.md
new file mode 100644
index 000000000..3bffb952c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/README.md
@@ -0,0 +1,84 @@
+# Sampling New OpenAPI Data
+
+[OpenAPI schema]: ./kubernetesapi/
+[Kustomization schema]: ./kustomizationapi/
+[kind]: https://hub.docker.com/r/kindest/node/tags
+
+This document describes how to fetch OpenAPI data from a
+live Kubernetes API server.
+The scripts used will create a clean [kind] instance for this purpose.
+
+## Replacing the default openapi schema version
+
+### Delete all currently built-in schema
+
+This will remove both the Kustomization and Kubernetes schemas:
+
+```
+make nuke
+```
+
+### Choose the new version to use
+
+The compiled-in schema version should maximize API availability with respect to all actively supported Kubernetes versions. For example, while 1.20, 1.21 and 1.22 are the actively supported versions, 1.21 is the best choice. This is because 1.21 introduces at least one new API and does not remove any, while 1.22 removes a large set of long-deprecated APIs that are still supported in 1.20/1.21.
+
+### Generating additional schema
+
+If you'd like to change the default schema version, then in the Makefile in this directory, update the `API_VERSION` to your desired version.
+
+You may need to update the version of Kind these scripts use by changing `KIND_VERSION` in the Makefile in this directory. You can find compatibility information in the [kind release notes](https://github.com/kubernetes-sigs/kind/releases).
+
+In this directory, fetch the openapi schema, generate the
+corresponding swagger.go for the kubernetes api, and update `kubernetesapi/openapiinfo.go`:
+
+```
+make all
+```
+
+If you want to run the steps individually instead of using `make all`, you can run
+the following commands:
+
+```
+make kustomizationapi/swagger.go
+make kubernetesapi/swagger.go
+make kubernetesapi/openapiinfo.go
+```
+
+You can optionally delete the old `swagger.pb` and `swagger.go` files if you no longer need to support that Kubernetes version of
+OpenAPI data. Make sure you rerun `make kubernetesapi/openapiinfo.go` after deleting any old schemas.
+
+
+#### Precomputations
+
+To avoid expensive schema lookups, some functions have precomputed results based on the schema. Unit tests
+ensure these are kept in sync with the schema; if these tests fail you will need to follow the suggested diff
+to update the precomputed results.
+
+### Run all tests
+
+At the top of the repository, run the tests.
+
+```
+make prow-presubmit-check >& /tmp/k.txt; echo $?
+```
+
+The exit code should be zero; if not, examine `/tmp/k.txt`.
+
+## Partial regeneration
+
+You can also regenerate the kubernetes api schemas specifically with:
+
+```
+rm kubernetesapi/swagger.go
+make kubernetesapi/swagger.go
+```
+
+To fetch the schema without generating the swagger.go, you can
+run:
+
+```
+rm kubernetesapi/swagger.pb
+make kubernetesapi/swagger.pb
+```
+
+Note that generating the swagger.go will re-fetch the schema.
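The generated `openapiinfo.go` that follows exposes the embedded schema through a version-keyed map of go-bindata MustAsset functions. A minimal consumer sketch; the exact asset name is inferred from the generated `sources:` header and is an assumption, not something this patch confirms:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi"
)

func main() {
	// DefaultOpenAPI is the version key ("v1.21.2"); the map value is the
	// generated MustAsset func, which panics on an unknown asset name.
	// The asset name below is inferred from the go-bindata "sources:" comment.
	data := kubernetesapi.OpenAPIMustAsset[kubernetesapi.DefaultOpenAPI](
		"kubernetesapi/v1_21_2/swagger.pb")
	fmt.Printf("embedded OpenAPI schema: %d bytes\n", len(data))
}
```

diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/openapiinfo.go b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/openapiinfo.go
new file mode 100644
index 000000000..e8b3d8360
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/openapiinfo.go
@@ -0,0 +1,18 @@
+// Copyright 2020 The Kubernetes Authors.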
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by ./scripts/makeOpenApiInfoDotGo.sh; DO NOT EDIT.
+
+package kubernetesapi
+
+import (
+    "sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2"
+)
+
+const Info = "{title:Kubernetes,version:v1.21.2}"
+
+var OpenAPIMustAsset = map[string]func(string) []byte{
+    "v1.21.2": v1_21_2.MustAsset,
+}
+
+const DefaultOpenAPI = "v1.21.2"
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2/swagger.go b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2/swagger.go
new file mode 100644
index 000000000..b599e539a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2/swagger.go
@@ -0,0 +1,249 @@
+// Code generated by go-bindata. (@generated) DO NOT EDIT.
+
+// Package v1_21_2 generated by go-bindata.
+// sources:
+// kubernetesapi/v1_21_2/swagger.pb
+package v1_21_2
+
+import (
+    "bytes"
+    "compress/gzip"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "strings"
+    "time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+    gz, err := gzip.NewReader(bytes.NewBuffer(data))
+    if err != nil {
+        return nil, fmt.Errorf("read %q: %v", name, err)
+    }
+
+    var buf bytes.Buffer
+    _, err = io.Copy(&buf, gz)
+    clErr := gz.Close()
+
+    if err != nil {
+        return nil, fmt.Errorf("read %q: %v", name, err)
+    }
+    if clErr != nil {
+        return nil, clErr
+    }
+
+    return buf.Bytes(), nil
+}
+
+type asset struct {
+    bytes []byte
+    info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+    name    string
+    size    int64
+    mode    os.FileMode
+    modTime time.Time
+}
+
+// Name returns the file name
+func (fi bindataFileInfo) Name() string {
+    return fi.name
+}
+
+// Size returns the file size
+func (fi bindataFileInfo) Size() int64 {
+    return fi.size
+}
+
+// Mode returns the file mode
+func (fi bindataFileInfo) Mode() os.FileMode {
+    return fi.mode
+}
+
+// ModTime returns the file modification time
+func (fi bindataFileInfo) ModTime() time.Time {
+    return fi.modTime
+}
+
+// IsDir reports whether the file is a directory
+func (fi bindataFileInfo) IsDir() bool {
+    return fi.mode&os.ModeDir != 0
+}
+
+// Sys returns nil (there is no underlying data source)
+func (fi bindataFileInfo) Sys() interface{} {
+    return nil
+}
+
+var _kubernetesapiV1_21_2SwaggerPb =
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xfd\x79\x78\x1c\xd7\x79\xe0\x0b\x3f\xdd\x00\x45\xf2\x48\xb2\xa9\xa3\xc5\x51\x5b\x4b\xa9\x29\x11\x40\x13\x68\xb0\xc1\x1d\x5c\xd1\x00\x49\xa1\x09\x92\x30\x21\x91\xb6\x23\x89\x2a\x54\x1d\x34\x8a\xec\xae\x53\xaa\xaa\x06\x05\xe7\xf3\xcc\xc4\xb2\x3d\x9e\x6f\x1e\x3b\x1e\x27\x5e\xee\x38\xd7\xe3\x6c\x1e\x3b\xf1\xc8\x71\xe2\x3b\x49\x1e\x4f\x96\x9b\x1b\x67\x94\x91\xa3\xc4\x71\x66\x9c\x71\x12\x27\x4e\x9c\x79\x32\x9e\x9b\x9b\x78\x32\xb9\x71\x56\xdf\xe7\x2c\xb5\x75\x57\xa3\x1a\x60\x77\xa3\xc1\x7e\xff\x91\xc0\xae\xaa\x53\xa7\xce\xf2\x9e\xf7\xfc\xde\xe5\xa0\x81\x89\xfc\x3e\x7c\x3f\x42\xe7\x6b\x8b\xc4\x36\x89\x4b\x1c\xbc\x7d\xa5\x90\x9f\x28\xe4\x27\x8a\xbf\xff\xa7\x3f\x9c\xc2\x7f\xb1\x07\xdd\x3b\xae\x5a\xc6\xf8\x4a\x61\xbc\x62\x54\x0d\xd7\x56\xcd\x32\x71\xf0\xef\xec\xc1\x5f\x1c\x40\xdb\x35\x6a\x93\x6b\x2b\x85\xcc\x70\xc5\x70\x5c\x85\xda\xca\x4d\xd5\xd5\x96\x15\xba\x78\x9d\x68\xae\xa3\xd0\x25\xe5\x86\x61\xea\xca\x1c\x7b\xf2\x32\x7b\x32\xf7\x38\xbb\x73\x9a\xda\xe4\x4a\x21\xf8\xf5\x2c\xb5\xa7\x2a\x95\x8b\x6a\x95\x38\x96\xaa\x11\x67\x62\x97\x6a\x59\x15\x43\x53\x5d\x83\x9a\xe3\xd7\x1d\x6a\x46\x7f\x59\x55\xab\x95\x89\xdd\xe1\x5f\x56\x4c\x3d\x7f\xc3\xff\x88\xbc\x65\x53\x97\x2e\xd6\x96\x26\x1e\xae\x2f\xe8\x98\xe3\xda\x44\xad\x9e\xe0\x15\x9d\xd8\xd7\x42\x19\x91\x27\x26\x07\x72\xe3\xb9\xd2\xb3\xe8\x0d\x68\xe0\xc0\xbe\x02\xde\x85\x5e\x87\xee\x7a\xda\x54\x6b\xee\x32\xb5\x8d\xb7\x11\x1d\x15\xd1\xc0\xc4\xbe\x7d\xf8\x18\x3a\x8a\xd2\x97\xce\xe3\xfd\xa8\x80\xc6\x77\x8f\xeb\x64\xc9\x30\x0d\xf6\x16\x67\xdc\xa0\xf9\x1b\x47\x9c\xbc\x6a\x19\x79\xd6\x7c\xf9\x95\x42\x3e\x68\x89\x39\xc3\x71\x2f\x6f\x5b\x76\x5d\xcb\xb9\xfe\x08\xba\xf7\xc5\xb1\xa0\x42\x63\xaa\xc6\x0a\xc0\xdb\xf1\x36\xd6\x86\xe8\xfa\x1c\x7a\x34\x72\xbd\x6c\xd3\x9a\x35\xb6\x42\x6c\xc7\xa0\xe6\x18\x6b\x78\x3c\x82\x87\xd8\xff\x27\x43\x3d\x80\xe4\x0d\x93\xca\x4a\x01\xf1\x47\x26\x95\x6c\x16\x95\xde\x31\x88\xbe\x33\x80\xff\x7e\x20\xf3\x37\x03\x78\xdb\x0b\x35\x62\xaf\x66\x5e\x1b\x50\x2b\x15\x7a\xf3\x2a\xfb\xf0\x22\xa5\x37\xaa\xaa\x7d\xc3\x51\x6c\xf2\x42\x8d\x38\xae\x23\xfb\x9a\xac\x10\x93\xfd\xc3\x70\x97\x15\x77\xd5\x22\x4a\xb6\x78\xe9\xd2\xf9\x0b\x53\x97\xcf\x67\xf3\xca\x02\xb1\xd9\xeb\x14\x77\x59\x75\x15\x9d\x2a\x26\x75\x15\xa3\x6a\x55\x48\x95\x98\xae\xb2\xe8\x97\x59\x55\x57\x15\xa3\x6c\x52\x9b\x28\xee\xb2\xe1\x28\x4b\x15\xb5\xac\xa8\xa6\x1e\xba\x45\xb5\x89\xe2\xb0\xa7\x54\x57\x71\x97\xd9\xdf\xac\xe8\x21\x47\xd1\x0d\x47\xb3\x09\x6b\x9a\xbc\x32\x5d\x31\x78\x6d\x9c\x65\x5a\xab\xe8\xfc\x75\xaa\xe3\xd4\xaa\xa4\xae\x20\x9b\xb8\x35\xdb\x24\x3a\x2b\x4c\x35\x57\x15\xc7\x22\x9a\xb1\x64\x68\x8a\x61\xba\xc4\x5e\x51\x2b\xa3\x8a\x49\x6d\x5e\x2d\x77\x99\xac\x7a\x85\x04\xef\x55\x6e\x1a\x95\x0a\xab\x8f\xce\x9f\xf7\x3e\x59\xb4\x86\xa2\xd7\x6c\xc3\x2c\x2b\xaa\xe2\x10\xc7\xe1\x15\x9b\x5d\x12\x1f\x66\x38\xa2\x52\xa2\xf1\x46\xe5\xd7\x1a\xa4\xa2\xb3\x4b\xa2\x09\x74\x79\x3b\x51\x96\x88\xea\xd6\x6c\xa2\x94\x55\x97\x28\x75\xdd\x20\x4b\x22\xa6\xba\x58\x21\xba\x62\x98\x8a\x6a\x19\xa2\x72\xcd\x8a\xcd\xde\x1b\xd3\x9f\x13\xdb\x17\x29\xad\x10\xd5\xfc\x78\x2a\x55\xfa\xf3\x9d\xe8\xff\xde\x89\xff\xfb\xce\xcc\x7f\xdb\xe9\x0d\x82\xff\xb8\xf3\xa9\x65\xa2\x68\xd4\x74\x0d\xb3\x46\x14\x6a\xb1\x96\xf6\x1a\x78\x91\xb5\x87\xab\xdc\x5c\x26\x26\x6b\x53\xdb\x20\x2b\xec\xc3\xab\x94\xb7\xb1\x53\xab\xb8\x8e\xb2\x64\xd3\x6a\xa8\xe5\xf2\xca\x82\x61\x6a\xb2\x9f\x57\xd4\x4a\x8d\xb0\x2a\xca\x56\xe5\xf3\x84\xe8\xa3\x8a\x26\x3b\x92\xf5\x00\x35\x2b\xab\x4a\xcd\x11\xcd\xef\x57\x44\x3c\xca\x0b\x57\x15\xcb\x26\x2b\x06\xad\x39\x0a\xaf\xb3\x7c\xb5\x18\x93\x86\x4e\x4c\xd7\xd0\xd4\x8
a\xbc\x66\xa9\xb6\x5a\x25\x2e\x1b\x95\xc3\xe4\x45\x8d\x58\xae\xb2\x44\x6d\x5e\xb6\x28\x92\x2e\xf9\x2f\x19\xe1\x63\x30\xd4\xeb\xac\x3a\x36\x61\xc2\x4d\x51\xeb\xab\x62\xb0\x11\x4e\x44\xaf\xd8\x44\xa3\x65\xd3\x78\x1b\xf1\xbb\x52\x8e\x30\xa2\x37\x3c\xc6\x9e\x50\x2a\xd4\x2c\x13\x9b\xfd\x64\xe8\xac\x39\xdd\x65\xd6\x1a\x35\xa2\xb8\x54\x21\x2f\x5a\x86\xcd\x45\x94\x32\x5c\x26\x26\xb1\xd5\x4a\x65\x55\x59\x32\x56\xf8\xd5\x25\x63\xc9\x25\xc4\x54\xaa\x86\x59\x73\x89\x33\xc2\xa4\x30\xaf\xdb\x92\x51\xae\xc9\xc7\xb4\x65\x36\xf3\x15\x6a\x86\xbe\x65\xb4\x61\x34\xdb\xc4\xb1\xa8\xa9\x8b\x66\x53\x95\x03\x85\x7d\xca\x65\xe2\xd0\x9a\xad\x91\x33\xac\x0a\x44\x57\x88\x6d\xb3\xb6\xa2\x65\x51\x41\x79\xa7\xff\x45\x2e\xbd\x41\x4c\xff\x93\x45\x1f\x2a\x26\x21\xba\x23\xee\x72\x0c\xc7\x65\x3f\x31\xf9\x35\xca\x1a\xac\x5a\x73\x58\x63\x39\xae\x6a\xf3\x59\x6d\xd8\xfc\x1a\x2f\x98\xd6\xdc\x68\x8f\xf3\xf1\x9c\x57\x2e\xb1\x57\xdf\x34\x1c\x32\x1a\x7e\x0b\xeb\x19\x39\x1d\x29\xaf\x1b\x2f\x47\xca\x2a\x29\x9d\x96\x65\x0d\x59\xf7\x10\x63\x85\xe8\xc1\xef\xec\x63\xf9\xc7\x25\x36\x0b\x2f\x97\x57\x98\x8d\x74\x7f\x70\x9b\xe4\x45\x57\xb9\x41\x56\x47\x95\xc5\x9a\x1b\xfc\x5c\x51\x5d\xf6\x7e\xc7\x54\x2d\x67\x99\xba\xa3\xca\xcd\x65\x43\x5b\xe6\x93\xd2\x0c\x35\x88\x7f\xbf\x3f\x90\x65\xf5\xc5\x14\x1a\xf3\x57\x54\x2e\x48\x99\x04\xd3\x6c\xa2\xba\x6c\xa6\x54\xa9\xce\xc7\xd5\x28\xeb\x78\x9d\x54\x88\xcb\xc4\xda\x92\x4b\xc4\x98\x5e\x32\x6c\xc7\xad\x6f\x8c\x4a\x85\xcd\x5c\xc3\xd4\x2a\x35\x5d\xc8\x0e\x76\xab\xf8\x50\xd6\xae\xaa\xc3\x07\x24\xfb\xbf\xe8\x94\x1b\x64\x55\x08\xce\xa0\xe0\xac\xf7\xc5\xd9\x3c\x42\x4f\x45\xe4\x0d\x9b\x01\x4e\xcd\xb2\xa8\xcd\xea\xc2\x85\x83\x58\x2b\x0c\x47\x71\xed\x1a\x09\xe4\x34\xef\x35\xde\xf9\x52\x22\x86\x5b\x4e\x34\x00\x1f\x7f\x57\xc4\x9a\x25\xa7\x8c\x2f\xbd\x17\x57\xc3\xbd\xc5\xe6\x2a\x7b\x75\xd5\x70\x1c\x2e\x95\x45\xd3\x88\xd5\xdd\xc9\x67\x77\x78\x23\x69\xe2\x0e\xc7\x65\x02\x9a\x49\xbc\x7f\x9e\x42\xef\x4c\xe1\xef\x4d\x65\xfe\xa9\x27\xf0\x9e\x99\x52\x1c\x52\x21\x9a\xcb\x47\x3a\x1f\x9d\xb6\xa1\x89\xb1\x28\xd4\x9c\xa5\xa0\x06\x5e\xbf\x88\x9a\x18\xb6\x68\x04\x27\xaf\xcc\x90\x25\x95\xf7\x1d\x9b\xc0\x2b\xc4\x5e\x75\x97\x0d\xb3\x9c\xcf\xde\xcd\x6f\x58\x90\x2f\xe8\x64\x4d\x2a\xea\x22\xa9\xac\x55\x13\x7e\x43\x5c\x4d\xfe\x16\xa1\xbf\x46\xf8\x7f\xa1\xcc\xb7\x90\x57\x95\xdf\x46\x5c\xfb\x63\x3d\xa8\x2a\x55\xf5\x45\xa3\x5a\xab\x2a\x66\xad\xba\x48\x6c\x51\x0b\x31\x74\x1c\x51\x4f\x56\x25\x2e\x55\xe5\x74\xd1\xd4\x4a\x85\x8b\x05\xbe\x30\x18\x2e\xa9\x3a\x0a\x79\x91\x0b\x81\xc6\x25\x55\x7c\xdf\xf3\x5e\x67\x3d\x2f\xc7\x95\x14\x5e\xbc\xbc\x2a\x71\x55\x5d\x75\x55\xf6\x36\x55\x0e\x0a\x3e\x33\x34\xd5\x64\x23\xbb\xe6\x84\xe7\xb6\xa3\x56\xd9\x60\x37\x5c\xc3\x5f\x04\x44\x2d\xd9\x72\x45\x82\xd9\xcb\x5e\x2d\xbe\x85\x35\x17\x53\x5e\x5c\x57\x2c\xe3\xe2\xdb\x85\xf0\x17\xdf\x46\x6e\xf2\x69\xa0\x7a\x33\x87\xcf\x2c\x36\xf1\xaa\xb4\x66\xf2\x62\xc4\x57\x0e\xd7\x2c\xf6\xb2\xb7\x11\x9b\x8a\x5f\x46\xbc\xd9\x26\x34\x05\x95\x0b\x18\xef\x69\xaf\x0f\xd9\x4c\x5b\x32\x2a\x2e\x61\x32\x97\xc9\x41\x36\xb4\xb5\xa8\x6e\x13\x59\x15\x2d\x9b\x38\x84\xad\xaa\x74\x29\x46\x66\xb2\x1a\xe8\x6c\xc9\xab\x1a\x26\xf1\x57\x97\xc8\x22\xcd\xe7\xf6\x8a\x6a\x54\x98\x36\x11\xe8\x6d\xec\x93\xb5\x65\x4a\x1d\xc2\x67\x96\x4b\xbd\x79\x2d\xfb\x82\x35\x8b\x6a\x97\x6b\x5c\x9d\x53\xb9\x90\xe4\x1f\xc4\x1b\x89\x7d\x9b\xac\x8f\x5f\x74\xd0\xba\xb3\x4b\x8a\x3f\xa4\x82\xc5\xd1\x5b\x6f\xeb\x3e\xc0\x70\x14\x52\xb5\xdc\xd5\xa8\x62\xe0\x6b\x65\xaa\xcb\xd6\xd0\xb5\x3e\x68\x2d\x01\x65\x2c\xd5\x89\x27\x26\xcf\xfc\x51\x59\xae\xa9\xb6\x6a\xba\x84\x48\xe1\xcb\x6a\xe7\xf5\x93\x3f\xf9\xb8\x8c\xab\x39\x6c\xb4\xf8\x35\xf7\x
\x40\xbf\x80\x7e\x01\xfd\x02\xfa\x05\xf4\x0b\xe8\x17\xd0\x6f\x6f\xa1\xdf\x0f\x8f\xa0\xb3\x82\x3d\x8a\xcc\x18\x75\x94\xb1\xc9\xd9\x69\x16\xd5\x75\xc3\xb1\x6b\x7c\x7b\xbe\x58\xd3\xcb\xc4\x75\xf0\x17\x87\xf1\xd7\x06\xd1\xeb\x44\x39\x3e\x67\xfc\xa7\x2d\x62\x46\xaa\xcf\xf8\x25\x16\x79\x89\x6d\xe2\x8b\xfb\xf9\x03\x22\x6b\x47\x43\xde\xe9\x98\xd7\x6e\x0d\xa8\xd8\xa3\x51\xe3\xcf\x25\x93\xc7\x63\xf8\xa8\xe4\x88\x62\xa4\x48\x27\xd2\x98\x9e\x88\x49\xb7\x00\xdc\x10\xb8\x21\x70\x43\xe0\x86\xc0\x0d\x81\x1b\x02\x37\x04\x6e\x08\xdc\x10\xb8\x21\x70\x43\xe0\x86\xc0\x0d\xbb\xcb\x0d\xb7\x5e\xda\x3b\x20\x93\x40\x26\x81\x4c\x6e\x21\x32\xf9\x81\x11\xb4\x8f\x75\xdd\xf8\x4a\xa1\x19\x86\x14\xfb\xc7\xaa\x6a\xc9\x9f\xdf\x8e\x7f\x7f\x18\x7f\x3d\x8d\xb6\x6b\xd4\x26\xd7\x56\x0a\x99\x87\x6c\xa2\xea\x75\x3b\x5b\xe1\xe3\x76\x41\xb5\x72\x0f\xb3\xab\xd3\xd4\x26\x57\x42\x44\xd0\xbf\xdc\xde\x43\x9e\x4a\x6f\x5e\xdf\x99\x5d\xec\x0b\xf2\x2b\x85\xbc\x5f\x9d\x44\x3e\xf7\x70\x3c\x9f\xbb\x03\x0f\x96\x89\x8b\xae\x9f\x4f\x26\x73\xc3\x78\x8f\x24\x73\xd9\xac\xa4\x72\xc1\xeb\xc3\xe1\xdd\x99\x4f\xee\x08\xda\xf8\x51\x9b\x58\x15\x55\x23\x4d\x9b\x59\x91\x37\x74\xa9\xa5\xd7\x7d\xf0\x5f\x43\x53\xc3\x41\x32\x70\xf0\xdf\x06\x0f\xfe\xfb\xc1\xd4\xad\x4e\xf3\xf5\x1f\xcd\xd7\x4e\x51\x61\xd5\xda\x2c\x2a\x72\xff\xf0\xfa\x40\x54\x60\x99\xc1\x5d\x0d\x49\x87\x47\xc5\x6f\xdd\x12\x0e\x9d\x39\x6b\x0f\x04\x46\x82\xc0\x80\xf3\x9e\xba\x72\xde\x13\x1c\xf4\x01\x07\x7d\xc0\x41\x1f\x9d\x3a\xe8\xa3\xf4\xf9\x54\x97\x4e\x3b\x40\x97\xc5\xe9\xc1\xe7\xd1\x6c\xe8\xf4\xe0\xf6\x9c\xa0\xa0\xc4\x2f\xfe\x3b\xf1\x76\xd1\x4c\xad\xae\xff\xe1\x55\xbe\xb9\x2e\x50\xfc\xc3\x3b\x83\xf5\x7f\xd8\x52\x6d\xd7\xe0\x86\x41\xb1\x57\x6c\xba\x67\x78\xc4\x12\xa9\xbd\x3a\xae\x14\xbc\xb1\xbe\xa0\x31\xfe\xe6\xbd\x3c\x90\xf0\xa1\xf0\xc5\x2a\xb1\xcb\x24\x7c\x75\x4f\xf8\x2a\xcf\x74\x4f\xca\x86\x36\xd6\x70\x5f\xa4\x14\xf6\xf7\xaa\xbc\xca\x6a\x59\xbc\x88\xe6\x50\xa9\x6e\xb7\x32\x89\x8e\x6c\xa0\x9b\xe7\xb9\xc5\x1c\x54\x91\x04\x55\xe4\x33\x03\xe8\xd3\x03\xf8\x53\x03\x99\x4f\xf8\xf2\xfc\xa5\x81\xfe\xd9\xbb\xd4\x19\x19\x58\x3b\x73\xa3\x39\x37\x84\xb1\xc1\x19\xb8\xce\x0c\x37\x19\xb8\x23\x7c\x59\x12\x3e\x16\x6a\x85\x3f\xc9\xf4\x2f\xf1\x34\xbf\x83\xeb\x42\x8e\x32\x5c\x72\xa8\x39\x2f\x7c\x48\x2e\xb0\x69\x21\xff\x5e\xf0\x26\x4b\xf0\xe3\x48\xf3\x4d\x55\xf1\xb5\x14\x7a\x35\x85\x5f\x49\x65\xbe\xe0\xd3\xab\x4f\xa6\xce\x52\x5b\xe3\x6a\x5a\x99\xf2\x66\xa7\x4a\x76\x89\xfd\x94\x55\xa6\x22\x5f\xc1\xb9\xa4\xd0\xa8\x6a\x4e\x60\x2d\x1f\x53\x35\xfe\xe1\xdc\x07\xa1\x62\x68\x72\xc5\x24\x15\xdd\x51\xe8\x4d\xd9\xa6\xc2\x46\x6f\x11\x6a\x55\x48\x5e\x11\x6f\xe4\x4e\x3f\x5e\x5f\x72\x3e\x18\xdb\x00\xfe\xeb\xb3\xdb\x78\xb5\x22\x0e\x2c\x1d\x67\x41\xed\xdc\xc1\x5d\x7f\x34\x7e\xc1\xd8\x81\xef\xe0\x9f\x8a\x4a\x87\xd0\x01\x3c\x91\xdd\xe7\x1b\x03\xee\xe7\x73\x47\x4e\x64\xbf\xd0\xec\x20\xfb\x39\x8c\xf8\xb7\x9e\x11\x01\xbf\x6f\x3b\x3a\x94\x00\x25\x2d\xaa\x7b\x38\x72\xdc\xa2\xb6\xbb\x44\xed\x9b\xaa\xad\xe3\x2f\xdf\x81\x7f\x2c\x84\x26\x73\x1a\x35\x4d\xf6\xb5\xe7\xce\x3c\x15\x4c\x39\x97\x2a\xa1\x67\xa4\x63\x64\x6e\x58\xde\x2b\xd6\xc3\x73\xc4\x8d\x38\x30\xce\x07\x0f\x4c\xb0\x3d\xae\xe0\x8d\x87\xd1\x43\x62\x8c\xdd\x8f\xee\xe5\x63\xec\x6e\x74\xe7\x4f\xa5\x76\x20\xf9\x31\x89\x43\xe8\x6a\xf2\x10\x3a\x80\x27\xea\x87\x90\xac\xce\x59\x51\x1d\xb9\x3d\x8e\x0e\xa7\xc7\xe2\x87\x13\xc2\x3b\xe4\x57\xa2\xec\xbf\x0d\xb5\xd3\x5e\xaf\x9d\xe6\x2f\x2d\x24\x36\xd4\x48\xa4\xa1\xe6\xa9\xd3\x5a\x4b\x35\x55\xea\xd6\x6c\xc2\x1e\x68\xa9\x52\x11\x9d\xc6\x27\xb3\xc7\xfd\x29\xf4\x58\x78\xea\xc5\xbe\x62\x33\xa6\xe1\x5b\xd0\x55\xfc\x74\x66\xc1\x9b\x86\x87\xe7\xa4\x5b\x08\xeb\x4
1\xde\x95\x5e\x37\x5e\xf6\x56\xa3\x90\xa9\xfa\x2a\x59\x5c\xa0\xda\x0d\xe2\x3a\xd9\x6d\xfc\x81\xf0\x8e\x17\xff\x64\x06\x15\x13\x26\xa4\x4d\xfc\x55\x4c\x46\xad\x57\x88\xed\x4f\x51\x47\x53\x2b\x04\xff\xf7\x07\xf1\xf7\x0d\x04\x83\xae\xc0\xed\x06\xfc\x92\xd7\x98\x81\x96\x7a\x39\x28\x6f\xda\x2f\x2f\x97\x8f\x33\x26\xc4\xde\xba\xc0\x8a\x6d\xb3\x75\xe1\xbb\xd1\x69\x31\x54\x8f\xa2\xc3\x7c\xa8\x16\xd0\x38\x1a\x6b\xba\xa2\xa8\x35\x97\xb2\xaf\x33\xcc\x32\xdf\x20\xb0\x1a\xdd\xaa\x81\x61\x3e\x79\x1a\x8c\xe1\xbd\xde\xf9\x50\x41\x05\xe4\x7c\x10\x75\x88\x58\x19\x7e\x2f\x64\x65\x38\xe0\x59\x19\xd6\xd5\x29\x85\x26\xa6\x87\x6e\xf5\x4b\x71\x1a\x4d\xa1\x53\x75\xda\xfd\x7a\x3b\x06\x54\x7a\x30\x47\x6c\xd0\x1c\xf1\xc3\xa9\x36\xc8\x85\x33\xc2\x22\x71\x12\x1d\x0f\x2c\x12\x1d\x10\x2f\x6d\x97\x1f\x09\x66\x8e\xe2\xfb\xee\x0a\xc4\xcb\x89\x06\x32\xb1\x2e\x39\x33\x1e\x8b\x2b\xba\x21\x65\x80\x5f\xf4\x9b\xb0\x03\x7e\x01\xfc\x62\x93\xf9\x45\x37\xb4\xcd\x24\xea\xd0\xfe\xf5\xa2\x34\x81\xf6\xe1\x7c\x76\xd4\xdf\x08\xdd\x13\xde\x4c\xf1\xfb\x6f\x0b\x86\xf1\x1b\xc3\x81\x63\xd5\x9a\x51\x9e\xdc\x39\xcd\xe6\x82\x00\xff\xeb\x61\xfc\xe9\xc1\x60\xbd\x6c\xf1\xcc\xdf\x39\x56\xc2\x65\x76\xa1\x4d\xc1\x9c\xbb\x6f\xc6\x2d\xb4\xc1\x6b\x20\x78\xf3\x16\x82\x37\xe7\x92\x67\xd4\x08\x1e\x92\x67\x7e\xf8\x4d\x8e\xe2\x0d\x41\x10\xaa\x09\xa1\x9a\x10\xaa\x09\xa1\x9a\x10\xaa\x09\xa1\x9a\x10\xaa\x09\xa1\x9a\x10\xaa\x09\xa1\x9a\x10\xaa\x09\xa1\x9a\x10\xaa\x09\xa1\x9a\x10\xaa\x09\xa1\x9a\x10\xaa\x79\xfb\x84\x6a\x7e\x79\x18\x8d\x89\x24\x72\x8e\x4b\x6d\xb5\x4c\x1a\xce\x0e\xd6\x1c\xc3\xa4\x3a\xf1\xe3\x34\x7f\x68\x18\xff\xc1\x20\x42\xf2\xf6\x6b\x2b\x85\xcc\xcb\xf2\x3c\x8a\x30\x47\x34\x3d\x29\x49\x97\x94\x1b\x86\xa9\x2b\xd3\x0b\xb3\x17\xa9\xde\x1e\x98\x38\x1a\x68\x44\x5c\xeb\x93\xeb\x27\x53\xaa\x02\x55\x6f\x28\xa2\x5d\x87\xca\xce\xe7\xee\xe7\xaf\x5b\x10\x5f\x70\xa5\x20\xab\x06\xf0\x71\x3d\x28\xff\xa6\x40\xf9\x4f\x27\x83\xc7\x09\xbc\x4f\xb2\xc5\xe8\x10\xf3\x5c\x17\x45\xeb\x47\x79\x3e\x10\x48\x20\x90\x40\x20\x81\x40\x02\x81\x04\x02\x09\x04\x12\x08\x24\x10\x48\x20\x90\x40\x20\x81\x40\x02\x81\xec\x2e\x81\x3c\x80\x26\xf0\xbe\x6c\xde\x27\x90\xf7\x46\xe2\xc3\xc4\xce\xad\xd1\xb3\x0a\xa8\x22\x50\x45\xa0\x8a\x7d\x4c\x15\xbf\x38\x84\xee\x8f\xfa\x29\x3a\x44\xb3\x89\xeb\xe0\x9f\x1a\xc2\x9f\x08\x39\x23\xda\xad\x39\x23\x2e\xf0\xa7\xdb\xe4\x88\xf8\x44\xc8\x11\x51\x14\x3c\x67\x38\xee\x59\x6a\x4f\x55\x2a\xbe\x63\x62\xfb\x4e\x39\xbe\x8d\x0e\xa7\x6d\xc9\x15\xf1\xc9\x64\x22\xf8\x04\xde\x5d\x1f\x53\x29\x3a\x02\x20\x20\x40\x40\x80\x80\x00\x01\x01\x02\x02\x04\x04\x08\x08\x10\x10\x20\x20\x40\x40\x80\x80\x00\x01\x01\x02\x6e\x26\x04\x04\x9c\x07\x38\x0f\x70\x5e\x1f\xe3\xbc\x9f\x7b\x1e\x4d\x0b\x27\x41\x55\x67\xaa\xac\x41\x4d\x9b\x94\x0d\x9e\x78\x82\x3d\x13\xb8\x0c\x56\x6b\xae\xca\x34\x94\x9b\x64\x71\x99\xd2\x1b\x91\x3d\x9f\x83\xbf\x75\x0d\xff\xff\x87\xd0\x77\xc5\x16\x72\x6d\xa5\x90\x99\x14\x2a\xa4\x2d\x17\x3f\x4f\x3a\x7a\x2e\x84\x17\x64\xe1\x57\x45\xe1\xd3\xe1\xc2\x73\x87\xd9\xb3\x53\x71\x25\x5f\x29\xac\xf5\x60\x8f\x73\xbe\x22\x40\x30\x80\x60\x45\x80\x60\x00\xc1\x00\x82\x01\x04\xeb\x1b\x08\x56\xec\x19\x08\xd6\xf6\x9a\x6c\x18\x82\x15\x01\x82\x01\x04\x03\x08\x06\x10\x0c\x20\x58\xf7\x21\x58\xb1\xaf\x99\x55\x11\x98\x55\xe7\x98\x55\xb1\xd7\x99\x55\x71\x0b\x32\xab\xd2\x3f\x4b\x21\x22\xfc\xa0\x9e\x43\xcf\x70\x3f\xa8\x2b\xe8\x29\x74\xb9\x79\xa6\xc3\x58\xb2\xb5\x52\xc8\xaf\x05\x8f\xe6\x0c\x27\x39\x86\xf2\x91\x78\xaf\xa9\xed\x78\x9b\x70\x98\x7a\x31\xd9\x61\xea\x69\xbc\x10\x93\xaa\x6d\x2d\x18\x27\xdd\xaa\xd6\xaa\x3c\xca\xfe\x2e\x5a\x03\xc6\x3d\x21\x96\x34\x45\x5d\x9b\xbb\x1d\x15\xb7\x6d\x26\x79\x13\xa0\x
4c\x47\x8b\xe8\xf9\xba\x6c\xb6\xf3\xe8\x62\x7b\x3b\x1c\x72\xdc\x42\x42\xef\x0d\x26\xf4\xfe\xf5\x34\x52\x85\x40\x7a\x2b\x7a\x33\x17\x48\x97\x51\xdb\xc7\x27\x5a\x12\x19\xbf\xaf\xa1\x67\x83\x8c\xdf\x9d\x78\x4f\x59\x9c\x72\xf6\x3c\x7a\x2e\x74\xca\x59\x27\x5e\xb4\x71\xe9\x6a\xd1\xd6\xa5\x6b\x9b\x24\x6a\xe4\xf8\xd4\x9f\x1a\x5b\x43\xba\x16\xe4\x41\x79\xa1\x53\x07\xe9\xd2\xda\x92\x76\x5a\x3c\xd2\x44\xd2\x4e\xfb\x05\x75\x51\xe6\x76\xe6\x48\x56\xa0\xdd\x40\xbb\x81\x76\x03\xed\xee\x1f\xda\x0d\x5a\x65\x82\x56\xd9\x3b\xe6\x00\x38\x0e\xbb\x2b\xc7\x61\x83\xd5\x05\xac\x2e\x60\x75\x01\xab\x0b\x58\x5d\xfa\xda\xea\xf2\xa5\x34\x7a\x2d\x8d\x5f\x4d\x67\x5e\xf1\x57\xdb\x8f\xa7\x67\x42\x51\xc0\x56\x85\xa8\x0e\xf1\xa7\xfe\x7c\xfd\x19\xf0\x11\x9f\x2b\xaf\xbb\x83\x30\x62\xd6\xe1\x85\xfc\xe1\xbc\xb2\x20\xe4\x88\x1b\x3e\xb7\x3e\x58\x45\x88\x3c\x1c\xdf\xf3\x01\xb3\x6b\x64\x7c\x49\xad\x78\xda\x7f\x56\x5c\xcd\x86\xce\xed\xf7\x5e\xa5\xea\xdc\x84\x41\xc7\x05\xd1\xd6\x03\xd5\x52\x14\x3e\xe4\x28\xa1\xe3\xfc\x59\x4b\xe5\x95\x33\x06\x17\x49\xa1\x8a\x53\xbb\xf1\xcb\x02\xb3\x91\x2b\xd4\x7d\xae\xfd\x51\x77\x39\x9f\xdd\x25\xea\x33\xe3\x9f\x97\x1f\x71\x26\x7b\xcf\x20\x7a\xd7\x20\x7e\xc7\x60\xe6\x3b\xbe\x47\xe1\x57\x06\xae\x4a\x39\xc8\x86\xe8\x32\xbd\x59\x7f\xb8\x3f\xeb\x7b\x5f\x51\x23\xf6\x12\xb5\xab\xac\x2d\x62\x6b\x7a\xa9\xee\xe5\xcd\x2b\xca\xd5\x1a\x4f\x27\x91\xa7\xfe\x1b\x4c\x5f\xd0\x0c\x3d\x50\xac\xf9\xda\x28\xce\x48\xf2\x5a\x97\x2d\x4e\x72\xe5\xf0\x56\xbf\x7c\xa8\x19\x3d\xb9\xe9\xdb\x6a\x7c\x3f\xc2\xe8\xcb\xf2\x8a\x40\x57\x5c\x16\x87\xb5\xd5\x21\xf1\x0d\x43\x6c\xcf\xc3\xff\x8a\x0e\x0c\xe7\x98\x32\x54\x54\xb5\x1b\x65\x9b\xd6\x4c\x9d\xdd\xc5\x1d\xf8\xf8\x4d\x75\x0d\x27\x94\x15\xa9\x01\x45\x0b\xf1\xbe\x60\xd1\x2f\xe9\x98\x32\x74\x96\xda\x24\x54\xac\xa2\xa9\x8e\xa6\xea\xec\xeb\x65\xfb\x08\x97\x4d\x5e\x9e\x23\xd4\xe9\x86\x02\x97\xfc\x32\xf2\xd9\x7b\xac\xfa\x71\x13\xd6\x6d\xc0\xa8\x09\x46\xcd\x3e\x35\x6a\x96\xca\x68\x4e\xe0\xf8\x33\x68\x9a\xe3\xf8\x13\xe8\x18\x3a\xba\x01\x78\xb9\xe0\xaa\x6e\xcd\x49\x04\xd5\xb9\x78\x50\x7d\x2f\xbe\x47\xcc\xe6\x40\xd2\x6e\x22\xb4\x6e\xff\x91\x65\x1f\x7a\x1e\x1d\x96\xb1\x03\x96\x41\x5e\x74\x89\xc9\x07\x58\x28\x66\x40\xab\x39\x2e\xad\x7a\xe3\x29\xd4\xfc\xf8\xab\xd7\xf0\x5f\xef\x41\xbb\x22\x0f\x5e\x5b\x29\x64\x0e\xad\x1d\x27\x30\xcd\x0b\xf4\xa8\xe1\x8c\x5f\x60\x6e\x2f\x8f\x11\x08\x97\x76\xa5\xd0\xec\x66\x88\x0b\x80\xb8\x00\x88\x0b\x00\x4b\x09\x58\x4a\xc0\x52\x02\x96\x92\x9e\xb1\x94\xf4\x8e\x21\x00\x08\x35\x10\x6a\x20\xd4\x40\xa8\x81\x50\xf7\x35\xa1\x06\x84\x06\x08\xad\x4f\x11\xda\x96\x8c\x0b\xf8\x57\x29\xb4\x22\xb8\x1f\x45\x55\xce\xfd\xca\x88\x20\xad\x19\xf7\x0b\x50\xd1\x98\xbf\x5f\x0f\x10\x60\x14\x69\xad\x14\xf2\xcd\x68\x52\x5b\x02\x05\x2a\xc9\x54\x70\x16\x9f\x93\xa7\x2a\x35\xa9\x48\xdc\x91\xef\x71\x60\x0e\x65\xff\x0e\xc5\x90\xb7\xc7\xfc\xa0\x80\xa6\x90\x6d\x4c\x06\x04\x74\x19\xb3\x09\x2a\x56\x43\x0e\x7a\xa1\x2e\x08\x40\x45\xd7\x3a\xdc\xbb\xe0\xbf\x05\x51\x01\x1b\x8c\x0a\xf8\x67\x03\xc8\x16\xe2\xe8\x06\x32\xb8\x38\xd2\x50\xe7\x07\x2c\xba\x29\xc2\x04\x2c\x64\x06\x61\x02\x5d\x79\xf1\x8b\x22\x6e\xe0\x05\x44\x43\x71\x03\x5d\x79\xf3\xad\x06\x12\xb4\x26\x7d\xd7\x12\xa9\xeb\x90\xcc\xb9\xf7\x8d\xc5\x48\xdf\xb1\xd8\xa0\x81\xa6\x92\xf8\xb0\x0c\x18\xa8\x93\xc4\xfe\xc3\x5d\x92\xc9\x10\x24\x00\xe8\x1b\xd0\x37\xa0\x6f\x40\xdf\x10\x24\x00\x41\x02\x10\x24\x00\x41\x02\x60\x82\x01\x13\x0c\x98\x60\xc0\x04\x03\x26\x18\x08\x12\x80\x20\x01\x08\x12\x80\x20\x01\xb0\x70\x82\x85\xb3\x07\x2d\x9c\x3d\x1d\x24\xd0\x55\x20\xdd\xfe\xc0\x80\xff\x35\x8a\x4e\x6e\x30\x30\x60\xfc\x7b\x4c\xb5\x4a\xde\x8e\x3f\x3b\x8a\xbf\x36\x10\xc3\xc9\xf7\
xda\x44\xd5\xeb\xb6\xab\xcd\x83\x02\xd8\xcd\x9b\x62\xad\x2c\xbd\x37\xd5\xfc\x10\xcf\x4d\xb0\x0a\x79\x83\xf0\xe1\xf8\x41\x78\x07\x1e\x2c\x93\x6e\x1b\x42\x32\xef\x8b\x33\x43\xe7\x6d\x62\x55\x54\x8d\xb4\xda\xc7\xde\xfd\x60\x94\x06\x5e\x08\x46\xe9\x44\xa3\xf4\xbb\xd3\xfd\x65\x94\x4e\x5a\x95\x9b\x0b\x44\xab\xd6\xb2\x40\x6c\xd5\xf1\x26\x41\x38\xe6\x7e\x65\x57\x9c\x5f\x8e\xdc\x3c\xac\xe5\x97\x13\x6f\x0d\xde\xca\x36\x60\x90\x70\x09\x12\x0e\xec\x10\x5d\xb1\x43\x00\x80\x02\x00\x05\x00\xaa\x53\x00\xaa\xf4\xf9\x54\x9b\x77\xe1\x97\x85\x13\xda\x79\x34\x1b\x72\x42\xeb\xf0\xce\x5e\x89\xd7\x21\x76\xe2\xed\xa2\x99\xba\xbc\xaf\x2a\xfe\xfe\x5d\x31\x6a\xc4\x61\x4b\xb5\x5d\x83\x3b\xed\x08\xdc\xdf\xea\x06\x6b\xd4\x52\x5d\x6d\xb9\xeb\xba\xc5\x1b\xeb\x0b\x1a\xe3\xf5\xd8\xcb\xfe\x9c\x7c\x28\x7c\xb1\x4a\xec\x32\x09\x5f\xdd\x13\xbe\xca\xb3\x26\x90\xb2\xa1\x8d\x35\xdc\x17\x29\x85\xfd\xbd\x2a\xaf\xb2\x5a\x16\x2f\xa2\x39\x54\xaa\xdb\xda\x4d\xa2\x23\x1b\x18\x46\xf3\x3c\xca\x1b\x34\x9a\x04\x8d\xe6\x33\x03\xe8\xd3\x03\xf8\x53\x03\x99\x4f\xf8\xcb\xc2\x4b\x03\xfd\xb3\x67\xab\xb3\xca\xb2\x76\xe6\xde\x6e\xdc\x73\x80\x0d\xce\x20\xdd\xc3\x70\x93\x81\x3b\xc2\x57\x37\xe1\x1c\xa9\x56\xf8\x93\x4c\x8d\x13\x4f\xf3\x3b\xb8\x4a\xe5\x28\xc3\x25\x87\x9a\xf3\x22\xef\xc1\x05\x36\x2d\xe4\xdf\x0b\xde\x64\x09\x7e\x1c\x69\xbe\x99\x2c\xbe\x96\x42\xaf\xa6\xf0\x2b\xa9\xcc\x17\x7c\xc6\xfb\xc9\xd4\x59\x6a\x6b\x5c\xdb\x2b\x53\xde\xec\x54\xc9\x2e\xb1\x9f\xb2\xca\x54\xe4\x2b\x38\x1c\x17\x8a\x59\xcd\x09\xdc\xdc\xc6\x54\x8d\x7f\x38\x77\x1e\xac\x18\x9a\x5c\x78\x49\x45\x77\x14\x7a\x53\xb6\xa9\x70\xae\xb3\x08\xb5\x2a\x24\xaf\x88\x37\xf2\x44\x15\x5e\x5f\x72\x48\x1d\xdb\x00\xfe\xeb\xb3\xdb\x78\xb5\x22\x11\x24\xef\x4d\x6d\xca\xee\x38\x69\x81\x79\x34\x7e\x81\xd9\x81\xef\xe0\xdf\xd4\x6d\x5e\x7c\x06\x4d\xe3\xa9\xec\xa9\x1d\x29\x3c\x68\xa9\xee\x72\xe6\x71\x3e\x25\xa5\x7c\x68\x56\x44\x76\x90\xdd\x95\x93\xa3\xe7\x23\x1d\x38\x7b\x1a\xff\xda\x08\xda\x27\xb0\xb3\x49\xdc\x9b\xd4\x66\xd3\x3e\xc4\x9c\x0d\xb3\x6c\x13\xc7\xd1\x2a\xaa\xe3\x10\x1f\x34\xff\xc0\x08\x7e\xe7\x00\xba\x3b\x78\x82\x2d\x96\x8f\xc6\x50\xe6\x59\xf1\xf8\x34\x7b\x3c\xf7\x10\xbb\xe1\xa2\xff\xcc\x95\x42\xf8\x6a\x9b\x51\xb2\x8a\xce\x8a\x31\x79\x0a\x9d\xe0\x63\xf2\x30\x3a\x88\xf6\x37\xcd\xc5\x1f\xfa\xf6\x95\x42\x3e\x5c\xaf\xc4\x71\xf6\x6c\xf2\x30\x9a\xc4\x47\xe4\x30\x6a\x68\x63\x39\x86\x22\x6f\x0c\x8f\x9b\x04\xf8\x9c\xf9\xfd\x1d\xf5\xdd\x90\x8d\x67\xc1\x91\x9e\x78\x54\xde\xd3\x9d\xce\x28\xce\xa2\x73\xe8\x4c\x9d\x56\xb0\xb1\xde\x00\x85\x00\x20\xee\x06\x21\xee\xcb\xa9\xb6\x89\x84\xf3\x02\xcc\xce\xa0\x62\x00\x66\x6f\x3f\xf9\x62\xd5\x5c\x94\xfb\xc8\xae\x7a\xf9\xf2\x80\x87\x56\xcd\xa8\x4c\x79\x44\xfc\xde\x25\x91\x02\x00\x15\x00\x2a\x00\x54\x00\xa8\x00\x50\x01\xa0\xf6\x27\x40\xed\xb0\x5e\x90\xc8\x67\x8b\x2f\xdd\x55\xaf\x1a\xec\x4d\xc0\xa5\x11\x7d\xe1\x61\xbe\x0d\xef\xbc\xba\x00\x4c\xb4\xdf\x94\x14\x60\xa2\xc0\x44\x37\x99\x89\xde\x36\xf8\x29\x89\xa2\x96\x8e\xa2\xc3\xf8\x60\x76\xbf\xcf\x35\xbf\x2b\xcc\x35\xc3\xe5\x76\x81\x65\x7e\x6b\x04\xcd\x09\x96\x69\x2f\xaa\x5a\xde\x6b\xac\x70\x02\xf0\xf1\x95\x82\x70\x9e\x1e\x67\xb5\x71\x2c\x55\xf3\xa8\x26\xff\xfb\xed\xe3\x36\xad\x04\x9c\xf3\xe3\x23\xf8\x3b\x83\xe8\x3e\x56\xda\x54\xb8\x30\xb6\xd8\xfd\x44\x4a\x3a\x61\x07\x59\x9d\xd8\x8c\x17\xfb\x02\x2f\xf5\xf6\x65\xca\xc6\x95\x1e\xd2\xdc\x3d\x95\x7d\x88\x3f\x3d\x14\x24\x35\x88\x44\xa9\x53\x8b\xf8\xfb\x29\xc7\x25\xaa\x3e\x1a\x44\xf3\xf1\x3d\x88\x8c\xfd\x32\x5c\x52\x0d\x04\xd7\x50\x24\x00\x37\x54\x76\x3e\xb7\x87\xbf\xee\x72\xfd\x87\x5c\x29\x5c\xf4\x3e\x5d\x67\x75\xed\xf1\x2c\xdf\xa5\x1b\x68\x5e\xcc\xab\x59\x74\x8e\xcf\xab\x29\x74\x0a
\x9d\xd8\xc0\x52\xca\xb3\x72\x9d\x59\x21\x66\x72\x1a\xaa\xe6\x53\xe0\xa6\x30\x24\xac\x6b\x0a\x36\x1d\x99\x72\x2a\xb2\x5e\x88\x5a\x0e\x20\xb3\x39\x64\x36\x2f\x41\x7a\x17\x48\xef\x02\xe9\x5d\x20\xbd\x4b\xdf\xa4\x77\x29\xf5\x4c\xf6\x92\xb6\xd7\x64\xc3\x69\x35\x4a\x90\x56\x03\xd2\x6a\x40\x5a\x0d\x48\xab\x01\x69\x35\xba\x9f\x56\xa3\x54\x40\xe3\x78\x2c\xbb\xd7\x27\x1d\xbb\xc2\xa4\x83\x6d\xdb\x1a\x09\xc7\xf3\xe8\x39\xfc\x4c\xf6\xad\xfe\x23\x93\xb2\xb2\x02\x3d\x32\x9d\xa8\xe6\x2e\x2b\x8e\x46\x2d\x32\xaa\x38\x35\x6d\x99\x75\x18\x57\x7a\x89\x5a\x15\x06\x1d\xcb\xa6\xbc\x71\xb2\x3b\x7d\x48\xd1\x49\x86\x52\xea\xeb\xec\x06\x25\xc8\x6e\xd0\xb9\xec\x06\xa5\x9e\xcf\x6e\xb0\x05\xf3\xb7\xe3\x7f\x3b\x84\x1e\x61\x5d\x37\xbe\x52\x18\xf7\x6c\x39\xa6\xbb\x42\x2b\xb5\x2a\xd1\x2a\xaa\x51\x75\xf0\x3b\x87\xf0\xb7\x07\xd0\x76\x8d\xda\xe4\xda\x4a\x21\xb3\x7f\xed\xd3\x02\xe7\xfd\x42\xae\xf0\x42\xa6\x59\x21\xb9\x71\xf6\xd0\x34\xb5\xc9\x95\x42\xec\x0d\x67\xa9\x3d\x55\xa9\xf8\x30\xb1\x7d\xf6\xbb\x0e\x81\xc4\x65\x74\x41\x80\xc4\xb3\x68\x86\x83\xc4\x93\xe8\x38\x9a\x6c\x0a\xe8\x59\xdb\x71\x0b\x5c\xdc\xb7\xb7\x94\xcc\xfe\x6a\x32\x24\x3c\x80\x27\x24\x24\xcc\x66\x25\x0d\x8c\x7d\x5f\x94\xd0\x27\x64\xc9\x07\x7c\x08\xf8\x10\xf0\x21\xe0\x43\xc0\x87\x80\x0f\x01\x1f\x02\x3e\x04\x7c\x08\xf8\x10\xf0\x21\xe0\x43\xc0\x87\x5d\xc6\x87\x40\xea\x80\xd4\x01\xa9\xeb\x63\x52\xf7\x7f\x8d\xa0\xc3\x1e\xa9\x5b\xd3\x05\x91\xb7\xa1\xcd\xbf\xc0\x73\x44\xfc\xe7\x23\xf8\x0f\x07\x03\x84\xf7\xd9\x56\x7c\x0f\xe7\x58\x31\x97\xd9\x0d\x3d\xe1\x81\xf8\x28\x7f\x9d\x40\x88\x81\xd3\x61\x50\xc9\x5e\x27\x86\x3d\xe9\x7a\x38\x97\x4c\x15\x47\xf0\x50\x3d\x55\x0c\x1a\x1d\x3c\x0d\x01\x15\x02\x2a\x04\x54\x08\xa8\x10\x50\x21\xa0\x42\x40\x85\x80\x0a\x01\x15\x02\x2a\x04\x54\x08\xa8\x70\x33\x51\xe1\x61\x74\x10\xef\xcf\x16\x7c\xb7\xc1\x07\xc2\x9e\x86\xc1\xe6\x0d\xfc\x0d\x81\x62\x02\xc5\x04\x8a\xd9\x55\x8a\xf9\x7f\x0c\xa3\xfd\xf2\x98\xa2\x9a\x4b\x1d\x4d\xad\x18\x66\x39\x40\x9a\x1c\x60\x51\xd3\x55\x2b\x16\xd5\xbd\x3b\x88\xed\xe0\x7f\x18\xc2\xdf\x1a\x44\xaf\x0b\x3d\x74\x6d\xa5\x90\x79\x49\x82\x4c\xc3\xd4\x8d\x15\x43\xaf\xa9\x95\x08\xd3\xf4\x55\xdc\x27\xfd\x62\xe7\xa9\x3e\xe5\x17\xdb\x16\xb0\x99\xcf\x09\xcc\x37\x15\x54\xed\x4a\xa1\xc9\x0b\xe7\x0c\xc7\xdd\x6a\xae\x8e\x9b\x01\x2e\x77\xc7\x83\xcb\xbb\x30\xe2\x15\xe3\x9e\x89\xd7\x17\x93\xe1\xe5\x29\x7c\xc2\x4b\xc0\x1a\xf4\x8e\xa4\x98\x4d\xba\x08\x90\x26\x20\x4d\x40\x9a\x80\x34\x01\x69\x02\xd2\x04\xa4\x09\x48\x13\x90\x26\x20\x4d\x40\x9a\x80\x34\x01\x69\x82\xf7\x23\x70\x43\xe0\x86\xc0\x0d\x37\x85\x1b\x7e\x65\x18\x1d\xf2\x8e\x37\xb7\x9c\xe4\x34\x8c\x84\xa3\x2e\x36\x81\xf1\x0f\x0e\xe3\x97\x07\xd1\x76\xf6\xdc\xb5\x95\x42\x66\xa5\x35\x64\x78\x59\x94\xb0\x40\xdc\x36\x51\xc2\xdd\x82\x12\x5a\x96\x13\xc9\x9a\xe8\xbf\x66\xce\x70\x5c\x60\x81\x8d\x2c\xf0\x62\x32\xe6\xdb\x8b\x47\xfc\x73\x96\x2c\xc7\xcb\x84\xe8\xb7\x6c\x34\xe0\xb9\x15\xb6\x08\xdc\x0f\xb8\x1f\x70\x3f\xe0\x7e\xc0\xfd\x80\xfb\x01\xf7\x03\xee\x07\xdc\x0f\xb8\x1f\x70\x3f\xe0\x7e\xc0\xfd\xba\xcc\xfd\xc0\x23\x11\xc8\x22\x90\x45\x20\x8b\x1d\x24\x8b\xbf\x99\x46\x77\x0b\xb2\x28\xce\x8a\x1b\xc7\xbf\x90\xc6\x9f\x4f\xa3\x3b\xc4\x3f\x33\x99\x32\x3f\x36\x6f\x89\xda\x55\xbf\xdd\x54\x85\xf3\xa6\xdc\x3d\x65\xe2\x8a\xf3\xe1\xa6\xe6\x67\xcf\xb1\x9f\xda\x77\xf0\x58\x43\x41\x93\x0d\x05\x4d\xb6\x52\x50\xc9\x40\x17\x05\xb4\x3b\x87\xce\x70\x68\x77\x0a\x9d\x40\xc7\x36\x00\xed\xbc\x6f\x4c\x42\x76\xf8\xbd\x23\xe8\x58\xf3\x43\xc1\x17\x89\xab\x7a\xf8\x36\xfe\x7c\xf0\x5f\x1f\xc6\xef\xde\x86\x70\xe4\x74\x38\xfe\x54\xe6\xa7\x5b\x89\x5c\x0f\x1f\x1b\xd4\x13\xb1\xeb\xd9\x9b\xf5\x67\xd5\xf1\xaf\xe9\xc8\x81\x75\xb7\x11\xf
9\x4d\x0c\x5f\x5f\x97\x07\xe8\x7a\x0f\xaf\xe2\x5d\x04\x24\x18\x48\x30\x90\x60\x20\xc1\x40\x82\x81\x04\x03\x09\x06\x12\x0c\x24\x18\x48\x30\x90\x60\x20\xc1\x40\x82\xbb\x4d\x82\x7b\xe8\xa0\x60\x40\xbc\x80\x78\x01\xf1\x6e\x21\xc4\xfb\xfd\x6f\x44\xb3\x11\xc4\xbb\x52\x68\xe6\x38\x6a\x51\x5d\x37\x1c\xbb\xc6\xb7\xf3\x8b\x35\xbd\x4c\x5c\x0f\x4b\x8e\x3b\xae\xea\xd6\x1c\xfc\x85\x0c\xfe\x91\x01\xb4\x53\x14\x75\x6d\xa5\x90\xd9\x67\x13\x55\x57\xc4\x55\xaf\xc9\x83\xa5\x7b\x9e\xea\x33\x7e\x89\x45\x5e\x62\x6e\x9c\x3d\x21\xc0\x71\xd8\x19\x34\xe6\xd6\x05\x5e\x6a\xfb\xa8\x32\x87\x79\x7a\x53\xf4\x86\x66\x05\xe5\x2b\xa2\xd3\x9c\xf2\x4d\xa2\x23\xe8\x50\xd3\x63\x6d\x44\x13\xf0\x83\x6d\x1a\x6b\xee\x51\xbc\x87\xe3\x29\xde\x1d\x78\xb0\x4c\x5c\x74\xfd\x2d\xc9\x0c\xef\x10\x3e\x20\x19\x9e\x78\xa1\x77\xb8\x4d\xe3\x3b\x23\x9e\x9e\x99\x77\xec\x0c\xf7\xd2\x7e\x9b\x58\x15\x55\x23\xeb\xea\xa8\x09\xf9\xd0\xa6\xf5\x55\x71\x0e\x95\xd0\x93\x78\x70\x91\xea\xab\x19\xfe\x5f\x25\x95\xdb\x70\x9f\x14\x3f\x96\x46\x1f\x4d\xe3\x8f\xa4\x33\x1f\xf4\x65\xf7\xb7\x53\x57\xd9\x22\x28\x17\xee\x51\xee\x18\xad\xb1\xed\xb9\x10\xb6\x91\xcd\x62\x18\x20\x2e\x12\xc5\x3b\x30\x4a\xcf\x2b\x53\xa6\x62\x98\x02\xd3\x50\x5b\xa9\x99\x3e\xe9\xd1\x15\xdd\x5e\xbd\x5c\x33\x15\xdd\xb0\x09\x93\x30\xc4\xc7\x07\x4c\x0b\xe0\x6a\x96\xe4\x27\xde\x6e\x49\xee\x58\x95\xa5\x9a\xcd\x35\x72\xcb\xa6\x1a\x71\xb8\x3e\x24\x3b\x4d\xae\x96\x79\xe5\x0a\x7f\x23\xdf\xe9\x70\x05\x66\x52\x19\x53\xa6\x2a\x95\x49\xae\xed\xe8\xf6\xaa\x62\xd7\x4c\xd6\xe1\x4c\x24\x79\x2a\x9e\x2c\x8e\xe8\xd9\x3b\x44\xd5\x42\xf2\xb7\xf8\xa1\x34\xfa\xfe\x34\x7e\x7f\x3a\xf3\x5e\xbf\x81\xbe\x99\xe2\x0a\xf2\x05\xd5\x54\xcb\xc4\x16\x5b\x3d\x61\x7f\x76\x1c\xaa\x19\x5c\x7b\xf1\x37\x55\x2a\xdf\x95\x52\x5b\x61\xca\xad\xbb\xea\xab\x73\x55\xf5\x06\xab\xbf\xbb\x4c\x1c\xe2\x09\x49\x26\xb8\x3d\xdc\xc5\x89\xcf\x22\x51\xb8\x78\xe6\x1b\x28\x6a\x2b\x85\x89\x23\xec\x5e\x5b\xd5\x38\x97\xab\x50\xb3\x2c\x44\x22\xdf\xe4\x30\x7d\x54\x35\x4c\xa1\xa9\xf0\x4d\x44\x70\x2f\x67\x15\x92\x1a\xb2\xad\xaf\xb7\xcc\x95\x69\x45\x35\xcb\x79\x6a\x97\xc7\xad\x1b\xe5\xf1\x9a\x69\x68\x54\x27\xe3\xbb\x67\x9d\x79\x56\x4a\x3e\x7b\x57\xf8\x5b\xc3\x0b\xd3\xe7\x52\x6d\x94\x0d\xc2\x22\x55\xe0\x16\xa9\xed\xd3\x42\x75\xbf\xa5\xf2\x92\x4c\x09\xcd\x85\x90\x55\x6b\x59\x08\xb5\x26\x76\xa2\xa2\xaa\xf8\xc3\x77\x85\x85\xd0\x71\x4b\xb5\xd9\x7e\x9b\xed\x51\xf9\x12\xb7\x2e\x69\xb4\xcf\x62\x4b\xdb\xa6\xc8\xa2\x37\xd6\x17\x34\xc6\xeb\xb2\x97\x1b\x26\x1f\x0a\x5f\xac\x12\xbb\x4c\xc2\x57\xf7\x84\xaf\x32\x5d\xd8\x25\x65\x43\x1b\x6b\xb8\x2f\x52\x0a\xfb\x7b\x55\x5e\x65\xb5\x2c\x5e\x44\x73\xa8\x54\x27\x07\x27\xd1\x91\x0d\x58\xa0\xe6\x39\xe7\x07\x49\x98\x20\x09\x3f\x33\x80\x3e\x3d\x80\x3f\x35\x90\xf9\x84\x6f\xf8\x7a\x69\xa0\x7f\x24\x61\x1d\x1a\x61\xed\xcc\x51\x3f\xc7\x77\x6c\x70\x06\x06\xbf\xe1\x26\x03\x77\x84\x33\x6f\x61\x19\x52\x2b\xfc\x49\x93\x9a\x63\xe2\x69\x7e\x07\xb7\x0c\x3a\xca\x70\xc9\xa1\xe6\xbc\xb0\x7c\x5d\x60\xd3\x42\xfe\xbd\xe0\x4d\x96\xe0\xc7\x91\xe6\x22\xba\xf8\x5a\x0a\xbd\x9a\xc2\xaf\xa4\x32\x5f\xf0\x55\xf3\x4f\xa6\xce\x52\xb6\xd3\x34\x1c\xa5\x4c\x79\xb3\x53\x25\xbb\xc4\x7e\xca\x2a\x53\x91\xaf\xe0\x9b\xae\x2a\x51\x4d\x47\xa9\x39\x01\xe3\x1f\x53\x35\xfe\xe1\xdc\x72\x52\x31\x34\x41\xf7\x39\xcc\x55\xe8\x4d\xd9\xa6\xc2\xb2\x60\x11\x6a\x55\x48\x5e\x11\x6f\xe4\xa6\x4a\xaf\x2f\xf9\xe6\x27\xb6\x01\xfc\xd7\x67\xb7\xf1\x6a\x45\xcc\x6e\xed\x54\x47\x13\x97\x88\xce\x29\xa2\x6b\x18\xb2\x79\x23\xa0\xd2\x69\x74\x12\x1f\xcf\x4e\xfa\x70\xe3\xd1\x30\xdc\x88\x29\xfe\x76\x48\xdd\x87\xdf\x3b\x8c\xf2\x75\xbb\xb2\xb0\x6b\x88\x45\x75\x87\x68\x35\xdb\x70\x57\xf9\x75\x83\x38\xf8\xab\x43\xf8\x
b5\x41\xf4\x3a\x7f\x3d\x15\x4e\x21\x6f\x6f\x2d\xa2\x6f\x9e\xea\x0b\xb2\x40\xb1\x84\xb6\x29\xb0\x4f\x3c\xe0\xad\xca\xbc\x4a\x0d\xaf\xda\x1a\xc1\x7d\x4d\x77\x85\x6d\xf7\xfd\xf0\x26\xdd\x33\xc9\x93\xee\x28\x3e\xdc\x64\xd2\x45\x9b\xb8\xd1\x77\x03\x42\xfd\xc0\xc1\x03\x1c\x3c\xc0\xc1\x03\x1c\x3c\xc0\xc1\x03\x1c\x3c\xc0\xc1\x03\x1c\x3c\xc0\xc1\x03\x1c\x3c\xc0\xc1\x03\x1c\x3c\x7a\xd1\xc1\x03\xbc\x34\xc0\x4b\x03\xbc\x34\xfa\xd8\x4b\xe3\x5b\x43\xe8\x41\xef\x80\xd3\x7a\xef\x8c\xb7\xe3\x2f\x0f\xe1\xaf\xa4\x83\x23\x4c\x1f\xe2\x3e\x17\xd1\x0d\xa6\x6f\x10\xcb\xdd\xcb\xae\xd6\x9d\x15\xda\x66\x1f\x8a\x37\xa3\x13\x02\x8a\x1d\x42\x07\x38\x14\xcb\xa3\x51\x94\x6b\xca\xa6\x59\xbd\xf3\x2b\x85\xbc\x5f\x9d\x5b\x30\x59\x72\xbf\x89\xf3\xc9\xe4\x6c\x18\xef\xa9\x3f\xba\x33\x78\x7d\xc4\x53\xe2\x63\x3b\x82\x96\x7d\xd4\xf3\x93\x68\xd6\xb8\x0f\xc8\x1b\x3a\xda\xbe\xc5\xd3\xe8\x24\x3a\x5e\x67\xef\x5b\x57\x03\x83\x85\x0f\x7c\x1d\x36\xe8\xeb\xf0\x83\x29\x54\x14\x0e\x0a\xc7\xd0\xd1\xc0\x41\xa1\x5d\x33\xfc\x16\x25\x47\x8b\x3e\x0d\xed\x14\x10\xb9\xbf\x7c\x7d\x20\x20\xb0\x50\xba\x15\x35\x24\x13\xee\x17\xbf\x75\x56\x24\x3c\x85\x2e\xa3\xf9\xb0\x48\xc8\x15\xd1\xe9\x0d\x98\x21\x66\x78\x5d\x2f\x71\xa0\xeb\x80\x98\x48\x12\x13\xdf\x48\xa3\xaf\xa7\xf1\xd7\xd2\x99\xdf\xf5\x1b\xe8\xb3\xe9\xa7\xc2\x0a\x8c\xc1\xf4\x68\xae\x84\x28\x8b\x64\x49\x18\x33\xfc\x9d\x43\x40\xcc\xe5\x66\x8d\x57\x31\x24\x06\x4c\x6a\x8e\x99\xa4\xac\xf2\x86\x90\x3a\x4c\x58\x5c\x08\x86\xe0\x77\x81\x1c\x7d\x46\xb5\x4a\x74\x26\x90\x2a\xab\x81\x95\x21\x20\xcb\x46\x65\x54\xaa\x36\x5c\xb9\x53\xca\x36\x5b\xd2\x2c\x62\x1b\x54\xf7\x55\xb4\x60\x75\xe3\x06\x1c\xaf\x35\x6a\x0e\xab\x64\x58\x2d\x54\xd9\x93\xde\x07\xc9\x97\x2c\x89\xdd\xb4\x57\x44\x5e\xd4\x53\x58\xd0\x63\xea\x98\xc5\xbc\x0a\xf3\xbc\x06\x31\x2a\x5b\xf1\x4b\x69\xf4\x5a\x1a\xbf\x9a\xce\xbc\xe2\x37\xf3\xc7\xd3\x33\x21\x03\xa5\x55\x21\xaa\x43\x7c\xf4\x31\x6f\x53\x4b\x2d\xf3\x0e\x10\x06\xb0\x88\xf5\xc3\xfb\x96\xc0\xc2\xc9\x7a\xa9\x90\x3f\x9c\x57\x16\x44\x87\x88\xd6\xb1\x98\xc6\xce\x76\x3d\x1e\x45\x23\x0a\xb5\xad\x65\xd5\xf4\xac\x31\x76\x8d\x8c\x2f\xa9\x15\x8f\x3a\x67\xc5\xd5\xac\xb2\x64\x98\x6a\xc5\x78\x9b\x87\xaf\x16\x09\x53\x09\xf9\x1e\x61\x5c\xa8\x82\x7a\x80\x34\x45\xe1\x43\x4e\xf0\x90\xe0\xbc\x79\xe5\x8c\xc1\x87\x73\xa8\xe2\xd4\x6e\xfc\xb2\x60\xbf\xe5\x0a\xcc\xcc\x27\x1b\x75\x97\xf3\xd9\x5d\xa2\x3e\x33\xde\x87\x44\xcc\x3a\xc5\xf7\x0c\xa2\x77\x0d\xe2\x77\x0c\x66\xbe\xe3\xdb\xf6\xbe\x32\x70\x55\x72\x20\x36\xa5\xd8\x86\xa7\xac\xda\x8b\x6a\x99\x28\x1a\xad\x54\x08\x97\xa1\xc1\xbc\x20\xf6\x12\xb5\xab\xac\x2d\x62\x6b\x7a\xa9\xee\xe5\xcd\x2b\xca\xc7\xb3\x37\x18\x85\x21\x93\x8d\x53\x9d\x68\x86\x1e\x00\x5d\xce\x06\x85\x87\x85\xd7\xba\x0e\xcf\x3e\xc1\xaf\x7a\xf4\x2f\x1f\x6a\x46\x8f\x1b\xf9\x9b\x1c\xdf\xa2\x17\x7d\x59\x5e\x99\xd2\xd8\x7e\x88\xaf\x9d\x61\xe1\x30\x24\xbe\x61\x48\x19\x93\x1d\x1f\x1d\x18\xce\x31\x65\xa8\xa8\x6a\x37\xd8\x3a\x61\xea\xec\x2e\x6e\x4a\xe3\x37\xd5\x35\x9c\x80\xb5\x72\xe8\x47\x0b\xf1\xbe\x60\xd1\x2f\xe9\x98\x32\x74\x96\xda\x24\x54\xac\xa2\xa9\x8e\xa6\xea\xec\xeb\x65\xfb\x08\xe3\x29\x2f\xcf\x11\xd2\xab\xa1\xc0\x25\xbf\x8c\x7c\xf6\x1e\xab\x7e\xdc\x84\xd7\xf4\xcf\xa7\xd0\x9c\x58\x76\xcf\xa0\x69\xbe\xec\x9e\x40\xc7\xd0\xd1\x0d\x2c\x1f\xc2\xb5\x0e\x5d\x66\xa5\x4d\xe0\xf3\x68\x16\xed\x10\x6d\x4b\xf4\x5b\x2d\x33\x69\x4f\xd0\xce\x35\xfd\xba\x12\xaf\x3f\xec\xc4\xdb\x45\x9b\xa3\xe2\x7f\xb9\x33\x58\xf5\x87\x1b\x3c\x17\x9b\xed\x0f\xee\xe3\x4e\x2d\x1d\x53\x05\xc0\x13\xb1\xdf\x14\x10\xf0\x44\x04\x4f\xc4\x4d\xf6\x44\xec\x38\xed\x69\xab\x64\x4f\xf4\x37\x3c\x84\x0e\xe0\x89\xec\x3e\xdf\x5b\xf0\xfe\xb0\xbf\xa1\x5f\x68\xe7\x23\x29\
xf1\x0f\x0f\xa3\x8c\xc7\xfc\x64\x42\x7f\xaa\x07\xd0\xef\xa5\x61\xfc\x2b\x83\xc1\x1a\xf4\x13\xad\x64\x7f\xba\x48\x75\xd2\x13\x59\x9f\x5e\x7f\x33\xb4\x0e\x52\xbd\x7d\x4b\x60\x3f\xa5\x78\x3a\x9b\x3c\x2f\x76\xe3\xc7\x1a\xe6\x05\xd5\x09\x1c\xe4\x09\x5e\x7e\xe0\xe5\x07\x5e\x7e\xe0\xe5\x07\x5e\x7e\xe0\xe5\x07\x5e\x7e\xe0\xe5\x07\x5e\x7e\xe0\xe5\x07\x5e\x7e\xe0\xe5\xb7\x99\x5e\x7e\x05\x34\x8e\xc7\xb2\x7b\x7d\xf2\xb0\x2b\x42\x1e\xa8\xde\x05\xe8\x00\x8e\x81\xe0\x18\x08\x8e\x81\x5b\xc8\x31\xf0\x2f\xd2\x02\x12\x3a\xe3\xba\xe1\x68\x94\xc9\xc9\x20\x9b\xfc\x38\xfe\x52\x1a\xbf\x96\x46\x77\xf9\xd7\xae\xad\x14\x32\x6f\x28\x13\x37\xba\x2c\x8b\xda\xe7\x32\x65\xe2\xce\x78\x77\x5e\x29\x4c\xcd\xcf\x7a\x9b\xc0\x36\x26\xcb\x68\x5b\xea\x7e\x0b\x3d\x2d\x90\xdc\x45\x34\xc7\x91\xdc\x59\x34\x83\x8a\x1b\x4b\xdd\xef\x7d\xe7\x9c\xe1\x24\x72\x39\xfc\xae\x11\x74\xa6\x79\x93\xaf\x79\xf4\x2a\x31\x75\x8b\x1a\xa6\xeb\x54\x0c\x8d\x38\xf8\xf3\xc3\xf8\xd7\x07\xeb\x7a\x67\xb5\xb5\x80\xed\x33\xb2\xa4\x05\x56\x52\x9b\x82\xb5\xf7\xf2\x07\x42\x23\x20\xc8\xa2\x12\x79\xdd\xd6\x08\xd8\xee\x3e\xb0\x6d\x25\xa4\xfa\xfa\xb3\xc9\xd0\x76\x12\x1f\x11\xa8\x36\xd2\xea\x71\x89\x74\xea\x07\x20\xb0\x5c\x60\xb9\xc0\x72\x81\xe5\x02\xcb\x05\x96\x0b\x2c\x17\x58\x2e\xb0\x5c\x60\xb9\xc0\x72\x81\xe5\x02\xcb\x85\xc3\x59\x01\xfd\x02\xfa\x05\xf4\x7b\x1b\xa1\xdf\x3f\xc9\xa1\x09\xc1\x21\x55\xb6\xad\x2b\x1b\xdc\x6b\x9f\xdd\x1d\xd0\x48\x09\x13\x8c\x50\xb0\xf8\xcb\x39\xfc\x63\x03\x08\xd7\x3d\x73\x6d\xa5\x90\x79\x38\x26\x6e\x7c\x6a\x7e\x76\x41\x14\x90\x7b\x94\x5d\x9e\x8a\x3e\xc6\x29\xb1\xbc\xa1\xcd\x41\xe4\xab\xe8\x39\x41\xf0\xae\xa2\xa7\x39\xc1\xbb\x84\x2e\xa0\xf3\xb1\x04\x8f\x95\x31\xa6\x96\xcb\x6c\x2c\xb8\xd4\x0e\x20\x5e\x7d\xc3\x08\xda\x2b\xeb\x7b\xab\x51\xe6\xcf\x27\x93\xbc\x13\xf8\x98\x24\x75\xf1\x5d\x14\x61\x7a\x82\xf9\x85\xea\x97\x79\x79\x67\x6c\x3f\x29\xf1\x51\xe8\xa1\xae\xca\xca\x3b\xba\xd6\x5b\x45\x15\x5d\x43\xcf\xd6\x05\x7e\xb4\xb7\xbb\x20\x16\x04\x62\xd6\x37\x18\xb3\xfe\xb5\x54\xf3\x78\xf3\x4e\x0b\x19\x4d\x04\xcb\x3f\x83\xde\x1a\x04\xcb\xb7\xfb\x25\x2d\x46\xbd\xb7\x26\xb0\xd6\x14\x49\x6b\x4b\xb3\xdc\x0f\xed\x8a\x15\x58\xf7\x79\x51\xf1\x66\x58\x48\x3d\x26\x7e\xed\x9e\x8c\x82\x18\x79\x88\x91\x87\x18\x79\x88\x91\x87\x18\x79\x88\x91\xef\xcf\x18\xf9\xc4\xb0\xf6\xce\x6f\x6a\x8a\x3f\x7a\x57\xac\x8e\x30\x92\x10\x43\x1f\x52\x1c\x44\x50\x6a\x57\xf4\x06\x08\xa8\xef\x37\x6d\x05\x02\xea\x21\xa0\x7e\x93\x03\xea\x57\x37\x6d\xb7\x9a\x1c\x7a\x2c\x82\xe4\x3b\xbf\x4c\x94\x0e\xa3\x83\x78\x7f\xb6\xe0\x1b\x50\x1e\x08\x3b\xc3\x07\x37\x76\x21\x0e\xff\xcf\x86\xd1\x29\xc1\x59\x85\x93\x5c\x80\x57\xc3\x47\xf2\x34\xf3\xf8\xe4\x8f\xe0\x4f\x0d\xe3\xcf\x0f\xa2\xd7\x89\x7f\xf9\x87\xf3\xbc\xd0\xa2\xaf\x27\x7b\xaa\x4d\x3e\x9e\x4f\xdc\xf4\xfd\x19\x1d\x79\x20\x4f\xc8\xcb\x93\xfd\x0c\xde\x9d\xb7\xe0\xdd\x79\x35\x79\x5e\x1c\xc0\x13\x72\x5e\x44\x86\x93\x9c\x02\xa2\x2e\x0d\xc7\xf5\x80\x5f\x27\xf8\x75\x82\x5f\x27\xf8\x75\x82\x5f\x27\xf8\x75\x82\x5f\x27\xf8\x75\x82\x5f\x27\xf8\x75\x82\x5f\x27\xf8\x75\x82\x5f\x27\xf8\x75\x82\x5f\x27\xf8\x75\x82\x5f\xe7\xed\xe3\xd7\xf9\xbe\x01\xf4\xb8\xf4\xeb\xac\xb1\x19\xef\xca\x62\xea\xb9\x23\xfe\x93\x34\xfe\xa3\x34\x7a\x20\x7a\x97\x0f\x17\x9b\x86\xf9\x3f\x5e\x26\xee\x54\xe4\x19\x09\x02\x21\xe0\x3f\x3e\xe0\xff\x07\x9e\x45\x43\xa2\x43\x1c\x97\xda\x6a\x99\xd4\xf7\x84\xe6\x18\xba\x6d\xb0\x69\x85\xff\xf4\x19\xfc\xbf\xed\x41\xaf\x97\x37\xfa\x9d\x31\x24\x36\x04\xb6\x54\x65\xbc\xb5\xce\xcb\xcb\x3a\xbd\x30\x3b\xc3\x0b\xc8\xbd\x91\xdd\xb8\x20\x9e\x96\xdd\xe2\x5f\xec\x71\x2a\x5b\x04\x46\x09\x8c\xb2\x08\x8c\x12\x18\x25\x30\x4a\x60\x94\x7d\xc3\x28\x8b\x3d\xc3\x28\xdb\x5e\x93
\x0d\x33\xca\x22\x30\x4a\x60\x94\xc0\x28\x81\x51\x02\xa3\xec\x3e\xa3\x2c\xf6\x35\xf0\x2b\x02\xf0\xeb\x1c\xf0\x2b\xf6\x3a\xf0\x2b\x6e\x41\xe0\x57\xd2\xd1\xac\x00\x5b\x45\x74\x9a\x83\xad\x49\x74\x04\x1d\x6a\x7a\x80\x92\x07\xa1\x24\x5b\xca\xfb\x7c\xa8\x15\x98\x75\xfd\x99\x64\x0f\xb5\xa3\xf8\xb0\xf4\x50\x8b\xf2\x2e\xe9\xa2\xe6\xbf\xaf\xd1\x4d\xed\xfa\x23\xf1\x4e\x72\xdb\xf1\x36\xee\x1f\x97\xfd\xf0\xce\x46\x36\x86\xc5\x82\xa4\xa8\x21\x0c\xf6\xb0\xf8\xad\xd3\x20\x4c\x70\xab\x27\xd1\x59\x34\x53\xe7\x63\x7f\x00\x4d\xac\xbf\x03\xc0\xbb\x1e\x42\x94\x37\x18\xa2\xfc\x37\x29\x74\x46\x08\x81\x93\xe8\x38\x17\x02\x87\xd0\x86\xc6\x20\x2a\x89\x80\xe3\x69\x34\x15\x04\x1c\x6f\xb4\xac\xf3\x22\x46\x69\x06\x15\x43\x31\x4a\x1b\x2d\x2c\x49\x32\x35\x97\x1d\x16\x75\x5c\xd4\xa2\xe4\x4a\x94\x51\xf1\xa2\x2d\xf7\xb5\xd1\x46\xc9\xf4\x88\x0c\x46\x0b\x45\xf6\xd1\xa5\x90\x94\x1a\x12\xd7\xeb\xa4\x94\x7f\x73\xa7\xe4\x55\x67\xe2\x98\x01\xdc\x02\xb8\x05\x70\x0b\xe0\xb6\x7f\xc0\x2d\x68\x6b\x09\xda\x5a\xef\x90\x6d\xc8\x21\xd1\x95\x1c\x12\x60\x40\x00\x03\x02\x18\x10\xc0\x80\x00\x06\x84\xbe\x36\x20\x40\x2a\x21\x48\x25\x04\xa9\x84\x3a\x95\x4a\x08\xec\x73\x60\x9f\xeb\x57\xfb\x5c\xa9\xdc\xe6\x2c\x5a\x49\x50\x39\x17\x0f\x95\xef\xc5\xf7\x88\xd9\x1c\x48\xda\x56\x01\xf3\x06\x4d\x63\xed\xcf\x4b\xf2\x8b\x7b\xd1\xe1\x84\xfc\xcf\xc2\x3d\x3d\x26\x09\xf4\x4b\x7b\xf1\xcf\x0d\xa0\x37\x34\xe6\xe1\x12\xe0\x3b\x21\x13\xf4\xe3\xb1\x99\xa0\xbd\x18\x82\xce\xa4\x83\xfe\x27\x22\x93\xea\x3e\x9e\x49\x95\x0d\x1c\x4e\xc1\x6f\x35\xf7\x8d\xb0\x51\xac\x23\x27\x34\x49\x1e\x23\x45\x7c\xba\xe5\xc4\x37\x62\x6c\x34\x64\xbf\x49\x48\x3d\x9d\xf9\xe2\xce\xe6\x7d\x97\x9c\x1d\x7a\xa8\x59\x76\xe8\x8e\xf6\x60\x71\x09\xe9\x68\xb1\xce\xcc\xda\x81\x2e\x04\xac\x07\x46\xd8\x0d\x1a\x61\xff\x34\xd5\x1d\x21\xb3\x2c\x4c\xb4\x2a\xba\x16\x98\x68\xfb\x54\x9c\x59\x35\x17\xe5\x7e\x72\x57\x73\x71\x16\x9f\x3b\x7a\x4f\x93\xdc\xd1\x9d\x95\x60\x90\x40\x1a\x12\x48\xdf\xbe\xf0\x1f\xa8\x0f\x50\x1f\xa0\x3e\x90\x40\xba\x07\xf4\x85\xc4\x3c\xd5\xc5\x9f\xbd\xab\xb9\xca\xb0\x8e\x54\xd2\x4f\xc4\xa7\x92\xee\x98\x1a\x01\xf9\xa4\xfb\x4d\x79\x81\x7c\xd2\x90\x4f\x7a\x93\xf3\x49\xff\x93\xe6\xf9\xa4\xbb\xb1\xdd\xed\xf2\xda\x91\x94\xbb\xba\x87\x32\x4b\xff\xf1\x30\x3a\x2a\x08\xee\x22\xcf\x21\xdd\x52\x46\x69\xcd\xa6\xe6\x75\xba\xe8\xe0\x1f\x1b\xc6\x3f\x33\x88\xee\xe6\x8f\xfa\x6b\x9f\xd3\x5a\x4a\xe9\x69\x9b\x9a\x25\xba\xd8\xa6\xa4\xd2\x7b\xf8\x03\x45\xf6\x9f\x86\x9c\xd2\xf2\x4d\x90\x55\xfa\x16\xb2\x4a\xbf\x29\x79\xe6\xe4\xf1\xa8\x9c\x39\x7c\x3c\x78\xf6\x08\xd1\xf8\x90\x4f\x1a\x72\xb5\x40\x3e\x69\x70\xf9\x07\x97\x7f\x70\xf9\xef\x6b\x97\x7f\xc8\x27\x0d\xf9\xa4\xc1\xd5\x1a\x5c\xad\xc1\xd5\x1a\x5c\xad\x7b\xc2\xd5\x1a\xf2\x49\x6f\x31\xf7\x55\xc8\x27\x0d\xf9\xa4\xb7\x56\x7a\x19\xfc\xd2\x08\x3a\x23\x28\xa3\xbd\xa8\x6a\xf9\xff\x8f\xbd\x7f\x8f\x6f\xe3\x3a\xef\xfc\xf1\x17\x48\x2a\x92\x1e\x2b\x6b\xeb\xc8\x8e\x6d\x48\xb2\x61\xc8\x26\x41\x88\x04\x09\xea\x4e\x5d\x09\x52\x94\x08\xd1\x32\x43\xda\x74\x9a\xd8\x51\x86\xc0\x08\x82\x0c\x61\x90\x19\x90\x8a\xda\xcd\xbe\xd2\x66\xb3\xc9\x6e\xf7\x92\x66\xb3\xe9\xb6\xdb\xa4\xd9\xb4\x75\xe3\xd6\x89\x93\x6e\xba\x6d\xe2\xb6\x69\xbb\x4d\xe2\xad\x5b\xff\x72\x69\xda\x74\xd3\xa4\x49\x9b\xfc\xb6\x6d\x7a\x4f\xb7\x49\xb3\xed\xe6\xfb\x9a\x73\xe6\x0a\xcc\x70\x00\x68\x00\x82\xc2\xe7\x9f\x44\xe6\xcc\x1c\xcc\x9c\x33\xf3\x9c\x73\xde\xcf\xe7\x79\x1e\x93\x41\xd5\x48\x45\xd7\x87\x8e\xaa\x52\x92\x35\xf6\xd1\x04\x7b\x71\x80\xee\xd4\x9b\x99\x72\xb6\x72\x69\x2d\x1d\xad\x34\x06\x1e\x17\x15\x7d\x7e\x0e\x85\x3a\x26\xf9\x05\x8b\xb5\x37\xb3\xec\x80\x8f\xfa\xaf\x6d\x0e\xf2\xe8\x4b\xe8\x43\x47\x92\x26\x79\x7c\x32\x18\x2
a\x4e\xb2\xa3\xce\x22\x8d\x06\x60\xf4\x7d\x8b\x0c\xe8\xa8\x77\x3a\x35\x04\x36\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x41\x21\x6f\x1d\x0a\xf9\xf7\x43\xb4\x7f\xdd\x22\x6a\x82\x3f\xe6\xb4\x62\x59\xc9\xcb\x1a\xfb\xe4\x10\xfb\xd8\x40\x7d\x4a\xd6\x46\xf5\x8d\x4b\x73\x17\x95\x7c\x58\xa4\xf1\x3e\x7e\x41\x5d\x32\x6a\xfd\x17\x36\x07\x5d\xec\x52\x5d\xe3\xab\x83\x11\xe4\x11\x76\xa8\xc1\x0c\xbe\x56\x06\x06\x7d\x58\x80\x16\x81\x16\x81\x16\x81\x16\x81\x16\x81\x16\x81\x16\x81\x16\x81\x16\x81\x16\x81\x16\x81\x16\x81\x16\x3b\x8d\x16\x01\xfe\x00\xfe\x00\xfe\x7a\x18\xfc\xfd\xe8\x93\x94\xd4\x87\x6e\x6c\x2d\xed\xa7\x2e\xe4\xbd\xa7\xf2\x7b\x67\xdf\x78\x82\xfd\xbf\x87\x68\x6b\x4e\x51\xe5\x4b\x6b\xe9\x68\x42\x2c\x0c\x55\x63\x4a\x33\x6d\x9e\x72\x39\xf6\x54\xb1\x9c\x8f\xcd\xeb\x57\x2e\xea\x57\x26\xef\xd3\xcf\x9c\x56\x54\xd9\x29\x00\xb4\x8f\x77\x39\xa4\xcb\x80\x57\x81\x57\xa1\x06\x17\x78\x15\x78\x15\x78\x55\xef\xf0\xaa\x2e\x2a\x31\xd5\x35\xbc\x0a\xb5\x8f\xc0\xab\xc0\xab\xc0\xab\xc0\xab\x36\xa2\xf6\x51\x4f\xe3\x25\x14\x67\xe9\xe5\xe2\x2c\x99\x4d\x88\x97\xb2\x4f\x52\x46\x88\x9b\x8e\xd3\x31\x2e\x6e\x3a\x40\x69\x1a\xf3\x2d\x4f\x9e\x53\x54\x39\xb5\x96\x4e\xd9\x60\x68\xbe\xa8\x05\xcb\x99\xfc\x6b\x93\x0b\x25\xd3\x7c\xb0\x92\x69\x98\x0d\x19\xc2\xa5\x78\xdc\x10\x2b\xd9\x37\xe1\xcc\xd0\x46\xf1\xff\xbd\xcd\x06\x60\xbb\xc4\x8c\x13\x93\x9c\xac\xcb\x98\x86\x3a\x40\xbb\x04\x9c\xe2\x82\xb1\x9a\xac\xb1\xa3\xb4\xbf\x89\x5e\x46\xa2\x58\x54\xe5\x68\xb1\x2a\xc7\xd7\x22\x74\x4a\x7c\xe2\x47\xe8\x10\xff\xc4\xc7\xa8\xb9\x97\x8f\xa6\x45\xc1\x8d\x13\x34\x69\x17\xdc\x68\xba\x91\x19\x91\x6f\xfb\x24\x1d\x77\xe4\xdb\x6e\xba\x95\xd6\x2d\x4d\x45\x09\xdd\xd2\x24\x7f\x73\xc4\xb6\x34\xf7\x1b\x99\xd3\x1d\x69\xe8\x95\xcb\x4e\xab\x93\x14\x27\x08\xab\x33\x6d\x9d\xd5\x01\xfb\xd3\x9e\xc2\x1b\xa0\xad\xa0\xad\xa0\xad\xa0\xad\xbd\x43\x5b\xb1\x08\x0b\x58\x84\x75\x0f\x8e\x46\xd1\xa3\x8e\x14\x3d\x02\xf5\x07\xf5\x07\xf5\x07\xf5\x07\xf5\xef\x69\xea\x8f\xda\x77\xa8\x7d\x87\xda\x77\xed\xaa\x7d\x07\xa7\x1a\x9c\x6a\xbd\xea\x54\xcb\x16\xfc\x73\x7b\x86\x5a\x0f\xd2\x04\xc8\xa1\x02\xe2\xab\x49\x6f\x1c\xbd\x8b\xed\x14\xb6\xc1\xb6\xdb\xb4\x09\xb3\xc8\xb0\xb7\x3d\x49\x29\x77\x05\xae\xc0\xb2\x5b\x9f\x7f\x82\x7d\xfb\x21\xda\x66\x96\xdd\x8a\x3e\xb4\xbe\x42\xdd\x28\xc1\x94\xdc\xa3\x9f\x66\x94\xc8\xaa\xab\x8e\x05\x71\x3a\xc4\xe9\x10\xa7\xc3\x5d\x02\x77\x09\xdc\x25\x70\x97\x74\x8d\xbb\xa4\x7b\xbc\x01\xc0\xd4\xc0\xd4\xc0\xd4\xc0\xd4\xc0\xd4\x3d\x8d\xa9\xc1\xd1\xc0\xd1\x7a\x94\xa3\x6d\x4a\x71\xfa\x6b\xe8\x8c\x40\x7c\xc7\xe8\x08\x47\x7c\x69\x1a\xa3\x51\x5f\xb9\x28\x67\x4a\xa9\xb5\x74\xca\x51\x34\xbd\xb3\xd2\xf4\x80\xe2\xe1\x14\xff\xe2\x36\x07\xfa\xba\xc3\xd2\xa6\x9b\x94\xeb\x3e\xf1\x97\xb6\x73\x2e\x81\xa5\x4e\xd3\x49\x3a\x5e\x23\x4b\xdf\x4f\xc3\x0d\xf7\x2f\xf4\x50\x10\xa5\xb7\x28\x4a\xff\x72\x84\xa6\x84\xa8\x7c\x92\x8e\xda\xa2\xf2\x51\x6a\xe6\xfd\x13\xa1\x2b\x13\x3c\x74\xc5\x96\x94\x37\xd9\x86\xaf\x77\xe1\x84\x30\x3d\x87\xe8\x00\x37\x3d\xcd\x35\xdb\x16\x39\x7a\x90\x75\x49\x7e\x72\xc4\x61\x5d\xf6\x78\xea\xd1\x4d\x4b\x93\x10\x47\x0d\x4b\xe3\xa5\x46\x6f\x8f\xcd\x81\x14\x1d\x6c\x15\x6c\x15\x6c\x15\x6c\x15\x52\x74\x48\xd1\x21\x45\x87\x14\x1d\x8c\x1f\x8c\x1f\x8c\x1f\x8c\x1f\x8c\x1f\x52\x74\x48\xd1\x21\x45\x87\x14\x1d\x2e\x34\xb8\xd0\xba\xd0\x85\x96\x2d\x84\xab\x38\x0f\x74\x4d\x35\x21\x1e\x6f\x10\x24\x3b\x69\xf1\x3a\x50\x79\x33\x4a\xd1\x3f\xbb\x9b\xce\x0a\x29\x7a\x4e\x56\x0d\x8f\xa3\xac\xd9\x55\x12\x5d\x7f\x2e\x16\xca\xc5\x72\xc1\x5c\x9e\x09\xa1\xfa\x1b\xc7\x34\x3e\x2c\xec\x6d\xbb\xd9\xa7\xfb\xe9\x76\xe7\x
f9\x97\xd6\xd2\xd1\xc3\xaa\x2c\xe5\x63\xe2\x1c\xf3\xed\xb2\x17\xde\xd3\xf6\xd9\x4b\xa2\xf5\x45\xd1\x7a\x32\xad\x5f\xe7\x38\xac\x2d\xa7\x7d\x4f\x16\x2f\x46\xb8\xc8\x3d\x5b\xa1\xc7\xc4\x6b\x7b\x91\xe6\xf9\x6b\x3b\x4b\x33\x94\xf1\x4f\xba\xe3\xec\xbe\xb5\x74\xca\xf7\x66\x03\xdf\xdf\xbd\xde\xef\xef\xcb\xd8\x40\x41\xae\xd2\xd5\x52\xf0\x2b\x3b\xc7\xce\x19\xaf\xa9\xc7\xa0\x9a\x2f\xad\xef\xfd\x39\xdf\xf6\xe8\xa7\xb6\xd7\x0f\xe9\x31\x55\xae\x94\xa4\x9c\xdc\xc2\xa8\x1e\x34\x2e\xdd\xc0\x81\xcd\xbc\x8a\x96\xe9\xd1\x1a\xff\x6d\x28\x23\x0b\xba\x08\xc7\x6e\x8b\x8e\xdd\xdf\x8a\xd0\xab\x85\x63\x77\x89\x5e\x69\x3b\x76\xdb\x6d\x72\xda\x64\xe2\x82\x2d\x59\x65\xb5\xd3\x96\x2c\xf3\xff\xdb\x51\x6f\xc9\x32\x15\x49\xad\x16\xb9\x83\x4d\x6c\xcd\x5b\x30\x69\x13\x15\x7d\x29\xb0\x41\x06\x6d\x77\x6d\x43\xa3\xfc\x6e\xf6\xeb\xff\x9c\xdc\xe3\x3c\x78\x4d\x56\x0b\xb2\xf3\xe8\xa0\xf3\xa8\xbe\xe1\xa8\xca\x85\x62\x6e\xb4\xee\x3c\x57\x2b\xfa\xbf\x6f\x18\x47\xf5\xbb\xcc\xe8\x6f\x4e\xb6\xc6\x98\x4e\xd2\xd1\x16\x56\x77\x0b\x3c\x06\x08\x26\x34\xc0\x84\x3e\xd7\x4f\xcf\xf6\xb3\x67\xfa\xa3\x4f\x5b\x50\xe1\xcd\xfd\xbd\x63\x42\x6b\x90\xaa\xde\xcf\xdc\x55\xcd\xb1\xbf\xfe\x72\xda\xc1\x80\x09\x9f\x17\x77\x98\xb3\x11\xa1\x6c\x90\x4a\xfc\xca\xb2\x52\x1e\x15\x57\xf3\x33\xb8\x5f\x46\x8b\x25\xb2\x9a\x52\x5e\x10\x51\x71\x0f\xeb\x9f\x85\xf1\xef\x25\xf3\x63\xb1\xff\x38\xec\x6f\xdb\x33\x2f\x45\xe8\xc5\x08\x7b\x21\x12\xfd\x84\xb5\x41\x7b\x7f\x64\x56\xd1\xb7\xf3\x45\x2d\x56\x50\x78\xb7\x2b\xb1\xf8\x65\xfd\x4f\xf1\xd8\x94\xeb\x29\xf8\xce\x56\x78\x77\x56\x35\xdb\x47\x3d\x2a\xe5\xf8\x83\x73\xcf\x7f\xa9\x98\x33\xb0\x8d\x5c\xca\x6b\x31\xe5\xba\xd1\xa7\xc2\x33\x5e\x91\x95\x4a\x49\x4e\xc5\xc4\x2f\xf2\x30\x46\x73\x2c\xf9\x0e\xd3\xb3\x03\xac\x9f\x8f\x6f\xe1\xb7\xe5\xd2\x17\x56\x36\x6a\x3e\xb9\xdf\x7b\x3e\xd9\xc6\x5e\xc6\xef\xba\xc3\x33\x4a\x76\x96\x66\x58\x26\x7e\xc6\xda\xee\x3d\xc4\x3f\x3a\xc3\x02\xf8\xb6\x11\x1f\xd0\x4f\x6b\xeb\xa6\xee\xdd\x7b\x28\xbb\xde\xa6\x4e\x94\xbe\x6f\x78\x67\xf7\xd2\x6e\xf6\xb5\x7e\xba\xb3\x66\xf2\x14\x95\xf1\x5b\xdd\xde\x1d\xae\xdf\xde\x89\x02\xf7\x1d\xdd\xe3\xad\x8a\x55\xd7\x38\x5f\x75\xe9\x2f\xec\x05\x9a\xa3\x73\x8d\xbe\xb0\xfc\x86\xdb\xb7\xd1\x7b\x7d\xf0\xcb\x7c\x91\xcd\x87\xf3\x32\x8b\x12\xf7\xd1\xaf\x6f\xf7\x19\xe6\x9b\xd8\xf2\x1d\xf3\xdc\xf2\x75\x7e\xb0\x33\x4f\xd2\x6b\xe8\x7b\x6a\x96\x2a\xe1\x8d\x36\x56\x2e\xd8\xfc\xb5\xb8\xf9\xfb\xdd\x48\x5b\xcd\xd0\x25\xb1\xb1\x7c\x15\x2d\xdb\x1b\xcb\x2e\xb1\x73\x7c\x1b\xd8\x98\x9d\x6b\xda\x98\xad\x63\x18\x33\x7f\xbc\xc3\xc7\xce\x85\xb1\x21\x3c\xe2\xb1\x21\xec\xac\xb9\xc3\xae\xb0\xd7\x6c\x2b\x76\x85\xd8\x15\x6e\xf0\xae\x70\x63\xd7\xd2\x81\x5b\xc3\xc6\x66\x99\x66\x66\x91\xa0\x19\xa9\x7b\xf7\x87\x5f\x4c\xd0\x11\xb1\x3f\x94\x2a\x15\x6d\x6c\x2d\x2d\x7c\xc3\x7e\x49\xa8\xf4\x29\x50\xbe\xbc\x5a\xd2\xe4\xaa\xc6\x7e\x3c\xc1\x3e\x3c\x40\x5b\xf5\x0b\x2f\xad\xa5\xa3\xd7\x0d\x0d\x5b\x39\x5f\x5c\x2b\xe6\x57\xa5\x92\x33\x24\x52\xb2\xf4\xa2\x4b\x46\x13\x4b\x72\x35\xe5\x50\x05\x4d\x5a\x7a\xa2\x21\xde\xce\x90\x1d\xaf\xe1\x12\xe0\x2b\x15\xd9\xd2\xfc\x6a\x55\x59\xca\xa7\x92\x0f\xf2\x0b\xa6\x2a\x15\xcd\x19\xf1\xe7\xf8\x9d\xf9\xa2\x56\xed\xf2\x0c\x57\xd9\xa7\x68\x41\x7c\x33\x73\x74\x8e\x7f\x33\xbc\xe4\x4d\x0b\xd3\x24\x8f\x48\x3d\xbb\x26\x97\x83\xbf\x94\x7d\xde\x5f\xca\x0e\x46\xfc\xc6\x44\xf8\xe6\x23\xc1\x5f\xcb\x08\x4b\x1a\x1f\x87\xfe\x2e\x18\x5f\x83\xa3\xff\xdd\xac\x04\xb9\xbc\x90\xcb\x2b\x8b\x78\x33\xc4\x9b\x21\xde\x0c\xf1\x66\x3d\x13\x6f\x96\xed\x9a\x70\xaa\xd0\xef\xa4\xe5\x38\x9f\x2c\xe2\x7c\x10\xe7\x83\x38\x1f\xc4\xf9\x20\xce\xa7\xf3\x71\x3e\x9b\x50\xa2\x9c\xed\xe9\xd8\x89\x2c\x62\x27\xda\x17\x3b\x91\xed\xfa\
xd8\x89\x4d\x98\x7e\x8c\x7d\x64\x8c\x98\x3e\x74\xee\xb4\xf6\xec\xdd\x63\xec\xaf\x1f\xb2\xeb\xbe\x0e\xad\x9f\xc0\xde\x42\x7a\xc9\x5d\xfa\x89\x35\x35\xa7\xbb\x9c\xeb\x21\x73\x3d\x68\x17\x32\xd7\x83\x76\x81\x76\x81\x76\xf5\x10\xed\xea\xa2\xe4\x41\x5d\x43\xbb\x90\xd5\x06\xb4\x0b\xb4\x0b\xb4\x0b\xb4\x0b\x99\xeb\x91\x76\xe3\xd6\x41\x47\x5d\x9f\x76\x63\x53\x66\xae\x7f\x42\xa4\xb7\x1e\xe7\xe9\xad\xfb\x1e\xb9\xc0\x26\x68\x9c\x52\xfe\x1a\x42\x45\x95\x53\x6b\xe9\x94\xc5\x85\x1a\x4a\x5d\x7f\x21\x58\xdb\x94\x60\x83\xb5\xa5\x2a\xad\xdf\x70\x57\xaa\x0c\xc8\x83\x1f\xff\xdd\x6d\x36\xf2\x62\x56\xe2\x7a\x9b\x6e\xdd\x25\xfe\xd6\x2e\xbe\x25\x70\xd4\x19\x3a\x45\x27\x6a\xe4\xd8\x23\x94\x6c\xbc\x5f\x21\xc0\x46\x70\x4b\x8b\xc1\x2d\x5f\x8d\xd0\x49\xf1\x4d\x1f\xa6\x83\xfc\x9b\x4e\x51\x53\xef\x9e\x48\x57\x9f\xe6\xe9\xea\xad\xf8\x95\x66\xdb\x98\x16\x29\xef\x4f\xd0\xa4\x23\xe5\x7d\xb3\x8d\x74\x89\x61\xe1\x29\xf0\xc3\x17\x05\x7f\x7a\x88\x5e\x61\x62\x7b\x21\x07\xd6\xd7\x36\xc5\x9c\xac\xb1\x9f\x1f\x62\xcf\x0c\xd8\x76\x4c\x6b\x50\xf3\x2b\x2e\x0f\x49\xef\x3b\xc8\x2f\x10\x76\xd2\x68\x59\xb7\xf6\xb3\x8a\x3a\x55\x2a\x59\x7d\x18\x5e\x10\xcd\x2d\xa4\xf8\x9d\x0b\x7e\x2f\x07\xd9\x83\xb5\xef\xa5\xd1\xc9\xee\xb7\xb2\x11\xf1\x30\xb4\xbe\xf0\x7e\x40\xeb\x0b\xef\x07\xbc\x1f\xf0\x7e\xf4\x90\xf7\x03\x5a\x5f\x68\x7d\xe1\xfd\x80\xf7\x03\xde\x0f\x78\x3f\xba\xc2\xfb\x01\x25\x2e\x94\xb8\x9b\xc5\x9d\x02\x25\x6e\x3b\x94\xb8\xbf\xbe\x83\x96\x8c\x38\x7f\x03\x0a\xf1\x56\x6a\x13\xc1\xf9\x84\xfd\x97\x94\x9c\x54\xd2\x56\xb9\xb5\x92\x72\x39\x59\xd3\xf4\x1d\x85\x7c\x5d\x63\x7f\x76\x5b\xfc\x3b\x5b\xe8\x2e\x57\xa3\x56\x0a\x9d\x07\x2c\x2f\xc7\xbc\xde\xc0\x92\x68\x60\x8a\x37\xb0\xc8\x1b\x48\x9e\x10\xa7\x4c\x39\xaf\x37\x72\xe4\xd8\xa1\xfc\x7e\x57\x6f\x70\x56\x30\x77\x4f\x9a\x89\x2b\xfc\x6e\x36\xfb\x8b\x7d\xf4\x3a\xc1\x9e\xbf\x87\x1e\x77\xb0\xe7\xf5\x93\x63\x34\xf7\x23\xfe\xe9\x18\x9b\xca\xc8\xd1\xe4\x8f\x36\x9f\x56\xaa\xb9\x1f\x30\xe9\x65\x53\x69\xf0\xbc\xde\x73\x83\x67\xfa\x3e\x48\x5d\xce\x8f\x40\xf6\x0e\x6f\xd8\xfa\xde\xb0\x2c\xbc\x61\x3e\xde\xb0\xcd\x17\x7e\xc5\x7e\x61\x94\x1e\x16\x93\xc8\xe5\x92\x72\x5d\xef\x2e\x55\x29\xa5\x2c\x42\x5c\x3b\x99\x54\xd4\xa2\xa2\x16\xab\x37\x4a\xf2\x9a\x5c\x72\xb1\x43\x33\xad\x28\xfb\xfa\x08\xfb\xeb\x7e\xda\xe3\x68\x6e\xca\x6c\xcd\x9a\x45\x46\x79\x5e\x51\x37\xee\x5c\x30\x9a\x9e\xd7\x9b\x9e\x76\x36\x9d\x3c\xa6\x9f\x3e\xeb\xd1\xa0\x31\xad\xf8\x5f\xba\xb1\x19\x45\x9d\x5d\x6a\x1a\x44\xff\x7b\x0d\xf4\xf4\x7c\x5f\xb0\xad\x7c\x15\x5b\x36\x6c\xe5\x7a\xc3\x69\xd8\xcc\x75\x6e\xa5\xde\x6a\xae\x9f\xce\x34\xfa\x37\xdb\x03\x86\x7c\xdc\xcc\x31\xda\xf0\xa8\x9f\x30\xae\xe8\x82\x81\x6f\x7a\x1d\xd1\xdc\xc8\x43\x80\x01\x01\x46\xeb\xd9\x45\x9b\x5f\xaa\x85\x64\x97\xda\x69\x0c\x1b\xcc\x2e\xba\x91\x26\x31\xf9\x57\x77\x04\xd8\xbc\x7d\x46\xc1\x3a\x69\x3d\x33\x77\x5c\x9c\xd4\x0d\x56\xee\x51\x5a\xa4\x05\xa7\x95\x4b\x66\xe8\x4c\x0b\xea\x85\x19\xfe\x44\x8f\x70\x0f\xb0\x06\xe3\x16\x64\xdc\x50\x0b\xbb\x23\xb5\xb0\x51\x04\x15\x45\x50\x51\x04\xb5\x5d\x45\x50\xb3\xcf\x47\x42\xae\x04\xb9\x28\xc8\x9a\xbe\xaa\xb0\xc9\x5a\x9b\xab\x4b\xc6\xbc\x97\x1b\xdb\xd9\x56\xd1\x4d\x1b\xbb\xe2\xc8\xfc\xd5\x8e\x80\x15\xc7\xb1\xba\x0c\xe7\x0d\x6f\xb7\x26\x79\x2a\xdd\x0d\x5e\x86\x20\xb7\x79\xaf\x2d\x7e\x90\xdb\x1c\xb9\xcd\x37\x57\x6e\xf3\x90\xa9\x5e\x60\x6e\xf3\x8d\x9c\x71\xb2\xe7\xe8\x2c\x9b\x8e\x4f\x59\x48\x7b\xd0\x99\xe5\xdc\xbf\xa5\x0e\xa4\x39\xff\xc1\x27\x69\xb8\x3e\x11\x91\xcb\xc7\x29\x00\xf5\x35\xa9\xa2\xb1\x2f\x3d\xc1\xbe\xd3\x78\x7e\x22\xf1\x2c\x0f\x4b\x95\xe4\x5e\x8f\xfc\x44\x79\xeb\x70\x97\xc7\x23\x20\x53\x11\xb4\xfa\xc8\x54\x04\xad\x3e\xb4\xfa\xd0\xea\xf7\x90\x56\x1f
\x99\x8a\x90\xa9\x08\x5a\x7d\x68\xf5\xa1\xd5\x87\x56\xbf\x2b\xb4\xfa\xc8\x54\x04\x69\x3d\x32\x15\x6d\x1e\x69\x7d\xab\x99\x8a\x2c\x2e\xd4\x50\xa6\xa2\x80\xe4\x42\x2d\x26\x1c\xb1\xee\xc1\x95\xda\x21\xfe\x15\xcf\x4c\x45\x36\xe7\xba\xdf\x33\x53\x51\x1b\x48\xd7\xcd\xe5\x2c\xb2\xee\x07\x8e\x15\x48\xe6\x3a\x9c\xb3\xc8\xfe\xb2\x5a\xcf\x59\x64\xb7\x71\x13\x39\x8b\xec\x46\xda\x93\xb3\xc8\xdb\x84\x04\xc5\x4d\x24\x3f\x35\x62\x9b\x98\xfb\x0c\x95\x82\x43\xf2\xa1\x5c\x76\x98\x9b\x61\x71\x5c\x98\x9b\x69\xeb\xa4\xf6\x1b\x9e\xf6\x48\xdc\x80\x58\x81\x58\x81\x58\x81\x58\x7b\x07\xb1\x62\xf9\x15\xb0\xfc\xea\x1e\x06\x0d\x79\x71\x47\xe4\xc5\x40\xfd\x40\xfd\x40\xfd\x40\xfd\x40\xfd\x3d\x8d\xfa\x11\x65\x82\x28\x13\x44\x99\xb4\x2b\xca\x04\x9e\x34\x78\xd2\x7a\xd5\x93\x96\x2d\x84\x1c\x60\x15\x44\x8f\x93\xde\xc0\x77\x17\xdb\x29\xbe\x66\xdb\xd2\x86\xeb\xac\xda\x8c\x79\x54\x7e\x69\x98\x1e\xb1\x92\x71\x29\x5a\x4e\x2a\x15\xcb\x85\xb1\xb5\x89\x15\xb9\x2a\x4d\x18\x19\xf7\x7d\x54\xea\xbc\xd7\x95\x72\x55\x2a\x55\x94\xbc\x79\xb5\xac\x6a\xec\x4f\x12\xec\x3b\x03\xb4\xcb\xd1\xe0\x25\xa3\xc1\xe8\x9b\x23\x8d\x65\xe9\x3f\x6f\xb5\xbd\xa0\xe4\xa7\xac\xb6\x43\xca\xda\x7f\x82\x5f\x30\x65\xdf\xdf\xb2\xb8\x3d\x1b\xe3\xfb\xfc\xfe\x7c\x51\xab\x76\xb9\x76\x7e\x43\x72\xf9\x37\x92\x80\xff\x6a\x21\xf8\x53\x9b\x61\x19\x3b\x65\x96\x39\x38\xc6\x37\xe7\x33\x24\x8e\x2f\x50\x0c\x22\x32\xfd\x23\x7a\x00\x99\xfe\xe1\xda\x82\x6b\x0b\xae\xad\x5e\x72\x6d\x21\xd3\x3f\x32\xfd\xc3\xa5\x00\x97\x02\x5c\x0a\x70\x29\x74\x85\x4b\x61\x13\xe2\x10\xd4\x12\x00\xa6\x45\x2d\x81\xcd\x13\xf0\xc0\x9e\x1e\xa6\x8b\x02\x5f\x6a\x55\x45\x95\x0a\x72\x6d\xe2\xe7\x75\x09\x66\x4e\xb7\xf2\xfc\xb2\x9c\x54\x91\x72\xc5\x6a\x51\xd6\xd8\xe7\x12\xec\xeb\x03\x74\xbb\x71\xc0\xca\x50\xf5\x2f\x1a\x03\x97\xd3\x4b\x73\x4b\xe2\xca\x69\xd1\xe4\x8d\x90\x98\xe5\x01\x7e\x81\xd1\x76\x5d\x59\x82\xfa\x9f\xdd\x1c\xa8\xd2\x37\x45\x6b\xe8\x0c\xd3\x44\x95\x72\x30\x85\xcc\xb0\x33\x06\x85\x74\xbf\x54\x26\xfc\xaf\xeb\x6b\x8f\xb4\xd3\x28\x49\x0a\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\x09\x50\xd9\x5b\xa0\xf2\xed\x49\x7a\xa5\x00\x95\xf9\xa2\x96\x53\x74\xb3\xda\x14\xaa\x94\xcb\xf9\x8a\x52\x2c\x57\xb5\x52\xd1\x3a\xf4\x46\xf6\xb1\x61\xf6\xf6\x2d\xb4\xd3\x6a\xd2\xa2\x95\x1f\x31\x74\x96\x4e\x46\x59\x36\x0d\xaa\x99\x2d\xf8\xac\xd1\xe6\x92\xde\x66\x28\x98\x72\xc4\x5e\x42\xf1\x65\xa2\x31\xe1\xea\xab\x30\x7b\x6d\x38\xe4\x5a\x8e\x3b\xda\x4e\x25\x47\xf8\xcf\xcd\x98\x8f\x53\x87\x39\x5d\x77\xdc\xfd\x74\xb3\xf3\x42\x4c\xff\xa4\xdc\xd7\x45\x52\xee\x95\x60\xfc\x79\x9a\x9d\x34\xf0\x67\xed\xab\x6a\x00\x50\xd7\x28\x78\xe4\xde\x06\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\x04\xd6\xec\x2c\xd6\x9c\xa4\xa3\xec\x70\xfc\xa0\x85\x35\xef\x75\xd6\x40\x72\x6d\xe2\xea\xcb\x1e\x01\x89\x02\x89\x02\x89\x02\x89\xb6\x11\x89\xfe\xe9\x30\xcd\x0a\x24\x2a\xbf\xa1\x2a\x97\xf9\x50\x35\x06\x43\x8b\xe5\x82\x2a\x6b\x9a\xcd\x41\xdf\x33\xcc\xbe\x33\x40\xcc\x6e\xc7\x02\xa1\x1f\x6c\x04\x84\xce\x89\xf6\xba\x02\x81\x8a\x9f\x3b\x6b\x3d\x49\x1d\x03\x35\x6e\x16\xf4\xb3\x05\xfa\xb9\x1c\x4c\x3f\x0f\xb0\xb4\x41\x3f\xed\xb7\xc9\xe0\x9e\x46\xcf\x83\x78\x82\x78\x82\x78\x82\x78\x82\x78\x82\x78\x82\x78\x82\x78\x82\x78\x82\x78\x82\x78\x82\x78\x82\x78\x6e\x38\xf1\x3c\x48\x13\x6c\x3c\x9e\xb2\xa8\xe5\x2e\x27\xf1\x34\xb6\x6f\x60\x9d\x60\x9d\x60\x9d\x60\x9d\x1d\x65\x9d\x6f\xdd\x4f\x27\x05\xeb\xbc\x5c\x52\xae\xeb\x93\x84\xaa\x94\x52\x16\x74\xa8\x95\x82\xea\x27\x69\xb9\x2b\xf2\x35\xc9\x42\x9c\x9f\x48\xb2\x0f\xf5\xd3\x1e\xc7\xe5\x53\xe6\xd5\x16\xec\xdc\xab\xc
a\x52\xbe\x66\xc7\x3c\x5b\x52\xae\x2f\xf1\xa6\x92\xc3\xfa\xe1\x59\x8f\x06\x0c\xc6\x68\x9f\x1a\x6e\xf1\xaa\xac\x4c\x59\xc1\x02\xa7\x69\x8a\xb3\xc0\xe3\x74\x8c\x8e\xf8\xd6\x0b\x73\x76\x91\xf1\x68\x29\xfb\xde\x02\x29\xe0\x5e\x6f\x0a\xf8\x32\x36\x50\x90\xab\x74\xf5\xa9\x60\x06\x78\x9e\xcd\xd6\x31\x3e\x03\x0a\xae\x37\x7c\x06\x26\x74\xdc\x6a\xf4\x87\xb6\x07\x0c\x59\x4c\x95\x2b\x25\x29\x27\xfb\x8f\xda\x88\x71\xc6\x06\x0c\x5c\xe6\x61\xba\x40\x73\x35\xe5\x0e\x5b\x1f\x39\x14\xdf\x41\xed\xc3\x16\x6b\x1f\xfe\xf7\x48\xa8\x26\xe4\x11\x51\x08\xf1\x3c\xcd\xda\x85\x10\x37\xc8\x26\x55\x56\x1b\xb6\x49\x2d\x99\xa0\x3a\x43\x96\xfc\xf8\x1d\x01\x36\x69\x97\x91\x44\x5f\x72\x9a\xa1\xfd\xe2\x8f\x1b\x61\x85\xda\x53\xfb\x10\xc6\x28\xc0\x18\xa1\xfe\x56\x47\xea\x6f\xa1\xf0\x0a\x0a\xaf\xa0\xf0\x4a\xbb\x0a\xaf\x64\x9f\x8f\x84\x5c\x7d\x62\x51\x14\x40\xbe\x40\x73\x8e\x02\xc8\x6d\xae\x68\x11\xf3\x5e\x3e\x6c\x67\x5b\x45\x37\x75\x76\x05\x91\x79\x6e\x47\xc0\x0a\x62\xb8\x22\xa9\xd5\x22\x77\xb5\x8a\x7d\xb4\xff\xf6\x26\x59\xd1\x37\xcb\x1d\x5e\x56\xec\xae\x6d\x68\x94\xdf\xc5\x7e\xfd\x9f\x93\x7b\x9c\x07\xaf\xc9\x6a\x41\x76\x1e\x1d\x74\x1e\xd5\xaa\xaa\x54\x95\x0b\xc5\xdc\x68\xdd\x79\xae\x56\xf4\x7f\xdf\x30\x8e\xea\x77\x99\xb9\x48\xf3\x94\xad\xd9\x58\x4d\xd2\xd1\x16\xde\xa0\x05\x2e\x48\xc0\x62\x26\x60\x31\xf3\x5c\x3f\x3d\xdb\xcf\x9e\xe9\x8f\x3e\x6d\xcd\x08\x6f\xee\xef\x9d\x9d\x55\x8d\x0f\x47\xef\x67\xae\x49\xe0\x7e\x46\xfd\xe5\xb4\x95\x49\x09\x9f\x17\x77\x98\x4f\x6c\x42\xc2\x22\x95\xf8\x95\xfa\x0a\x4e\x5c\xcd\xcf\xe0\xab\x29\x2d\x96\xc8\x6a\x4a\x79\x41\x48\x74\x1e\xd6\x3f\x0b\xe3\xdf\x4b\xe6\xc7\x62\xff\x71\xd8\x7f\xcb\x97\x79\x29\x42\x2f\x46\xd8\x0b\x91\xe8\x27\x2c\xcc\xf7\xfe\xc8\xac\xa2\xe6\xf8\x42\xaf\xa0\xf0\x6e\x57\x62\xf1\xcb\xfa\x9f\xe2\xb1\x29\xd7\x53\x70\x80\x2b\xd6\x64\xab\x9a\x2d\x46\x18\x95\x72\xfc\xc1\xb9\xc4\xa3\x54\xcc\x19\x73\xae\x5c\xca\x6b\x31\xe5\xba\xd1\xa7\x42\x02\x51\x91\x95\x4a\x49\x4e\xc5\xc4\x2f\x72\x4d\x95\x39\x96\x1c\xa4\x7a\x76\x80\xf5\xf3\xf1\x2d\xfc\xb6\x5c\xfa\xa0\xce\x52\x30\x7f\x2d\x5c\x45\x68\xe1\x1a\x9b\x31\x82\x67\x83\x46\xe6\x94\xec\x11\x3a\xc4\x0e\xc4\xd3\x96\x8b\xe5\x15\x4e\xc7\x8c\xdd\x7a\xbd\x6f\x26\xf4\x82\x47\xff\x7d\x98\xc6\x04\x89\x2d\x2b\xf9\xba\x74\xa1\xea\x6a\xb9\x5a\xbc\x26\xe7\x4a\x92\x53\x5e\xfa\x03\xc3\xec\x4d\xfd\xb4\x43\xbf\xc0\x9a\xe2\xee\xf7\x60\xad\x8b\xe2\xea\x69\xfd\xea\xe4\x6e\xfd\x84\x8b\x4a\xde\x4c\xd6\xe9\x3c\x18\x32\x5f\x7d\x1d\x9d\x15\x6f\xd6\x29\x3a\xc1\xdf\xac\xc3\x74\x90\x26\x7c\xdf\x2c\xfe\xe0\xe6\x2b\xe5\xbc\xad\x9b\x45\xab\x4f\x04\xbf\x52\xc7\xd8\x11\xe3\x85\x71\x74\xbf\xb1\xe6\x70\xdd\x4a\xdd\x7b\x16\xfd\xbd\x6d\x35\x43\x10\xf7\x66\xa7\xae\x51\xb8\xcf\x38\xa7\x23\x03\x91\x39\x4f\xb3\x34\x53\x33\xad\xb7\x34\x12\x98\xd0\x81\x4a\x5b\x44\xa5\xcf\x46\xc2\xb2\x06\x59\x41\x49\xf5\xe9\xca\xa2\xa4\x9d\xb7\x2c\x1c\x90\xb6\xd5\xb2\x24\xdf\x79\x47\x8d\x65\xb9\xcb\x22\xa0\x2e\x63\xb2\x57\xfc\xb9\x33\xb6\x04\xd4\x13\xd4\x13\xd4\x13\xd4\x13\xd4\x13\xd4\x13\xd4\xd3\x9b\x7a\xb6\x75\x59\x90\xf9\xee\x6d\x35\xcb\x82\xfd\x01\x58\xd3\xb5\x58\xd8\xc3\xf7\xd9\x6d\x5f\x2b\x00\x65\xf6\xda\x0a\x05\x28\x13\x28\x73\x83\x51\x66\xc7\x80\x53\x20\xc5\x6c\xeb\x0c\x90\x3d\x46\x47\xd8\xa1\xf8\x01\x0b\x5b\xde\xe3\xc4\x96\xce\x6b\x3b\x00\x2e\xbf\x9e\xa4\x73\x06\xb8\x94\xab\xd7\x15\x55\xff\x58\x6b\xf1\x65\xa3\xf1\xf2\x3f\x92\x64\xff\xbe\x9f\x98\xdd\x90\x35\xc5\x45\x3d\xb0\xa6\x21\x9c\x4f\x0e\x72\xa2\x69\x5d\xd3\xf6\xc8\x74\x01\x37\x25\x9a\x15\xef\xda\x69\x3a\xc9\xdf\xb5\x23\x74\x88\x0e\xf8\xbf\x6b\x76\xe7\x98\x6f\x9c\x19\xb7\x1d\xf4\xb2\x3d\x19\xfc\x2e\x4d\xb2\xa3\xe6\xbb\x54\x3b\x08\x41\x41\xe2\x01\x
\x88\xa0\x88\xa0\x88\xa0\x88\xa0\x88\xa0\x88\xa0\x88\xa0\x88\xa0\x88\xa0\x88\xa0\x88\xa0\x88\xb7\x10\x45\xfc\xc1\x7e\xda\x67\x04\xae\x57\x8a\xf2\x1b\xaa\x72\x99\x8f\x96\x1d\xb7\xbe\x22\x57\xa5\xf4\x18\xfb\xe3\x3e\xf6\x95\x3e\xba\xcb\x75\xd2\x25\xe3\xa8\x7f\x00\xfb\xbe\x82\x5c\x9d\x72\x5e\xb2\x2c\xae\xe8\xfa\x20\xf6\xf6\xc4\xaa\x07\x81\x3e\xf6\xf7\x7d\xb4\x47\x8c\x86\x40\x69\x75\xc3\xf0\xf9\x3e\xf6\xd9\x3e\xfa\x67\xe2\x68\x70\xff\xef\x29\xc8\x55\x8e\x18\xd1\xf1\xeb\x77\xfc\x3f\x24\xe8\xb0\xe8\xf8\xb2\x5c\xbd\xae\xa8\x4f\xe9\xdb\x22\x3b\x77\x83\xe0\xeb\xc5\x72\x41\x95\x35\x2d\x57\x92\x34\x4d\xb6\xd2\x07\xfc\x4a\x82\x7d\x6b\x80\x5e\x6e\x5f\x77\x69\x2d\x1d\xfd\x6f\x8d\xa4\x9b\x9d\x13\xed\x4d\xeb\xed\x75\x45\xde\xd9\xbd\xfc\xe7\x2e\x5a\x4f\xb2\x9c\x76\xde\x22\xc8\x7c\x33\xf2\x79\x91\x35\xf6\xea\x93\xc1\x54\x7e\x92\x1d\x35\x48\x7c\xdd\xbb\x67\x80\x79\xe7\x28\x40\xef\x0b\x52\x0f\x52\x0f\x52\x0f\x52\x0f\x52\x0f\x52\x0f\x52\x0f\x52\x0f\x52\x0f\x52\x0f\x52\x0f\x52\xbf\x91\xa4\xfe\x18\x1d\x61\x87\xe2\x07\x2c\x52\x7f\x8f\x33\xae\xd9\xb9\x7d\x6b\x7f\xc1\x6a\x20\x78\x20\x78\x20\xf8\x4d\x84\xe0\xdf\x99\xa4\x87\x05\x7b\x14\x79\x88\x2c\xda\x1b\x54\xeb\x2a\x5f\xd4\xd4\x55\xbe\xb3\x5f\x59\xcd\x17\xe4\xaa\x85\x24\x7f\x7d\x98\xbd\x63\x0b\xfd\x33\xd1\x9c\x45\x89\x3f\xd6\x60\x09\xac\x19\xab\xd9\x0c\x6f\xb6\x2b\xd0\xa4\xe8\x0c\x91\x66\xc9\x60\xd9\xae\xca\x58\xb5\x37\x0d\x58\x59\x0f\x2b\x5f\x1b\xcc\x22\x8f\xb3\x63\x06\x8b\x14\x2f\x8f\x5d\x2d\xab\xb6\x83\x9d\x1c\x92\x0f\x07\x4a\x68\x81\x56\x82\x56\x82\x56\x82\x56\x82\x56\x82\x56\x82\x56\x82\x56\x82\x56\x82\x56\x82\x56\x82\x56\x76\x1d\xad\x3c\x43\xa7\xd8\x89\xf8\xa4\x45\x2b\xef\xaf\x29\xa1\x55\xbb\xd7\xbb\x15\x32\x32\x02\x8b\x02\x8b\x02\x8b\x6e\x22\x2c\xfa\xa6\x61\x91\xdf\x40\x1b\xd3\xaa\x8a\x2a\x15\x64\x87\x1e\xd3\xf8\x4b\x8d\x12\xf3\xa5\x04\xfb\x76\x1f\x91\x71\xf0\xd2\x5a\x3a\x7a\xbf\x47\x29\xa7\x25\x71\x98\x7b\x63\x92\xf7\xea\x27\x18\x7f\x59\x4e\x3b\x0f\x85\x5c\xcc\xe9\xb5\x34\x2d\x78\xdf\x09\x9a\xe4\xbc\xef\x20\x4d\xd0\xb8\x6f\x1e\x5f\xf3\x91\x79\xaa\x76\xfb\xa6\x6e\xa2\x58\x0b\xaf\xe9\xf4\xea\x60\x02\x78\x84\x1d\x32\x08\xa0\xbb\xdb\xcd\x1c\x01\xce\xbb\x71\x4a\x11\xa3\x2f\x6e\x73\x75\x7d\xdc\xbb\xc6\x93\xab\xf7\xf7\x18\xe7\x74\x60\x00\x32\x5c\x49\x5c\x93\x3c\xbd\x85\x11\x40\xd2\x74\x14\x7b\x6a\xb1\xd8\xd3\xfb\x23\xe1\xd8\x80\xf3\xa2\xe6\xd3\x14\x9d\xb6\x6b\x3e\x75\xda\x9a\xf0\xd2\x4f\x6d\xb4\x26\xc9\x7f\xbc\xdd\x65\x4d\xee\xb2\x4a\x41\xb9\x0c\xc8\x6e\xf1\xe7\x4e\xd8\x0f\x14\x83\x42\x31\x28\x14\x83\x42\x31\x28\x14\x83\x42\x31\xa8\xa6\x8b\x41\x3d\x13\xd2\xd4\x3f\x27\x6a\x40\x65\xe8\x8c\xa3\x06\x54\x7b\xe6\xfe\x36\x4e\xee\x0d\x14\x82\xfa\xd6\x6d\xae\xe9\x7f\x7f\x40\x29\x28\xd7\xa2\x20\xca\x2b\x97\xb4\x79\x4d\x80\x62\x50\xbd\xb6\x12\x41\x31\x28\x14\x83\xda\xe0\x62\x50\x1d\x82\x48\x81\x35\xa1\xda\x38\x37\xac\xaf\xa4\x76\x5e\xd6\x7e\x25\x35\xfb\xa7\x27\x29\xe5\x83\x20\x85\x42\x73\x4d\x29\xad\x5e\x93\xa5\x6a\x55\xca\x5d\xb9\xa6\x2f\x0d\xd8\x6f\x3e\xc9\x7e\x7e\x90\x6e\xb7\xe7\x2e\xa1\xbd\x4c\x09\x5f\xba\x6a\x78\x01\xcd\x05\xaf\xa9\xba\x5c\xe6\xed\x4c\x59\xed\x24\xe3\xfa\xf9\xd6\x14\xc6\xdb\xa8\x3d\xa7\xcb\x15\x8e\x19\x88\xfd\x20\xf6\xcb\x40\xec\x07\xb1\x1f\xc4\x7e\x10\xfb\xf5\x8c\xd8\x2f\xd3\x35\x62\xbf\xd0\xef\xa4\x65\xb1\x5f\x06\x62\x3f\x88\xfd\x20\xf6\x83\xd8\x0f\x62\xbf\xce\x8b\xfd\x32\x3d\xad\x7b\xcb\x40\xf7\xd6\x3e\xdd\x5b\xa6\xdb\x75\x6f\x99\x4d\xa8\x7b\x6b\x32\x32\xd4\x01\xf9\x38\x25\x4a\xd5\x62\xa2\x46\xd2\x1f\x5e\xbd\xcf\x9b\xf8\x6d\x65\x5b\x44\x6d\x99\x5c\x30\xf0\x3b\xc3\x4e\xad\x07\xfc\x6a\x6f\xab\x3e\x7c\x34\xfe\xc2\xf6\x7a\x6e\x76\xaf\x98\x9e\x62\x52\x3d\x2
2\x7b\x50\x1c\xea\x10\x24\x13\x4c\xeb\x11\x7a\x98\x2e\xd4\x38\x5d\x8e\xd3\xb1\x96\x07\x07\x5e\x17\xa8\xc9\x5a\x54\x93\xfd\xfb\x3e\x9a\x17\x76\xe2\x2c\x4d\x73\x3b\x71\x92\x6e\xe6\x55\xa4\x57\x0a\x59\x59\x96\xce\xdb\xb2\xb2\x9b\x6c\x72\x51\xb8\xab\x2f\xd0\x9c\xc3\x5d\x7d\x93\x6d\x06\x99\xb2\x4e\x98\xaa\x75\xec\x65\x45\xd1\xaa\x94\xfc\xe6\x48\xbd\x29\x7b\xc8\x10\x27\x38\x94\x1e\xca\xe5\x7a\xb3\x96\xaa\x91\xb4\xf1\x8b\xa7\xad\x6b\xda\x6c\xe0\xda\xa3\x72\x03\x05\x06\x05\x06\x05\x06\x05\xee\x1d\x0a\x8c\x75\x5d\xc0\xba\xae\x7b\x30\x39\x14\xc6\x1d\x51\x18\xc3\x1b\x01\x6f\x04\xbc\x11\xf0\x46\xc0\x1b\xd1\xd3\xde\x08\x04\x9a\x20\xd0\x04\x81\x26\xed\x0a\x34\x81\xb3\x0f\xce\xbe\x5e\x75\xf6\x65\x0b\x4d\x01\xf1\x75\xe0\xe5\x52\x55\xaa\xae\x06\xcb\xe4\x3b\x42\x9a\x93\xde\xa4\x79\x17\xdb\x29\x4c\x86\x6d\xce\x29\x7c\xfd\xfb\x97\x86\xe9\x90\xfe\x62\x8d\xad\xa5\xfd\xd2\x10\xeb\xe6\xa0\x98\x93\xa5\x5c\x4e\xdf\x19\x58\xa9\x38\x7e\x62\x98\xfd\x9f\x3e\xda\x9a\x53\x54\x1e\xbf\xf5\x80\x57\x1e\x0e\x71\xe5\x94\xb8\x32\x19\xd7\x4f\x99\x56\x54\x79\xd9\x91\xd8\xd7\x7d\x4e\xc8\x29\x39\x9e\xa4\x8c\x78\x5f\x8e\xd3\x31\xfe\xbe\x1c\xa0\x34\x8d\xf9\x7a\x26\xf4\x67\xe1\x6f\x87\xeb\x9e\x6e\x36\x23\xc7\x42\xf0\x4b\x34\xca\xf6\x3b\x23\x24\x8c\x17\x2a\x1e\x37\x43\x29\xdc\xf7\x13\xfd\xe4\x36\xbb\xdf\x1f\xf4\x49\xc2\xe1\xee\xfa\x87\x8c\xb3\x3a\xd9\xfb\x19\xfd\x23\x9d\xaa\x71\xa5\x36\xdf\xfd\x00\x6d\x70\xa0\xb6\xe8\x40\x7d\x3a\x12\xca\xf7\x7f\x4e\xb8\x4d\xcf\xd0\x29\xdb\x6d\xda\x61\x43\xc2\x93\x71\x34\x66\x48\x3c\x4d\x86\xb7\x75\x49\x7e\xfb\x76\xdb\x90\xdc\x6d\xe7\xdf\x70\xdb\x8e\x07\xc5\x81\x8e\x9a\x0e\xa4\xe2\x40\x2a\x8e\x5b\x17\x94\x83\x90\x80\x90\x80\x90\xb4\x2d\x15\xc7\x4f\x87\x33\xed\x9f\x17\xd2\xa6\x29\x3a\xed\x90\x36\xb5\x65\xde\x6f\x6c\x62\x0f\xd8\x15\x34\x99\x7e\xe3\xef\x6e\xb3\xa7\xfe\xd1\xa0\xdc\x1b\xee\x05\xc1\x3e\x1e\x23\xde\xa9\xf5\x00\xd2\x70\xf4\xda\x2a\x04\x69\x38\x90\x86\x63\x83\xd3\x70\x74\x06\x1c\x85\x6f\xf7\x83\xf2\x7a\x64\x8f\xd3\x31\x76\x24\x7e\xc8\xca\xbd\x11\x75\xe5\xde\x70\xb5\x7c\x2b\xa4\x04\x67\xcf\x27\x68\xbf\xc9\x37\xd7\x2d\xb6\x26\x72\x52\xb0\x1f\x4c\xb0\x77\x0f\xd8\x33\xe3\xeb\x0d\xbf\x7a\x39\x5f\x5c\x2b\xe6\x57\xa5\x92\xab\xb4\x9a\xa5\x61\xe1\xc5\xb9\x42\x29\xa2\x96\x4a\xde\x77\xdd\x6b\x76\xe5\xbf\x30\x5f\xd4\xba\x3d\x27\xc8\x86\x54\x3d\xdb\xe7\xfd\xd6\xef\x60\xc4\x6f\x4c\x04\xb8\x9c\x0b\xfe\xd8\x1e\x64\xf1\xda\x8f\x4d\xdc\x80\x2b\x7d\x0d\xb2\x9e\x20\xeb\x09\x4a\x9c\x41\xef\x0e\xbd\x3b\xf4\xee\xbd\xa3\x77\x47\x89\x33\x94\x38\x83\xce\x18\x3a\x63\xe8\x8c\xa1\x33\xee\x0a\x9d\xf1\x26\xa4\x11\x28\x50\x06\xed\x26\x0a\x94\x6d\x9e\x44\x2d\xec\xc5\x24\x9d\x16\xd9\x81\xa5\x4a\x45\x5b\x47\x23\xa9\xcf\x04\xaa\xa2\xdb\x61\x7d\x5b\xc2\x87\xd4\xd4\x49\xbe\x35\xc9\xde\xda\x4f\x5b\xf5\x06\x2e\xad\xa5\xa3\x0f\x79\xe8\x24\xa7\xad\xab\x17\x8d\xab\x93\x83\xfa\x69\x53\x95\x8a\xe6\x84\x80\xf5\xe7\x85\xac\x97\x94\x68\x56\xc0\xbb\xd3\x74\x92\xc3\xbb\x23\x74\x88\x0e\xf8\x62\x6f\xfd\x99\x52\x6b\xe9\x54\xfd\x7d\x05\x22\xbb\xe5\x60\x1a\x77\x80\xa5\x0d\x1a\xa7\xff\x8e\xc1\xe3\x3c\x7e\xca\x05\xc0\xd7\xd7\x62\x46\xbf\xb2\xcd\x1e\x89\x84\xb7\x72\xd2\x63\x30\x86\x8d\x33\x3b\x3d\x1e\x99\x39\x3a\x47\x67\x6b\x5c\x8f\xad\x0d\x08\xbc\x8e\x50\x51\xb6\xa8\xa2\xfc\x60\x24\x34\xab\x70\x41\x28\x29\x67\x28\x63\x2b\x29\xdb\x66\x62\x02\xd4\x94\x8d\x59\xa0\x86\xac\x8e\xd3\x4c\x25\x7f\xf4\x0e\xdb\xc4\xec\xb6\x34\x95\x1e\x56\x25\x21\x0e\x76\xdc\xa8\x40\x5b\x09\x6d\x25\xb4\x95\xd0\x56\x42\x5b\x09\x6d\x65\xd3\xda\xca\xe7\x23\x21\x87\xe0\x35\x9f\x40\xee\xe6\xc3\xfa\x02\xd5\x91\xed\xda\x9e\x64\xfe\xd5\x0e\x7b\x71\x90\x0e\x50\x5d\x7a\x2c\x19\x86\xb8\x8a\x
a7\x93\x2b\x06\xa8\x2f\x7b\x6d\x9d\x02\xf5\x25\xd4\x97\x1b\xac\xbe\xec\x1c\x86\x0a\xac\x83\xd6\xa6\x89\x20\x7b\x9a\x4e\xb2\xe3\xf1\x63\x96\xf3\xe2\x3e\xa7\x0e\xb3\xfe\xe2\x5b\x42\x8b\xf9\xae\x21\x7a\x50\xd0\xd4\xb2\x5c\xbd\xae\xa8\xba\x3d\xb0\xcb\xad\x8d\x15\xcb\x05\x55\xb7\x4a\x1a\xfb\x8b\x41\xf6\xd5\x7e\x7a\xb9\x7d\x16\x07\xa7\xeb\xd7\x57\x9b\x13\x57\x27\x87\xf4\xd3\x2e\x5a\x57\x2e\xa7\x8d\x03\xb3\x8a\x3a\x55\x2a\x59\x93\x66\x78\x75\x42\xdb\xa4\xa3\x7c\x1d\x9d\x15\xdf\xc0\x29\x3a\xc1\xbf\x81\xc3\x74\x90\x26\x7c\xbf\x01\x47\x8f\xae\xa5\x53\xc6\x33\x87\x92\x18\xfc\x55\xc1\x5f\xc0\x21\x76\xc0\x03\x87\xd4\x0d\xb2\xf1\x69\x18\x37\x07\x21\x25\x84\x94\x10\x52\x42\x48\x09\x21\x25\x84\x94\x10\x52\x42\x48\x09\x21\x25\x84\x94\x10\x52\x42\x48\x09\x21\x65\xa7\x85\x94\x90\x39\x42\xe6\x08\x99\x63\x0f\xcb\x1c\xbf\x7e\x2f\x9d\x6a\x48\xe6\xa8\xca\x1c\x65\xe9\x9f\xae\x21\x6f\x1c\xd3\xb8\xcb\x8d\xbd\xef\x5e\xf6\xad\x3e\xdb\xb7\x35\xc4\x55\x8e\xe2\x98\xd9\xcb\xf6\x94\xbb\x28\x9a\x59\x92\xab\xc9\x07\xbd\x74\x8e\xf6\x71\xe1\xd0\x0b\x59\xe5\xf8\x3d\x74\x4a\xa0\xb5\x23\x74\x88\xa3\xb5\x31\x1a\xa5\xfd\x81\x78\xd9\xbe\xab\x9b\xcd\x08\x79\x31\x18\xa9\xed\x67\xc3\xf5\x50\xd9\x71\x07\x4e\xde\x16\xfd\x55\x87\xaa\x31\x69\xaa\x1a\x1b\xe8\xfc\x21\x1f\x5d\x63\x7b\xfb\x3f\xc3\x23\xc2\x6b\x5c\x7a\xcd\x0d\x00\xbc\x78\x50\x33\xb6\xa8\x66\x7c\x4f\xe4\xa6\xbf\xfe\x69\xa1\x62\x3c\x41\x93\xb6\x8a\x31\x74\x13\xd2\x98\x8d\xf0\x53\x21\xd6\xd9\x8b\x00\x35\x64\xe6\xef\x6f\xb3\x4d\xc8\x81\x3a\x61\x42\x03\xb6\xe4\x21\x4f\x69\x42\xfb\x2c\x09\x84\x09\xbd\x66\xd2\x20\x4c\x80\x30\x61\x83\x85\x09\xed\x5f\x39\x06\x0a\x12\x42\x5e\x3b\x66\x8f\xd0\x21\x76\x20\x9e\xb6\x74\x04\xaf\x70\x0a\x11\xec\x8b\x6e\x09\x01\xc2\x37\x87\xe9\xac\x7b\x9f\xb3\x6e\x4a\xa8\x75\x82\xba\x9e\x19\x66\xdf\x19\xb0\x67\xcc\x8f\x46\x8c\xed\xa6\x23\x39\x54\xd9\x04\x38\xa6\x42\xa1\x5e\xd5\x11\x4a\xc6\xa8\x11\x9b\xdb\x72\x36\x6d\x50\xbe\x62\x55\xbe\x66\xdb\xbe\x21\x97\x0f\xc0\xd1\x76\x2a\x29\x7e\xae\x83\x9a\xc2\xb6\x25\x9e\xf2\xfb\xb6\xc2\xcf\x48\x15\xfc\xb5\x5e\x6f\xaf\x7c\x08\xd2\x09\x48\x27\x20\x9d\x80\x74\x02\xd2\x09\x48\x27\x20\x9d\x80\x74\x02\xd2\x09\x48\x27\x20\x9d\x80\x74\x02\xd2\x89\xce\x4a\x27\x7a\x30\x8c\x03\xea\x0e\xa8\x3b\xa0\xee\xd8\x44\xea\x8e\xff\xff\x10\xed\x13\xd4\x53\xd0\x1f\x47\xc8\x95\xc0\x9f\x46\xea\xfb\x5f\x19\x62\x3f\x37\x40\xdb\xc5\x7f\x6d\x50\xf2\xfb\xc1\xeb\x16\x63\xd3\x96\xd3\x56\xd2\xfb\xcd\x16\xbc\xd5\xb5\x49\xf0\x17\x83\x79\xe4\x18\x1b\x35\x78\xa4\xeb\x75\x41\x3e\x7c\xb0\x48\xb0\x48\xb0\x48\xb0\x48\xb0\x48\xb0\x48\xb0\x48\xb0\x48\xb0\x48\xb0\x48\xb0\x48\xb0\xc8\x6e\x61\x91\x00\x7d\x00\x7d\x00\x7d\x3d\x0c\xfa\xbe\xd9\x47\xbb\x05\xe8\xd3\x72\x57\xe4\xfc\x6a\xc9\x9d\x5f\x89\x7d\xae\x8f\x7d\xba\x8f\x5e\x6e\x1f\xbc\xb4\x96\x8e\xde\x5d\x90\xab\xee\x59\x56\xdc\x7f\x72\x77\x41\xae\x2e\x59\xa7\x2e\xa7\xa7\x16\xe6\xcc\x4d\x5d\x88\x32\xfe\xba\x86\x26\xeb\x1a\x9a\x6c\xa4\xa1\x6c\xc5\x5f\xfe\xf7\x98\x60\x71\x17\x69\x9e\xb3\xb8\x59\x9a\xa1\x4c\x0b\x2c\xce\xd1\x01\xf3\x45\xcd\xd4\x00\xb2\x7f\xdb\x47\xf1\xb1\xd4\x75\xb9\x54\x1a\x7d\xaa\xac\x5c\x2f\x8f\x29\x15\xb9\x5c\xcc\x8f\xba\x36\xd2\x63\xec\xeb\x11\xf6\x47\x11\xda\xfe\xb8\x5c\x2a\x5d\xd0\x4f\x8b\x3e\xae\x77\xbc\x26\x2a\xae\xc6\x24\x51\x72\x55\xcc\xb5\x6a\xec\x91\x8a\x5c\x9e\x9b\x71\xef\xc5\x47\x62\x52\x49\x53\x62\xfc\x37\x8c\xfd\x55\x6c\xe8\x91\xb9\x99\x69\xce\xb1\x14\x3e\xc5\xe5\x95\xdc\x50\x32\xa9\x0f\x9c\xab\x92\xeb\x1c\x6f\x55\x34\x3a\xed\x6c\xb3\x7e\x1c\xb3\x47\x68\x8f\xe8\xae\xbb\x68\x17\xef\xae\x97\xd3\x6d\x3f\x17\xd9\x46\xc6\xc7\x1b\x84\x21\xd9\xdf\x0f\x51\xd2\x2c\xb8\xea\xa3\xab\xad\x28\x79\x4b\x48\
xfb\xa9\x21\xf6\x3f\xfb\xec\x7a\xab\xaf\xf0\xa8\x8e\xb0\xa0\xe4\x93\x77\xeb\x7f\xaf\xad\x89\xba\xa0\xe4\x43\x8e\x0c\x5c\xa4\x23\xe2\xe1\xc7\x29\xc5\x1f\x3e\x41\x83\xf4\x60\x60\xd9\xdf\x05\x25\x1f\x88\x67\xcf\x06\x93\xd7\x38\x8b\xd5\x96\x1f\xd5\x5b\x6e\xa6\xbe\xc1\xbf\xdc\x66\xf7\xe5\xbd\xde\xf5\x0d\xf4\xee\x8c\x1a\x87\xda\xde\xa3\x99\x49\x3a\x4a\x87\x6b\xa2\x64\x1a\xec\x52\x44\xc4\x20\xc8\xaf\xc5\x20\xbf\xb7\x47\x5a\xff\x90\x4f\x88\xe8\xbe\x43\x74\xc0\x8e\xee\xdb\x44\x66\xa0\xb2\x5a\xa5\xe4\xaf\xdd\x6e\x9b\x81\x1d\x56\x0d\x02\xfd\xcb\xbf\x57\xfc\x57\xfb\x3f\x7c\x54\x19\x40\x95\x01\x54\x19\x40\x95\x01\x54\x19\x40\x95\x81\xa6\xab\x0c\xfc\xd0\x4d\xcc\xdf\x27\x45\x41\x81\xc3\x74\xd0\x51\x50\x20\xbc\x09\x3c\xb8\x76\x40\x38\x53\x7c\xe6\x85\xdb\xec\x39\x3c\x1e\x50\x2a\x40\x9f\xd9\xef\xe1\x01\xa2\x6d\x9d\xd8\x11\x73\xdf\x6b\xcb\x09\xc4\xdc\x23\xe6\x7e\x83\x63\xee\x37\x9c\xc9\x78\xe4\x58\x71\x59\xed\xc0\x98\xfd\xec\x38\xa5\xd8\x48\x3c\x69\x89\xbc\x6f\x77\xaa\xc4\x17\x94\xfc\xad\x20\x0b\x67\x1f\xb8\x8d\x0e\x34\x0e\xfe\xc6\xe4\xb5\xa2\xe8\xa6\xff\x43\xf1\x97\x06\xec\xa9\xee\x1e\xe1\xa7\x8c\x99\xc7\x85\x03\x4f\x9f\xe0\x1e\x10\x47\x3c\x66\xb8\xb3\xc6\xb9\x21\x6f\x61\x39\x28\xae\x99\x6d\x26\x68\xdc\xf7\xd5\x33\x16\x6f\x6b\xe9\x15\xb9\x2a\xa5\x53\xe6\x5d\x65\xbf\x19\xf1\x67\xd3\xd3\xe2\xdd\x3e\x41\x93\xfc\xdd\x3e\x48\x2d\xfc\x00\x9d\x17\xc8\x62\x8a\x4e\xdb\xc8\xa2\xb5\x96\xe6\xc4\xe2\x29\x43\x67\x1c\x8b\xa7\x96\x9a\x0a\x2e\x2f\x50\x51\xb4\x2a\x5d\x5d\x0a\xfe\x06\xc7\x59\xca\xf8\xee\xc4\x6f\x59\x52\x54\xe3\xa6\x1d\x1f\x28\xbf\x07\xca\x62\x5e\x5f\x7f\x5e\xcf\x82\x1c\xfa\x90\xc3\x43\x74\x80\xa5\xe3\x63\x96\xe1\xbd\xd3\x69\xaa\xcd\x37\xee\x96\xb0\xd7\x3f\x73\x17\x1d\x0f\xb0\xd7\x86\x77\xca\xb2\xd9\x15\x55\x79\xc3\x8d\xb1\xef\xd3\x9f\xf0\x8d\xec\xcf\xee\x64\x3f\xed\xf0\xdc\x24\x72\x4a\xb9\xac\x3f\xf2\xb9\xb3\x8f\xda\xab\xa2\xaa\x12\xe3\x17\xe9\x3d\x68\xf8\xa4\x92\x29\xe3\x4c\x61\xc8\xcf\xc9\x55\xdb\x96\x1b\xa7\x2c\xe8\x97\x3c\x5e\xac\x5e\x59\x90\xaa\x57\x26\x74\x5b\x2c\xdc\x33\x37\xe9\x9b\xba\xfa\x80\xb7\x2d\x22\xb6\xcd\xb8\x25\xba\xfa\x58\xb0\x39\x9a\x60\xe3\xc2\x00\x39\xef\xd6\x00\x94\x5e\x65\x61\xe3\x71\x8a\x7a\xf6\xd4\xc2\x63\x4d\xf5\xd4\xc2\x6a\x33\x3d\xe5\x3b\xe3\xac\xdb\x85\xa1\xf7\x54\xed\x82\x29\xa8\xcf\xe2\xcf\x38\x7a\x6a\xd8\xea\xa9\x47\x96\x02\xba\x6a\xcc\xdd\x55\x8a\xd6\x03\x7d\x95\xfc\x80\xa3\xaf\xf6\x9b\x7d\x35\x73\x76\xfe\xec\xa3\x67\xd7\xef\xad\xb4\xab\xb7\x04\x62\xdf\x7c\x5f\x61\xb3\xfd\x35\xf1\x9c\xa3\xbf\x46\xcc\xfe\x7a\x64\xe1\xd1\xb9\x47\x2e\x2e\xad\xdf\x61\x13\xae\x0e\x33\x1a\xbf\xf5\x7b\x6c\xd2\xf3\x6b\x3c\x7f\x76\x6a\xa6\x99\xaf\xf1\xbc\x2c\xe5\x6f\xfd\xbe\xca\xfc\xac\xa3\xaf\x92\x96\xe5\x9a\x7a\x74\xfa\xfc\xfa\x9d\x35\xee\x36\x5d\xfa\x96\xf1\xd6\xef\xad\xec\x19\x3a\xc5\x4e\xc4\x27\xad\x15\xd3\xfd\xce\x85\x96\xc7\xd5\x1b\xb1\xe6\xaa\x5b\x0c\xea\xff\x6b\x6a\xbe\x4c\x88\x1f\xe7\xc7\x9c\x97\x7d\xba\x8f\x7e\xbb\x8f\xfd\x66\x5f\xf4\x53\xd6\xe2\xfa\xd9\x3e\x7d\xe0\xb8\x4e\xf8\x8a\x1c\xab\x48\x2a\x57\x7a\x3f\xb6\x38\x6f\x6c\x3f\x0c\x51\xad\xa5\x07\x92\xcb\xf9\x8a\x52\x2c\x57\x35\xfd\xce\x2f\x5f\x2e\xbe\x41\xd6\xc4\x92\xd8\x11\x84\x54\x55\xb8\x7b\xc9\xf4\x8f\xe5\x56\x55\x55\x2e\x57\x8d\xf7\xcb\x14\x5a\x56\x15\xb3\x4d\xce\x64\x62\xf2\x1b\xa4\x6b\x95\x92\xe1\x1f\xba\x7e\x45\x29\x59\xbb\x0c\xfd\x6e\xb8\x1c\xb1\x5a\xad\x4c\x8e\x8d\x95\x94\x9c\x54\xba\xa2\x68\x55\x8f\x35\xa2\xfe\x36\x8c\x6a\x37\xb4\xaa\x7c\xcd\x5e\x23\xca\x25\x49\xab\x16\x73\x9a\x2c\xa9\xb9\x2b\xa3\x25\xa5\x50\x28\x96\x0b\x63\x97\xc4\x7f\x9f\x7e\xfd\xc9\x55\x4d\x56\x27\x9f\x2a\x5e\xcb\x5d\xb9\x91\x8a\x99\xbd\xe1\x7d\x58\x74\xa9\x73
\xb1\xfa\x7f\x93\x74\x52\x48\xdb\xd4\x15\x29\x97\x32\x5f\x64\xbe\x5d\x73\x48\xdc\x72\xa5\x55\xad\xaa\xef\xb2\x4a\xf2\x8a\xbe\xc1\x2b\x17\x2c\xa1\xd1\x47\x93\xec\xd9\x7e\xba\x53\xbf\x7c\xca\x79\x35\x2f\x2d\xe8\xa1\x3a\x9a\x16\x4d\x2d\x2a\x25\x39\x23\x9a\x4a\x0e\xeb\xa7\x2d\xd6\x36\xb0\x9c\xae\x3f\x35\x64\x59\x52\x93\xf5\x30\x79\x17\xad\xa5\x53\xf5\xf7\x75\xb3\x89\xcb\x9f\x0a\xb6\x05\xe7\xd9\xac\x61\x0b\x7c\x47\xca\xcc\x71\x57\x7f\x7b\xae\xac\xe6\x6f\xd9\xee\x33\x5c\x09\x6f\x61\x93\xc7\x88\x8d\x18\x67\x6e\xc0\xa0\x65\xe6\xe8\x1c\x9d\xad\xa1\x47\xad\x8d\x1a\xdc\x14\x10\x42\xb5\x88\x33\x3e\x18\x09\xcd\x74\x5c\x10\x90\x71\x86\x32\x36\x64\xdc\x00\x3b\x54\x59\xed\xb0\x1d\x4a\x7e\xfc\x0e\x1f\x3b\xb4\xdb\x52\x56\x79\x98\x9e\xfd\xe2\xe0\x46\x58\x1e\x48\xaf\x20\xbd\x82\xf4\x0a\xd2\x2b\x48\xaf\x20\xbd\x6a\x5a\x7a\xf5\x7c\x84\xe6\xc5\x8a\xe1\x2c\x4d\xf3\x15\xc3\x49\x3a\x4e\xc7\x5a\x98\x3e\x44\xc5\x0f\x5a\x14\x2e\xc5\x0b\x34\xe7\x70\x29\xde\x64\x9b\x41\xcb\x87\x4e\xae\x0f\x82\x15\x61\x99\x9f\xdd\xe1\xb3\x82\x48\x07\xe8\xba\x3c\xd6\x15\x49\xae\x29\xe8\xf0\xb2\x02\xc2\xaf\x5e\x5b\xcc\x40\xf8\x05\xe1\xd7\x06\x0b\xbf\x3a\x47\xbd\x02\x8b\xae\x74\x72\x3e\x09\xc8\x29\x5a\x77\x71\x3d\x18\x0f\x5d\x2a\xf0\xf1\xd7\xd2\xa1\x20\x69\x97\x61\x02\xcb\xd5\x35\xa5\xb4\x7a\x4d\xce\x95\xa4\xe2\x35\x8d\xbd\xe5\xb5\xec\x67\x06\x6d\xb7\xc8\x01\x91\x7c\x46\x35\xd2\x66\x98\x8b\x69\xb3\x42\xca\x82\xd5\xc8\x32\x6f\x64\x5a\x6f\x24\x39\xac\x5f\x54\xa7\xfa\xf2\x3a\xb5\xcb\xd3\x02\x66\x90\x29\x0f\x99\xf2\x32\xc8\x94\x87\x4c\x79\xc8\x94\x87\x4c\x79\x3d\x93\x29\x2f\xd3\x35\x99\xf2\x42\xbf\x93\x96\x33\xe5\x65\x90\x29\x0f\x99\xf2\x90\x29\x0f\x99\xf2\x90\x29\xaf\xf3\x99\xf2\x32\x3d\x9d\xd8\x2e\x83\xc4\x76\xed\x4b\x6c\x97\xe9\xf6\xc4\x76\x99\x4d\x98\xd8\x2e\x7b\x85\x1e\x16\x24\x70\x96\x66\x38\x09\x3c\x45\x27\x68\x32\x38\x04\xd4\x8b\x11\xcd\x17\xb5\xe0\x5a\x0a\x8f\x07\xf3\xbe\x83\x6c\x62\xdd\xc0\x50\xaf\xdf\xa6\x75\xa2\xe1\x78\x7d\x86\xf8\x47\xb6\xdb\xb0\xec\x3e\x23\x12\x52\xf2\xe1\x62\xfb\x7d\xe2\x21\xdb\x4a\xc6\x04\xc8\x9a\xa7\x2c\x9d\xaf\x71\xc7\x1c\xa5\xc3\xad\x8d\x08\x9c\x31\x90\xb7\xb5\x28\x6f\x7b\x4b\x9f\x7f\xec\xce\x9c\x30\x19\x19\x3a\xc3\x4d\x06\x4f\x44\xd7\xda\x0b\x4a\x17\x85\xf4\xed\x1c\x9d\xb5\xa5\x6f\x37\xd3\xde\x23\xc2\x25\x7e\x9e\x66\x1d\x2e\xf1\x9b\x68\xb0\xd1\x58\xdb\xc6\xcc\x5a\x63\xa6\xcc\x25\x91\xfb\xc6\x88\x23\xf4\xc1\x50\x36\x38\x64\x22\xca\x65\x1f\x13\x76\xc8\x99\x8d\x6c\xda\xba\xa0\xb3\xc6\xac\x3d\x72\x39\x60\x5e\x60\x5e\x60\x5e\x60\xde\xde\xc1\xbc\x58\xc6\x05\x2c\xe3\xba\x87\x83\x43\xaa\xdc\x11\xa9\x32\xdc\x0d\x70\x37\xc0\xdd\x00\x77\x03\xdc\x0d\x3d\xed\x6e\x40\xc4\x0a\x22\x56\x10\xb1\xd2\xae\x88\x15\x78\xf3\xe0\xcd\xeb\x55\x6f\x5e\xb6\x10\x72\xb0\x56\x90\x63\x2c\xe9\x4d\x98\x77\xb1\x9d\xe2\x6b\xb6\x2d\x6d\xfb\x68\xf3\x66\x4c\xb6\xf7\xb9\x04\x65\x44\xfe\x92\x9c\xac\x1a\x7e\x50\xd9\x51\x89\x9f\xe7\xad\x34\xca\xf1\x3b\xcf\x28\x16\xca\xc5\x72\xc1\x5c\xb4\xb1\x7f\x9b\x60\x6f\xda\x42\x77\x3a\xcf\xb8\x64\x5c\x1c\x7d\x4b\xa4\xb1\x72\xfd\xd3\xf6\xc5\x4b\xa2\xf9\x45\x93\x78\x84\x52\xc2\xff\x10\xbf\xc0\xf1\x2b\xda\xb2\xb8\x43\xdf\x1f\x9e\x2f\x6a\xd5\x2e\x97\xee\x77\x6f\x45\xff\xd7\x07\x7f\x65\x17\xd9\xbc\xf1\x95\x79\xbc\x7c\x66\x50\x8a\xdf\xe0\x78\x64\x57\x45\x18\x03\xc2\x18\x50\xf0\x1f\xfe\x2d\xf8\xb7\xe0\xdf\xea\x1d\xff\x16\x0a\xfe\xa3\xe0\x3f\xfc\x0a\xf0\x2b\xc0\xaf\x00\xbf\x42\x57\xf8\x15\x50\xf0\x1f\x05\xff\x37\x0b\x49\x45\xc1\xff\x76\x14\xfc\xff\x6e\x1f\xdd\x2f\xa8\xa2\x55\x02\xbe\x16\x29\xb2\x2f\xf6\xb1\x2f\xf4\xd1\x4e\xeb\x04\x8b\x17\xfa\x16\xfe\xbf\xbf\x20\x57\x67\xcc\xd3\x0d\x76\xd7\xf5\xc5\xff\xdb\x53\xe3\x3f\xb0\xd
a\xfd\x7f\x1d\xa2\xb8\x18\x01\xf9\x0d\x55\xb9\xcc\x3f\x16\xab\xef\x39\xb9\xd5\x34\x59\x63\xdf\x19\x64\x7f\xd9\x4f\xcc\x3e\xc7\x1a\x85\x87\xd6\xcf\x88\x32\x27\x9a\x48\x8e\xe8\xa7\x9d\xb5\x2e\x37\x46\xc5\x38\x3a\xab\xa8\x53\xa5\x92\x25\x9a\x0e\x6f\x80\xda\xc4\x52\x65\xca\x8a\xd1\x9a\xa6\x29\x3e\x5a\xc7\xe9\x18\x1d\xf1\x95\xbe\xdb\xbd\x66\x15\x99\x32\x1e\xbc\xa1\x58\x9e\x80\x90\x9b\xab\xcb\xc1\x00\xf5\x00\x4b\x1b\x00\xd5\xbe\x17\x83\x9b\x1a\x77\x02\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\x0a\x4a\xba\x71\x94\xf4\xbd\x43\xf4\x80\x60\x74\x52\xa5\xa2\xf1\x32\x71\x4a\xb9\xaa\x2a\xba\x39\xd4\x77\x07\x7c\xec\x0c\x44\xb7\x55\x3f\xe5\xd2\x5a\x3a\x3a\xbe\x3e\x97\x9b\xb6\x5a\x58\x34\x5a\x10\x88\x6e\xaa\x52\xd1\x96\xd3\xf5\x47\x6f\x71\x44\xa7\xf7\x1a\xcf\xbd\x5d\xf7\xe0\x9d\x47\x74\xfa\xbd\x98\xa2\xc6\xba\xdb\x71\x0b\x89\x81\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x80\xe8\x3a\x84\xe8\x7e\x38\x41\xe3\x02\xd1\x95\xe5\xea\x75\x45\x7d\x4a\x5f\xad\x59\x4a\x46\x23\x2e\xda\x38\xc4\x53\x48\x14\x65\x8d\xfd\xd1\x10\xfb\xdd\x01\x7a\xb9\x7d\xc5\xa5\xb5\x74\xf4\x46\x63\x41\xd0\x17\xc5\x45\x0b\x46\x06\x8d\x50\x02\x9f\x27\xf8\x05\x17\xad\xdb\x59\x4e\xbb\x7e\x65\xbe\xa8\x55\x37\x1b\x06\xdc\x88\xa8\xe7\xd7\x06\xc3\xbe\xe3\xec\x98\x01\xfb\xea\x5e\x17\x83\xfc\xb9\x7a\xde\x5d\xae\xb5\x91\xa8\x6a\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\x41\x90\xc1\x8e\x91\xc1\x61\xca\x0a\x32\xa8\xae\x48\xb9\x94\x89\x8b\x78\x53\xde\xe9\x13\xad\x84\x8e\xda\xd8\xf7\x59\xff\x7e\xe3\x98\xaa\x94\x64\x8d\xbd\x90\x60\xff\x6b\x80\xee\xd5\xdb\x9a\x72\x36\x65\xc5\xe3\x56\x1a\xe3\x87\x8b\x8a\x3e\x8f\x86\x82\x0d\xc5\x6d\x2f\xd6\xde\x91\x11\xe2\x6b\x97\x42\xd2\x7f\x12\x99\x12\xbd\x99\xe1\x4a\x30\x33\x3c\xcd\x4e\x0a\x34\xa8\xf7\x63\x7d\xb8\xae\xc1\x13\x7d\x5f\x32\x70\x43\x70\x43\x70\x43\x70\x43\x70\x43\x70\x43\x70\x43\x70\x43\x70\x43\x70\x43\x70\x43\x70\x43\x70\xc3\xae\xe4\x86\x9b\xaf\x44\x04\xc8\x24\xc8\x24\xc8\xe4\x26\x22\x93\xbf\x90\xa4\x43\x82\x4c\x6a\x55\x45\x95\x0a\x72\x2d\x8e\x5c\xe3\xf5\x6b\xa4\x6a\x55\xca\x5d\xd1\xa7\x76\x83\x47\xbe\x91\xfd\xd3\x30\xfb\xb1\x7e\xba\xdd\xb8\xcc\x42\x8f\xfb\x54\x59\xca\xd7\x6c\x73\x45\x0d\x9c\x29\xab\x8d\x64\x5c\x3f\x69\x49\x5c\x69\x20\xc2\xda\x73\xc2\x2d\x8f\xde\x64\x89\xa1\x94\xd9\x19\x66\xaa\xbe\xda\xbb\x0b\x64\x79\xb9\x60\x96\x77\x86\x9d\x32\x78\x9d\xbb\xeb\x0d\xf1\x5f\xdd\x4f\xd6\xd1\xbe\xab\x7b\xbd\x61\xde\xcb\xd8\x40\x41\xae\x52\xf4\xfb\xb7\xd7\x0f\xcf\xa0\x2a\x57\x4a\x52\x4e\x0e\x1a\xa1\x87\x8c\xf3\x3a\x39\x48\x99\x47\xe8\x61\xba\xe0\xac\x61\x1f\x8b\x24\x6f\x66\x94\x50\xcc\x39\xa8\x98\xf3\xbb\xfa\xe8\x3f\xf6\xb1\xb7\xf7\x45\xff\x9d\xd5\x41\x7f\x16\xe1\x6b\xe3\x87\xa5\xb2\x54\x90\x55\xb1\xcb\x13\x0b\x0e\x4d\x53\x72\x45\xbe\x70\xb1\xf6\x53\x12\xdf\x90\x2a\x6a\x4c\x5f\xd7\x56\x6f\x58\x2b\xb9\x6b\xd2\x53\xfa\xfd\x57\xaf\xc8\x9a\x6c\x1a\x3e\x67\xe9\x64\xb3\xc4\x32\x37\xb9\x7c\xef\xa4\xa8\xb1\xf4\xc4\x51\xfd\x5c\x55\xca\x71\x24\x57\x52\xca\x05\x61\xe6\xf8\xfe\x46\x5f\x8a\x4a\xc5\xb2\x58\x7d\xf0\xfd\x83\x7d\x2e\xc7\x14\x06\x30\xd4\x77\xbd\xe6\xd4\x55\x50\x4a\x52\xb9\x90\x52\xd4\xc2\x58\xe5\xa9\xc2\xd8\x6a\xb9\x98\x53\xf2\xf2\xd8\xbe\x39\x6d\x41\x6f\x25\x15\xdf\xe1\x7c\x56\xe7\x64\xf3\xb1\x48\xc8\x06\xe3\x95\x7a\x6b\x69\x96\xa5\xf3\xb4\x75\x5a\x2c\xdc\x6f\x71\x1b\x54\x59\xad\x52\xf2\x99\x3b\xea\x6d\xd0\xbd\x46\x0d\x46\xa9\xde\xec\x3c\x28\x0e\x75\xd4\xea\x3c\x4a\x8b\xb4\x
e0\xb4\x3a\xc9\x0c\x9d\x69\xc1\xd3\x33\xc3\x6f\xfd\x11\x0e\xc5\x35\x98\x9e\x20\xd3\x83\xea\xed\x1d\xa9\xde\x8e\xb2\xbd\x28\xdb\x8b\xb2\xbd\xed\x2a\xdb\x9b\x7d\x3e\xec\x75\xc2\xa2\xde\xda\x04\xbb\x40\x73\xb4\x4d\xf4\x6d\xfb\x17\x0a\x31\xef\x39\x7c\x3b\xdb\x2a\xba\x89\x3a\xb2\x94\xc8\xfc\x97\x1d\xf5\x6b\x85\xb1\x8a\xa4\x56\x8b\xdc\xcb\x29\x36\xbe\x41\x1b\x97\x7d\x15\x7d\x8f\xdb\xa9\x05\xc4\xee\xda\x86\x46\xf9\xcf\xef\xe7\x65\x00\xf6\x38\x0f\x5e\x93\xd5\x82\xec\x3c\x3a\xe8\x3c\xaa\x55\x55\xa9\x2a\x17\x8a\xb9\xd1\xba\xf3\x5c\xad\xe8\xff\xbe\x61\x1c\xd5\xef\x32\x73\x91\xe6\x29\x5b\xb3\x65\x9a\xa4\xa3\x2d\x2c\x5f\x16\xb8\x14\x00\xcb\x96\x80\x65\xcb\x73\xfd\xf4\x6c\x3f\x7b\xa6\x3f\xfa\xb4\x65\xfb\xdf\xdc\xdf\x3b\x3b\xa6\x1a\xef\x89\xde\xcf\x5c\x0d\xc0\x3d\x7c\xfa\xcb\x69\x6b\x82\x12\x3e\x2f\xee\x30\x9f\xc2\x84\x78\x44\x2a\xf1\x2b\xf5\xb5\x9a\xb8\x9a\x9f\xc1\xd7\x4d\x5a\x2c\x91\xd5\x94\xf2\x82\x10\xc7\x3c\xac\x7f\x16\xc6\xbf\x97\xcc\x8f\xc5\xfe\xe3\xb0\xff\x56\x2e\xf3\x52\x84\x5e\x8c\xb0\x17\x22\xd1\x4f\x58\x58\xee\xfd\x91\x59\x45\xcd\xf1\x25\x5d\x41\xe1\xdd\xae\xc4\xe2\x97\xf5\x3f\xc5\x63\x53\xae\xa7\xe0\xc0\x55\xac\xbe\x56\x35\x5b\x06\x30\x2a\xe5\xf8\x83\x73\x71\x45\xa9\x98\x33\x66\x57\xb9\x94\xd7\x62\xca\x75\xa3\x4f\x85\xf8\xa0\x22\x2b\x95\x92\x9c\x8a\x89\x5f\xe4\x6a\x26\x73\x2c\x39\xf8\xf4\xec\x00\xeb\xe7\xe3\x5b\xf8\x6d\xb9\x94\x39\xdd\xc9\xb2\xfc\xc4\x68\x8d\xcd\x06\x57\xef\xf7\x9e\x83\xb6\xb1\x97\xf1\x1e\xa1\xec\x49\x3a\xce\x8e\xc5\x8f\x58\xce\x90\x3d\xfc\x13\x33\xbe\xf7\xda\xf6\xe2\x03\xfa\xd1\xb6\x16\xc3\xfe\x99\x61\x9a\x18\x93\x2a\x45\x47\x68\xb7\xb7\x66\x53\xa8\xe2\x4c\x6c\xfa\xcd\x04\xfb\xcc\x00\x6d\xcd\x29\xaa\x3e\xc9\x45\x9f\x35\xea\x5d\x3b\xf5\x99\x65\x73\x4b\x60\xe6\x69\xe4\xf2\xc1\x50\x84\x9a\x23\xb6\x2f\x99\xef\x3f\x0c\xcf\x63\xb1\x2a\x5f\xb3\x4d\xd4\x90\x4b\x97\xe0\x68\x3b\x95\x8c\x8a\xb2\xd8\x8a\x2a\x2f\x3b\x44\x9d\xfc\xfe\xa0\xe8\xac\xff\x72\xfc\x5f\x6a\x7e\x63\x74\xf5\x5c\xf0\xa7\xf5\x20\x8b\xd7\x56\x97\x17\x3f\x8e\x24\x90\x90\x6c\x42\xb2\x09\xc9\x26\x24\x9b\x90\x6c\x42\xb2\x09\xc9\x26\x24\x9b\x90\x6c\x42\xb2\x09\xc9\x26\x24\x9b\x1b\x28\xd9\x9c\xa0\x71\x96\x8a\x8f\x58\x94\x62\xa7\x93\x52\xf0\x7d\x5b\x3d\x9a\x80\xcc\x13\x32\x4f\xc8\x3c\x21\xf3\x6c\xa3\xcc\xf3\xf9\x84\xa8\xb1\xed\x0f\x2b\xdf\xc8\x7e\x38\xc1\x3e\xe7\x20\x93\x1f\x6a\x84\x4c\x5a\x0c\xb0\x2b\xe8\xe4\x9d\x5e\x74\xb2\xfb\xb9\xa4\x1f\x45\x0c\x1f\x58\x36\xcc\x25\x2f\x04\x73\xc9\x04\x1b\x34\xb2\x54\x9a\x3d\xed\x22\x92\x36\xb3\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\x04\x9b\xec\x30\x9b\x3c\x4c\x07\xd9\x44\x7c\xdc\xe2\x8c\x77\x39\xd9\xa4\xb5\x8b\x6b\xbf\x74\x0a\xf4\x10\xf4\x10\xf4\x70\x13\xd1\xc3\x7f\xd9\x4f\xfb\x8c\xda\xd3\x79\x7d\x09\x5c\x54\xca\xaa\x5c\x28\x72\xd1\xba\x23\x83\x25\xfb\x83\x3e\xf6\xfb\x7d\x74\x97\xe7\x49\xd1\x68\x81\x87\x9c\x5c\x56\xd4\x6b\x56\xe7\x4a\x31\x8e\x89\x92\xb1\x82\x5c\x9d\xf2\xba\x6a\x6a\x61\xee\x9c\x7e\x46\x78\x8a\xfd\xba\x86\x26\xeb\x1a\x9a\x6c\xa4\xa1\x6c\x91\x2e\x0a\x3c\x77\x8e\xce\x72\x3c\x77\x9a\x4e\xd2\xf1\x16\xf0\x9c\xf9\x8c\x41\x6a\x42\xf6\xad\x61\x7a\xd8\xbf\xbe\x50\x03\xd9\x43\x8b\xe5\x82\x2a\x6b\x9a\x8d\x7c\x7f\x6a\x98\x7d\xff\x16\x62\xae\xe2\x43\x22\xee\xe2\x83\x8d\xd0\xdf\x39\xd1\x5e\x57\xb0\xdf\xa1\xba\xba\x45\x35\x99\x47\x8d\x9b\xed\x7e\x1c\xdc\x85\x32\xd5\x27\x83\x71\xf0\x24\x3b\x1a\x50\xcd\xc8\x18\x80\xfa\xc0\x1f\x00\x62\x00\x62\x00\x62\x00\x62\x00\x62\x00\x62\x00\x62\x00\x62\x00\x62\x00\x62\x00\x62\x00\x62\x00\xe2\x0e\x03\xe2\x83\x34\xc1\xc6\xe3\x29\x0b\x10\xef\x72\x02\x62\x63\xfb\x06\xf9\x2a\x00\x34\x00\x34\x00\x74\x47\
x01\xf4\x3f\xf4\x51\x2c\xa8\x7e\x12\xfb\x9d\x3e\xf6\x99\x3e\xda\x59\x57\x15\x69\x5d\xf2\xbc\xa7\x20\x57\xeb\xaa\x16\x81\x3a\x3b\xa8\xf3\x97\x12\x34\xe6\x93\x21\xd6\xa0\xcd\x39\xad\x98\x57\x8b\xfa\x27\x64\x72\xe5\x67\x12\xec\xeb\x03\x44\x76\x32\x9f\xc6\xd4\xc4\xd3\x4b\x73\x33\xbc\xa1\xae\x20\xca\x77\x5f\x77\x65\x12\xb2\x6e\x0e\x04\xb9\x05\x82\xfc\x78\x30\x41\x3e\xc8\x26\xd6\x4b\x19\x62\xf5\x3f\x12\x1f\x80\x1d\x83\x1d\x83\x1d\x83\x1d\x83\x1d\x83\x1d\x83\x1d\x83\x1d\x83\x1d\x83\x1d\x83\x1d\x83\x1d\x77\xad\xb8\xd8\xda\xbb\x41\x5c\x0c\xb6\x0b\xb6\x0b\xb6\xeb\x60\xbb\xdf\x1d\xa2\xa4\xbf\xaa\xd5\x40\x8c\x96\x72\x95\x7d\x7a\x88\xfd\xc6\x00\xbd\xdc\x25\x59\x8d\x6a\x8d\xd5\xbb\x0f\x53\xaf\x9a\x4a\x8e\xd4\x29\x4e\x8d\xf6\xe7\x8b\x5a\x75\x56\x51\xa7\x4a\x25\x4b\x7d\x0a\xd9\xa9\x07\x34\x6c\xa4\x16\xfd\xd5\x57\x05\x83\xc3\x43\xec\x40\xf3\xd2\x53\x90\x43\x90\x43\x90\x43\x90\x43\x90\x43\x90\x43\x90\x43\x90\x43\x90\x43\x90\x43\x90\x43\x90\x43\x90\xc3\x4e\x93\x43\x10\x40\x10\x40\x10\xc0\x1e\x26\x80\x7f\xfd\x24\xa5\x82\xe2\xda\x0d\x00\x98\x2b\x49\x9c\x02\xfe\xf2\x93\xec\xc3\x83\x9e\x81\xeb\x49\xb1\x4e\x54\x8d\x19\xce\x34\x81\x35\x21\xeb\xd3\x7a\x3b\xc9\x07\xf4\x73\xeb\x42\xc6\x9d\xa7\x74\x39\xb6\xcb\x80\x61\x81\x61\x65\xc0\xb0\xc0\xb0\xc0\xb0\xc0\xb0\x7a\x86\x61\x65\xba\x86\x61\x85\x7e\x27\x2d\x33\xac\x0c\x18\x16\x18\x16\x18\x16\x18\x16\x18\x56\xe7\x19\x56\xa6\xa7\x91\x53\x06\xc8\xa9\x7d\xc8\x29\xd3\xed\xc8\x29\xb3\x09\x91\x53\xf6\x2a\x3d\x22\xe4\x4e\xe7\x69\x96\xcb\x9d\xce\xd0\x29\x3a\xe1\x5b\x42\xdd\xc1\xa5\xcc\x2a\xea\x4e\x4a\x34\x5f\xd4\x82\xd5\x4e\xf7\x79\xab\x9d\xb6\xb2\x2d\x42\xe8\xb4\x12\x2c\x74\x3a\xcd\x4e\x36\x26\x74\xe2\x77\x55\x9f\x68\x2f\xfe\xeb\xdb\x3d\x99\xd9\x2b\xc4\xfc\x14\x93\xca\x6e\x3e\xb6\x4f\xfc\xbd\x33\x84\x4c\x00\xad\x8b\x34\x4f\x59\x36\xb0\xa2\xe4\x6f\x44\xf9\xff\xc6\x22\xc9\x49\x3a\xda\xea\xc8\x64\xde\xd3\x47\xff\xa5\x8f\xfd\xe7\xbe\xe8\x7f\xb2\x4c\xd3\xb7\x23\x8f\xeb\x13\xbf\x31\x17\x8c\x70\xd5\x60\x4e\xdf\xf1\x09\x5b\xe2\xda\x7f\x38\x99\xd4\x8a\x1c\xab\xe8\x3d\xaa\x2f\xfa\x52\xb1\xa9\xf2\xff\xc7\xde\xdb\xc7\xb7\x71\x9d\x77\xbe\x1f\x82\x54\x2c\x3d\x56\x62\xf9\x38\x69\x6c\x58\xb6\x61\xc8\x26\x48\x88\x04\x05\xea\x9d\x7a\x25\xf8\x22\x11\x92\x25\x5a\x94\x29\xc7\x95\xc2\x0c\x81\x21\x34\x12\x88\x41\x66\x40\x2a\xca\x6e\xb6\x89\xb3\xe9\xa6\x6d\xd2\xdb\x4d\xb6\x69\xf7\xb6\xdb\x9b\xf4\x6d\x93\x36\x4d\x36\x6f\x6d\x6f\x93\x6d\xda\xbd\x9b\xd6\xad\x1b\xb7\x69\xda\xe6\x73\x6f\xba\xd9\xa6\xeb\x6d\x9a\x6e\xdb\xcd\xde\x6e\x9b\x9b\x7c\xd2\xcd\xfd\xcc\x39\xf3\x0a\xcc\x60\x40\x08\x00\x41\xe1\xf7\x8f\x3f\x32\x31\x73\xe6\xcc\x99\x33\xcf\x9c\xf3\x7d\x9e\xe7\xf7\xc4\x94\x92\xd8\xf9\xab\x5a\x6c\xad\x64\xc3\x83\x7c\x2c\xaf\xdd\xbe\xb4\x56\x8a\xe5\x15\x4d\x36\x5e\x20\xd9\xde\x91\x1a\x1f\x16\xfe\xe5\x36\xb7\xe4\xd6\x02\xdc\xdc\x04\xc5\x56\xd6\x34\xbe\xc8\x2b\x6b\x6a\x4e\xd6\xf9\x27\xd6\x7c\x5f\xcd\x8f\x41\x2a\xb6\xc8\xaf\xc8\x17\xcf\xfc\x9b\x38\x11\x1b\x8d\x4d\x16\x8b\x13\xfc\x03\x9a\xd7\x6e\xc7\xb4\xb5\x92\xb1\x39\x33\xde\x38\x6b\xd5\x60\x36\x27\xe7\xe3\xaf\x10\x5d\x73\x9b\x97\xf7\x45\xe8\xbd\x11\xf6\x23\x91\xe8\xbb\xed\x01\xfa\xab\x3e\xbe\xe6\x7a\x4a\x2a\x49\x05\x59\x13\xbb\x07\x21\xe6\xa1\xeb\x6a\x4e\xe1\x1f\x44\x7b\x9d\x2e\xf1\x8d\x8e\xaa\xc5\x8c\xf5\x52\xe5\xb6\xbd\x42\x58\x95\x8c\x87\x62\x1c\xa2\xcb\x96\x0d\x30\xec\x92\x45\x50\x38\x44\x58\x96\x63\xdc\xfa\xf0\x35\xb9\xaa\xc5\xd2\xe3\x47\x8c\x63\x35\x29\xc7\x51\x8f\xb1\x9d\x15\x6f\x3c\x5f\x37\x1b\x4b\x1c\x49\x29\x09\xdf\x0f\x5f\x97\x3a\xc7\xf2\xed\xaf\x09\xa2\x8c\xdd\x94\x65\xc5\x0b\x6a\x51\x2a\x15\x52\xaa\x56\x18\x2b\xdf\x2c\x8c\xad\x95\x94\x9c\x9a\x97\xc7
\xf6\xcc\xe9\xf3\x46\x2b\xa9\xf8\x4e\xf7\xbd\xba\x51\xff\x0f\x45\xe8\x9c\xb0\x11\xd3\x94\xe1\x36\xe2\x38\xdd\xc1\x4c\x14\xf1\x95\x69\x1e\x5f\x79\xcf\x94\x58\x0f\xde\x61\x8b\x4f\x1b\x2d\x8e\xb3\x2c\x9d\xa5\xed\x93\x39\xe3\x0b\x74\xc7\x4d\x36\x6f\xc4\xca\x6a\x87\x8c\x58\xf2\x1b\x23\xbe\x46\xec\x71\xb1\x9c\x8e\xe5\xd4\xa2\xb1\x39\x36\x3f\x76\x1e\x7b\x36\x2a\x0e\xa9\xb1\x67\x53\xf6\x29\x6d\xb4\x6c\x97\xe9\x12\xcd\xbb\x2d\x5b\x32\x43\xa7\x9b\x08\xb0\x9d\xe6\xf7\x70\x91\x13\x5d\x1d\xec\x17\xec\x17\xec\x17\xec\xb7\x87\xd8\x2f\x56\x74\x21\x2b\xba\xee\x81\xe3\x2f\x47\xe8\xab\x11\xf6\x95\x48\xf4\xcb\xf6\xa3\xfa\x58\xe4\xb2\x7b\x3b\xaa\x94\x62\xba\xd8\x52\xc6\x96\xe5\x15\xe1\x49\xb6\xb1\x8d\xf3\xc9\x32\xdf\x12\x3e\x58\xae\xb5\x63\x49\x2d\x8d\x96\xe4\x82\xc4\x1f\x89\xb9\x23\x75\xaf\x31\x05\xc0\xb5\x27\x83\xb9\x40\x50\x56\x57\xe5\xbc\xb1\x8a\x2d\xde\x76\x5c\xbc\x8e\x69\x57\x8a\x23\xe6\x46\x95\xdf\x6e\xac\xa0\x49\x39\x3e\x4f\x14\x35\x6f\x7f\x78\x9c\x8f\x03\xf7\x9e\x5b\xcf\x65\x4d\x37\x3a\xe9\x1e\x28\xc9\x38\xd3\xba\x21\xf3\x22\x2b\xe2\x7d\xb3\x9a\x48\x89\x7e\xae\xca\x52\xc9\xb7\x8f\x71\xc6\xbb\x30\xcf\x7b\xe0\xb7\x01\x87\x0f\x02\x3e\x08\xf8\x20\xe0\x83\x80\x0f\xa2\xa7\x7d\x10\x5f\x88\xd0\x4b\x11\xf6\x62\x24\xfa\x82\xfd\xb5\x7d\x7f\x64\xda\x95\xe9\x5a\x2e\xca\x92\x2e\xdb\xaf\xfe\xbc\xa6\x96\xa5\x02\xff\x0e\xcf\xab\x45\x25\x77\xdb\x13\x81\x64\x3d\x6e\x27\x55\xd6\x78\xe0\xe9\xd4\xe1\x54\x6c\x41\xd8\x11\xf1\x91\x2c\xcb\x25\x63\x9a\x3a\x5f\x11\x39\xa6\x6a\xe5\xeb\x52\xc9\x8a\x88\xd2\xd6\xe4\xb1\x15\xa9\x68\xad\xfe\xe3\xe2\xd7\x78\x6c\x45\x29\x49\x45\xe5\xcd\x96\xf9\x5e\x96\x63\x52\x9e\x03\x7d\x75\x4c\xf0\xdd\xbc\xb3\xb4\x14\x8d\x27\x74\xe7\x24\xb1\xde\x4e\xc5\x66\x14\x6e\x92\x5c\x1d\x57\xb5\xda\x3b\x73\x9c\x28\x15\xb1\xdc\xe7\xab\x3f\xb5\x72\x3d\x15\xdf\x25\xfa\x33\x6d\xdd\x88\x37\xb4\xea\x9d\x03\xf4\x8e\x01\xf6\xfc\x40\xf4\xbb\x76\x7c\xdd\x97\xfa\xaf\x98\x76\xd0\x98\xa2\xd7\xd5\x5b\xb1\x82\xa4\x2d\x4b\x05\x0f\x79\xb0\x17\x6a\xb2\xb6\xa2\x6a\xab\xc6\x58\xf8\xf6\xf4\x62\xd5\xc5\x83\x3b\xca\x97\x35\xd6\x9a\xa4\x2c\xee\x4a\x31\xd6\x0b\x39\x25\xef\x2c\xac\xf9\xb7\x91\x6f\x73\xec\xd1\xd5\xb9\xf8\x25\xff\xd5\xfa\xfa\xa5\x5c\xc3\x68\xd9\x4d\xdb\x73\x61\x47\xd5\x79\x2f\x96\x8a\x09\xc4\xc4\x6d\xb1\x7b\xb5\x9a\x10\xf7\x90\x30\xf6\x3c\xfc\x5f\xde\x89\xa1\x1f\x8b\x25\x32\x52\xee\x66\x41\x53\xd7\x4a\x79\xe3\x28\x1e\xce\xc6\x0f\xaa\x1a\x38\xb1\x58\x31\x57\x40\xde\x46\xac\x3b\x58\xb6\x5b\x3a\x16\x4b\xcc\xaa\x9a\xec\x6a\x36\x96\x93\xf4\x9c\x94\x37\xee\xde\x1c\x1f\x11\xc0\xc8\xdb\xd3\xc5\x72\xba\xa6\xc1\x15\xbb\x8d\x54\xfc\xfe\x72\xf5\xbc\x71\xaf\x6d\xe0\xe2\x83\x8b\xaf\x47\x5d\x7c\xd9\x02\x9d\x17\x28\x7c\x86\xa6\x38\x0a\x3f\x41\xc7\xe8\x68\x13\xf0\x72\xa1\x22\x55\xd6\xc2\x31\x73\x07\x30\xf2\x8d\xa4\x3f\xca\x7e\x80\xdd\x2f\x2c\x86\x63\xcd\xa9\xe5\x29\x2a\xec\x1d\x49\x7a\x3a\x4c\xd0\xb8\xa1\x92\x6e\x9a\x5a\x74\xca\xb9\x7d\x74\x98\xfd\xf0\x36\x7a\xa8\x46\x01\xd9\x66\xe4\xbf\xd8\x88\x0a\xef\x25\xd5\x58\xf7\x76\x81\x00\xef\x5e\x7e\xb9\x1a\x75\xe6\x9a\xca\x6e\x46\x87\xbb\x3c\x50\xbf\x3b\x45\x79\x37\xf4\x9a\x05\x4e\x54\xf3\x75\x33\x9e\x02\x6a\xbb\x21\x43\x01\x2a\x1b\xf0\x52\xc1\x4b\x05\x2f\x55\x4f\x7b\xa9\xa0\xb2\x01\x95\x0d\x78\x07\xe0\x1d\x80\x77\x00\xde\x81\xae\xf0\x0e\x64\xd3\x34\xc6\x46\xe3\x7b\x6d\x7d\xde\x5d\x6e\x7d\x5e\x63\xef\x86\xc2\x6e\x5b\x1d\xd2\x42\xfa\x03\xd2\x1f\x5b\x2b\x0f\x83\xbd\x7d\x98\x2e\x08\x0e\x2a\xe5\x8d\xf5\xb5\xa2\x96\x34\xb9\xa0\x18\xef\x65\x30\x0a\x5d\x5d\xab\x48\xc6\x0a\xea\x96\xbc\x7c\x5d\x55\x6f\x7a\xf6\xa4\x3a\xfb\x77\x43\xec\x67\xb7\xd1\x23\xbe\xed\xd9\x20\xf4\x9d\x7d\x8d\x29\x06\x3f\x65\x5e\xea\x8
a\xb8\xd4\x94\xfb\x52\x2d\x92\x11\x9e\xe4\x27\x4c\xfa\x75\xd7\x24\x9d\xf5\x3a\x71\x5e\xd1\x2b\x60\x9f\x4d\x6a\x0b\xbf\x25\x9c\x7f\x3e\xc7\x9e\x15\x78\xb3\xde\x43\xa8\xc5\x9e\x26\x33\xad\x37\xa9\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x81\x46\x21\x40\x0c\x0a\x09\x0a\x09\x0a\xd9\x31\x0a\xf9\x3b\x43\x74\xc4\xa4\x90\x6b\x15\x55\xcf\x49\x45\xa5\x54\x18\x5b\x1f\x77\x43\x47\xce\x97\xd4\x52\x45\x2a\x96\xd5\xbc\x75\x98\xac\xe9\xec\x5d\x43\xec\xad\xdb\xe8\x01\xd7\x99\x4b\xe6\x99\xd1\xb7\x37\x48\x19\xcf\xda\x6d\xcf\xab\xf9\x49\xbb\xed\x16\x01\xc6\x8c\x00\x8c\x4e\xff\x16\x45\xf7\x02\xae\x8a\xea\x65\x2d\x24\x8c\x85\x70\xc2\x38\xcd\x32\x16\x2d\x74\x1e\x91\x19\x53\x19\xf0\x88\x5c\xbc\x71\x1c\x61\x96\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x60\x89\x9d\x65\x89\x7f\x79\x3f\xed\x33\x1e\xdd\xd8\x7a\x3a\x28\x67\xbb\xac\xe6\xad\x94\xed\xb1\xb2\xa6\xbe\xe9\x36\xfb\xc4\xfd\xec\xbd\x11\xba\x27\xa7\x6a\xf2\xd2\x7a\x3a\xfa\x44\x4e\x2d\x95\x0c\x7b\x75\x66\xe6\xb2\x03\x6c\x2a\x6a\x8c\x1f\x6d\x74\x7c\x5e\xcd\x27\xe3\xe6\x51\x53\xaa\x26\x2f\xa6\xcf\xc8\x15\x27\x23\x7a\x5e\xcd\xcf\x1b\x87\x8e\xf7\x27\xc7\x92\x02\x91\x1d\xa6\xdd\x02\x91\xbd\x86\x1e\xe0\x88\xec\x95\x74\xef\xc7\xfb\xb6\x93\x39\x1f\x42\x71\xd7\xe3\xfe\xb8\x8b\xd8\x76\xb3\x1b\x74\xe3\xe9\x70\xd8\x95\x62\x23\x26\xec\x8a\xc7\x4d\xc6\x65\xf5\xd5\x54\x34\xf5\xd4\xe8\x8f\xfa\x8e\xca\xfc\x33\x0d\x8f\xca\xfc\xda\x5d\x38\x2a\xf1\x1f\x75\x8d\xca\x93\xf6\xa8\x5c\x5c\xa8\x33\x2c\x7b\xbc\xc3\xa2\xea\x77\xe1\xb8\x24\xff\xb5\x6b\x5c\x12\xd6\xb8\x4c\xcf\x9c\x9f\xb9\x3c\x13\x3c\x32\x4f\x7a\x46\x46\xa8\xeb\xde\x7d\x63\x33\xfe\x13\xae\xb1\x19\xb2\xc6\xe6\xe2\xfc\xe5\xb9\x8b\x17\x16\x82\x07\x67\xd0\x33\x38\x66\xc3\x5b\x63\x74\xdc\x77\x1f\x36\x52\x13\xbe\x6f\xd4\xd9\x99\xc9\xe9\x46\xdf\xa8\xb3\xb2\x94\xdf\x1a\xe3\xb2\x91\x59\x93\xf9\x31\xd7\xb8\x0c\xda\x96\x66\xf2\xf2\xd4\xd9\xe0\x81\x79\xc2\x6b\x6a\x8c\x2f\x64\xc8\xc8\x04\xdd\x7b\xfd\x21\xdb\xd4\x91\xc9\x1e\xa7\x09\x76\x24\x7e\xc8\x4e\x8e\x7a\xd8\x9d\x4f\x55\x75\xe6\x66\xa4\x56\x49\xb4\xc4\xae\x45\xbf\xd7\x5a\x37\x4d\xce\x4b\x15\xb1\x45\xbe\x2e\xc7\x9e\xb9\x74\x3e\x66\x5c\x96\xaf\x15\x75\xd9\x5e\xec\xe5\xd6\x34\xcd\xd8\x15\x89\xc7\x69\x6d\x13\x8c\xe7\xab\xe6\x53\x71\xde\x53\xb7\x88\xcd\x6f\x5f\xa3\xd1\x90\xa5\x8e\xb5\x62\x7a\xe3\x9a\x5a\x91\x74\xf6\xa3\xd7\xd8\x0f\x0e\x3a\x33\x6a\x6f\xfd\x5a\xad\x16\xbe\x7e\xda\x38\x59\x14\x6b\x15\x73\xca\x25\xfd\xe2\x3e\xa4\xcb\xbd\x94\x28\xd6\x0a\x1f\x1d\x8a\xb5\xc2\x47\x07\x1f\x1d\x7c\x74\x3d\xe4\xa3\xeb\x22\x3d\xfa\xae\xf1\xd1\x41\x28\x1d\x3e\x3a\xf8\xe8\xe0\xa3\x83\x8f\x0e\xc5\x5a\xa1\xe4\x7c\xf7\xb8\xd4\xba\x5e\xc9\x79\x4b\x16\x6b\x7d\x03\xcd\x08\x0e\x78\x92\x8e\x73\x0e\x78\x88\x0e\xd0\x78\x60\x8d\xc3\x9c\xaa\xc9\xa9\xf5\x74\xca\xc3\x86\x5a\x52\xa2\xf5\x62\x38\x46\x1c\x61\xc9\x6a\x8c\xe8\xe9\x87\xd7\x91\xf3\x9d\xed\x0e\x0c\xb3\x8b\xb0\x56\x71\x2f\xb3\x08\x6b\x67\xc8\x97\x00\x55\xd3\x94\xa1\xd3\x55\x45\x58\xf7\x51\x6a\x63\x23\x8e\x42\x5d\x28\xbd\xda\x64\xe9\xd5\xbf\xe9\xa3\x33\xa2\xb6\xe9\x69\x3a\xe9\xaa\x6d\x3a\x4e\x1b\x9e\x85\xc1\xbe\x85\x49\x61\x53\x26\xe8\x08\xb7\x29\xcd\xb4\x3d\x2b\x4a\xba\x9e\xa2\x13\x4e\x49\xd7\x26\xda\x69\xb4\xb8\x6a\xab\xcd\x4f\xf2\x8b\x23\x8e\xf9\x89\xfb\x96\x4f\xf5\x9a\x22\xb3\x7e\xaa\x30\x45\x4e\xd1\xd4\xce\x18\x25\xd4\x4f\x05\x8e\x05\x8e\x05\x8e\x05\x8e\x45\xfd\x54\xd4\x4f\x45\xfd\x54\xd4\x4f\x85\x5b\x00\x6e\x01\xb8\x05\xe0\x16\x80\x5b\xa0\x25\x6e\x01\xd4\x4f\x45\xfd\x54\xd4\x4f\x45\xfd\x54\x78\xdd\xe0\x75\xdb\xe2\xf5\x53\x5b\x
4d\x8a\x37\x54\x2c\x75\xeb\x95\x0d\x61\x1f\x49\xd0\x90\x10\x00\xcb\x2b\x7a\x4e\x35\x96\x3b\x4e\xe9\x81\x31\xb9\x94\x2f\xab\x4a\xa9\xa2\x17\x95\x9c\xac\xb3\xb7\x25\xd8\xdf\xf7\xd3\x4e\xfb\xc8\x06\x22\xd9\x67\xcc\x16\x16\x8c\x16\x92\x23\xc6\xc1\xd3\xd6\xe9\x8b\x69\xcf\xaf\x5b\x4d\x7a\x4b\xa6\xac\x98\xda\x53\x34\xc9\xa7\xf6\x31\x3a\x4a\x87\x03\x1d\x20\xce\xf8\xae\xa7\x53\x9e\x1b\x6f\x89\x87\xf6\x5a\xf8\xc4\x9f\x60\x47\xcc\x89\x5f\xfd\xac\xcd\xd7\xc0\xd3\x2b\x6f\xd2\x07\x22\xf8\x11\xc1\x0f\x95\x2d\xb8\x8c\xe0\x32\x82\xcb\xa8\x77\x5c\x46\x50\xd9\x82\xca\x16\x50\x3d\x50\x3d\x50\x3d\x50\x7d\x57\xa0\x7a\xa8\x6c\x41\x65\x6b\xab\xc0\x49\xa8\x6c\xb5\x43\x65\xeb\x67\x87\x68\xbf\x00\x76\xda\xb2\x94\x4b\x59\xa0\xa8\x4e\xd1\x50\x4d\x2d\xca\x3a\xfb\xeb\x04\xfb\xb3\x01\x7a\xc8\x38\x69\xd2\x7d\x8e\x5d\x18\xb4\xdc\x98\x62\xff\x25\xd5\xf8\x60\xb6\x44\x9e\xff\x20\x3f\xe1\x52\x75\x8f\xcc\xda\x9f\xc6\x85\xa0\xc8\xdf\x42\x45\xfe\xe5\x70\x42\x78\x8a\x9d\x30\x09\x61\xe0\xe4\xb2\x88\xb9\x5a\x94\x6b\x8b\x7f\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x76\x0a\x13\x7e\xbe\x8f\x76\x8c\x99\xcf\x68\x8c\x7d\xb6\x8f\x7d\xa6\x8f\xee\x31\xff\x3f\xfa\xea\x82\x6c\xed\x8a\xf2\xf6\x2b\x99\x7c\x55\x41\xae\x4c\xa9\x79\x7b\xfe\xd7\x70\xb6\x89\x9a\xbf\x6c\x50\xa0\xc4\x0b\xbb\xcc\xeb\xa6\xe6\x4a\x2b\x6a\x18\xe1\x62\xdf\x89\xd0\xa3\x66\xa1\xd2\xb2\x22\xbf\xa9\x22\x97\xf8\xec\x73\xc5\x2a\xb2\xff\x3b\xc2\xfe\x24\x42\xbb\x3c\xbf\x2f\xad\xa7\xa3\xaf\x35\xee\xd6\xb3\x76\x10\x4f\x25\xf9\x68\x41\xae\x4c\xba\x8f\x5e\x4c\x4f\xce\xcf\x59\xbb\xd5\xd6\x81\xc6\xda\x71\x9b\xa8\x69\x68\xa2\x91\x86\xb2\xe5\x60\x51\x87\x67\xc4\x73\xb8\x40\xe7\xf9\x73\x98\xa5\x69\xca\x34\x01\x1d\x5d\x03\x70\x5e\xd1\x2b\xd6\xf0\xff\xc6\x10\xa5\xc5\xf0\x2f\x73\xa6\x1c\xac\x7b\x7c\x43\x5d\xb6\x4a\x3c\xb0\x1f\x1c\x62\xbf\x1f\xa1\xed\xfc\x14\x2e\xf4\xa2\xc9\x52\xbe\x0a\x64\x64\xd5\xe5\xe4\x83\xc6\xdf\x33\xc6\x51\x6e\x9d\x97\xac\xba\xdc\x5a\x21\x85\xec\x02\x1d\x11\xa3\x94\xa6\x31\x3e\x4a\xc3\x94\xa0\x27\x03\x23\x36\x79\xbf\x8d\x41\xc9\xaa\xcb\xa1\x08\xf6\x6c\x38\x5d\x7d\x92\xed\xf1\x91\x66\xe7\x57\x31\xa9\xaa\x71\xa1\x1b\x8f\xf8\xc3\xdc\x57\xb0\x81\x82\x5c\xa1\xe8\xbb\xb6\xbb\x46\xf4\x21\x4d\x2e\x17\xa5\x9c\xec\x33\xa8\x0f\x9b\x3f\xb5\x7f\x5c\x33\x3c\xf0\xb5\x4a\x35\xa7\xd1\x81\x45\x56\x36\xc4\x72\x9a\x14\xcb\x79\x4f\xdf\x1d\xbc\xcf\x27\x84\x82\x8d\xf1\xbd\xb2\x15\x6c\x3a\x6e\x0e\x02\x4c\x80\x27\x01\x21\xd8\x1c\x94\xd7\x2a\x94\xfc\xc6\x7d\x2e\x73\xb0\xd3\x4c\x81\x92\xb8\x05\x88\x8a\xff\xeb\x80\x01\x68\x8f\x42\x0d\x2c\x43\x88\x65\x80\x4a\x42\x47\x54\x12\x90\x1e\x8b\xf4\x58\xa4\xc7\xb6\x2b\x3d\x36\xfb\xe9\x3e\xba\x24\x34\xef\xce\xd1\x9c\x4b\xf3\xae\x4d\x99\x82\xad\x4d\x48\xb4\xbe\xf7\x31\xff\xaf\xf4\x0e\x76\x8f\x18\x26\x6a\xdd\x8a\x20\xf3\xd2\xbd\xae\x4f\x7e\xbc\x2c\x69\x15\x85\x3b\x73\x05\x41\xf0\xd9\x0a\x3c\x54\xe6\xce\xfa\xb6\xae\x03\x1e\xae\x6e\x68\x94\x5f\x74\x2f\xdf\xee\xee\x76\xff\xb8\x2a\x6b\x05\xd9\xfd\xeb\xa0\xfb\x57\xbd\xa2\x49\x15\xb9\xa0\xe4\x46\x6b\x8e\xf3\xb4\x62\xfc\xfb\xb6\xf9\xab\xd1\xcb\x8c\xb1\xf3\xcd\x56\x6d\x42\x26\xe8\x48\x13\x4f\x96\x17\x6e\xc2\xea\x23\x6c\xf5\xf1\xd1\x7e\xfa\x70\x3f\xfb\x50\x7f\xf4\xe7\x6d\x13\xfe\xf6\xfe\xde\xd9\x97\x54\x79\x83\x8c\x71\xe6\xd1\x0d\xdc\x63\x69\x4c\x4e\x27\xc6\x69\x28\x60\xe2\x0e\xf3\x2f\x91\x08\x86\x91\x8a\xfc\x4c\x63\xc9\x25\xce\xe6\x47\xf0\xe5\x8f\x1e\x1b\xca\xea\x6a\x69\x5e\x04\xfb\x3c\x65\xbc\x16\xe6\xbf\x17\xac\x97\xc5\xf9\xe3\x70\xf0\x86\x29\xf3\x52\x1f\xbd\xd8\xc7\x5e\xe8\x8b\x7e\xce\x06\x99\x1f\xec\x9b\x55\xb5\x1c\x5f\x99\
x15\x54\x3e\xec\x6a\x2c\xbe\x62\xfc\x29\x1e\x9b\xf4\xdc\x05\x47\xd4\x62\x11\xb5\xa6\x3b\x61\x0d\xa3\x52\x8e\xdf\x38\x0f\x16\x29\x2a\x39\xf3\x23\x29\x17\xf3\x7a\x4c\xbd\x65\x8e\xa9\x08\xa6\x28\xcb\x6a\xb9\x28\xa7\x62\xe2\x8a\x3c\x3a\xcb\x7a\x96\x1c\x15\xfb\x0e\x80\x7d\xf9\xf8\x36\xde\x2d\x4f\xa4\x51\x3b\x81\xce\x63\xfe\x16\x7d\x3b\x7b\x05\xef\x58\x0b\x0d\x7a\x76\x1f\xa5\xd8\x48\x3c\x69\xe7\x8d\xdf\xe7\xae\xa8\x96\x55\x97\x37\xa3\x8a\x5a\xcb\x33\xcd\x3f\x33\x42\x53\x02\x21\xe6\x64\xcd\xe4\xda\xb2\x5e\x1d\xb2\xea\xfe\x4d\x29\x94\x94\x52\xc1\x9a\x00\x16\x54\xfc\x9f\x7b\xd9\x1f\xf5\xd3\xab\xdd\x07\xda\x31\xac\x23\x3e\x80\x71\xca\x39\x70\x41\xb4\x78\x49\xb4\x98\xdc\x67\x1c\xed\xfa\x59\x37\x23\x4f\x03\xcf\x68\x31\x8e\x5c\xa3\xe7\xc4\xec\x5d\xa0\xa7\xf9\xec\x35\xd6\x3f\x67\x82\x15\x74\xdd\xc3\x66\xde\x71\x2a\xb0\xaf\xa1\xf3\xbb\x3e\x66\xbc\xf1\xc6\xf0\xd9\x7d\x81\x9d\x37\x67\xb7\xcf\x13\x35\xe7\x7a\x70\xff\x6a\x82\x47\xa3\x7f\xbc\x23\xe0\xb1\x8e\xf9\x53\xce\xe0\x27\xbb\xdf\x3c\x61\xf3\x1e\x6e\xe6\x1a\x7d\x2f\xbd\xae\x6a\x39\xd2\xba\xa7\x8b\xd5\x09\xa8\x69\x93\xd4\xf4\x4f\xfa\xda\x6a\x76\x96\x04\x57\x7d\x96\x16\x1d\xae\xda\x51\xbb\xd6\x98\xe1\xda\xb0\x75\xaa\x63\xe9\xc2\x10\xed\x17\x77\x05\xd8\xb5\xb8\x8d\x6b\x83\x4d\xd9\xb8\xa9\x3c\xbe\x79\x96\x0c\x70\x17\x70\x17\x70\x17\x70\x17\x70\x17\x70\xb7\x19\xb8\xdb\x5a\x01\xb8\x4e\xa2\xe2\xc6\xe1\x6e\xe7\x17\x1d\x99\x2f\xee\x0c\x58\x55\x1c\x09\x21\xc2\xc1\x6b\x8d\x34\x27\x1b\x9b\xb3\xd4\x00\x3f\xee\xb5\x05\x0e\xf8\x31\xf8\xf1\x26\xf3\xe3\xcd\x25\x70\xa1\x84\xb9\xf3\x0c\x2e\x3b\x4b\xd3\x2c\x13\x3f\x6d\x83\xe5\x27\xdd\x28\x3a\xb0\xa1\x5a\x40\xdd\x72\x7c\xfc\x7c\x1f\xdd\x37\x56\x54\x0b\xfa\xd8\x3f\x29\xaa\x05\xa3\x6f\x6f\x61\x2a\x3b\x42\x03\xc6\xdf\x92\xaf\x2a\xaa\x85\x59\xa5\x28\x9f\x95\x4a\xf9\xa2\xac\x65\x1f\x0a\x1b\xfc\xec\x7e\x4a\xb3\xb1\xf8\xa8\xc3\xdc\x8d\xff\x5a\x31\xdc\x45\xb5\x10\xbf\xc7\xbc\x8c\xeb\xae\xd8\xbf\x79\x03\xcd\x99\x51\xc8\x6b\x15\x55\xcf\x49\x45\xa5\x54\x18\x5b\x1f\x37\x06\x6f\x3c\x28\x22\x96\x5f\xd8\x30\x20\xc5\xb2\x9a\xb7\xce\x93\x35\x9d\xfd\xfe\x12\x7b\x79\x90\x1e\x70\x35\xb5\x64\x36\x15\x3d\x58\x5f\x55\xf5\xac\xdd\xe4\xbc\x9a\x9f\xb4\x9b\x4c\x1e\x36\x4e\x9b\x74\xda\x5b\x14\xcd\x39\x2e\xdf\x80\x13\xbb\x5c\x53\x21\x03\x59\x01\xc8\x0a\xa0\x60\x1d\x64\x05\x20\x2b\x00\x59\x81\xde\x91\x15\xe8\xa2\x7a\x6c\x5d\x23\x2b\x80\x42\x61\x90\x15\x80\xac\x00\x64\x05\x20\x2b\xb0\x19\x85\xc2\x7a\x5a\x05\x00\x95\x8c\x7a\xb9\x92\x51\x66\x0b\xaa\x00\x64\xd7\xe9\x7b\x05\xdf\xbc\x4c\x97\x38\xdf\x3c\x4f\x59\x3a\x1b\xc8\x37\x5d\x24\x2a\x65\x92\xa8\x54\x00\x31\x6a\x49\xcd\x9a\x42\x38\xdf\x9c\x66\x19\x93\x6f\xba\x3a\x67\x72\xcd\x80\xae\xb9\xa8\xa6\xb8\x07\x8a\xff\x38\xf9\x63\x36\xf3\x8b\x15\x93\x02\x89\xda\x51\x71\xc4\x66\x32\x35\x81\xc0\x9e\xa3\x67\x69\xb1\xca\x3d\x35\x4b\xd3\xad\x78\x96\x70\x55\x21\x98\xb0\xc9\x60\xc2\x8f\x46\x82\xf3\xac\xae\x08\xd3\x33\x4f\x17\xb8\xe9\x39\x4b\x2d\x9a\xae\x74\x55\x84\x18\x3e\x43\x0b\x4e\x88\x61\xeb\x5a\xbf\x26\x02\x0c\x16\xe9\xb2\x2b\xc0\xa0\x65\xcd\x5b\xa6\xb1\x53\xa6\xaf\x8e\x0d\x2e\xab\x7a\x85\x92\x3f\x36\xea\x6f\x1a\x47\xcc\x88\x12\x57\x78\x8e\xba\x12\x68\x26\xa7\xc4\xd1\xb5\x66\x72\xca\x3e\xbd\xd3\x06\xb3\x3d\x61\x8b\x80\xd0\x80\xd0\x80\xd0\x80\xd0\xbd\x03\xa1\xb1\x38\x0c\x59\x1c\x76\x0f\xa5\x47\xc8\x78\x47\x42\xc6\xe1\x0c\x81\x33\x04\xce\x10\x38\x43\xe0\x0c\xe9\x69\x67\x08\x32\x87\x90\x39\x84\xcc\xa1\x76\x65\x0e\xc1\xd7\x08\x5f\x63\xaf\xfa\x1a\xb3\x85\x0e\xab\x97\x75\x8c\x46\x27\xfd\x69\xf4\x03\xec\x7e\x61\x36\x1c\x93\x4e\x5b\x51\xe3\xe6\xcf\x13\x34\x28\xf2\x03\xf4\x8a\xaa\x49\x05\xd9\xa5\x4f\x2e\x8a\x31\xe6\x74\xa5\xa4
\xe6\x65\x9d\xfd\x4a\x82\xfd\xe2\x00\x91\x79\xdc\xd2\x7a\x3a\xaa\x37\x56\x80\x71\x6a\x61\xee\x82\x9a\x6f\x55\x0d\xc6\x87\xf8\x09\x0b\xa2\x17\x8b\x69\xb3\xf1\xf3\x4a\x0b\x53\xfb\x7a\xad\xce\xe2\x33\xe1\x2f\xd3\x38\xdb\x67\xbe\x4c\xde\x79\x62\x25\xec\x88\xa7\xe0\x15\x89\x42\x0e\x04\x72\x20\x50\x5a\x11\xee\x27\xb8\x9f\xe0\x7e\xea\x1d\xf7\x13\x4a\x2b\xa2\xb4\x22\xb0\x3f\xb0\x3f\xb0\x3f\xb0\x7f\x57\x60\x7f\x94\x56\x44\x69\xc5\xad\x02\x3a\x51\x5a\xb1\x1d\xa5\x15\xbf\x15\xa1\x47\x02\x20\x9f\xd0\xb0\x66\x7c\x42\xd3\x7d\x0e\xdb\x13\xc2\x5c\x81\x85\x08\x1f\x29\xc8\x15\x1b\xc1\xf1\x63\x51\x87\xd0\xbf\x0e\xe1\x0f\x3c\x2c\x44\xc4\xeb\x14\x20\xb4\x22\xcc\x4a\x95\x75\xb5\xb8\xb6\x2a\xe7\x8a\x92\xb2\x6a\xa9\x87\x8f\xe9\x9c\x7b\xb3\xdf\x8c\xb2\x0f\xf4\xd3\x3d\x39\x55\xe3\xe8\x75\x9c\xeb\x86\x8b\xdf\xac\x69\xee\xac\x79\xe6\xed\x16\x17\x79\x8b\x53\x46\x8b\xc9\x31\xae\x1e\xae\x6a\xb2\xbb\xa4\x86\xef\xa1\x82\xb5\xb7\x58\x3c\x3c\x4f\x73\x62\xa4\x33\x74\x9a\x8f\xf4\x04\x1d\xa1\x43\xc1\xd2\x45\xaa\x26\x73\x6d\x32\xbf\x0e\xde\xa9\x56\xf8\x95\x70\xe2\x79\x80\x8d\x9b\xc4\x33\x1e\x37\x29\xa7\x7f\x57\xdc\xcc\x33\xfa\xd6\x1d\xce\x33\x3a\x68\x89\x80\x6f\xec\x31\x8d\x5b\x52\xe0\x9b\xf5\xa4\x32\x3c\xe7\xaa\x2a\x73\xa7\xe9\x47\x85\x70\x4c\xe4\xea\x34\x99\xab\xf3\xc9\xbe\x56\x9a\x8c\x0b\x22\x09\xe7\x0c\xcd\x38\x49\x38\xed\x34\x41\xed\xb2\x31\x21\xe2\xdd\x99\xf7\xef\x74\x4c\xd0\xc9\x1a\x65\xcd\x8d\xd9\xa2\x7d\x42\x5f\x73\x33\x2c\x11\xe4\x35\x7b\xcd\x0e\x42\x5e\x13\xf2\x9a\x9b\x2c\xaf\xd9\xd1\x35\x6a\xdb\x3e\x10\x61\x32\x9d\xd9\x0c\x9d\x66\x27\xe3\xc7\xed\x40\x95\xc7\xdd\x9a\x99\xbe\x17\xb8\x2b\x0a\x3a\xfd\xab\x7e\x2b\xd8\x45\xca\xaf\x2a\xdc\x8f\xae\xc9\x05\x85\x7f\x21\x8c\xbd\xb3\xab\x34\xff\xd7\x23\xec\x2f\x22\xf4\xa0\xef\x71\x75\x4b\xf4\x0f\x16\xe4\xca\xa4\xdf\x59\x5b\xa0\x54\x7f\x7b\x76\xc2\x61\xef\x01\xfb\xe2\x10\x65\xcc\xc7\x52\x56\xe4\x37\x55\xe4\x12\x87\x49\xd5\x90\xc2\x0c\x47\x5a\xd3\x2b\xea\xaa\x35\xde\xae\xbe\xb1\x77\x0d\xb1\xb7\x6e\xa3\xd7\x78\xda\xb0\x49\xc6\x3f\xef\x6b\x30\x56\x89\x37\x6f\x75\x7f\xda\x6e\xbe\x45\xc1\x4b\x07\xf9\x09\x93\xee\x2e\x5a\x82\xe5\x01\x17\x46\x60\xd3\x9d\x14\xcd\xb1\x02\x00\x7d\xe6\x95\x15\xb9\x14\x30\xee\xb5\x4a\xc3\x0d\xc5\x52\x21\xe8\x09\x41\x4f\x08\x7a\x42\xd0\x13\x82\x9e\x10\xf4\x84\xa0\x27\x04\x3d\x21\xe8\x09\x41\x4f\x08\x7a\x42\xd0\x13\x82\x9e\x10\xf4\x84\xa0\x27\x04\x3d\x21\xe8\xa9\x53\x41\x4f\x9f\x49\xd2\x62\x50\xe5\x23\x0b\x26\x6e\xb0\xfe\x91\x55\xd0\xff\xab\xc3\xec\x97\xb6\xf9\x89\x10\xa6\xa3\xbf\x6e\x72\x46\x37\x5c\x2c\x59\x56\x2f\xa4\x18\x52\x4b\x18\xe3\x88\xb3\xe2\xe1\xab\x3a\xf3\xfb\x68\x2c\x9a\x9c\xa5\x5c\xc2\xb3\x7a\x76\xb5\x9d\x4a\x1e\x11\x84\xb2\x5a\x16\x31\xbd\xd5\x2b\x32\x6d\x0a\xa4\xec\x4c\x96\x72\x5d\x8f\xcf\x2d\xe1\xf1\x01\x96\x04\x96\x04\x96\x04\x96\x04\x96\x04\x96\x04\x96\x04\x96\x04\x96\x04\x96\x04\x96\x04\x96\x04\x96\xec\x2c\x96\x9c\xa6\x0c\x3b\x1d\x3f\x69\x47\xcd\xed\x71\x47\xde\x05\xec\xf7\xee\x86\xd8\x3b\xe0\x53\xe0\x53\xe0\xd3\x2d\x84\x4f\xff\x3e\x41\xe3\xc1\xf8\x34\xb0\x42\xfc\xaf\x27\xd8\xc7\x07\xfc\xd1\x68\x93\x15\xe2\x4f\xf8\x56\x88\x4f\x07\x1c\x3e\xab\x6a\x93\xc5\xa2\x0d\x2b\x5b\x17\x67\xdb\x26\x2a\x79\xc7\xf5\xce\xd2\x77\x41\xbd\x33\x68\xc5\x81\x4f\x82\x4f\x82\x4f\x82\x4f\x82\x4f\x82\x4f\x82\x4f\x82\x4f\x82\x4f\x82\x4f\x82\x4f\x82\x4f\x22\x6c\x12\xdc\x0f\xdc\x0f\xdc\xaf\x73\xdc\xef\xcf\x1e\xa6\x33\x82\xfb\xe5\x64\xcd\x6c\x45\x76\xe5\x62\x7b\xfe\xac\x14\x4a\x4a\xa9\x60\xd9\x69\x4b\xb1\x4c\x2a\x97\x35\x75\x5d\x2a\xb2\x7f\xf5\x30\xfb\xa3\x7e\xba\xcf\x7d\xc6\xd2\x7a\x3a\x7a\x84\x6b\x97\x59\x47\xd5\x4a\xd1\x4c\x39\xc7\x2f\x88\x2b\x5c\x12\x57\x48\xee\xe7\x0
a\x66\xae\xe6\x16\xd3\x81\x07\x4f\x9a\xed\xb7\x58\xc5\x6c\x63\x59\xf2\x29\xcf\x20\xae\xa7\x53\x81\xdd\x0d\x45\x75\xc5\x70\x14\x37\xc7\xce\x98\x28\xce\xe7\xd9\x59\xf9\xce\x81\x1d\x68\x50\x63\xa8\x20\x57\x28\xfa\xe2\x8e\xda\xc7\x7a\xcc\x92\x3b\x6b\xe6\xc9\x1e\xb2\x44\xcf\x36\xf3\xe1\x66\x9e\xa5\x45\xba\x5c\x25\xf9\xd3\x92\xa7\x0b\xf1\x1f\x88\xa0\x35\x29\x82\xf6\x7b\x7d\xed\xb2\x38\xcf\x09\x41\xb4\x05\x7a\xda\x11\x44\xeb\x94\x35\xab\xaf\x61\xd6\x59\x63\x97\xf9\xe2\xce\x5a\x6b\x36\x5d\xa3\x9c\xd6\x8c\x59\x3b\x20\xf4\xd3\x36\xcb\xa8\x41\x43\xad\xd7\xcc\x28\x34\xd4\xa0\xa1\xb6\xc9\x1a\x6a\x9b\xb6\x42\x0e\x93\x3d\xeb\xec\x57\x25\x3b\x4b\xd3\x2c\x13\x3f\x6d\x07\x69\x3d\xe9\x0e\xf5\x0a\x6c\xa3\x36\xd8\xab\xe5\x32\x68\x1f\x4d\xd0\x13\x62\x8b\x27\xaa\xa0\x1a\xbb\xba\xb2\x9a\xcf\x2b\xba\xb6\xc6\xa7\xfd\xf2\x5a\xbe\x20\x57\x74\xf6\xfd\x09\xf6\xed\x7e\xda\x21\x8e\x32\x3e\x8a\xe9\xfa\x21\x1c\xf3\x6a\x7e\xda\x6e\x24\xc3\x1b\x49\x8e\x19\xa7\x88\x5a\xa9\x8b\x69\x9f\x03\xb6\x5a\xc0\xc6\x75\x7a\x4a\xcc\x6e\x63\x56\x1b\xb3\xfb\x24\x1d\xa7\x89\xc0\xd9\x6d\x96\xc6\x35\x3e\x60\xb5\xf7\xde\x92\x10\x8d\xd7\x85\x4f\xea\x43\xec\x80\x39\xa9\x45\x77\x2c\x25\xc1\xda\x1e\xa1\x80\x1f\x82\x32\x10\x94\x81\xa0\x0c\x04\x65\x20\x28\x03\x41\x19\x08\xca\x40\x50\x06\x82\x32\x10\x94\x81\xa0\x0c\x04\x65\x20\x28\x03\x41\x19\x08\xca\x40\x50\xc6\xa6\x04\x65\x7c\xe8\x1a\xed\x0d\x2b\x22\xa7\xe6\x2b\xf2\x6a\x99\x6f\x2e\xd8\xb7\xae\xb2\xe7\x07\x9d\x2a\x40\xc3\xa1\xd8\xee\xb2\x79\x6a\xf2\x31\xe3\xd0\x9a\x7a\x3f\xce\x01\x5d\x8e\xe7\x32\x40\x56\x40\x56\x19\x20\x2b\x20\x2b\x20\x2b\x20\xab\x9e\x41\x56\x99\xae\x41\x56\x2d\xef\x49\xd3\xc8\x2a\x03\x64\x05\x64\x05\x64\x05\x64\x05\x64\xd5\x79\x64\x95\xe9\x69\xc2\x94\x01\x61\x6a\x1f\x61\xca\x74\x3b\x61\xca\x6c\x41\xc2\x94\x7d\x3d\x4d\x89\xb0\xa6\xe3\x34\xc1\xc3\x9a\x0e\xd0\x38\xed\x0b\x2f\x7c\xea\x90\xa1\x96\x04\x33\x3d\x15\x1e\xcc\x94\x64\x43\x35\x25\x51\x9d\x5e\x78\x02\x98\xe2\x7f\xbb\xdd\x81\x60\xaf\x16\xdf\x9c\x98\xe4\xe1\x5d\x8f\x8b\xbf\x76\x82\x78\x09\x40\x95\xa1\xd3\x74\xb2\x2a\x22\x3a\x45\x23\x1b\x19\x69\x44\x41\x23\x99\xa4\xc9\x64\x92\xaf\xf5\xd1\x29\xf1\x9e\x1f\xa1\x43\xfc\x3d\xdf\x47\x1b\x9c\x7d\x34\x2d\xd2\x46\x4e\xd0\x31\x27\x6d\x64\xe3\xad\xcc\x18\xad\x8c\xf3\xe8\xc9\xed\x93\x39\xe3\x6b\xd1\x54\x33\x61\x06\xa7\xb5\x06\xa5\x8e\xfd\x2a\xab\x7a\x85\x92\x9f\x1f\x71\x0c\x8e\xb9\x9c\x8d\xe5\xd4\xa2\xb1\x39\x35\x3f\x36\x6e\xe3\xb3\x57\x1c\x21\x8c\xcf\x94\x7d\x58\x27\xcc\xd0\x65\xba\x44\xf3\x6e\x33\x94\xcc\xd0\xe9\x26\xd2\x32\xa6\xf9\x1d\x5c\xe4\x38\x55\x07\x78\x05\x78\x05\x78\x05\x78\xed\x21\xf0\x8a\xa5\x58\xc8\x52\xac\x7b\xc8\xf4\xcb\x11\xfa\x6a\x84\x7d\x25\x12\xfd\xb2\xfd\xa8\x3e\x16\xb9\xec\xde\x0b\x2a\xa5\x98\x2e\xf6\x73\xb1\x65\x79\x45\xb8\x71\x6d\x66\xe2\x7c\xb2\xcc\xb7\x84\x0f\x96\x6b\xd1\x57\x52\x4b\xa3\x25\xb9\x20\xf1\x47\x62\x6e\x07\xdd\x8b\x43\x41\x4f\xed\xc9\x60\xae\x0e\x94\xd5\x55\x39\x6f\x2c\x3f\x8b\xb7\x1d\xff\xaa\x63\xda\x95\xe2\x88\xb9\x4b\xe4\xb7\x1b\x2b\x68\x52\x8e\xcf\x13\x45\xcd\xdb\x1f\x1e\xe7\xe3\xc0\x5d\xd7\xd6\x73\x59\xd3\x8d\x4e\xba\x07\x4a\x32\xce\xb4\x6e\xc8\xbc\xc8\x8a\x78\xdf\xac\x26\x52\xa2\x9f\x22\xa7\xcc\xa7\x8f\x71\xc6\xbb\x30\xcf\x7b\xe0\xb7\xfb\x85\x03\x00\x0e\x00\x38\x00\xe0\x00\x80\x03\xa0\xa7\x1d\x00\x5f\x88\xd0\x4b\x11\xf6\x62\x24\xfa\x82\xfd\xb5\x7d\x7f\x64\xda\x55\xdf\xb0\x5c\x94\x25\x5d\xb6\x5f\xfd\x79\x4d\x2d\x4b\x05\xfe\x1d\x16\xd9\xa2\x9e\xf0\x1f\xeb\x71\x3b\x05\x12\x8d\x07\x9e\x4e\x1d\x4e\xc5\x16\x84\x1d\x11\x1f\xc9\xb2\x5c\x32\xa6\xa9\xf3\x15\x91\x63\xaa\x56\xbe\x2e\x95\xac\x70\x24\x6d\x4d\x1e\x5b\x91\x8a\xd6\xea\x3f\x2e\x7e\x8d\xc7\x56\x94\x92\x
\x18\x58\x7f\xba\xaf\xc5\x31\x4c\x1b\x4f\xbd\xbd\xf3\xb8\xa8\x70\x8c\xbd\xd9\xcb\x93\xcc\xbb\x5e\x19\xb6\xfe\x38\x11\x02\xc3\x43\x56\x25\xa7\x38\xbd\xe9\x86\x45\x09\xd0\x79\xaf\x2d\x85\x80\xce\x81\xce\x37\x19\x9d\x77\x15\x38\x0c\x45\xec\x9b\xfd\x39\xca\x9e\xa3\x39\x76\x26\x3e\x63\x63\xf5\xa4\x1b\xc9\xd7\x6f\xad\x03\x45\x5e\x7f\x75\x88\x8e\x0b\x82\x5e\xaf\x70\xad\x4d\xd0\x79\xf8\x37\x3f\x54\xe7\x65\x68\x75\xf6\xcd\x04\xfb\x6f\x03\x21\x45\xcf\xd7\xcd\x1c\xb5\x52\x5e\x59\x57\xf2\x6b\x52\xd1\x2d\xc7\x24\xd9\xf9\xa0\x4e\x75\xdb\x94\x6b\xd7\x30\x61\xef\x37\x12\xbc\x99\x84\x23\xc7\xe0\xc9\xaf\x57\xcb\xb2\xbd\x19\xd4\x2b\xb2\x94\x4f\x25\x53\xb7\x1a\xae\x9b\x7e\x5e\xd1\x2b\x5d\x2e\xb3\xbd\x41\xb9\xa8\x3a\xdf\x5a\x2e\x8b\x35\xb3\x2e\x97\xc2\xe5\xa2\xf6\xf8\xbf\x5f\x3b\x19\xf1\x8e\x09\xcd\xa8\x4e\x56\xe0\x47\x2d\x42\x88\x8d\xa3\x16\x21\x44\x67\x20\x3a\x03\xd1\x99\x1e\x12\x9d\x41\x2d\x42\xd4\x22\x84\xd8\x07\xc4\x3e\x20\xf6\x01\xb1\x8f\xae\x10\xfb\x40\x2d\x42\xd4\x22\xdc\x2a\xf2\x06\xa8\x45\xd8\x8e\x5a\x84\x7f\x3f\x44\x07\x05\x3a\x2c\xa9\x79\xd9\x9f\x14\x6a\x6b\x25\xe3\xae\x73\x45\x49\xd7\x65\x3b\xc0\xf6\xd3\x43\xec\x7f\x0e\xd0\x4e\xe3\x34\x9b\x11\x7e\xa2\xcf\x1c\x7b\x17\x19\x2c\x59\xd6\xcc\xaa\x9e\x77\x49\x34\x37\x65\x34\xd7\x12\x46\x38\xe2\x2c\x5f\xf8\x12\xcd\xfc\xd8\x19\x2b\x20\x67\x5d\x96\xf0\x2c\x85\x5d\x6d\xa7\x92\xbb\xf9\xe5\x2e\xa8\x79\xd9\x24\x8a\xee\x1e\x82\x27\xd6\xf2\xc4\x0d\x69\x65\xb8\xe6\x95\x15\xfa\xee\x1a\x5e\x9f\xc8\xb4\x60\x6f\x00\xbf\x6d\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\x43\xc0\xc3\x4e\xc3\xc3\xa3\x74\x98\x1d\x8c\xef\xb7\x83\xa2\x1e\xf4\xe4\x29\xbb\x36\x79\xed\x0f\x81\x02\x77\x04\x77\x04\x77\xdc\x42\xdc\xf1\x37\x13\x34\x26\xb8\xa3\xb6\x2c\xe5\x52\x16\x63\xaa\xca\xf8\x1f\xd3\xd4\xa2\xbc\xac\x94\xf2\x4a\xa9\xa0\xb3\x77\x27\xd8\xdb\x06\xe8\xd5\xc6\x09\x93\xee\xe3\x97\xd6\xd3\xd1\x61\xb1\xca\xd4\xcc\xef\xa3\x65\x40\x6d\xe6\xa8\x16\xe5\x8c\x68\x27\x39\x6e\x1c\x7a\xa9\xba\x91\xc5\xb4\xeb\x98\x59\x55\x9b\x2c\x16\x6d\xd1\x9c\xae\x87\x80\xaf\xa7\x29\x01\x01\x8f\xd3\x04\x87\x80\x07\x68\x9c\xf6\x05\x06\xf3\xf2\x21\x5f\x4f\xa7\x5c\x77\xdc\x50\xd9\xc9\x7c\x38\xf7\x9b\x64\xa7\x4c\xee\x17\xf8\x5c\x2d\x0a\xe8\x5c\xdc\x23\x7f\x11\x56\xdc\x12\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xf8\x0f\xb1\x83\x60\x78\x60\x78\x60\x78\x9d\x62\x78\x9f\x4a\xd2\x21\xc1\xf0\xf4\xdc\x75\x39\xbf\x56\x34\x56\x6b\x55\x11\x84\x65\x4d\x51\x35\xa5\x72\xbb\x2a\x78\xf0\xdb\xc3\xec\x27\xfb\x89\x39\xe7\xd9\x21\x84\x7e\x05\x89\xe6\xcd\x46\xb8\x33\x21\x19\x37\x8e\x58\xb0\xcf\x34\x63\xf6\x3c\xc7\xb4\x58\x65\x73\x63\x75\xab\x52\xae\xd1\xb0\x12\xe4\x3d\xbd\x0b\xe5\x6c\xb9\x70\xce\x76\x9a\x9d\x34\x39\x5b\xcd\xd8\x5b\x65\xa9\x3c\x97\xdc\x88\x00\x5c\x41\xae\x50\xf4\x6d\x3b\x7c\x1f\x4f\x40\xed\x22\xef\x13\x7a\xd2\x3c\xa8\x93\x0f\x29\x73\x91\x9e\xa2\x73\x55\x2a\x26\x77\xf2\x94\x20\x63\x02\x61\xcb\x26\x85\x2d\x7f\x6d\x63\x22\x51\xe1\x06\xe3\x69\xa1\x51\x99\xa5\xb3\x8e\x46\x65\x9b\x6d\x50\x48\x75\xa3\x4e\x98\xa8\xe4\x87\x76\xf9\xda\x20\xa7\xf8\x91\xd7\xec\x3c\x21\xfe\xde\x51\xab\x03\x31\x49\x88\x49\x42\x4c\x12\x62\x92\x10\x93\x84\x98\x64\x6f\x8a\x49\x76\x64\xb3\x12\xaa\x58\x99\xf9\x89\x9d\xbe\x6b\x85\xb0\x92\x49\xde\x15\xc4\x1e\xae\x37\xd6\xa9\x05\x04\x84\x1f\x7b\x6d\xd9\x02\xe1\x47\x08\x3f\x6e\xb2\xf0\xe3\x5d\xc9\xb2\xc2\x04\x24\xeb\x97\x54\xf2\xb4\xde\x01\xb9\xc6\x4f\x0c\xd3\x61\xb3\xe0\x51\xb9\xac\xd7\xa9\xe0\x9e\x97\xcb\x45\xf5\xf6\xaa\xb1\xb8\xb0\xc0\xe9\x37\x87\xd8\xd7\x22\x74\x8f\x71\xe2\xd2\x7a\x3a\xfa\x88\x0f\x2d\x9d\xb
6\xcf\x4a\x3e\xca\x8b\x15\x95\xcb\xba\xbb\x02\xa0\xf3\x7b\x8b\x31\xe9\xeb\xe8\xa4\x98\x5a\x87\xe9\x20\x9f\x5a\x63\x34\x4a\x7b\x83\x25\x45\xcb\x65\x5d\x6c\x7d\xad\xfe\xdc\x69\x85\xa1\x0b\xe1\x73\x6d\x2f\x1b\xb6\x64\x42\xcb\x65\xdd\x9c\x5e\xae\x1e\xb8\x23\x11\xa3\xff\x6e\xbb\x33\xd2\x31\x7f\xf0\xe9\x1a\xec\xc7\xad\x32\x40\x1d\x1a\xef\x0c\xcf\xf4\xae\xfa\x7c\x6f\x6c\xc0\xf1\xc5\x06\xe3\x6c\x92\x71\xfe\x54\x9f\x88\x39\x4e\xf3\x98\x63\x9b\x4a\xb6\xec\x95\xbf\x53\x5b\xd2\x20\xc7\x6c\xcc\x64\xd4\x35\x13\x6e\x7b\x92\x7c\x7e\x97\x63\x32\x1e\xb0\x39\xa5\xcb\x4a\x98\x9b\xc7\x8e\x19\x09\x00\x4a\x00\x4a\x00\x4a\x00\x4a\x00\x4a\x00\xca\xde\x04\x94\x8d\x7d\xe2\x83\xbe\xe8\x35\x9f\xfe\x06\x58\xe4\x5f\xdc\xeb\x2c\x02\x86\x43\x00\xa4\x6b\x69\xf0\x98\x28\x49\xd3\x81\x95\x01\xc8\x63\xaf\xad\x47\x40\x1e\x41\x1e\x37\x99\x3c\xb6\x1f\x0f\xb5\x98\xff\x84\x63\xc5\xc3\x74\x90\xed\x8f\xa7\x6d\xac\xf8\x3d\x6e\xac\xe8\xb4\x7a\x57\x94\x69\xff\x56\x84\x1e\x11\xd4\x32\x27\x6b\x66\xcc\xa8\xac\xbb\x72\xb5\x19\x0f\x5f\xa6\xfb\xdc\x3f\x1b\x5f\xc0\xd7\x16\xe4\x8a\x37\xaf\x42\x44\xac\x26\x1f\x29\xc8\x95\x29\xd7\xc1\x8b\xe9\xc9\xf9\x39\x2b\x91\xaf\x85\x7e\xb6\x9a\x86\x26\x6a\x1a\x9a\x68\xa4\xa1\x6c\x99\x9e\x11\x33\xd8\xf8\x7c\x19\x33\x78\x96\xa6\x29\xd3\xc4\x47\xcb\x75\x9f\x8d\x24\x5e\xb3\xaf\x47\x28\x6a\x8e\xbd\xaa\x6a\x79\xa5\xe4\xc9\x93\x67\xbf\x1b\x61\xbf\x1d\xa1\x9d\xee\xdf\xa2\xd1\x02\x5f\x03\x1b\x2b\x71\x3b\x68\x59\x8a\xf1\xb9\x9f\x34\x9e\xc8\x94\xeb\xe0\xc9\xf9\xb9\x33\xc6\x0f\x5d\x38\xe4\x0a\x5d\x10\x43\x7e\x86\x66\xf8\x90\x9f\xa2\x13\x74\xac\xb9\x21\xe7\xf7\x18\x3a\xd6\x9f\x4c\x50\xd2\x54\x44\x95\x2b\xb7\x54\xed\xa6\x27\xaa\xd9\xfa\x23\x5f\x6d\x2b\xb2\xce\xde\x91\x60\xdf\xea\xa7\x57\x3a\xc7\x1a\x53\x7e\x6f\x7d\x35\x82\x0b\xe2\x60\xb1\xde\x4e\x8e\x1a\x07\x5f\xb0\xcf\x5f\x4c\x7b\x7e\xde\x6a\x52\x04\x2b\x74\x4e\x3c\xb0\x69\xca\xf0\x07\x76\x9c\xea\x2c\xec\x52\xae\x31\x5e\x4f\xa7\x3c\x77\xde\x90\x24\x41\x88\x58\xc0\x8d\xd7\x87\x7f\x12\x8e\xb1\xa3\x96\x54\x69\xf5\x03\x37\xbf\x0f\x9e\x6e\x79\x3e\x11\x10\x23\x80\x18\x01\xc4\x08\x20\x46\x00\x31\x02\x88\x11\x40\x8c\x00\x62\x04\x10\x23\x80\x18\x01\xc4\x08\x20\x46\x00\x31\x02\x88\x11\x40\x8c\x00\x62\x04\x10\x23\xe8\x94\x18\xc1\x7f\xb8\x46\x7b\x4d\x31\x82\x8a\xaa\x49\x85\x9a\x5a\x46\xe6\x9f\x4d\x21\x02\xf6\x9e\x6b\xec\xe7\x06\xe9\x3e\xf3\xaf\x76\xbe\x48\xb2\x3e\xb9\x5b\x10\x87\x8b\x6c\x91\x47\x8d\x63\xcd\xbf\x98\x99\x22\xee\xdf\xbb\x9c\xd4\x65\x80\xae\x80\xae\x32\x40\x57\x40\x57\x40\x57\x40\x57\x3d\x83\xae\x32\x5d\x83\xae\x5a\xde\x93\xa6\xd1\x55\x06\xe8\x0a\xe8\x0a\xe8\x0a\xe8\x0a\xe8\xaa\xf3\xe8\x2a\xd3\xd3\xa4\x29\x03\xd2\xd4\x3e\xd2\x94\xe9\x76\xd2\x94\xd9\x82\xa4\x29\x5b\x08\xce\x59\xdc\x58\x6a\xbd\xc9\xa9\xac\xbc\x7a\x37\x3b\x3a\xaf\xe8\x76\x06\xe3\x52\x78\x08\xd3\x71\x36\x61\x65\xd0\x7b\xe0\x97\x19\xbf\xe4\x6e\xd9\x27\x7b\x3e\x24\x86\x2a\xfe\xe1\x1d\xb5\x9c\xec\x35\xe2\xb3\x14\x93\xbc\x48\xcc\xfc\x5a\x75\x00\x8a\x09\x86\x75\x8e\xe6\xe8\x4c\x55\x1e\xc1\x61\x3a\xd8\xd4\x98\x23\x89\x00\xc9\xd0\x4d\x26\x43\x3f\x1f\x11\x61\xaa\xe3\x3c\x4c\xd5\xc9\x6c\x3a\x4a\xcd\x4e\xc6\x60\x1b\x73\x56\xd8\x98\x49\x3a\xc5\x6d\xcc\x1d\x5c\xe2\x29\x91\xbf\x3d\x4b\xd3\x4e\xfe\x76\xf3\xcd\x85\x47\x64\x96\x55\xbd\x42\x0d\x9a\xb3\x1a\x2b\xd5\xb0\x7d\x4b\x7e\x6d\xa4\xd6\x5c\x3d\x6e\x66\xd8\xb9\xd2\x15\xd5\x15\xaf\xe9\x4a\x9a\xea\x91\x1e\xd3\x35\x65\x1f\xdf\x46\x23\xd6\x9e\x14\x6d\x90\x5d\x90\x5d\x90\x5d\x90\xdd\xde\x21\xbb\x58\xbf\x85\xac\xdf\xba\x07\x7d\x43\x1e\xa3\x23\xf2\x18\xf0\x30\xc0\xc3\x00\x0f\x03\x3c\x0c\xf0\x30\xf4\xb4\x87\x01\x2a\x49\x50\x49\x82\x4a\x52\xbb\x54\x92\xe0\xc0\x83\x03\xaf\x57\x1d\x
78\x1b\x94\x93\xbe\x73\x31\xaf\xa4\x3f\x5d\x7e\x80\xdd\x2f\xde\x66\xc7\xd2\x36\x4a\x9a\x9b\x77\x9c\xb5\x5e\x63\xe5\x4f\x13\xf4\x90\x31\xa9\xc6\xd6\xd3\xe2\x89\x8e\xf1\xc7\xa8\x71\x8f\x08\xfb\xf5\x04\xfb\xf8\x00\xdd\x93\x53\x35\x79\x69\x3d\x1d\x5d\x37\x57\x98\xa5\xbc\xb2\xae\xe4\xd7\xa4\xa2\xdb\x5d\x2a\xd9\xbb\xb9\xf3\x46\x0b\x97\x8c\x1f\x52\xae\x4f\xf6\x84\xfd\xb1\x4f\xf0\x66\x12\x0e\x4c\xf5\xd0\x31\xb5\x2c\xdb\x1b\x72\xbd\x22\x4b\xf9\x54\x72\x98\x9f\x30\xa5\x6a\xf2\x62\xda\x69\xfc\xbc\xa2\x57\xb6\x9a\x56\xc5\x4d\x9a\x17\x93\x77\x8e\xce\xf0\xc9\xcb\x15\x95\x9b\x98\xbc\xdc\x63\x3d\xb3\xde\x88\x46\xd1\xf9\xf0\x49\x39\xcc\x12\x3e\x6a\x74\xf1\xb8\x39\x29\x9d\x31\xa7\x1b\x7b\xfc\x5f\x87\x9d\x8c\xf8\x6d\x72\xff\x2d\x34\x2a\x10\xe8\x0f\x8d\x0a\xb8\x83\xe0\x0e\x82\x3b\xa8\x87\xdc\x41\xd0\xa8\x80\x46\x05\x30\x3c\x30\x3c\x30\x3c\x30\x7c\x57\x60\x78\x68\x54\x40\xa3\x62\xab\x80\x47\x68\x54\xb4\x43\xa3\xe2\xcb\x43\xb4\xdf\x8b\xf7\x02\x0a\xbf\x59\x8d\xbd\x71\x4d\xad\x48\x3a\xfb\x99\x21\xf6\x29\x17\xf8\xbb\xdd\x18\xf8\xb3\x76\x78\x4f\x1b\x8d\xb4\x88\xfd\x0d\xba\xd8\x9f\x53\xa2\xc0\x73\xa5\xf3\x8a\xde\xba\x4a\x05\x77\x11\xf8\xbb\x18\x0e\xfe\x46\x58\x52\x20\x3e\xcf\x80\xfa\x15\x9b\x8a\xc7\xc1\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xc0\xfe\xba\x92\xfd\x6d\xbd\x82\x4f\xa0\x8b\xa0\x8b\xa0\x8b\x5b\x88\x2e\xfe\x55\x92\x66\x85\x02\x6e\x5e\xd1\x73\xaa\x61\x56\xdd\x75\xab\xfc\x49\xa3\x5c\xca\x97\x55\xa5\x54\xd1\x8b\x8a\xfd\xd3\x5b\xd8\xff\x91\x64\x3f\xdc\x4f\x3b\xed\x76\x96\xd6\xd3\xd1\x98\x26\x4b\xf9\xaa\x4d\xef\x8c\x79\xf6\x82\x71\x76\x72\xd0\x38\x62\xda\x3a\xc7\x0d\x08\x3d\xc7\xb5\x36\x7b\x3e\x2b\xd1\xac\x60\x79\xa7\xe8\x04\x67\x79\x87\xe9\x20\xed\x0f\x94\x30\x70\xc6\x66\x3d\x9d\xf2\xf4\x2b\x94\xe0\x05\x97\x92\x2f\xc8\x15\xba\x71\x2d\x1c\xf0\x4d\xb0\x23\x26\xc0\xab\x7e\x44\x66\x6c\x9f\xb7\x43\x6e\xf0\x17\xfd\xda\xf6\xaa\x07\xb2\x47\x93\xcb\x45\x29\x57\x5d\x50\xd6\xfb\x4c\x86\xcd\x83\x3a\xfd\x58\x32\x73\x74\x86\x66\xaa\x94\x59\x9a\x7b\x2e\xc8\xeb\x85\x2e\x4b\x93\xba\x2c\x1f\xe9\x6b\x99\x71\x38\x27\xc4\x52\xa6\x29\xe3\x88\xa5\x6c\x82\xa5\x29\xaf\xb5\xdd\xd2\x24\x7f\x7a\x57\x95\xa5\x79\xad\x99\x79\x23\x95\xaa\xac\xcb\x90\xf8\xa1\xe3\xc6\xa5\x3d\x8a\x29\xb0\x33\x21\x76\x06\x59\xfb\x1d\xc9\xda\x47\xba\x26\xd2\x35\x91\xae\xd9\xae\x74\xcd\xec\xa7\xfb\x5a\x9c\xb3\x76\x49\x48\xbf\x9d\xa3\x39\x97\xf4\x5b\x9b\xf3\xe0\x62\xfe\x2b\x84\x1d\xec\x1e\x31\x4c\xed\x5e\x24\x64\xde\xbd\xb3\x6a\x91\x30\x52\x96\xb4\x8a\xc2\x9d\x98\x62\x5f\x5b\x77\x5f\x92\xe0\x15\xd4\x3b\xb9\x70\x78\xb8\xba\x21\x51\xd6\x7f\x2f\xaf\x4e\xbd\xdb\xfd\xe3\xaa\xac\x15\x64\xf7\xaf\x83\xee\x5f\x75\xab\x8c\xff\x68\xcd\x71\x9e\x56\x78\x5d\x7c\xf3\x57\xa3\x97\x99\x0b\x74\x9e\xb2\x55\x3b\xa2\x3a\xa5\x91\xeb\xcc\x91\x79\xee\xea\xc7\x72\x25\x64\xb9\xf2\xd1\x7e\xfa\x70\x3f\xfb\x50\x7f\xf4\xe7\x6d\x9b\xff\xf6\xfe\xde\xd9\x16\x55\x79\x47\x8c\x71\xe6\xde\x7e\xee\xc1\x33\x26\xa7\x13\xf3\x33\x14\x30\x71\x87\xf9\xa7\x4b\x04\x87\x48\x45\x7e\xa6\xb1\x46\x13\x67\xf3\x23\xf8\x7a\x49\x8f\x0d\x65\x75\xb5\x34\x2f\x82\x5f\x9e\x32\x5e\x0b\xf3\xdf\x0b\xd6\xcb\xe2\xfc\x71\x38\x78\xbf\x96\x79\xa9\x8f\x5e\xec\x63\x2f\xf4\x45\x3f\x67\x63\xb7\x0f\xf6\xcd\xaa\x5a\x8e\x2f\xe5\x0a\x2a\x1f\x76\x35\x16\x5f\x31\xfe\x14\x8f\x4d\x7a\xee\x82\x03\x55\xb1\xea\x5a\xd3\x1d\x37\xff\xa8\x94\xe3\x37\xce\x83\x27\x8a\x4a\xce\xfc\xaa\xca\xc5\xbc\x1e\x53\x6f\x99\x63\x2a\x82\x0b\xca\xb2\x5a\x2e\xca\xa9\x98\xb8\x22\x8f\x56\xb2\x9e\x25\x07\x9b\xbe\x03\x60\x5f\x3e\xbe\x8d\x77\xcb\x13\x79\xd3\x39\x38\xd5\x5e\x73\x7f\xe3\x31\xff\xef\xcd\x76\xf6\x0a\x3e\x0a\x94\
x9d\xa0\x23\xec\x50\xfc\x80\xed\xe0\x78\x88\xbf\x56\xe6\x3b\xee\x69\x38\x3e\x60\xfc\xe4\x76\x5d\x6c\x3d\xe7\x08\xfb\x76\x82\x86\x04\x6c\x15\x91\x72\xd5\xd5\xc6\x44\x78\xa7\xf8\x8d\x7d\x3e\xc1\x7e\x73\x80\x5e\x25\xfe\xcf\xd6\x24\x7d\x63\x63\x61\x9c\x3c\xba\xb0\x45\xe1\x9b\x7b\x6f\xd9\xf1\x8a\xba\xa9\x70\xca\xff\x07\xc9\xdb\x8d\xc5\x70\x36\x12\x72\x79\xe3\x4a\xf8\x9b\x78\x80\x8d\x9b\x6f\xa2\x67\xf6\x58\xaf\x21\xef\x4b\xad\xdc\x00\x62\x39\x11\xcb\x89\x58\x4e\xc4\x72\x22\x96\x13\xb1\x9c\x88\xe5\x44\x2c\x27\x62\x39\x11\xcb\x89\x58\x4e\xc4\x72\x22\x96\xb3\xb3\xb1\x9c\x88\xb4\x44\xa4\x25\x22\x2d\x7b\x38\xd2\xf2\x43\x0f\xd1\x41\x2b\x8f\x3b\x20\xae\xd2\x78\xe1\x5c\x11\x95\x63\x3a\x77\xeb\xb1\xff\xfe\x20\xfb\x7a\xc4\xc9\xe4\x7e\x82\xc7\x54\x8a\xdf\xac\xc1\x75\xbe\xb4\x0b\xa2\x8d\xe4\xe3\xc6\x51\xd5\x39\xd7\xe6\x8f\xc2\x5d\xd8\xe2\x90\xca\x45\x3a\x26\xd0\xda\x01\x1a\xe7\x68\x6d\x84\x92\x34\x14\x48\xad\x8d\xbb\xe1\xae\x4b\xd1\xa5\x3b\x8d\xa3\x9c\x0b\xe7\x67\x83\xec\x89\x6a\x55\x44\xeb\xe2\x9e\x98\xc9\x0f\x6f\x77\xc6\x3a\x61\x85\x4b\x86\x0d\xf7\x13\xe6\x81\x1d\x1c\xf1\xcc\x49\x3a\x4e\x13\x55\xbe\xc1\x0d\x0c\x39\x7c\x81\x08\x91\x6c\x32\x44\xf2\xc7\xfb\xee\xec\x65\x3f\x2d\xe2\x22\x8f\xd2\x61\x27\x2e\xb2\x53\xe6\x82\x07\x43\xb6\xce\x5c\x64\xfe\xf2\x5e\xc7\x5c\xec\xab\x09\x67\x08\xb3\x1b\xf1\xb2\x9f\x36\x46\x9b\xac\x06\xa2\x19\x7a\xcd\x82\x21\x9a\x01\xd1\x0c\x9b\x1c\xcd\xd0\xe6\x75\x61\xeb\x2c\x79\x78\xb8\xc2\x01\x1a\x67\xfb\xe2\x29\x3b\xe4\xe0\x01\x77\xb8\x82\xd9\xe4\x5d\x11\xa8\xf0\x5f\x46\xe9\xbc\x08\x54\x90\xf2\xab\x0a\xf7\x75\x6a\x72\x41\xe1\x76\xdf\xd8\xdf\x38\x19\x62\xdc\x46\x4a\xc6\x84\xba\x25\x2f\x5f\x57\xd5\x9b\x1e\x0f\x95\x9d\x1b\xf6\x9e\x51\xf6\xff\xf5\xd3\x83\xbe\xad\x19\x1f\xce\xb4\x4f\x9e\xd8\xa2\xdd\xf4\x15\xd1\xf4\x94\xbb\xe9\xe4\x51\xe3\x94\x49\xbf\x06\x17\xd3\xf5\x4f\x6d\xf1\xc6\xe7\x9f\x51\x4e\x4c\xf0\xab\xf4\x1c\x9f\xe0\x3c\xd1\x20\x70\x82\xfb\x8f\xe8\x7a\x3a\x55\xbf\xd3\x77\xba\x41\x7a\x73\xf8\x7b\x72\x85\x3d\x63\xbe\x27\xf5\x9e\xba\xf9\x06\x85\xf4\xd6\xb3\xa3\xfa\x21\xaa\xf3\xe8\x0f\xf8\x67\xa4\x85\x3c\xfd\xe3\xe6\x59\x5d\x30\x01\x32\x2b\x94\xa7\xe5\xaa\x55\x4d\x1b\x66\x00\x56\x3b\xd8\xaf\x35\xb9\x5f\xfb\xcb\xbe\xe0\xd2\xe0\x1d\x31\x5e\xd7\xc5\x86\x4f\xa2\x25\x67\xc3\xd7\x96\x2b\x35\xb8\xff\xdb\x44\x6b\x98\xfc\xdb\x5d\x75\xac\x61\xc2\xca\x9a\x0b\x33\x80\xc7\xc4\x81\xdd\x60\xff\x90\x58\x87\xc4\x3a\x24\xd6\x21\xb1\x0e\x89\x75\x48\xac\x6b\x26\xb1\xae\x93\xa9\x70\xad\xcd\xe2\x6b\x3c\xb1\xae\xb1\x05\xc7\x86\x17\x13\x8d\xac\x50\x32\xdf\xdc\x59\x67\xc1\x71\x22\x24\x03\x2f\x64\x19\x32\xc1\xd1\xc8\x26\xaf\x42\xc0\xb5\x7b\x6d\xed\x03\xae\x0d\xae\xbd\xc9\x5c\xbb\x4b\xb0\xdf\x26\xee\x64\xc3\x81\xf9\x39\x9a\x63\x67\xe2\x33\x36\xfa\x4e\xba\x81\x79\xfd\x2b\xd5\x72\xf4\x96\x53\xee\x8f\xd8\xe9\x78\x39\x55\xd5\xf2\x4a\xa9\x0a\x6e\x8b\xa4\x3c\xbe\xba\xd7\xd9\xdb\x12\xec\xef\xfb\xe9\xd5\xee\x23\xed\xa4\xbc\x3d\x22\x6a\x5b\x33\xe3\x4d\xad\x35\xbb\xba\x12\x33\xc6\x34\x76\xde\x68\x21\x39\x62\x1c\x34\xe5\x3a\xdd\xcc\xa5\xe3\xbf\x6e\xb5\x3c\x3a\x99\xb2\x62\xf2\x4f\xd1\x24\x9f\xfc\xc7\xe8\x28\x1d\xae\xe3\xd4\x71\x8d\xaf\x39\x6a\x29\x7e\xe3\xe7\x15\x3d\x3c\x83\xae\xb1\x34\x55\xb3\xd0\xa9\xd1\x6a\x6d\x1e\x9c\xf9\x02\xf8\x3c\x68\xba\xf1\xa8\xff\x34\xbe\x87\x6d\x43\x39\x04\xa4\xd0\x21\x85\x0e\x29\x74\x48\xa1\x43\x0a\x1d\x52\xe8\x90\x42\x87\x14\x3a\xa4\xd0\x21\x85\x0e\x29\x74\x48\xa1\x43\x0a\x1d\x52\xe8\x90\x42\x87\x14\xba\x8e\xa6\xd0\x7d\x67\xd8\x0a\x4b\xf5\x2b\x56\x50\xb7\x38\xaa\x7f\xc9\x82\x5f\x1c\x66\x6f\xdb\x56\x25\x49\xf9\xc9\x3e\xf3\x91\xb8\x74\xb5\x4a\x96\x91\xb3\x90\x9e\x47\x9b\xac\x25\x3a\x5b\x23\xce\xb2
\x86\x2f\xdd\xcc\x8f\xa0\xb1\x32\x72\xd6\x6b\x09\xcf\x12\xd9\xd5\x76\x2a\x29\x2e\xd7\x41\x6d\xcc\xbb\x48\xa1\x2b\x98\x63\xf3\x8e\xb5\x5b\x16\x15\x80\x11\x80\x11\x80\x11\x80\x11\x80\x11\x80\x11\x80\x11\x80\x11\x80\x11\x80\x11\x80\x11\x80\x11\x80\xb1\xc3\x80\xb1\xb7\xe4\xc8\x81\x3f\x81\x3f\x81\x3f\xb7\x10\xfe\xfc\x1f\xd7\xe8\x98\xc0\x9f\x25\xb9\x72\x4b\xd5\x6e\x1a\x6b\xc9\xd0\x62\xad\x4a\xa9\xa0\xc9\xba\x2e\xeb\xec\x63\xd7\xd8\x7b\x07\xe9\x95\xce\xc9\x4b\xeb\xe9\xe8\x93\xf5\x63\x17\xe7\xc4\xd9\xc9\xc7\x8d\xc3\x2e\xd8\x67\xba\xf1\xa2\x79\x48\x97\x83\xc5\x0c\x20\x1b\x20\x5b\x06\x90\x0d\x90\x0d\x90\x0d\x90\xad\x67\x20\x5b\xa6\x6b\x20\x5b\xcb\x7b\xd2\x34\x64\xcb\x00\xb2\x01\xb2\x01\xb2\x01\xb2\x01\xb2\x75\x1e\xb2\x65\x7a\x9a\x3a\x65\x40\x9d\xda\x47\x9d\x32\xdd\x4e\x9d\x32\x5b\x90\x3a\x65\xdf\x40\x33\x22\x20\xeb\x24\x1d\xe7\x01\x59\x87\xe8\x00\x8d\x07\xa6\x7a\xba\xd0\xd4\x7a\x3a\x65\xb2\xa1\x86\xb2\x3c\x9f\x0d\x0f\xb2\x3a\xc8\xf6\x8b\x60\x2a\xb3\x5d\x3f\x2d\x8c\x1a\x36\x16\x9a\xe0\x19\xff\xfe\x1d\xd5\x54\xec\x7e\xf1\x19\x8a\x49\x25\x9b\x80\xed\x11\x7f\xea\x0c\x03\x13\xc8\x6a\x9a\x32\x74\xba\x4a\x56\x62\x1f\xa5\x36\x36\xf6\x10\x93\x80\x6c\x60\x93\xb2\x81\x7f\xd3\x47\x93\xe2\xdd\x9f\xa0\x23\xfc\xdd\x1f\xa7\x0d\xcf\x3f\x51\xcc\x38\xcd\x8b\x19\xdb\xda\x7f\xcd\xb4\x73\x46\xc8\x04\x9d\xa6\x93\x2e\x99\xa0\x66\x1a\x0a\x33\x44\xc1\xe6\xa2\xac\xea\x15\x6a\xd0\x50\x05\x9a\xa3\x60\x0b\x96\xfc\xd3\x91\x6a\x43\xb4\xdb\xd4\x6d\x72\x89\x60\xa9\x2b\xb6\x4d\x1a\x15\xbf\xba\x6d\xd2\x94\x7d\x60\xbb\xad\x53\x7b\x64\xff\x40\x68\x41\x68\x41\x68\x41\x68\x7b\x87\xd0\x62\x7d\x16\xb2\x3e\xeb\x1e\x84\x0d\xc9\xd5\x8e\x48\xae\xc2\x53\x00\x4f\x01\x3c\x05\xf0\x14\xc0\x53\xd0\xd3\x9e\x02\x28\x6f\x43\x79\x1b\xca\xdb\xed\x52\xde\x86\x23\x0e\x8e\xb8\x5e\x75\xc4\x65\x0b\xad\xd5\x81\x0f\x45\xca\x49\x7f\xa4\xfc\x00\xbb\x5f\xbc\xcd\x8e\xa5\x6d\x23\x5e\xde\x82\x39\x2a\xec\xdb\xc3\x94\xb1\xaa\x66\xd7\x15\xf8\xd0\x64\x1b\x5b\x1b\xab\x36\xcd\x18\x50\xcd\x16\xfa\xf8\x04\x17\xfa\xb0\xeb\xb4\x7e\xa6\x11\x8d\x8f\x4b\x4e\x8b\x53\x76\x8b\x5d\xa1\xf5\x91\xbc\xe5\x57\x34\xd6\xb7\xbf\x5d\x1e\x95\x9f\xbd\x19\x5c\xa9\xa1\xe5\x3a\x20\x0d\xcb\x7d\x5c\x09\x7f\x03\x0f\xb0\x71\x1f\xdf\xb3\x5d\x5f\xd2\xf7\x59\x40\xe8\x03\x39\x08\x10\xfa\x80\x87\x0b\x1e\x2e\x78\xb8\x7a\xc9\xc3\x05\xa1\x0f\x08\x7d\xc0\xb3\x00\xcf\x02\x3c\x0b\xf0\x2c\x74\x85\x67\x21\x9b\xa1\xd3\xec\x64\xfc\xb8\x0d\x42\x1e\x77\x0b\x7d\xf8\xee\xdd\x20\xf8\xb1\xd5\x89\x2f\x04\x3f\x20\xf8\xb1\xb5\x52\x2f\xd8\xaf\x24\x68\x54\x08\x7e\x68\xcb\x52\x2e\x65\xb1\x31\xdf\x32\x65\x9a\x5a\x94\x75\xf6\x8e\x04\xfb\x56\x3f\x3d\x64\x1c\x3e\xe9\x3e\xda\x2e\x55\x16\xaf\x2f\xf7\x71\x49\x2d\xca\xc9\x7d\xc6\x31\x97\xaa\x9b\x30\xcb\x95\x19\x47\x6c\xb5\x6a\x65\x57\x37\x16\xc6\xce\x07\xdb\xaa\x52\x66\xdc\x6f\x43\xe9\x2b\x21\x49\x26\x37\x96\xc3\xa1\xe2\x29\x76\xc2\x04\x89\x81\xcf\xdb\xe2\x8b\x6a\xd1\xa7\xc8\x19\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\xf8\x22\x2a\x95\x81\xdc\x81\xdc\x81\xdc\x75\x8a\xdc\xfd\x9b\x37\xd0\x9c\x20\x77\xd2\x5a\x45\xd5\x73\x52\x51\x29\x15\xc6\xd6\xc7\x05\xab\x0b\x08\x61\xe4\xb0\x47\x2d\x55\xa4\x62\x59\xcd\x5b\xe7\xc9\x9a\xce\x7e\x7f\x89\xbd\x3c\x48\x0f\xb8\x9a\x5a\x32\x9b\x8a\x1e\xac\xcf\xf3\xce\xda\x4d\xce\xab\xf9\x49\xbb\xc9\xe4\x61\xe3\xb4\x49\xa7\xbd\x45\xd1\x9c\x13\x4c\x18\x70\x62\x97\x93\x3e\x88\xfc\x02\x80\x41\xe4\x17\x00\x0c\x00\x0c\x00\xac\x87\x00\x58\x17\x29\x24\x74\x0d\x00\x43\xea\x3e\x00\x18\x00\x18\x00\x18\x00\x18\x44\x7e\x91\x5b\x7c\xf7\xf0\xaa\xae\xcf\x2d\xde\x92\x22\xbf\xeb\xf4\xbd\x22\x42\xea\x32\x5d\xe2\x11\x52\xe7\x29\x4
b\x67\x03\x23\xa4\x5c\x24\x2a\x65\x92\xa8\x54\x00\x31\x6a\x49\xec\x54\x21\x3c\x76\x6a\x9a\x65\x9c\x78\x28\xd1\x25\x2b\x2b\xd3\xd5\x5b\x33\x7c\x2a\xa0\xaf\x14\xff\x71\xf2\xc7\x6c\x31\x4b\x0f\x38\x90\xa8\x1d\x15\x47\x6c\x26\x53\x13\x08\xec\x39\x7a\x96\x16\xab\x44\x83\x67\x69\xba\x15\xcf\x12\x52\x75\x90\x12\x6e\x52\x4a\xf8\xa3\x11\xba\x22\x2c\xcc\x3c\x5d\xe0\x16\xe6\x2c\xb5\x68\x56\xd2\x55\x21\x30\xfc\x0c\x2d\x38\x02\xc3\xad\x6b\xfd\x9a\x90\x1d\x5e\xa4\xcb\x2e\xd9\xe1\xd6\x35\x7f\xa7\x62\xc4\x8d\x99\xc6\x0d\x5b\xc2\x1a\x5b\x9a\xfc\xb1\x51\x7f\xd3\x38\xe2\xab\x50\x1c\x64\x26\xa7\xc4\xd1\xb5\x66\xd2\x4f\xb7\xb8\x33\x06\x13\x3a\xc6\x80\xd0\x80\xd0\x80\xd0\x80\xd0\xd0\x31\x86\x8e\x31\x74\x8c\xa1\x63\x0c\x67\x08\x9c\x21\x70\x86\xc0\x19\x02\x67\x48\x4b\x9c\x21\xd0\x31\x86\x8e\x31\x74\x8c\xa1\x63\x0c\x5f\x23\x7c\x8d\x3d\xa4\x63\xdc\x29\xa7\xdd\x16\x54\xe2\x61\x7f\x17\xa1\x87\x45\x7e\x40\x8d\x80\xf3\xd8\x7a\x7a\x8c\x7d\x31\xc2\xfe\x20\x52\x5d\x0b\xf0\xb5\x05\xb9\xe2\x5d\x77\x0b\x7f\x71\xf2\xe1\x82\x5c\x71\xd7\x00\x9c\x9c\x9f\xb3\x70\x63\x0b\x8b\xfe\xd5\x34\x34\x51\xd3\xd0\x44\x23\x0d\x65\xcb\xf4\x8c\x98\xb4\x17\xe8\x3c\x9f\xb4\xb3\x34\x4d\x99\x26\x26\xad\xeb\x3e\x1b\x71\x33\xb3\x9f\x5e\xa2\x73\x8d\xea\xa9\x04\xc9\x4c\xab\x45\x79\x59\x29\x19\x1f\x33\x9d\xfd\xe1\xeb\xd9\x6f\x0f\xd6\x53\x5b\x19\x0e\x57\x5b\xc9\x88\xc6\x92\xe9\x7a\xa2\x2b\x2e\x91\x67\xe7\x14\xe4\x62\x20\x17\x03\xb9\x18\x70\x83\xc1\x0d\x06\x37\x18\xdc\x60\x5d\xe3\x06\xeb\x1e\x2f\x0f\xdc\x0f\x70\x3f\xc0\xfd\x00\xf7\x03\xdc\x0f\x3d\xed\x7e\x00\x1f\x05\x1f\xed\x51\x3e\xba\x25\x73\x31\x72\x74\x56\xe0\xb1\x49\x3a\xc5\xf1\xd8\x51\x3a\x4c\x07\x1b\x57\xab\x35\xf1\x50\x4b\x12\x2f\x94\x70\x86\x3b\xcb\xa6\x37\x20\x5a\x6b\x76\xae\x56\xbb\x36\xfe\x1f\x76\xd4\x23\x69\xaf\xb6\x13\x30\xdc\xd0\x6c\xbf\xf8\xeb\xe6\x60\x33\x41\xb9\xce\xd0\x0c\x4d\x55\xa5\x5b\xec\xa7\xf4\x86\x1f\x17\xc2\xe7\x90\x5b\xd1\x64\x6e\xc5\x3f\xf4\xd1\xb4\xb0\x18\x27\xe8\x18\xb7\x18\x07\xa9\x99\x29\x48\x73\x22\x91\x22\x43\xa7\x9d\x44\x8a\x26\x9b\xca\x8a\xac\x89\x29\x9a\x74\x65\x4d\x34\xd9\xd6\x9d\xa6\x48\x34\x66\xc4\x6a\x2c\x52\x13\x56\x2d\xf9\xee\xd1\x7a\x46\x2c\xe6\x9b\x2a\xe1\x36\x68\x27\xc4\x11\x41\x06\xcd\x2f\x49\xa2\x7d\xa6\x0d\x89\x11\x20\xc2\x20\xc2\x20\xc2\x20\xc2\x48\x8c\x40\x62\x04\x12\x23\x90\x18\x01\xcf\x04\x3c\x13\xf0\x4c\xc0\x33\x01\xcf\x04\x12\x23\x90\x18\x81\xc4\x08\x24\x46\xc0\xf1\x07\xc7\x5f\x17\x3a\xfe\xba\x3a\x31\xa2\x31\x1e\xdd\x80\xcf\x2c\x94\x50\x6f\xc5\xd4\x88\x7f\xd9\x4f\x4f\x9a\xa5\x13\xca\x8a\x26\x17\x14\xc3\x2e\xf9\x45\xe8\xb3\xaf\x45\xd8\xcb\x11\x7a\x6d\xd5\x61\x36\x6d\x0f\x4c\x97\x78\xb2\x20\x57\x26\xbd\x27\x99\x7c\x1d\x89\x13\xfe\x89\x13\x5f\x1c\xa2\x83\xd6\x33\x29\xeb\x63\xeb\x69\xf1\x5e\x07\x25\x49\xe4\x25\x79\xd5\xd8\x22\x57\x74\xf6\xaf\x87\xd8\x2f\x0e\xd0\x3d\xc6\x69\x4b\xeb\xe9\xe8\x9a\xb9\xfb\x28\xe5\x95\x75\x25\xbf\x26\x15\xdd\x2e\x78\xc9\xde\xe9\x4f\xf3\x06\x16\xe4\x4a\xca\xb5\x9a\x9b\xb0\xd7\x81\x09\xde\x4a\xc2\xe1\xec\x1e\x70\xaa\x96\x65\x9b\xd5\xe8\x15\x59\xca\xa7\x92\x71\x7e\xc2\x64\xb9\xac\x2f\xba\x9c\xc2\xf6\x55\x8c\x31\xe8\xf2\x6c\x8a\xec\x4d\x9a\x17\xcf\x7e\x8e\xce\xf0\x67\x3f\x49\xa7\xe8\xc4\xff\xcf\xde\xbf\x87\xb9\x71\x9e\xf7\xdd\xf8\xb5\x07\x49\xe4\x2d\x59\x87\x47\x92\x65\x41\x94\x04\x81\xd2\x1e\xc0\x5d\xec\x62\x79\x58\x72\x79\x5c\xec\x81\x5c\x70\x49\xae\xb8\x24\x65\xc9\x12\xe9\x59\x60\x16\x04\x89\xc5\xc0\x18\xec\x52\x6c\x7f\xba\x7e\xb6\x15\xbf\xb1\x9b\xb4\x75\xd3\x24\x75\x53\xd7\x71\x9c\xd6\xb1\x53\xbd\xb6\x63\xbb\x71\xae\xb8\x75\xdd\x37\x3e\xc8\x51\xea\x38\x7d\x9b\xb8\x3e\xc4\x6f\xea\x26\xb1\x9d\xbc\x49\x9a\x34\xa9\x5d\x27\x6d\xde\x
6b\x9e\xe7\x99\x13\x30\x83\x01\xb0\x00\x16\x4b\x7c\xff\xe1\xb5\xc4\xcc\x3c\x33\xf3\xcc\x33\xf7\x3c\xcf\xe7\xfe\xde\xf7\xdd\xc0\xb3\xe7\xea\x87\x39\x63\x45\x1e\x68\xd4\xce\x04\x1b\xaa\x28\x1b\x32\x03\xb6\x0a\x05\x5d\xba\xc4\xac\x8e\x75\x9a\x2c\xba\xb6\xdb\xdb\x46\xde\xc5\x88\xdf\x27\x57\x1c\xa0\x84\x2d\xa2\x46\x50\xc2\x16\x3e\x42\xf8\x08\xe1\x23\xec\x22\x1f\x21\x4a\xd8\xa2\x84\x2d\x7c\x33\xf0\xcd\xc0\x37\x03\xdf\x4c\x47\xf8\x66\xb6\x21\x39\x42\x91\x5c\xf0\x6e\x14\xc9\xdd\x3e\x81\x2e\xec\xab\x43\x74\xa0\x1e\xaa\xa8\x16\x72\xda\x4d\xe3\x13\xaf\xb3\xf7\x0e\xb1\x8f\x38\xb0\xe2\x46\x8d\x58\xd1\x6a\xa1\x49\x5c\x71\xb7\x37\x57\xb4\x4e\x03\xb0\xe8\x0d\x16\xcf\x06\x83\xc5\x3d\x6c\xd8\x03\x2c\x5a\x3d\x0b\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\x08\xb2\xd8\xcd\x64\xf1\x7d\xbb\xe8\xbc\x24\x8b\x76\x1a\xf1\xb1\x0d\xdf\xac\xce\x57\xad\xd4\xe2\x05\x2d\xad\x58\xa9\xc5\xe5\x4e\x2f\x8d\xe9\x5c\x04\xcc\xbe\xf8\x08\xfb\x62\x1f\xdd\xed\xaa\x82\x19\x0f\xed\x2b\xaa\x4a\x3a\x2c\x76\x31\x1f\x80\xfd\xbd\xf7\x2b\x84\x79\xd0\x38\xca\x59\x06\x33\xb8\x50\xb0\x90\x22\x37\x37\xc9\x43\x72\x8d\xce\x0b\xfc\x77\x9a\x16\x38\xfe\x9b\xa1\x69\x3a\x5e\x5b\x69\xd3\xc6\xab\x9a\xae\x04\x03\xc0\xe3\xec\x68\xad\x25\x4a\x3d\x93\xc6\x5f\x7b\xd4\x1b\x0a\xde\xce\xfa\x33\x6a\x89\x42\xbf\xb2\xb3\xe2\x49\x4e\x16\xd5\x42\x4e\x49\xa9\x75\x3f\xcc\xc3\xf2\xc0\xad\x7f\x9e\x89\x8b\xb4\x4c\x4f\x97\xe5\x23\xda\xfc\x03\x45\x0c\x3b\xb2\x13\x35\x98\x9d\xe8\x0b\x3d\x2d\xb1\x30\xcf\x88\x5c\x45\x4b\x74\xd6\xce\x55\xd4\x16\xd3\xe5\x6f\x56\x0a\xeb\x25\xaa\xd1\xb2\x05\xd8\xae\xea\x86\x2f\xf1\x85\xbb\x2a\x4c\xd7\x89\x82\x52\x34\x96\xff\xc6\x92\x99\x7f\x4a\xeb\xb6\x61\x87\x0a\xdc\x97\xb5\xa5\x16\xec\x91\xf2\x86\x46\xf9\x45\xed\xe1\x41\x13\xbb\x9c\x1b\xd7\xd4\x62\x46\x75\x6e\x1d\x70\x6e\xe5\x61\x1d\x6a\x26\x9b\x1a\xad\xd8\xcf\xd5\x8a\xf1\xf7\x4d\xb9\xd5\xb8\xca\xc4\x59\x5a\xa4\x64\x99\xf5\x9c\xa2\x83\x0d\x78\xc3\x96\xb8\x0f\x02\x66\x33\xc0\x6c\x7e\xb4\x8f\x5e\xe9\x63\x1f\xee\x0b\x7d\xd0\x72\xca\xbd\xdc\xd7\x3d\x66\xb3\x0c\xdb\x18\xfd\xcc\xdd\x10\x1c\x2d\x1a\x83\xd3\x76\x46\x0e\xf9\x0c\xdc\x61\xce\xe3\x85\xd7\x4a\xc9\xf1\x23\xf3\x5a\x7e\x54\x1c\xcd\xf7\xe0\x5e\x4b\x3d\x3c\x94\xd4\xb5\xfc\x92\xf0\xca\x9d\x31\x5e\x0b\xf9\xf7\xb2\xf9\xb2\xd8\x3f\x0e\xfb\xdb\xf3\xc4\x97\x7b\xe8\xb5\x1e\xf6\x6a\x4f\xe8\xf3\xd6\x7a\xe0\x43\x3d\xf3\x9a\xb1\xbc\xcd\xea\xe1\x8c\xc6\xbb\x5d\x0b\x47\x56\x8d\x9f\x22\xe1\x69\xd7\x5d\xf0\x95\x9e\xc8\x1b\xb2\xae\xdb\xfe\x87\x51\x25\xc5\x6f\x9c\x7b\x75\x72\xd9\x94\x0c\x08\x56\x73\x69\x3d\xac\xdd\x90\x7d\x2a\xbc\x1e\x05\x55\x2b\xe4\xd4\x58\x58\x9c\x91\xbb\x51\xcd\x67\xc9\x57\x5c\x9e\x1d\x60\x9d\x3e\x72\x1b\xbf\x2c\x97\x4b\x70\x6b\x66\xc0\x8f\x7b\x7f\x46\x76\xb0\xdb\xf9\x25\xd7\xfa\x21\xd9\x44\x61\xff\x38\x25\x67\x29\xc1\x4e\x44\x8e\x59\x28\x66\x37\x7f\xcf\xe4\x4b\xef\xd3\x42\xa4\xdf\xd8\xc9\x89\x5b\xb6\x1f\xd0\x61\x9f\x7d\x81\xc6\x82\x2b\x01\x8d\xa5\x72\xeb\x7a\xc9\x30\x96\x39\x55\x67\x3f\xfe\x02\xfb\xf0\x00\x3d\xe0\x95\xde\x2f\xa8\xd0\xcf\x8c\x68\xe7\xbc\x96\x53\xa3\x11\x9f\x42\x3f\x8e\x7d\x3a\x5c\x32\x82\xca\x3e\x50\x52\xa0\xb2\x0f\x94\x14\x50\x52\x40\x49\xd1\x45\x4a\x8a\x0e\x4a\x53\xd8\x31\x4a\x0a\xe4\xcf\x83\x92\x02\x4a\x0a\x28\x29\xa0\xa4\x40\x65\x1f\x24\xf8\xba\x75\x64\x09\x1d\x9f\xe0\x6b\x5b\x56\xf6\xb9\x4c\x33\x82\x34\x1e\xa1\x29\x4e\x1a\xf7\xd1\x04\x8d\x07\x15\xc4\x88\x39\xc8\x50\x53\x8a\xfa\xa4\x83\xc9\xe2\x34\x3b\x5e\x63\xf9\x0b\xc7\xc5\xb9\xd8\x62\xe4\xa7\x76\xfa\xb0\x32\xbb\x94\x8f\x13\x8b\x3d\xe9\x5b\xca\xa7\x15\x60\x4c\x70\xac\x04\x9d\xa0\x63\x65\xde\
x9e\x18\x8d\xd4\xf3\x40\xe0\xe1\x81\x63\xbc\x41\xc7\xf8\x77\x7a\x68\x4e\x54\xc9\x39\x46\x47\x1c\x55\x72\xc6\xa9\xce\x31\xe8\x6b\x10\xe8\xb8\x30\x37\x07\xe9\x00\x37\x37\xf5\xb7\x3c\x2b\xbc\xec\x47\xe9\xb0\xed\x65\xaf\xbb\x95\x5a\xeb\xf4\xb4\xc5\x2e\x45\xff\x72\xc4\xc7\x2e\x79\x57\xe7\x71\xda\xa8\x98\x6f\x75\x1e\xbb\x30\x4f\xeb\xac\x15\xca\xf1\x00\xe3\x02\xe3\x02\xe3\x02\xe3\xa2\x1c\x0f\xca\xf1\xa0\x1c\x0f\xca\xf1\xc0\x9d\x00\x77\x02\xdc\x09\x70\x27\xc0\x9d\xd0\x14\x77\x02\xca\xf1\xa0\x1c\x0f\xca\xf1\xa0\x1c\x0f\xbc\x75\xf0\xd6\x6d\xf3\x72\x3c\xb5\xa1\x64\x8f\x28\xac\x7a\xb0\x72\x3d\x45\x7f\x9a\x2f\x61\xff\xc4\x30\x4d\xba\xb3\x27\xfa\x04\x37\x17\x55\x0e\x98\x0d\xcb\x22\xe3\x99\xd9\xf7\x87\xd8\x77\x7a\xed\xf4\x89\x8f\xf2\x08\x66\xf7\xb2\xf4\xbc\x38\x6a\x59\x2d\x45\x1f\xe3\xa1\xca\x65\x49\x0e\xed\xed\x4d\x0e\x48\x7e\xd6\xdf\x9d\x71\x4c\x8c\xa1\x49\xda\xcf\xc7\xd0\x18\x8d\xd2\x1e\xff\x38\x8d\x42\x41\x37\x46\x8c\x7d\xa1\xc1\x11\x7c\x19\xb5\x44\x0d\x27\x27\xb4\x4f\xe4\x72\x38\x84\x3e\xb6\xc3\xee\xe9\xb0\x19\x61\xec\xdb\xd9\x4f\x98\xa1\xc4\x6d\xea\xef\x04\x4f\xf7\x58\xe6\x04\xad\xaf\x5f\x41\xd4\xe0\x03\x6d\xd0\x07\xfa\xbe\x9e\xcd\xbe\xd4\x42\x52\x11\xe7\x92\x0a\xcb\x45\x59\x77\x23\x9b\x0c\xfa\xad\xcd\x64\x54\x35\x13\x4e\x7b\x12\x7d\xfb\xbd\xb6\xc9\xb8\x5f\xce\x67\x15\xa7\x95\x90\x93\xd2\xb6\x19\x89\xd6\xf8\x1e\x61\x37\x02\xec\x06\xf8\x77\x5b\xf8\x37\xc0\x07\xc0\x07\xc0\x47\xab\xc0\x47\xf2\xd3\x3d\x4d\x5e\xfd\x9d\x17\xb2\xa9\xd3\xb4\xe0\x90\x4d\xb5\x78\x45\xd9\xe4\x55\xc1\xb5\xb0\xf7\x8c\x62\x27\xbb\x43\x74\x3b\x25\xfe\xf0\x4e\x7b\x12\x30\x5c\x91\xde\xc3\x77\x01\xf1\x78\xc1\x2b\x27\x7d\x2b\x66\x06\xc8\xd6\xd1\x6d\xf3\x11\x64\xeb\x40\xb6\x8e\x2d\xce\xd6\xf1\xec\xa6\x17\x8c\x9b\xce\xcc\xd1\xe4\x4f\x41\x72\x92\xf6\xb3\xbd\x91\xb8\x95\x3f\xe3\xf5\xce\x2c\x1c\xf6\x41\xb7\x44\xe2\x8d\x1f\x1d\xa4\x5d\x82\x5a\xae\xf0\x5a\x2f\x66\x31\xef\x54\x51\xcb\x5f\xd3\x56\x74\xf6\x87\x03\xec\xeb\x7d\xf4\x3a\xbe\xd5\x2a\xe5\xfd\x54\x40\x7a\x8d\xa2\x96\x4f\x6a\x2b\xd1\x41\x63\xb7\x84\xb1\x8b\xac\xe7\x2d\x37\xcc\x6b\xc5\xe9\x5c\xce\xfa\x18\x36\x2f\x61\x55\x8b\x4a\xb2\xbc\x59\x08\xc3\xc7\xb9\x30\xdc\x18\xe3\x07\x68\x1f\x4d\xf8\x8e\x71\xde\x57\x31\xd9\x57\x31\x79\xcf\x35\x45\x8a\x3c\x1d\x3c\x92\x63\x6c\x44\x8e\x64\x7e\x1a\x13\x83\x8b\x93\x54\x96\xa4\x0f\x0a\x3e\x41\x15\x16\xe4\x0e\x41\x15\x16\x88\xce\x21\x3a\x87\xe8\xbc\x8b\x44\xe7\xa8\xc2\x82\x2a\x2c\x10\xfb\x42\xec\x0b\xb1\x2f\xc4\xbe\x1d\x21\xf6\x45\x8d\x14\xd4\x48\xd9\x2e\xf2\x46\xd4\x48\x69\x45\x8d\x94\x9f\x8f\xd2\x7e\x41\xe2\xf2\x6a\xe9\x86\x56\xbc\x6e\xcc\xd6\xac\xd4\xb7\x82\xca\x65\xf3\x99\xa2\xaa\xeb\xa9\x9c\xa2\xeb\xaa\xa5\x1e\xfc\xde\x30\x7b\x4f\x1f\x31\xfb\x30\x8b\xd3\x3d\xee\x21\x24\x5c\x10\x6d\xcc\x18\x6d\x44\x9f\x30\x76\x38\x6b\x1d\x28\x31\x9d\x73\x97\x26\xab\x09\x57\xe9\xb4\x40\x69\xb3\x94\xe0\x28\xed\x08\x55\xf1\x10\xc5\x1c\x5d\x61\xf2\x34\xe7\xc5\x6d\x42\x27\xc4\xa5\x85\x75\xe5\x74\xae\x78\x2c\x92\xbd\xb9\x2e\xa8\x02\xc0\x85\xfe\x7a\x87\xe7\xa3\x89\x78\x2b\x0f\x5d\x4f\xe7\x49\xb9\x4f\x1b\x1f\x50\xbd\x3e\xbc\xa0\x27\x04\x1f\x1e\xb4\x88\x0d\x6a\x11\x7f\xa5\xa7\xb9\xb6\x62\x49\x08\x13\x17\xe8\xa4\x2d\x4c\xdc\x2a\xeb\x53\x47\x69\x92\x4d\x59\x9f\xe8\x2f\xdc\xeb\x69\x7d\x5e\x6f\x8a\x18\xf3\x6e\x8b\xb3\x5b\xfc\xde\x4e\x83\x03\x29\x23\xa4\x8c\x90\x32\x42\xca\x08\x29\x23\xa4\x8c\xdd\x29\x65\x6c\xc3\x3c\xa0\x06\x79\xe3\x3f\xb9\xcb\x73\xa6\xb0\x27\x40\xe9\xe8\x9a\x3e\x44\xb8\x22\xa7\x4d\xb3\x07\xc8\x1d\xbb\x6d\xce\x02\xb9\x23\xe4\x8e\x5b\x2c\x77\x6c\x33\xbf\x6a\x6e\x55\xb2\xc6\xbe\x1d\xc9\x43\x34\xc9\xf6\x47\xf6\x5a\xb2\xc6\x37\x38\xf5\x90\xce\x63\x2b\x15\x91\x4d\xd7
\x2b\x7e\x73\x0f\x9d\x14\x94\x54\x4c\x27\x2c\x34\xea\x13\x6c\x5d\xd0\xd2\xe9\xac\x5e\x5c\xe7\x23\x7e\x65\x3d\x9d\x71\x44\x5d\xbf\x7b\x0f\xfb\x85\x3e\xba\x5b\x34\x64\x7d\xf0\x06\x3c\x98\xe9\x92\x96\x9e\xb5\x5a\x49\xf0\x56\xa2\x31\x63\x3f\x31\x23\x91\x9f\x39\x5b\xdd\xef\xb1\x7f\x93\x39\x6a\x96\xce\x8a\x71\x78\x92\xe6\xf8\x38\x3c\x4e\x47\xe9\xb0\xef\x38\x94\xd3\x45\x73\x0c\x7a\x5c\xdf\x66\x51\xea\xe5\xe0\x81\x78\x98\x1d\x92\x03\x51\x5c\x8e\x1c\x7d\x5e\x17\x53\x89\x51\xdf\xbd\xb3\xe2\x49\x0d\x7b\x23\x54\xaf\x87\x15\x97\xbb\x6e\xd5\xf3\x4a\x3c\x4d\xc6\xab\xe0\x9e\x2b\x6c\xee\x81\x61\xba\x00\xb2\xda\x20\x59\xfd\x4c\x8f\x7f\x4e\x87\x66\x9b\x95\x65\x41\x5d\x17\x29\x69\x53\xd7\xcd\x36\x5a\x23\x5f\x6d\xb1\x49\x8a\x7e\xfc\xde\x0a\x93\xb4\xcb\x0a\x0e\xf7\xb2\x42\xe3\x62\xeb\x96\x19\x21\xa0\x56\xa0\x56\xa0\x56\xa0\x56\xa0\x56\xa0\xd6\x46\x50\xab\xef\xa4\xa1\xe3\x19\xac\x39\x63\x08\xc4\xa0\x35\x4e\x1a\x2a\x26\x03\xb5\xce\x22\x12\x1f\xbc\xab\x62\xd2\x30\x11\x80\x58\xbd\xa6\x12\x63\x7c\xfd\xbf\x15\x33\x09\x60\xd7\x6e\x9b\xbf\x00\xbb\x02\xbb\x6e\x31\x76\x6d\x3f\xee\x0a\x24\xaf\x2d\x5e\x5d\x26\x4f\xd0\x31\x76\x24\x32\x65\x51\xd7\xc7\x9d\xd4\xd5\xa3\x89\x5b\x22\x1c\xfd\xc7\x86\x28\x2e\xf0\xae\x5e\xd2\x8a\x4a\x46\x2d\x57\xc0\x72\xdd\xac\xb9\x51\xea\x60\xd9\x6f\x0f\xb2\x2f\xf5\xd3\x3d\xf2\x57\xeb\xbb\xfa\xa2\x0c\xef\xc8\xa7\xb3\x1b\xd9\xf4\xba\x92\x73\x0a\x7f\x15\x2b\x94\x6a\x59\x1c\xc6\x09\x76\xcc\x31\xdf\x9f\xb2\x56\x0a\x83\xbc\xa1\x41\x3b\x86\xd4\x15\x14\xa8\x15\x54\x6b\x19\xa7\x97\x54\x25\x1d\x8b\xee\xe6\x07\xc8\x86\xe5\xe7\xd9\x79\x9a\xc5\xac\xde\xbc\x2f\x72\x8b\x82\xde\xaf\x0b\xc1\xdc\x38\x17\xcc\x19\xaf\x1c\x4f\x4c\xd9\xc0\x57\x96\xeb\xae\xe7\x36\xd4\x7c\xf0\x4b\xb7\xdb\xfb\xa5\xbb\x8b\x11\xbf\x30\x51\x2e\xf1\x4a\xf0\x8b\x77\x84\x4d\xc9\x17\xcf\x3d\x8c\xe4\x0b\xe8\x7c\x14\x1e\x6f\x1e\x42\xe2\x11\x12\x8f\x90\x78\x84\xc4\x23\x24\x1e\x21\xf1\x08\x89\x47\x48\x3c\x42\xe2\x11\x12\x8f\x90\x78\x84\xc4\x23\x24\x1e\x21\xf1\x08\x89\x47\x48\x3c\x42\xe2\xdb\x14\x12\xff\xd7\xbd\xf4\xb0\xa0\x81\xea\x8b\x25\x35\xcf\x1f\x95\x45\x02\xd9\xef\xf6\xb2\xaf\xf7\x12\xb3\x37\x59\xe0\xef\xa1\x8c\x5a\x72\x7f\x68\xc5\x2d\x44\xc3\x19\xb5\x34\x67\xed\x2f\xc1\xdc\xf4\xd2\x82\xb9\xb8\x6b\x62\x74\x42\x45\x43\x53\x15\x0d\x4d\xd5\xd2\x50\xb2\x40\x17\x05\x87\x3b\x4b\x8b\x9c\xc3\xcd\xd3\x2c\x25\x1a\xe0\x70\x8e\xfb\xac\x25\x19\x25\xfb\xb7\x97\xe9\x80\xe8\xfd\xd5\x9c\x76\xc3\xf8\x14\x16\xb5\x5c\xcc\x42\x2b\xe5\x64\xd6\xd8\x49\x4f\x5d\x55\xd7\x14\x9d\xbd\xfd\x32\xfb\xd2\x00\xed\x72\x1c\x37\x6d\x1e\x66\x3d\xa4\xa1\xea\x39\x44\xe7\x73\xda\x8d\x65\xde\x5c\x74\xd8\xd8\x73\xde\xa3\x2d\xf9\x00\xed\x5d\x3b\x9c\xa9\x26\x40\x16\x41\x16\x13\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x5d\x43\x16\x9b\x5e\xcf\xbe\x61\xb2\x88\xca\xfa\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x5d\x4d\x16\x51\xfa\x1b\x20\xb0\x5b\x4b\x7f\x27\xb6\x21\x08\xac\x57\xfe\xe9\xe4\x55\xa6\x06\xd4\x86\x44\x35\x55\x62\x09\x28\x9b\x72\xed\x7a\xb0\x08\xed\x14\x9b\x97\x22\xb4\x6a\xfc\x4c\x4a\xd2\xec\xcb\xab\x14\xa4\x45\x7e\x67\x67\x00\x4b\xbb\x5f\x7c\xb1\xc2\x8a\x13\x9b\xed\x11\x3f\xb6\x17\x9c\x09\xce\x75\x86\x4e\xd3\x42\x99\x44\xff\x10\x4d\x36\xf8\xc0\xa0\xd0\x47\xa4\x73\x83\x91\xce\xef\xec\xa5\xa4\xb0\x1c\x33\x34\xcd\x2d\xc7\x61\x6a\x7c\x20\xd2\x39\x11\xcc\x7c\x8a\xe6\xed\x60\xe6\x4d\x35\xb8\x24\x42\x9d\x16\xe8\xa4\x23\xd4\x69\x53\x2d\x36\x6e\xd8\x0a\x5a\xbb\x0d\x5b\xf4\x5d\xa3\x01\x86\xed\x71\x19\x0d\xe7\x08\x2d\xd4\x56\x9d\x46\x6e\xbf\xd8\xa1\x8a\x91\x9b\xb1\x0e\x6d\x99\xb9\x6b\x4d\x5c\x35\x38\x31\x38\x31\x38\x31\x38\x71\xf7\x70\x62\xcc\xf3\x02\xe6\x79\x9d\x03\xd2\x91\xd3\xa2\x2d\x39\x2d\xe0\xaf\x80\xbf\x0
2\xfe\x0a\xf8\x2b\xe0\xaf\xe8\x6a\x7f\x05\x52\x1b\x21\xb5\x11\x52\x1b\xb5\x2a\xb5\x11\xdc\x81\x70\x07\x76\xab\x3b\x30\x99\x69\x72\xf2\xae\x20\xfe\x1c\xf5\xe6\xcf\xf7\xb3\xfb\xc4\xdb\x6c\x5b\xda\xf6\xb2\xe8\xe6\xe7\x32\x79\x4f\x1f\x0d\x09\xfd\x7c\x71\x45\x49\xc5\xcc\xae\xe0\x4f\xb2\x5c\x3c\xcf\xfe\xb8\x97\x7d\xb7\x97\x1e\x36\xf6\x9c\x76\xee\x18\x1c\xd3\x30\x98\x51\x4b\xe7\xcb\x0f\xdb\x2e\xa1\x0d\xbe\x19\xe5\x5a\x13\xf3\x60\x86\x36\xbc\x7b\x88\x62\xc1\x8f\x46\x26\x9b\x29\x6a\x39\x55\x67\xbf\x3b\xc8\x7e\xab\x9f\x1e\xf0\x7a\x40\xa1\x42\x6d\x89\x66\xce\x6b\xc6\xaa\xa3\x29\x09\x66\xc6\xf9\x01\x1e\x8f\xdd\x38\x87\x71\xa7\xf3\x5a\x71\x3a\x97\xb3\x52\xc2\x35\xef\xe9\xb7\x2c\xdb\x8c\xef\x50\x68\x7a\x1a\x9a\xba\xb2\xcd\xbc\x10\x6c\x83\xa6\xd8\x41\x69\x83\x7c\x47\x93\x34\x40\xc6\xd3\x71\x9a\x1e\xe4\x9a\x41\x44\x08\x72\xcd\xc0\xd3\x07\x4f\x1f\x3c\x7d\xdd\xe4\xe9\x43\xae\x19\xe4\x9a\x81\x87\x05\x1e\x16\x78\x58\xe0\x61\xe9\x08\x0f\x0b\x72\xcd\x20\xd7\xcc\x76\x61\xca\xc8\x35\xd3\x8a\x5c\x33\xff\x6b\x80\x42\x02\x09\x2a\x85\x82\x3e\xb6\x11\x1f\xd3\x4b\x4a\x49\x5d\x5d\xcf\x19\x2f\x29\xfb\xd6\x00\xfb\x8f\x7d\x74\x87\xb1\xed\xca\x46\x3c\x34\x5c\x3d\x79\xc9\xb2\x3c\x74\x59\x2d\x45\x9f\x32\x76\x9d\x2e\x14\xf4\x4b\x71\xc7\xcf\xdb\x8d\xcf\x5d\xa6\x19\x81\xe1\x8e\xd0\x14\xc7\x70\xfb\x68\x82\xc6\x7d\x45\xea\x46\x3f\x99\x4e\x01\x79\xc7\x4d\x09\xbb\x39\x17\x4c\xe3\x46\x58\x54\xd2\x38\xe3\x22\xac\x8c\xcf\xd6\x75\x80\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x81\xbf\x6d\x09\x7f\xfb\xbd\x5e\x7a\x48\xf0\xb7\x74\x56\x4f\x69\x86\xd1\x33\x95\x78\xec\x73\xbd\xec\xdf\xf7\xd2\x4e\x6b\x43\x28\x94\xe1\xca\xf7\x55\xad\xb8\x66\x75\xa2\x12\xe6\xd8\x27\xfa\x40\x46\x2d\xcd\x9a\x7b\x4e\x2f\x2d\x9c\x34\x7e\xed\x40\xf1\x63\x7d\x39\x6d\xaa\x6b\x1c\xf9\x3d\x06\x26\x74\xfe\xa7\xc3\x94\xf4\xe9\x62\x77\x79\x3d\xab\xd6\x9f\x3e\xf6\x77\xad\xbf\x5f\x1a\x53\xf3\xe9\x82\x96\xcd\x97\xf4\x5c\x36\xa5\xea\xec\xd7\x87\xd8\x37\xfb\xe9\x3e\xab\x2d\x4b\xaa\x7a\xb3\x36\x39\xe4\x9c\x6c\x6e\xd9\x68\xae\xa9\xba\x48\xeb\xf1\x57\x54\xc6\x75\x9d\x73\x7b\x54\xe1\xeb\x50\x5d\xe4\x4a\x30\x89\x3d\xce\x8e\x4a\x12\x5b\x3e\xde\x24\x95\x75\x3d\x0d\x14\xe2\x03\x9c\x05\x9c\x05\x9c\x05\x9c\x05\x9c\x05\x9c\x05\x9c\x05\x9c\x05\x9c\x05\x9c\x05\x9c\x05\x9c\xdd\x72\x38\xfb\x66\xba\xcc\x9e\x8f\x3c\xb7\xa3\x87\xf5\x17\x94\xd2\xd5\xd0\x94\x3c\xb3\xc8\x27\x6a\x4c\x70\xd6\x4b\x57\xc3\x7a\x4a\x2b\xa8\x23\x61\x7d\x3d\x75\xd5\xe8\x7d\x3e\x83\x55\x95\x35\x01\x93\x0a\x45\x8d\xdf\x69\x64\xa7\x05\x14\xa2\xd2\xbc\xbf\x07\xf8\x17\xf8\x17\xf8\xb7\xbb\xf1\xef\x8f\x45\x29\x21\xd8\x64\x5e\x2d\xdd\xd0\x8a\xd7\x8d\xf9\xa0\x1d\x89\xed\x43\x24\xb3\xf9\x4c\x51\xd5\x75\xf3\xd7\x97\xd8\xe7\x86\xd9\x0f\x7b\xe9\x75\x76\x1b\x57\x36\xe2\xa1\x50\x51\x55\xd2\x65\x2b\xde\x05\x71\x64\xf4\x09\x63\xdb\x59\x6b\xf7\x4b\x0e\x56\x28\x77\x69\x6e\xae\xd8\xe4\xf3\x34\x2d\xb8\xdd\x14\x1d\xe4\xdc\x6e\x82\xc6\x29\xe6\x2b\xa4\x74\xf4\xc6\x46\x3c\x26\x2f\x29\x50\x46\xf9\xc6\x60\x38\xb7\x9f\xed\x95\x70\xae\xa2\xc3\x25\x9d\x33\x4f\xe6\xd4\x4b\x5e\x7b\xd4\x9b\x0d\xde\xce\xfa\x33\x6a\x89\x42\x5f\xda\x51\xde\xf9\x8f\x16\xd5\x42\x4e\x49\xa9\x3e\xfd\xff\xa4\xdc\xdc\xc6\x47\x90\x98\xa5\x04\x9d\x28\xcb\x4e\x5e\xf7\x33\x40\xb2\x4a\x24\x25\x6f\x30\x29\xf9\xbf\xe8\x69\x86\x0d\x98\x17\xb9\xc8\x8f\xd3\x51\x3b\x17\x79\x2b\x6c\x89\xff\x1b\x5f\x58\x2f\x51\xeb\x4c\x4d\xf4\x1f\xdf\x5b\x6e\x4b\x64\x8a\x18\x63\x98\x9b\xf6\x63\xb7\xf8\xa9\x9d\xe6\xa3\x35\xd9\xbe\x61\x4e\x02\xcc\x09\x32\xce\xb6\x25\xe3\x2c\x52\x0d\x22\xd5\x20\x52\x0d\xb6\x2a\xd5\x60\xf2\xd3\x3d\x4d\x
ce\xb7\x76\x5e\x54\x10\x39\x4d\x0b\x8e\x0a\x22\x2d\xce\xe1\x16\xf6\x9e\x12\xec\x64\x77\x88\x6e\xaa\x75\x56\xe0\xfc\xdc\xd7\x38\x43\x48\xfc\xcf\x3b\xcb\x67\x05\x03\x05\xa5\x58\xca\x72\x07\xa4\x58\x93\xfa\x2c\x35\x22\x05\x63\xd5\xd9\xa6\x99\xc2\x23\xe5\x0d\x8d\xf2\xb3\xef\xe1\x8a\xa1\x5d\xce\x8d\x6b\x6a\x31\xa3\x3a\xb7\x0e\x38\xb7\xea\xa5\xa2\x52\x52\x33\xd9\xd4\x68\xc5\x7e\xae\x56\x8c\xbf\x6f\xca\xad\xc6\x55\x26\xce\xd2\x22\x25\xcb\x16\x39\x53\x74\xb0\x81\x41\xb1\xc4\x7d\xf3\x98\x9f\x04\xcc\x4f\x3e\xda\x47\xaf\xf4\xb1\x0f\xf7\x85\x3e\x68\x19\xf9\x97\xfb\xba\x67\xb9\x53\xe6\xce\x30\xfa\x99\xbb\xe7\xb9\xcb\xcd\x18\x9c\xb6\x48\x67\xc8\x67\xe0\x0e\xf3\x6f\x95\x50\x73\x28\x39\x7e\xa4\x31\x29\x13\x47\xf3\x3d\xf8\x04\x49\x0f\x0f\x25\x75\x2d\xbf\x24\xd4\x2a\x67\x8c\xd7\x42\xfe\xbd\x6c\xbe\x2c\xf6\x8f\xc3\xfe\xeb\xb0\xc4\x97\x7b\xe8\xb5\x1e\xf6\x6a\x4f\xe8\xf3\x16\x27\xfb\x50\xcf\xbc\x56\x4c\xf1\xb9\x5b\x46\xe3\xdd\xae\x85\x23\xab\xc6\x4f\x91\xf0\xb4\xeb\x2e\x38\x01\x15\xd3\xac\x75\xdd\xf6\xcb\x8f\x2a\x29\x7e\xe3\x5c\xed\x90\xcb\xa6\xe4\x67\x54\xcd\xa5\xf5\xb0\x76\x43\xf6\xa9\x50\x03\x14\x54\xad\x90\x53\x63\x61\x71\x46\x2e\x2f\x32\x9f\x25\x27\x91\x9e\x1d\x60\x9d\x3e\x72\x1b\xbf\x2c\x97\x54\xa6\x83\xd8\x92\xff\xba\xce\xd7\xd0\x5f\x7b\xdc\xfb\xb3\xb2\x83\xdd\xce\xef\x9d\x92\xfb\x68\x82\x8d\x47\x62\x96\x1f\xe2\x7e\xfe\x32\xc9\x37\x5b\x9e\x2b\xd2\x6f\xfc\xe8\xf4\x2d\x6c\x3f\xef\x05\xfb\x4f\xbd\xb4\x63\x4c\x29\x64\xc7\x36\xe2\x63\xec\x8b\xbd\xec\x73\xbd\x74\x47\x4a\x2b\xaa\xc6\xe7\xce\x37\x11\xe8\x83\x19\xb5\x34\xa3\x15\xd5\x4b\x9d\x9f\xf6\xb3\x35\xd9\x3d\x03\x05\xb0\x1f\x7c\x81\xc6\xdd\x31\xfe\x3e\x5c\xb9\xa8\xf2\xbb\xe0\x91\xff\x7f\xfe\x3c\x7b\xeb\x80\x1d\xf9\x3f\x54\x3d\xf2\xff\xbc\x38\x72\x59\x2d\x45\x1f\xb3\x03\xff\xed\x89\x86\xbd\xbd\xc3\x95\xa7\x09\x88\x2f\x21\xbe\x44\x0d\x3a\x88\x2f\x21\xbe\x84\xf8\xb2\x7b\xc4\x97\x1d\x54\x62\xad\x63\xc4\x97\xa8\xfd\x05\xf1\x25\xc4\x97\x10\x5f\x42\x7c\xb9\x15\xb5\xbf\xba\x5a\xc9\x88\xe2\x44\xdd\x5c\x9c\x28\xb1\x0d\x95\x8c\xc9\x17\x28\x21\xe8\xd6\x61\x3a\xc4\xe9\xd6\x5e\x8a\xd3\x58\x60\xa6\x44\x1b\x0c\xd5\x94\x28\xf1\x6c\x30\x85\xdd\xc3\x86\x65\xd5\x11\xab\x69\x2f\x10\xcb\xb3\x24\x06\x25\x5e\x8c\x7c\x6f\x87\x8d\xc0\xee\x17\xdf\x9c\xb0\xe2\xa4\x5d\xf2\x43\xd4\x06\xde\x25\xf0\x14\x0f\x77\x2e\xf3\x6f\x8d\xd2\x9e\x3a\xfa\x19\x2e\x2d\x28\xf8\x1a\x54\xf0\xfd\x7e\x0f\x1d\x13\x2f\xf9\x24\xed\xe7\x2f\xf9\x18\xd5\x37\xf8\x44\x3a\xd5\x38\x4f\xa7\x6a\xc9\xf7\xea\x6e\x64\x56\x38\xff\x8f\xd2\x61\x87\xf3\xbf\xee\x56\x9a\x63\x6b\xfc\x4c\x4b\x85\x0d\xaa\x62\x6b\x0a\x9a\x5e\xa2\xe8\x6f\x8c\xd8\xb6\xe6\x71\x29\xe4\x70\xa8\x62\xb4\x55\xa7\xdd\x89\x8a\x1d\x84\xdd\x99\xb1\xf6\x6a\x83\x05\x6a\x8d\x0e\x10\xc4\x15\xc4\x15\xc4\x15\xc4\xb5\x7b\x88\x2b\xa6\x61\x01\xd3\xb0\xce\x41\xd2\xd0\x60\xb7\x45\x83\x0d\xf2\x0f\xf2\x0f\xf2\x0f\xf2\x0f\xf2\xdf\xd5\xe4\x1f\xa1\x38\x08\xc5\x41\x28\x4e\xab\x42\x71\xe0\x58\x83\x63\xad\x5b\x1d\x6b\xc9\x4c\x93\xa3\xd0\xda\x8d\x90\xa3\xde\x08\xf9\x7e\x26\x43\xc4\x6d\xcb\x4d\xdb\x51\xff\xff\xed\x3d\x74\x4c\x08\xd5\x53\x6a\x51\x7a\x22\x55\xdd\x91\x0f\xc5\xf9\x73\x36\x93\xe7\x79\x50\xc4\x04\xcd\xcc\x84\xf2\xb3\x7b\xd8\x67\xfb\xe8\x1e\xe7\x8e\x57\x36\xe2\xa1\x11\x8f\x5c\x28\x33\xf6\x3e\xcb\xa2\xb1\xf3\xa2\xb1\xe8\x1e\x63\x6f\xc7\x66\xfd\x52\xdc\x77\xe7\x26\xe7\x49\xf1\x4f\x67\x5d\x57\xf4\x40\xcc\xd5\x81\x1b\xf1\x98\xef\xf5\x07\xe7\x3b\xc8\x18\x23\x2f\x17\x3c\x92\x17\xd8\x49\x39\x7a\x3d\x9e\x9e\x1c\xcc\xbe\x97\xe1\x72\xd1\x86\x3e\xb5\xb3\xf2\x11\x8e\x79\x67\x54\xf1\x7f\x8a\x31\x79\xc0\x96\x3c\xc8\xc4\x1b\xe9\x12\x5d\x28\x73\xd4\x36\xe5\x81\x01\x1c\xc2\x7f\xdb\xa0\xff\
xf6\x37\x7a\x5a\x64\x44\xe8\x39\xe1\xd6\x5d\xa6\xa7\x6d\xb7\x6e\xb3\xda\xde\x64\xa6\x96\xb6\x5a\xae\xe8\xe7\xee\xad\xb4\x5c\x11\x33\x7f\x4b\x15\x63\x35\x2a\xf6\xd9\x1a\x5b\x85\xd4\x2e\x48\xed\x72\xeb\xba\x15\xc0\x93\xc0\x93\xc0\x93\x90\xda\x65\x73\xa9\x5d\xda\x3a\x8d\x48\xfc\xbb\xbb\x2a\xa7\x11\x07\x03\x12\xbe\xf8\x4f\x2e\x46\x78\x22\x81\xb6\xcf\x2d\x90\x0c\xa6\xdb\x66\x34\x48\x06\x83\x64\x30\x5b\x9c\x0c\xa6\xbe\x2c\x1b\x4d\x5c\x86\x06\xa5\x71\x69\xef\x17\x24\x39\x4f\xb3\x2c\x11\x39\x61\xe1\xdf\xa7\x9c\x49\x63\x7c\xdb\xa8\x4c\x23\xd3\x74\xc8\xfb\xfe\xcb\x74\x48\x40\x5e\xf5\xc5\x92\x9a\xe7\xae\x07\xab\x10\x5f\x50\xc2\x6b\xf6\xed\x17\xd8\xfb\x07\x88\xd9\x87\x5a\xe5\xf7\x9e\xaa\x9e\xa1\xc4\x4c\x87\x36\x60\xec\x36\x67\x1d\x5e\x51\x2a\xaf\xd9\x29\xd1\x90\xa6\xc4\xa1\x70\x42\x9a\x12\xa4\x29\x81\x68\x1e\xa2\x79\x88\xe6\x21\x9a\x47\x9a\x12\x88\x95\x21\x56\x86\x58\x19\x62\x65\x88\x95\xb7\xaf\x58\x19\x6a\x4a\xa8\x29\xbb\x54\x4d\xb9\x2d\xd3\x94\xa8\x94\x14\x78\x70\x86\xa6\x39\x1e\x3c\x4c\x87\x68\xd2\x17\x0f\xda\x9c\x29\x26\x39\x93\x99\x30\xba\xa6\x74\x25\x01\xe9\x45\xae\x5d\x0a\x46\x82\x7b\x59\x5c\x22\x41\xfb\x5a\xfc\xcb\x07\xf1\x4b\xa4\xc8\x07\x76\x7a\x22\xb2\xfb\xcc\x0c\x26\x76\x21\xa1\x21\xf1\x53\x1b\x81\x98\xe0\x57\x0b\x74\x92\xe6\xca\x5c\x33\xfb\x69\x6f\x03\x0f\x02\x5e\x19\xe8\xe1\x1a\xd4\xc3\xfd\x4d\x8f\x28\x27\x36\xce\xcb\x89\x19\xd6\x60\x92\x1a\x1b\x84\x74\x5a\x08\xe0\x66\x29\x61\x0b\xe0\x1a\x6e\x6c\x51\x78\xc0\xe7\x68\xc6\xe1\x01\x6f\xb8\xb5\xc6\x8d\x14\xcf\x4b\xd2\x2a\x23\x15\xfd\xfd\x11\x4f\x23\xb5\xcb\x33\xf5\x89\x69\xaf\x26\xc4\xd6\x0a\x7b\xe5\x95\x02\x65\x3b\xd5\x41\x03\xca\x05\xca\x05\xca\x05\xca\xed\x1e\x94\x8b\x69\x1b\xf2\x9f\x40\xa8\x8c\xfc\x27\x70\x29\xc0\xa5\x00\x97\x02\x5c\x0a\x70\x29\x20\x5e\x05\xf1\x2a\x88\x57\x41\xfe\x13\x78\xec\xe0\xb1\xbb\x35\xf2\x9f\xd4\x91\xaf\xa4\x55\x98\x79\x3b\xe6\x41\xf9\xd6\x10\x0d\x98\x75\x50\xf9\x50\x19\x33\xf1\x46\xbe\xb4\xa1\xe5\xd6\xd7\x54\x2b\xdf\xc9\x27\x86\xd8\x77\xfb\xed\x2a\xa9\x9f\xea\x91\x43\xcb\xf6\xbd\x2a\x79\x73\xea\x62\x4a\xe1\x97\xac\xc6\x2e\xf1\xc6\x62\x8e\xc9\xc1\x94\x35\xad\x18\xe4\x2d\x0d\xda\xd8\xd6\xc5\xe1\xb4\x82\x6a\x2d\xfd\xf5\x92\xaa\xa4\x47\xec\xf5\x0a\x5f\x93\xc9\xd9\xad\xb1\xe4\xb1\x17\x62\x83\x2e\xc4\xe0\x68\x3b\x16\x7d\x84\x9f\x4e\x14\x72\x2d\xbf\xc0\x0e\x17\xe3\x27\xaf\xfb\x27\x6d\x59\x12\xef\xdb\x02\x9d\xe4\xef\x1b\x2f\xdf\xd0\xc0\xfb\xc6\xbd\xea\x73\xc6\x82\x31\x38\xdc\xe4\x86\x08\x37\x39\x1f\xfc\x3e\x8d\xb1\x51\xf9\x3e\x45\x22\xf2\x3d\x2a\xef\x7a\x77\x50\x09\xa2\x0e\x10\x75\x90\x84\xab\x0a\xae\x2a\xb8\xaa\xe0\xaa\xea\x1a\x57\x55\xb2\x63\x3c\x31\x4d\xbf\x92\x86\x5d\x04\x49\xb8\x08\xe0\x22\x80\x8b\x00\x2e\x02\xb8\x08\xda\xef\x22\x48\x1e\xa5\xc3\xec\x50\x64\xd2\x62\x1a\xbb\x9c\xc1\xfd\xe5\x4b\xb8\xd6\xc7\xf4\x27\xbb\x9a\xa9\x26\xc1\x54\x5b\xc7\x54\x93\x1d\xcf\x54\xb7\x61\x14\x04\x7b\xd7\x10\x3d\xe9\x06\x8c\x45\xd5\x02\x5f\x86\x69\x2f\x6a\x86\x61\xd5\xd9\xef\x0d\xb2\xff\xe4\xc0\x8b\x6f\x93\x78\x31\x9b\x4f\x67\x37\xb2\xe9\x75\x25\xe7\x22\x8d\xd6\x84\xf3\xbc\xdd\xda\x8c\xd5\x5a\x53\x30\x63\x2c\xba\xcf\x01\x0a\x3d\xcf\xb3\x98\xd5\x4b\xf3\x5a\x71\x3a\x97\xb3\xa4\xc0\x9d\x9e\xce\x23\x79\xbd\xf9\xa0\x30\x10\xce\x3f\x13\x0c\x08\xf7\xb1\x89\x72\x40\xe8\xd9\xe5\x2e\x4a\x78\x6d\xb7\x37\x9a\xbc\x8b\x11\xbf\x63\x1e\xfa\x02\x94\x08\x94\x08\x94\x08\x94\x08\x94\x08\x94\x08\x94\x08\x94\x08\x94\x08\x94\x08\x94\x08\x94\x08\x94\xd8\x66\x94\x08\x16\x08\x16\x08\x16\xd8\xc5\x2c\xf0\x07\x03\xf4\xb0\xc8\xc7\xab\x14\x0a\xfa\xd8\x46\x7c\x2c\xad\x16\x72\xda\x4d\xe3\x1b\xab\xb3\xaf\x0f\xb0\x2f\xf7\xd1\x1d\xc6\xa6\x2b\x1b\xf1\xd0\x50\xf5\x24\xbb\xb3
\xd6\x91\xd1\x27\x8d\x3d\xa7\x0b\x05\xfd\x52\xdc\xfe\x75\xbb\x61\xb9\x17\x28\x21\xb0\xdc\x61\x3a\xc4\xb1\xdc\x5e\x8a\xd3\x98\x6f\x12\x06\xa3\x97\x44\x68\xbf\x79\xc3\x35\x65\x89\x69\x46\x95\x40\xfb\x94\x14\x94\x75\x06\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\x0d\xe8\xad\x5d\xe8\xed\x7b\xbd\x14\x12\xe8\xcd\xa3\xdc\xd7\x18\xfb\xf5\x5e\xf6\xc5\x5e\xba\xcb\xb9\x2d\x14\xca\xf0\x34\x10\xab\x5a\x71\xcd\xea\x4a\x25\xcc\x79\x50\xf4\xa1\x8c\x5a\x72\x16\x79\x9c\x5e\x5a\x38\x69\x6c\x68\x5e\x02\xcc\x8a\x86\xa6\x2a\x1a\x9a\xaa\xa5\xa1\x64\x96\xce\x0a\xb0\x76\x92\xe6\x38\x58\x3b\x4e\x47\xe9\x70\x03\x7a\x37\xf3\x1e\x83\x20\x1b\xfb\x91\x2b\x34\xe9\xdf\xd7\x1b\x71\xd7\xcf\xa2\x26\x9a\xf9\x4d\x64\xaf\x5e\x66\x5f\x1b\xa8\x2c\xc8\x39\x59\x1d\x86\xfa\xd7\xe3\xdc\x63\x1c\xd8\xee\x72\x9c\x28\x44\x06\x98\x88\x42\x64\x80\x89\x80\x89\x80\x89\x80\x89\x9b\x84\x89\x1d\x94\x9c\xb5\x63\x60\x22\xb2\x86\x02\x26\x02\x26\x02\x26\x02\x26\xa2\x10\x19\xd2\x1a\xde\x3a\xec\xaf\xe3\xd3\x1a\x6e\xcb\x42\x64\x25\x7a\x56\x10\xb0\xf3\xb4\xc4\x09\x58\x92\x4e\xd1\xbc\xaf\xb4\xcc\x05\xad\x36\xe2\x31\x5f\x5e\xd4\x94\xba\x64\xb9\x60\x45\xda\x02\x3b\x29\x55\x68\x1e\x3c\x4d\x8a\xd2\x7c\x2f\xd2\x15\x2f\x1a\xf9\x51\xaa\x64\x6b\x11\xb3\x54\x59\x15\x8c\x36\x2a\xf6\x69\x37\x48\x13\xdc\xeb\x8d\x74\x89\x2e\x94\x15\x30\x9b\xa5\xc4\xe6\x1f\x20\x0a\x63\xa0\x9e\x59\x83\xf5\xcc\x3e\xdc\x4b\x17\x85\x51\x39\x4b\x8b\xdc\xa8\xcc\x53\x53\xc6\x24\x3d\x27\xca\x9b\x2d\xd3\xd3\x76\x79\xb3\x66\xb5\xfd\x26\x51\xed\xec\x02\x9d\x77\x54\x3b\x6b\x56\xe3\x9b\x2d\x7e\x56\x9b\x25\xac\xc7\xdc\x55\xb1\x9a\xd1\x7f\x30\x5a\x69\x09\x63\x9e\xf5\xd0\xfc\xad\xe2\xa4\xd8\xbf\xcc\x2a\x5a\x07\xb7\xcb\x3e\xa2\x4c\x1a\x40\x33\x40\x33\x40\x33\x40\x33\xca\xa4\xa1\x4c\x1a\xca\xa4\xa1\x4c\x1a\x1c\x1e\x70\x78\xc0\xe1\x01\x87\x07\x1c\x1e\x4d\x71\x78\xa0\x4c\x1a\xca\xa4\xa1\x4c\x1a\xca\xa4\xc1\x9f\x08\x7f\xe2\x36\x2f\x93\xd6\x56\x77\x5b\x3d\x45\xd9\x9a\x5f\xda\xec\x07\x43\x74\x5c\xc8\xf0\xed\x12\x6e\x63\xb2\x56\x9b\x4c\x45\x6c\xd5\x58\x93\x45\xce\xf8\xdf\x2f\x8d\x65\x45\x85\x37\x55\x67\x9f\x1c\x62\x5f\xe8\x27\x66\x37\x70\x45\x36\x10\xd2\x6b\x4b\x4f\x2c\x8b\xc5\x35\x29\x21\xf1\x1e\x7e\xc0\x9c\x75\x35\x97\xc4\xc5\x58\x59\x4e\xd2\xf2\x74\x8b\xd9\x8e\x57\xf3\x6f\x49\x25\xb3\x5a\xd2\x05\xb7\xac\x3a\x20\xc2\x17\x10\xbe\x80\x5c\x28\xf0\x2a\xc1\xab\x04\xaf\x52\xf7\x78\x95\x90\x0b\x05\xb9\x50\x40\xf3\x41\xf3\x41\xf3\x41\xf3\x3b\x82\xe6\x6f\xc3\x2a\xed\xc8\xb6\x02\x42\x8a\x6c\x2b\xdb\x27\xe2\x82\xfd\x5e\x2f\xdd\x2b\xd0\xa3\xf0\x97\x8c\x6d\xc4\xc7\xd8\x6b\xbd\xec\xd5\x5e\xda\x29\x7e\xb9\xb2\x11\x0f\x3d\x94\x51\x4b\xee\x0f\xb7\xb8\x60\x9e\x5d\x45\xb8\x49\x2e\xc5\xa7\x97\x16\xcc\x35\x62\xf3\x72\x18\x37\x2f\xbb\x4a\xa1\x2e\x19\x78\xf5\xec\x2a\xe6\x7d\xd6\x12\x57\xc2\xfe\x6b\x2f\xbd\x41\x74\xb1\x9e\xba\xaa\xa6\xd7\x73\xc6\x94\xdb\x4c\x67\xf3\x85\x5e\xf6\x6b\xbd\x44\xf6\x96\xaa\xc9\x6c\x1e\xcc\xa8\xa5\x65\x6b\x57\xa4\xb2\x71\xf4\xf2\x27\x86\xe9\x69\xd1\xcb\xc5\x15\x25\x15\x33\x77\xe0\x97\x69\x27\xb4\xa9\x01\xa9\x17\xb5\x9c\xba\x92\xcd\xa7\xb3\xf9\x8c\xce\x7e\x77\x88\xfd\x59\x3f\x3d\x6c\x34\x39\xed\x6c\xd1\x82\xeb\x37\x6a\xac\xfd\xa7\xe5\xd4\x84\x68\xb4\x49\x80\xfd\x00\x3f\xe0\x7c\xf9\x85\x55\x70\x76\xc7\x99\xb7\x07\x6b\x6f\x7f\xcd\xbf\x6c\x30\x46\x9f\x67\xb3\x12\xa3\xfb\x8e\x2e\xb3\x14\xa0\xdd\xdf\x95\x64\x1d\x55\x00\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\x81\xdf\xbb\x0c\xbf\x7f\xf2\x01\xda\x67\x3c\xba\xb1\x8d\xb8\x1f\x8f\x34\x5e\xb8\xac\xf5\xe3\x4
b\x63\x85\xa2\xf6\xe2\x4d\xf6\xd7\xf7\xb3\x9f\xed\xa5\x3b\x52\x5a\x51\xe5\x25\x08\x53\x5a\x3e\x6f\xd8\xa6\x93\x73\x17\x6c\x2c\x54\xd2\xc2\x7c\x6f\xe3\xe2\x97\x45\x33\xd1\x01\xb9\xe7\x8c\x56\x54\x2f\xc5\x4f\xaa\x25\x1b\x15\xca\x5d\x96\x8c\x43\x26\xfa\xa2\x63\x51\x81\xe4\x26\x69\x97\x40\x72\x0f\xd2\xfd\x1c\xc9\xbd\x8e\xee\xfc\x78\xcf\x0e\x92\x63\x23\x10\xaf\x3d\xe1\xcd\xbc\x88\xed\x90\x97\x42\xd7\x2e\x06\x13\xb8\x09\x36\x2e\x09\x5c\x24\x22\x51\x9b\xf3\x7a\x65\x2e\x0c\x97\xa8\x3b\xe4\xd9\x43\x4b\x17\xeb\xea\xa1\xa5\xf5\x6d\xd4\x43\x1e\x29\x55\xaa\xf6\x56\xe4\xe7\x1c\x3d\x34\x6c\xf5\xd0\xb9\xe5\x80\x2e\x1a\x74\x77\x91\xa6\xb7\xbe\x8f\x5a\x33\x44\x6a\xe8\xfa\xe8\xbf\x74\xf4\xd1\x1e\xb3\x8f\x66\xe7\x16\xe7\x2e\xcc\x55\xef\xa5\x61\x57\x2f\x89\x9c\x2d\xdb\x67\x2c\xd5\xdb\x95\x13\xbf\xe0\xe8\xa7\x11\xb3\x9f\xce\x2d\x5d\x58\x38\x77\x76\xb9\x7a\x47\x45\x5d\x1d\x25\x1b\xbf\x75\x7b\x6a\xca\xf3\xad\x3b\x35\x37\x3d\x5b\xcf\x5b\x77\x4a\x55\xd2\xb7\xee\x5b\x97\xf8\x79\x47\x1f\x45\x2d\xcb\x34\x7d\x61\xe6\x54\xf5\x4e\x1a\x72\x9b\x26\xe3\x8b\xdb\x21\xbd\x54\x5b\xdf\xd8\x7d\x59\x43\x2f\x25\x4f\xd0\x31\x76\x24\x32\x65\x2d\x51\x1e\xe7\x6b\x13\x39\xb9\xf2\x38\x55\xa4\xdf\xd8\xc1\xb9\x04\x69\xfd\x22\xe7\x2b\xbd\xf4\x1f\x7a\xd9\xaf\xf7\x86\xbe\x68\xcd\xca\x5f\xe9\x5d\x52\x4a\x62\x95\x7f\x55\x0d\x17\x94\x22\xe7\x34\x17\xcf\x2f\xca\xa5\xa9\x5c\x12\x87\xe5\xdc\x27\xac\xe6\xd3\x05\x2d\x9b\x2f\xe9\xc6\x25\xac\xae\x66\x5f\x54\x75\x31\x03\x73\xb8\x10\x8c\x99\xac\xae\x5a\x53\xd1\xd4\x7a\xb1\x68\xac\x0a\xc5\x00\x31\x17\x31\x25\xcd\x6c\x33\x16\x9e\xd7\x8a\x61\xf5\x45\x65\xad\x90\x93\xfc\xfa\xc6\x55\x2d\x67\x11\x24\xe3\x6a\xf8\x54\xbf\x54\x2a\x4c\x8d\x8d\xe5\xb4\x94\x92\xbb\xaa\xe9\x25\x8f\xd9\x9a\xf1\x88\x46\xf5\x9b\x7a\x49\x5d\xb3\x67\x6b\x6a\x4e\xd1\x4b\xd9\x94\xae\x2a\xc5\xd4\xd5\xd1\x9c\x96\xc9\x64\xf3\x99\xb1\x2b\xe2\xff\xc7\xdf\x72\x74\x5d\x57\x8b\x53\xd7\xb3\x6b\xa9\xab\x37\x63\x61\xb3\x37\xbc\x37\x47\xf8\x93\x71\x06\x8c\x7d\x73\x90\x1e\x15\xce\xee\x15\xee\xca\xde\x30\x7d\xda\xa9\xa2\x96\xbf\xa6\xad\xe8\xec\x57\x07\xd9\x47\xfa\x69\x07\xdf\x6c\xbc\x42\x35\x06\x81\xcd\x14\xb5\x7c\x52\x5b\x69\x92\x8f\x5a\x1c\x90\x30\xfe\xb9\x14\x97\x4d\x2f\x66\xf5\x6d\x57\xf1\x7a\x2b\x9c\xd2\x35\x05\x80\x2d\x06\x1b\x9e\x61\x36\x28\x4d\x0a\x1f\x0b\x66\x54\xa4\x78\x18\x2e\xcb\x03\xbf\x33\xfc\xce\xf0\x3b\xc3\xef\x0c\xbf\x33\xfc\xce\xf0\x3b\xc3\xef\x0c\xbf\x33\xfc\xce\xf0\x3b\xc3\xef\x0c\xbf\x33\x4a\x60\xc3\x2b\x0c\xaf\x30\xbc\xc2\xed\xf2\x0a\x7f\xb5\x97\x1e\x90\xf9\xa0\x38\x5a\xb1\xa2\x85\x3e\xd3\xcb\x3e\xdd\x4b\xb7\x8b\x5f\xab\x46\x0a\xdd\x97\x51\x4b\x1c\x34\xa1\xe0\xb5\x33\x4a\xe8\x1d\x43\x34\x14\x90\x69\xcb\x4e\xa9\xf5\x9f\x07\xd9\x6f\x74\x50\x4a\xad\xb8\x77\x4a\x2d\x47\x22\x2d\x70\xd5\x26\x71\x55\x24\xd6\x02\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\x05\x61\xdd\xee\x84\xf5\x32\xcd\x07\x67\x0b\xaa\x29\x45\xd0\x8f\x5f\x66\x9f\x18\xa0\x07\xbc\x52\x04\x85\x86\xc5\x0c\xb2\x28\xbf\x7d\xa6\x71\xd4\x56\xc3\xd7\xb3\xf9\xb4\x33\x57\x4c\x74\xc4\xd8\xd5\x23\x9d\x8f\x67\x26\x9f\x0e\x07\x7b\x09\x70\x2e\x70\x2e\x94\x25\x06\xe7\x02\xe7\x02\xe7\xea\x1e\xce\xd5\x41\x55\x77\x3b\x86\x73\xa1\x1c\x2c\x38\x17\x38\x17\x38\x17\x38\xd7\x56\x94\x83\xed\x6a\x2c\x85\x7a\x95\xdd\x5c\xaf\x32\xb1\x0d\xb1\x54\xf2\x32\xcd\x08\x41\xd4\x11\x9a\xe2\x82\xa8\x7d\x34\x41\xe3\x7e\x82\xa8\x18\x67\x57\x1b\xf1\x58\x59\x8e\xe7\x40\x0d\x54\x3a\x58\xde\x34\xcd\x8e\x7b\x44\xa5\xd7\x93\xfc\xf8\xda\x63\xde\x4a\xab\x3b\xd8\x6d\x5c\x64\x15\xf9\xd9\x9d\x3e\xd4\xec\x01\xf1\x5d\x0a\x2b\x2e\x40\x16\x13\xbf\xb6\x1d\x91\x
\x67\x8e\x81\x6b\x9d\xda\x0c\x7f\x55\x63\xef\xfc\x2a\x24\x27\x69\x3f\xdb\x1b\x89\x5b\xcb\x95\xd7\x3b\x97\x3a\x76\x03\xb7\xc4\x3a\xe7\x43\xc3\x94\x94\xeb\x9c\xf5\x92\xa6\xa7\x94\x5c\x36\x9f\x31\x96\x3b\x22\x89\x87\xcf\xa2\x87\x3f\x21\xe3\x85\xce\x15\xb4\xb4\x79\xa0\x5a\xd4\xd9\xd7\x86\xd8\x1f\xf7\xd3\xdd\x8e\xb6\x8c\xaf\xe7\xcb\x3d\xb5\x65\xfa\x38\x65\x35\xbb\xa4\xa5\xa7\xad\x66\x9b\x94\xf7\x63\x92\x1f\x30\x6d\x5f\x9a\xf3\xab\xec\x73\x6a\xa4\x03\x69\x42\x0e\x36\xc7\x68\x90\x53\x2e\x9f\xde\x76\xaf\xf0\x6a\x49\x39\x82\xa4\x20\x48\x0a\x82\xa4\x20\x48\x0a\x82\xa4\x20\x48\x0a\x82\xa4\x20\x48\x0a\x82\xa4\x20\x48\x0a\x82\xa4\x20\x48\x0a\x82\xa4\x20\x6d\x4e\x0a\xb2\xfd\xd0\x07\xd2\x8e\x20\xed\x08\xd2\x8e\x6c\xa3\xb4\x23\xef\x1d\x16\xf2\x74\x7d\x4c\xc0\x1b\x3b\xd5\x88\x1f\xa6\x14\xfb\x99\xf9\x46\xbe\x39\xc4\xbe\xdd\x4b\x3b\xc5\x8f\x57\x36\xe2\xa1\x37\x78\xe4\x17\xe1\xf8\x2b\xfa\x88\xb1\x85\xff\xe9\x72\xdd\xf1\x5f\x9a\xac\xbc\xb8\x44\x87\x05\xbd\xdb\x47\x13\x9c\xde\x8d\x50\x94\x86\x7c\xf9\xb9\xbc\xf5\x8d\x78\xac\x36\x50\x17\x20\x3e\x3f\x1f\xcc\xf1\xc6\xd8\xa8\xe4\x78\xae\x6e\x97\x24\x4f\x5c\x85\x93\xdb\x85\xfe\xe5\x0e\x67\x27\x3f\xe2\x9d\x01\x44\xf4\xf3\x63\x72\x63\x5b\xba\x3a\x71\x8c\x8e\xd0\x54\x99\x7b\xb2\x8e\xbe\x86\x3b\x12\x0a\x8b\x06\x15\x16\xef\xa9\x52\xf7\x7a\x53\xaf\xff\x09\xa1\xbb\x38\x44\x93\xb6\xee\xa2\xae\x16\x6a\xd4\x56\xb4\xc0\x4e\x44\xdf\x76\xaf\xd3\x4e\xdc\x63\xe6\xe0\xc8\x4b\xdb\xf0\xa8\xf8\xa1\x3d\xa6\x01\x49\x36\x90\x64\x03\x49\x36\x90\x64\x03\x49\x36\x90\x64\xa3\x91\x24\x1b\xed\x4c\x8b\xd1\xdc\x8c\x1e\xb5\x27\xd9\xa8\x6d\x0e\xe0\xf7\xb9\xf7\x9c\x1b\x24\xfe\xcb\x9d\xce\x39\xc0\x93\x01\x69\x33\xc4\xc4\x60\x57\xc1\xd2\x29\xb4\x72\x5e\x00\x35\x65\xb7\xcd\x46\xa0\xa6\x84\x9a\x72\x8b\xd5\x94\x2d\xa6\x41\x81\x52\xca\x16\xac\xf3\x92\x13\x34\xce\x62\x91\x11\xcb\x35\x70\x9f\x53\x4e\xc9\xf7\xbf\x25\x94\x94\x7f\xf6\x02\x4d\x09\x3c\x99\xd2\xb4\x62\x3a\x9b\x2f\xcb\x87\xec\x07\x29\xf9\xd4\x5b\x67\x1f\x7b\x81\xfd\xe3\x01\xba\xc7\x79\xac\xf1\x49\xdc\x2d\x3c\xe6\x45\xe9\xeb\x33\xe7\xd2\xda\x6a\xd8\xe8\xef\xf0\xa2\x71\x70\xf4\x09\x63\xa7\x19\xc7\x91\xce\xcf\x22\xdf\xa5\xc3\x35\x8c\x09\xc8\xf8\x20\xe3\x4b\x40\xc6\x07\x19\x1f\x64\x7c\x90\xf1\x75\x8d\x8c\x2f\xd1\x31\x32\xbe\xa6\x5f\x49\xc3\x32\xbe\x04\x64\x7c\x90\xf1\x41\xc6\x07\x19\x1f\x64\x7c\xed\x97\xf1\x25\xba\x5a\x13\x97\x80\x26\xae\x75\x9a\xb8\x44\xa7\x6b\xe2\x12\xdb\x50\x13\x97\x7c\xb3\xbf\x97\x68\x4e\x80\xc4\x63\x74\x84\x83\xc4\x03\xb4\x8f\x26\xaa\x64\xa1\x72\x20\xab\x8d\x78\x8c\x43\xa3\xc5\xac\x6e\xe9\x43\x1e\xf3\x26\x87\x77\xb0\xdb\x44\xe9\xf7\x37\x06\x83\xc3\xfd\x6c\xaf\x87\x43\xc8\x03\x96\x49\x94\xc8\xaf\x81\x22\xef\xd8\x59\x49\xc5\xee\x16\x1f\xa2\xb0\x22\x01\xd8\x6e\xf1\xff\xf6\x20\xb0\x46\x13\x96\x7a\xf4\x30\x7c\x32\x90\x94\x35\x28\x29\xfb\x93\x2a\x92\xb2\x3a\x13\xd0\x79\x0c\xcc\x86\x32\x99\x7a\xb5\x73\x52\x38\xc6\x4f\xd0\x31\x87\x63\xbc\x81\x86\x82\xed\x50\x41\xab\xdd\x0e\xd5\x68\x7b\x5c\x72\xb5\x6f\x8e\x54\xda\xa1\x90\x94\x24\x38\xf4\x1d\xda\xaa\xb4\x49\xa3\x62\x9b\xdb\x26\xcd\x58\x3b\xb6\xd6\x3a\xb5\x46\xcf\x06\x40\x0b\x40\x0b\x40\x0b\x40\xdb\x3d\x80\x16\xf3\xb3\x80\xf9\x59\xe7\x10\x6c\x68\x89\xdb\xa2\x25\x86\xa3\x00\x8e\x02\x38\x0a\xe0\x28\x80\xa3\xa0\xab\x1d\x05\x08\x29\x41\x48\x09\x42\x4a\x5a\x15\x52\x02\x3f\x1c\xfc\x70\xdd\xea\x87\x4b\x66\x9a\x5c\xb2\x38\x48\x14\x1f\xf5\x46\xca\xf7\xb3\xfb\xc4\xdb\x6c\x5b\xda\x16\xe2\xe5\xed\xa8\x78\x7f\x75\x90\x86\x85\xe2\x5d\x98\xc0\x31\x59\xdb\x72\xac\xa0\xa5\xd3\x59\xbd\xb8\xce\x61\xf0\xca\x7a\x3a\xa3\x96\x74\xf6\x33\x83\xec\x1f\xf4\xd3\xdd\x62\x57\xbb\x52\x72\x75\x7d\xfb\x92\x96\x9e\xb5\x5a\x4
a\xf0\x96\xa2\xfb\x8c\x43\x84\xc9\x94\xd5\x91\x3d\xf6\x9a\xd7\x8a\xd3\xb9\x9c\x05\xd9\x9b\x97\x65\xbf\x45\x49\x7c\xd7\x44\x08\xe1\x38\x0f\x21\x34\x06\xfd\x0c\x4d\xd3\x71\x5f\x1f\x89\xfc\x4c\x9a\x15\x51\x3d\x3a\x60\x31\xab\x07\xc7\x83\x04\x79\x75\x2f\x07\x0f\xf7\xc3\xec\x90\x1c\xee\xe2\x9a\xe4\x08\xf7\xb8\x22\x8f\x1a\xa8\x90\xfd\x43\xf6\x8f\xec\xbd\xf0\x2a\xc1\xab\x04\xaf\x52\xf7\x78\x95\x90\xbd\x17\xd9\x7b\x41\xf3\x41\xf3\x41\xf3\x41\xf3\x3b\x82\xe6\x23\xb7\x2e\x72\xeb\x6e\x17\x7e\x89\xdc\xba\xad\xc8\xad\xfb\xd6\x61\x3a\x20\x50\x9e\x5e\xd2\x8a\x4a\x46\x75\xe4\xad\x10\x95\xc0\x36\xb4\xdc\xfa\x9a\xaa\x94\x4a\x4a\xea\xaa\xb3\xda\x31\xfb\xfc\x10\xfb\x61\x3f\x91\x3c\xee\xca\x46\x3c\xf4\x29\x59\xee\xcb\x59\xe3\x2b\x6f\x1a\x33\x93\xec\x5d\xe2\xed\x4d\x5b\xed\x35\xa5\xc8\xd7\x88\x3d\x83\xe1\xb3\x34\xf9\xbd\x33\x26\x41\xf6\xd4\x6c\xd0\x35\x1b\x76\xb4\x1d\x8b\x3e\xc6\x4f\xb7\x2c\x6e\xe5\x52\xbc\xfc\x1a\x3b\x1d\x22\x6e\x45\x25\xb0\xba\x10\xa1\x7b\x70\x49\x54\x58\xde\xcb\xee\x2a\x60\xfe\x29\x6b\xf8\x8d\x83\x21\x82\x21\x82\x21\x82\x21\x82\x21\x82\x21\x82\x21\x82\x21\x82\x21\x82\x21\x82\x21\x82\x21\x82\x21\xb6\x9b\x21\x1e\xa5\xc3\xec\x50\x64\xd2\x12\x30\xed\x72\xa6\xf9\x2c\x5f\xe3\x55\x66\xfc\x04\x82\x04\x82\x04\x82\xec\x62\x04\xf9\x2d\x46\x93\xc6\xa3\xab\x92\x2a\xb7\xa0\xa5\x4d\xec\x38\x56\x28\x6a\x2f\xde\x1c\xfb\xbb\x86\xa9\x79\x89\x7d\x80\xb1\x7f\xd6\x4b\x77\xa4\xb4\x22\x07\x90\x4f\xa6\xb4\x7c\xde\xb0\x7a\x27\xe7\x2e\xd8\xd8\xa7\xa4\x85\xf9\x41\xc6\xf5\x2f\x69\xe9\x68\x54\xee\x35\xa3\x15\xd5\x4b\xf1\x93\x6a\xc9\x0e\xc9\x5f\xd2\xd2\x4b\xc6\xae\xcf\x64\x4b\x57\x97\x94\xd2\xd5\x89\xbe\xe8\x58\x54\x30\xb6\x49\xda\x25\x18\xdb\x83\x74\x3f\x67\x6c\xaf\xa3\x3b\x3f\xde\xb3\x83\xe4\xf0\x08\xe4\x65\x4f\x78\xf3\x2c\x62\x3b\xe4\xe5\xd0\xb5\xa7\x83\x91\x5a\x8c\x8d\x78\xe4\x52\x89\x44\x6c\xf5\x1d\xbf\x7e\x19\xb8\x4f\x21\xcf\xde\x59\xba\x58\x73\xef\x2c\xad\xb7\xa7\x77\x6a\xbb\xf5\xa0\xdb\x75\x01\xc4\xe0\x0e\x8f\xfc\x73\x47\xef\x3c\x65\xf5\xce\xb9\xe5\x2a\xdd\xb3\xc7\xdd\x3d\x9a\xbe\xcd\x46\x4f\x3d\x5d\x18\xfd\x39\x47\xff\x0c\x9a\xfd\x33\x3b\xb7\x38\x77\x61\xce\xbf\x87\x46\x5d\x3d\x24\x12\x49\xdc\xba\x7d\x34\xf1\xf3\x8e\x3e\x1a\x32\xfb\xe8\xdc\xd2\x85\x85\x73\x67\x97\xfd\x3b\x29\xe6\xea\x24\xd9\xf0\xf6\xea\xa5\xe0\xbe\xb1\xfb\x71\xca\xf3\x4d\x3b\x35\x37\x3d\x5b\xeb\x9b\x76\x4a\x55\xd2\xdb\xab\x7f\xea\x19\x45\x89\x9f\x75\xf4\xcf\x80\x65\x89\xa6\x2f\xcc\x9c\xf2\xef\xa0\x11\xb7\x29\x32\x3e\xac\xb7\x6e\x0f\x25\x8f\xd0\x14\x3b\x18\x39\x60\x2d\x33\x1e\x71\x2e\x33\xca\x8e\xdc\x8a\xba\x02\xfb\x69\x2f\x8b\x47\xc6\xac\x33\x3c\x60\xfc\x6b\xce\xb4\xcc\x79\x50\x84\x6f\x73\x1e\xa6\xd0\x15\xf6\x42\xe8\x4d\xe6\x6c\x6d\xda\x78\x56\x7c\x5d\x7f\x55\x0d\x5f\x3c\xbf\x18\x36\x5b\x59\xd7\x55\x6b\x8a\x99\x5a\x2f\x16\x8d\xa5\x9c\x18\x0d\xe6\xe2\xc4\x18\x1e\x5a\x3a\x26\x4e\xe1\x0c\xd7\xf8\x91\x41\x7a\xc8\x9c\x60\x19\x4b\x97\x6c\x4a\x55\x52\x29\x6d\x3d\x5f\xd2\xd9\x77\x07\xd8\x37\xfb\xec\xa1\x37\x52\x3d\x2a\x63\x59\x1c\x3d\x2d\x8e\x8e\x0e\x89\xf2\x03\xc6\xe8\x73\x6f\xd9\x6e\x41\x18\x8a\x48\x7b\x35\xce\xd3\x5e\x19\x6f\xc4\x24\xed\xa7\xbd\x55\x12\x55\x15\x55\x1e\x67\xe4\xba\xe9\x9a\x02\x2f\x96\x82\x5f\x8c\x51\xb6\xa7\xda\x14\xcf\x7d\x52\x0a\x0a\xe5\x80\x9b\x14\x6e\x52\xb8\x49\xe1\x26\x85\x9b\x14\x6e\x52\xb8\x49\xe1\x26\x85\x9b\x14\x6e\x52\xb8\x49\xe1\x26\x85\x9b\x14\xa1\x16\xf0\x73\xc2\xcf\x09\x3f\x67\xbb\xfc\x9c\xbf\x7a\x99\xf6\x88\x50\x0b\xc5\x58\x74\x65\xb2\xbc\x40\xb4\xbb\x54\xa8\x5c\xea\x67\x53\xaa\xce\xfe\xde\x65\xf6\x99\x01\x62\x65\x3b\x73\x37\x43\x75\x4a\x37\xbd\xb4\x20\x21\x51\xf4\x71\x63\xcf\x69\x77\x0b\x97\xe2\xf6\x0e\x1d\x0e\xe6\x50\x
1e\x14\xf0\x0a\xe5\x41\x01\xaf\x00\xaf\x00\xaf\xba\x08\x5e\x75\x50\x72\xf5\x8e\x81\x57\xc8\xfa\x0d\x78\x05\x78\x05\x78\x05\x78\x85\xf2\xa0\x48\x4b\x7c\xeb\xb0\xa6\x8e\x4f\x4b\xbc\x2d\xcb\x83\xbe\xe4\x5f\x23\x70\x45\x28\x9d\xde\x44\xcf\x72\xa5\xd3\x32\x3d\x4d\xe7\x3c\x95\x4e\xc6\x00\x19\x55\x32\x19\xa3\x17\x4b\x5a\xd1\x4e\x16\x51\x8e\xaf\x36\xe2\x31\x9b\x28\x39\x6b\x87\xbe\x39\x58\xec\x74\x94\x1d\x96\x02\x27\x6f\x28\xe6\xd2\x00\x0a\x01\x94\x7d\xae\x40\xf1\x53\xe4\x9d\xe4\x09\xd0\x1e\x30\xcb\x88\xe6\x9d\xb0\xec\x09\xf1\x6b\x3b\x70\x99\xa0\x5b\x0a\x5d\xa1\x17\xca\x4a\x89\x9e\xa1\xd3\x4d\x7c\x1a\xa8\x63\x85\x3a\xa3\x0d\xd6\x19\xfd\x5c\x2f\x5d\x16\xa6\xe2\x19\xba\xc8\x4d\xc5\x39\x6a\xee\xe0\xa4\x94\xa8\x35\xfa\x3c\x3d\x67\xd7\x1a\x6d\xfa\x49\xd2\xa2\x10\xe9\x0b\xf4\x26\x47\x21\xd2\xa6\x9f\xa5\xf1\x84\xdb\xa2\x7c\x69\x6d\xa6\xb2\xc2\x00\xd6\x6e\x3b\xa3\x7f\x34\xe2\x69\x0a\x1f\xf7\xac\x64\xea\xb0\x8a\x7b\xc4\x0e\x15\x56\xd1\xae\x68\xda\x32\xfb\x88\x62\xa6\xc0\xc9\xc0\xc9\xc0\xc9\xc0\xc9\x28\x66\x8a\x62\xa6\x28\x66\x8a\x62\xa6\x70\x6b\xc0\xad\x01\xb7\x06\xdc\x1a\x70\x6b\x34\xc5\xad\x81\x62\xa6\x28\x66\x8a\x62\xa6\x28\x66\x0a\xaf\x21\xbc\x86\x5d\x54\xcc\xb4\xe5\x7e\xb9\xe6\x97\x1c\xfd\x57\xbb\x68\x51\x88\xe7\x53\x6a\x51\xba\x45\x55\xdd\x56\xce\x8b\xfa\xa3\xce\x6d\xd9\x4c\x3e\x9b\xcf\x98\xb3\x2a\x33\x7b\x98\x52\x28\x14\xb5\x0d\x25\xc7\xbe\xfe\x08\xfb\x6e\x1f\x3d\xe0\x3c\xc2\xaa\x4d\x7a\xb0\xa8\x2a\xe9\xb0\xb9\xab\x39\x3a\xec\x89\xf3\x8c\x7d\xd0\xb2\x38\xcd\x79\x71\x9a\x28\x3f\xd2\xb1\x59\x97\x85\x4a\x7d\x8f\x98\x96\x27\x69\x2e\x39\x4f\xae\xd3\x73\x62\xf4\x2d\xd3\xd3\x7c\xf4\x9d\xa6\x05\x3a\xe9\x9f\xd0\xc2\xd9\xa7\x66\x6d\x51\xdf\x6b\x0e\x1c\x8b\x6f\x09\x1e\x5f\x67\xd9\xa2\x59\x2c\xb7\xf2\x79\xca\x01\xe5\x7f\x01\x15\x05\x45\xaf\x3d\xea\x3d\xfc\x6f\x67\xfd\x19\xb5\x44\xa1\xef\xed\xf4\x79\xd4\x87\x8b\x6a\x21\xa7\xa4\xd4\x86\x9e\xb6\x79\xf0\xd6\x3f\xf0\xc4\x0b\xf4\x26\x7a\xb6\xcc\x95\xdc\xbc\x27\x0e\x82\x08\x37\x72\x83\x6e\xe4\xdf\xe9\x69\xa9\x29\xba\x22\x5c\xc8\x6f\xa4\x4b\xb6\x0b\xb9\x7b\x6d\x5d\x61\xbd\x44\x89\xef\xdc\xe5\x63\xeb\x66\x0b\x4a\xb1\x94\xe5\x8e\x35\xb1\x24\x6f\xc8\xe8\x1d\x2a\x18\x33\xaa\xad\x35\x79\x8f\x94\x37\x34\xca\x2f\x6a\x8f\xf1\xe7\xd4\x2e\xe7\xc6\x35\xb5\x98\x51\x9d\x5b\x07\x9c\x5b\xf9\xa4\x46\xcd\x64\x53\xa3\x15\xfb\xb9\x5a\x31\xfe\xbe\x29\xb7\x1a\x57\x99\x38\x4b\x8b\x94\x2c\x33\xb7\x53\x74\xb0\x81\xe9\x1d\x4f\xdb\x06\xfb\x1a\x64\x5f\x3f\xda\x47\xaf\xf4\xb1\x0f\xf7\x85\x3e\x68\x51\x85\x97\xfb\xba\xc7\xbe\x96\x31\x55\xa3\x9f\xb9\xaf\x9a\x73\x7f\x63\x70\xda\x01\x90\x43\x3e\x03\x77\x98\xc3\x11\x21\x6d\x50\x72\xfc\xc8\xbc\x96\x1f\x15\x47\xf3\x3d\xb8\x63\x46\x0f\x0f\x25\x75\x2d\xbf\x24\x22\x01\xcf\x18\xaf\x85\xfc\x7b\xd9\x7c\x59\xec\x1f\x87\xfd\x0d\x7f\xe2\xcb\x3d\xf4\x5a\x0f\x7b\xb5\x27\xf4\x79\x6b\x85\xf6\xa1\x9e\x79\xcd\x58\xcf\x67\xf5\x70\x46\xe3\xdd\xae\x85\x23\xab\xc6\x4f\x91\xf0\xb4\xeb\x2e\xf8\xd2\x56\xb8\x77\xd6\x75\xdb\x49\x3d\xaa\xa4\xf8\x8d\x73\xd7\x7f\x2e\x9b\x92\xdc\x46\xcd\xa5\xf5\xb0\x76\x43\xf6\xa9\x70\x8d\x17\x54\xad\x90\x53\x63\x61\x71\x46\x1e\xba\x69\x3e\x4b\xbe\xc4\xf4\xec\x00\xeb\xf4\x91\xdb\xf8\x65\xb9\x34\x95\x5b\x3b\xa7\xf6\x2f\x87\xc5\x2f\x9d\x6a\xfc\x10\x55\x7c\x4d\x36\xf1\x65\x4a\xce\xd3\x2c\x4b\x44\x4e\x58\x29\x0f\x9f\x72\xa6\x64\xf4\x3d\xae\xf5\x29\xe0\xd9\x77\x86\x68\xc2\xa7\xaa\x9d\x58\x28\x8a\xd2\x76\x29\x3d\x9b\xd7\xd2\xaa\x55\xd1\xee\x63\x43\xec\x4f\xfb\xe9\x1e\xbb\xa2\x9d\xf8\x6e\x7e\xa4\x96\xb2\x76\x33\xcb\x0b\x67\xb5\xb4\xda\x11\xd5\xec\x42\xee\x6a\x76\xe2\xeb\x2c\xae\xaf\xc3\x03\xbe\xb7\xa4\x92\x5d\x50\xa5\xb9\x6b\xcf\x05\xbf\x5a\x93\x6c\x7f\xb5\
x52\x77\xb2\xf7\x2b\x27\x73\x48\xcf\x88\x08\x77\xa4\x67\x84\x24\x11\x92\x44\x48\x12\xbb\x48\x92\x88\xf4\x8c\x48\xcf\x08\x29\x18\xa4\x60\x90\x82\x41\x0a\xd6\x11\x52\xb0\xe4\x3e\x9a\x60\xe3\x91\x98\xc5\x32\xee\x77\xb1\x0c\xb1\x7c\x43\xf1\x3a\x24\x75\xdc\xb6\x92\x19\x24\x75\x6c\x45\x52\xc7\x8f\x0e\xd2\xb0\x20\x8d\x79\xb5\x74\x43\x2b\x5e\x37\xe6\x78\x65\xb0\x91\xcb\x50\x74\x5d\xd5\xd9\xdb\x06\xd9\x5f\xf5\x11\xb3\x77\xb5\x18\xe3\x53\xd5\x53\x3a\x2e\x88\x26\xa2\xbc\x3e\xcb\x59\xeb\x70\xc9\xf6\xe4\xd6\xed\x56\x75\xc5\x3f\x6a\x96\x92\x02\x02\xce\xd0\x34\x87\x80\x87\xe9\x10\x4d\xfa\x92\x76\x47\xcf\x9b\x9c\x5d\xf6\x88\x33\x19\x41\x40\xaa\x80\x6b\x2f\x04\x43\xbe\x29\x76\x50\x42\xbe\x8a\x87\x2d\x39\x9f\x3c\x2f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\x1f\x38\xdf\x96\x73\x3e\x10\x3b\x10\x3b\x10\xbb\x2e\x26\x76\x6f\x8d\xd2\x29\x41\xec\x44\xc8\xeb\xd8\x86\x29\x07\xb4\xca\x3a\x4b\x41\x20\xff\xfb\xa5\xb1\x82\x96\x4e\x67\xf5\xe2\x3a\x5f\x99\xaf\xac\xa7\x33\xaa\x15\x4e\xc6\x3e\x31\xcc\x5e\xbe\x8d\x76\x8a\x96\xae\x6c\xc4\x43\xbf\x5a\x8b\x56\x70\x49\x4b\xcf\x5a\x2d\x26\x78\x8b\x1d\xa1\x1b\x8c\xf2\xd3\x89\x38\xde\x4b\x71\x57\x2d\xf1\xf2\xeb\xed\x74\xb6\xd8\x91\x3a\xc2\x67\x83\x11\xe3\x01\xb6\xcf\xa3\xf8\xb3\x18\x5f\x76\x95\xf4\xf2\xa7\x01\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\x08\xbc\xd8\x6e\xbc\x78\x82\x8e\xb1\x23\x91\x29\x4b\x46\xf8\xb8\x53\x46\xe8\xb1\x72\xab\x94\x14\xbe\x99\x2e\xb3\xe7\x23\xcf\x59\x2d\x4c\xc9\x6b\x17\xa1\xcd\xc6\x14\x69\xbd\x74\x35\xac\xa7\xb4\x82\x3a\x12\xd6\xd7\x53\x57\x8d\xe7\xc7\xe7\xc0\xaa\xb2\x26\x80\x4a\xa1\xa8\xf1\xbe\x8a\xec\xb4\x30\x06\x44\x8b\x40\xa0\x40\xa0\x40\xa0\x1c\x81\x7e\xeb\x11\x3a\x5b\x86\x40\x85\x52\xb1\x7e\x02\x3a\xa6\xf3\xf4\x64\xec\xc7\x1e\x61\x9f\xea\xa3\xbb\x2d\x10\x2a\x54\x8d\xe3\x3c\x91\x96\xd8\xa5\x32\xc7\x88\x87\x39\x8c\xee\x33\x8e\x30\x09\x24\x6f\xa4\x2a\x86\x14\xc9\xd1\x9a\x9c\x3c\x2b\x4b\x67\x05\x3b\x3c\x49\x73\x9c\x1d\x1e\xa7\xa3\x74\xd8\x57\x7e\x28\x93\x3d\x9a\xd2\x43\x2f\x3c\x17\x44\x0e\xab\x67\xaf\xba\x76\x39\x98\x1b\x1e\x66\x87\x6a\x65\x85\x15\xda\xc4\xd0\xfb\x77\x56\x3c\xb9\xbd\x66\x5e\xac\x7a\x1e\xde\xa4\x3c\x68\x6b\x9f\x5f\xe2\x69\x32\xbe\x70\xee\xe4\x2c\x9b\x7b\x80\xc8\xcf\x82\xfc\x57\x0d\xe6\xbf\xfa\x4c\x4f\xd3\xad\xc9\xb2\xc8\x79\xb5\x48\x49\x3b\xe7\xd5\x16\x9a\xa8\xc2\x7a\xeb\x4d\x54\xe2\x97\xef\xaa\x30\x51\x47\x2a\xd2\x59\xd5\x63\xab\xf6\x17\x9c\xbe\xae\xf6\x5b\x2a\xa4\xb0\xea\x36\x13\x89\x14\x56\x48\x61\xb5\xc5\x29\xac\xda\x3f\xb3\x0d\x4c\x5b\xd5\xe2\x0f\x47\x57\x02\x19\xf6\x13\xc3\x74\x44\xe6\xbf\x4a\x5d\x55\xd3\xeb\x39\x8f\xa8\x34\xa1\x79\x29\x14\xb3\x5a\x31\x5b\xba\x99\xca\x29\xba\x6e\x67\xc2\xfa\xad\x21\xf6\xae\xdb\x88\xd9\x47\x5b\x5f\xdd\x4f\xd6\x24\x70\x91\xad\xce\x18\xad\x76\x84\xb4\x65\xb7\x48\x89\x65\xdd\x8f\xfc\xe4\xbb\x2e\x14\x9a\x96\xca\xf7\x37\x15\xfc\x7a\x9e\x60\xc7\xcc\xd4\x57\xe5\x83\xcd\x7c\x53\x9d\xbd\xec\x91\xd0\x34\x48\x38\x03\x75\x0b\xd4\x2d\x50\xb7\x40\xdd\x02\x75\x0b\xd4\x2d\x50\xb7\x40\xdd\x02\x75\x0b\xd4\x2d\x50\xb7\x40\xdd\x02\x75\x4b\x9b\xd5\x2d\x53\x74\x90\x1d\x88\xec\xb3\x50\xc8\xc3\x2e\x98\xe2\x5c\xe5\x21\x55\x16\x54\x27\x50\x9d\x40\x75\xe2\x50\x9d\xbc\xb3\x8f\x22\xb2\x84\x9b\xa6\x15\xd3\xd9\xbc\xb3\x9e\x9c\x85\x25\x19\x2f\x7f\x4e\x0f\x38\xf7\xb1\xe0\xe3\x43\x19\xb5\xe4\xfe\x5e\x8b\xbb\x89\x46\x32\x6a\x69\xc6\x71\x84\xc4\x7b\xd3\x4b\x0b\xe6\x2a\xb1\x89\xae\xbb\x8a\x86\xa6\x2a\x1a\x9a\xaa\xa5\xa1\x64\x81\x2e\x0a\xa0\x77\x96\x16\x39\xd0\x9b\xa7\x59\x4a\x34\x00\xf4\x1c\xf7\xb9\x98\x0d\x2e\x26\xc1\xfe\xeb\xb0\x19\x03
\xe9\x95\xb5\xac\x7a\x38\xa4\x95\xcd\xcc\x64\xc5\x3f\x3d\xcc\xfe\xa2\x9f\x5e\xe7\x4a\x6a\x56\x5b\xcd\x04\x99\x2d\xab\x23\x00\x71\x84\x9f\xce\x99\x5a\xcd\x76\x07\xcb\xeb\x04\x1f\x6e\x20\xe6\xf1\x8d\xc1\x00\x79\x3f\xdb\x5b\x7f\x5a\x35\x40\x61\x40\x61\x40\x61\x40\x61\x40\x61\x40\x61\x40\x61\x40\x61\x40\x61\x40\x61\x40\x61\x40\x61\x40\xe1\x8e\xaa\x9c\x20\x57\x6e\xb7\x82\xaa\x0e\xc0\x19\xc0\x19\xc0\x79\x1b\x01\xe7\xbf\x37\x4c\x27\x1b\xa5\x9c\xf2\x08\x2e\x38\xce\xaa\x3a\xfb\xcc\x10\xfb\x4a\x05\xe4\xbc\x29\xbf\xc2\xf9\x74\x76\x23\x9b\x5e\x57\x72\x2e\xdc\x69\xcd\x78\x25\x54\x14\xb1\x26\x4d\x81\x9d\xb1\xe8\x48\x15\x5c\xe9\x3a\xdf\x62\x56\x47\xb2\x36\x0f\x70\x59\x9b\xee\x5c\xe0\x47\x57\x87\x92\x47\x7e\xb6\x4a\x6e\x79\x6d\xb7\x37\x18\xbd\x8b\x11\xbf\x71\x5e\x73\x02\x08\x13\x08\x13\x08\x13\x08\x13\x08\x13\x08\x13\x08\x13\x08\x13\x08\x13\x08\x13\x08\x13\x08\x13\x08\xb3\xcd\x08\x13\x30\x12\x30\x12\x30\x12\x30\xb2\x85\x30\xf2\x5d\x7d\xb4\x5b\xc0\xc8\xe2\x8a\x92\x8a\x99\x40\xaa\x4c\x03\x6b\xc9\x5f\x8d\x9d\xa6\x9d\xfb\x5c\xd9\x08\x90\xbf\x9e\x2f\x3f\xe2\x12\xe4\xaf\x3e\xf2\xd7\xef\xbd\x81\xe2\xc6\xb3\x30\x3a\xdc\x3f\xe5\x5d\x79\x8a\xbb\x4f\xbc\x81\x7d\xad\x97\xee\x48\x69\x45\xd5\x78\x18\xe1\xa0\xdc\x76\xd1\x47\x8d\x3d\x66\xb4\xa2\x5a\x56\x4b\xa3\x25\x49\xeb\xce\xd3\xa4\xe8\xcc\x71\x8a\xf1\xce\x1c\xa2\x01\x7a\xd2\x37\xb5\x87\x71\x17\x3c\x77\x8f\x96\xde\x6c\x76\xba\xb9\x60\x92\x1a\x61\x61\x49\x4a\x23\x11\x3b\x7b\x87\x8b\xa4\x86\x7e\x72\x87\xdd\xb7\xbb\x6b\xc8\x3e\x17\x0d\xcb\x9d\xda\xd4\xc3\x89\x29\x3a\x48\x07\xca\x12\x26\xd5\xd8\xc5\x48\x8e\x84\xfc\x71\x0d\xe6\x8f\x7b\x77\x4f\xe3\x2f\xf6\x11\x91\x28\x6e\x3f\xed\xb5\x13\xc5\xb5\xc3\x2c\xf0\x8c\x70\xb5\x99\x05\x0f\x67\x8a\xcb\x44\x24\xfe\xf3\x9d\xb6\x59\x18\xa9\x27\xe3\x5b\xf4\x31\x9e\x61\xa8\x0d\xd6\x01\xa9\xdc\xba\xcd\x5a\x21\x95\x1b\x52\xb9\x6d\x71\x2a\xb7\x16\xce\xf7\x6a\xb3\xdc\xde\xd3\x38\xdb\x86\x07\xa6\x7e\x4b\x8e\x53\xec\xff\x63\xef\xcf\xe3\xe3\xc8\xaa\xbb\x61\xfc\x53\x2d\xaf\x77\x56\x0a\xc2\x30\x9a\xad\xa6\x3c\x63\x2d\x96\x5a\x96\xed\x19\xdb\xf2\x78\x3c\x6a\x49\xb6\x25\x6f\x42\xf2\xd8\xc3\x30\x33\x76\xa9\xfb\xaa\x55\xe3\x52\x55\x53\x55\x2d\x59\x2c\x09\x30\x84\x40\x58\x03\x09\x10\xc3\x43\x12\xe0\x19\x06\xc2\x12\x48\x42\x02\x24\x64\xe1\x47\x02\x21\x21\x04\x92\x90\x10\xc8\xf2\x23\x4f\x12\x92\xbc\x3c\x7c\x42\x12\xc2\x24\x6f\xf2\x7e\xee\xb9\xb7\x6e\xdd\xea\xae\x5e\xd4\x6e\xc9\xf2\xe8\xcc\x1f\x9e\x56\x2d\xf7\xde\xba\xcb\xb9\xe7\x7e\xcf\xf7\x9c\xa3\xf7\x99\xbd\x12\x75\xb9\xa1\x22\x34\xdb\x33\x81\x34\xa6\xff\x73\x17\xb9\x2d\x3a\x6b\x70\xc6\x49\x40\xfd\x05\x3b\x4f\xad\x7c\xde\x2b\xbb\x61\xa0\xff\x5e\x97\xfe\x99\x0d\xf1\x26\xf7\xe2\xe6\x48\x25\xd3\xbc\x94\x61\x5e\x4a\x9b\x58\x25\xfd\x8b\xf1\x96\x99\xac\x80\x9d\xab\x0e\x7b\xfe\xb0\xe3\xc8\xad\x14\xfd\xe1\x52\xd6\x4e\x33\xb4\x8f\xc7\x27\x1b\x2f\xb0\x7e\x7d\x07\x5f\x60\xc9\x61\xa8\xb1\xd6\x90\x48\x82\x44\x12\x24\x92\x20\x91\x04\x89\x24\x48\x24\x41\x22\x09\x12\x49\x90\x48\x82\x44\x12\x24\x92\x20\x91\x04\x89\x24\xab\x4c\x24\x41\x9a\x07\xd2\x3c\x90\xe6\xb1\x8e\x69\x1e\xbf\xfc\x28\xa7\x16\x04\x03\x56\xa9\x14\xd4\x21\x18\x04\xa1\x15\xd2\xd9\xb2\xc3\xd6\xae\xfe\x3f\x8f\xe8\xaf\xda\x4e\x36\xb3\x57\xce\x2d\x0c\x76\xf6\x70\xfd\xd0\x17\x3b\x5b\x24\xfa\xa2\xb0\x59\xd3\xe2\xd5\x69\x1a\xf6\xde\xc1\x1e\x1d\x2e\x95\x02\xd5\xc6\xa5\x3c\xb0\xc6\x31\xba\x1c\x02\x57\x08\x5c\xe5\x10\xb8\x42\xe0\x0a\x81\x2b\x04\xae\xd6\x0d\x70\x95\x5b\x33\xc0\x55\xdb\x5b\xd2\x32\x70\x95\x43\xe0\x0a\x81\x2b\x04\xae\x10\xb8\x42\xe0\x6a\xf5\x81\xab\xdc\xba\xc6\x99\x72\x88\x33\xad\x1c\xce\x94\x5b\xeb\x38\x53\xee\x2a\xc4\x99\x26\x1e\x23\x23\x9c\xdb\x74\x1
f\x19\x02\x6e\xd3\x1e\xb2\x8b\xec\xac\x49\xff\xb3\x4a\xa5\x20\xbb\x30\x98\x55\x90\xa1\x66\x3c\x65\x1e\xbf\x3d\x9d\xce\xb4\x59\xdf\xc8\x99\x4c\xa7\x1a\x33\x99\xfa\xf4\x5e\x41\x54\x62\x8d\x10\xd4\x6e\xa5\x1d\x09\x4a\x93\xf9\xdd\x2d\x31\x0c\xf6\x1c\xbe\xeb\x18\x56\x02\xf1\xba\x93\x5f\x5d\x0d\xcc\x8b\x43\x54\x39\xf2\x00\xb9\xbf\x82\x5d\x9d\x25\x7d\xcb\xe9\x6b\x64\x54\xa3\xff\x47\x8b\xfe\x1f\x7f\xaf\xd5\x5c\xa4\xe4\x10\x17\x01\xfb\xc8\xbd\x20\x02\x76\x92\x65\x4e\x4b\x32\xca\x1d\x44\x0e\x92\x03\xb1\x83\xc8\xf2\x4b\x19\x63\xa5\xec\xd2\xef\x27\xf7\x91\x2d\xc3\x79\xb6\x91\xb4\x52\x4c\x63\x91\x53\xf2\x56\x42\xe4\xf4\xfe\x61\x5f\x2c\x72\x84\x4a\x6b\xe4\x3d\x87\x1d\x50\xc5\x86\xa3\x8a\x9f\x1d\xfc\x09\x2e\x7e\x46\xe4\x63\xab\x21\x88\x4e\x93\x29\x32\xa9\x0a\xa2\xde\x1c\x79\xa0\x05\x3a\xeb\x28\x7c\xc1\x29\x80\x54\x03\x04\x5f\x11\x7c\x45\xf0\x15\xc1\xd7\x75\x04\xbe\xa2\x32\xd6\x40\x19\x5b\x3b\xe8\xf4\xdf\x66\x08\x04\x44\xe8\xfc\x0b\x39\x54\x1f\xcb\x9c\x56\xcf\x83\xb6\x6b\x04\xfc\x4c\x67\xcc\xd0\x59\x6e\xca\x95\xb8\x49\xbc\x65\x89\x55\x02\x9d\xa5\xa8\x7d\xae\xe7\xf6\xbb\xb4\x68\xc1\x90\x88\x23\xa1\xaa\x1e\x72\x04\x55\x4e\x06\xa1\x1d\xd8\xf3\xf3\xb4\xc0\x14\x50\x67\x29\xb6\xb1\xc6\xa2\xdd\x76\xfa\xc4\x49\x11\x3e\xd7\x28\xfa\x56\x1e\xe6\x89\xed\x15\xe4\xc6\x13\x6f\x0e\x60\xbe\x8e\xc6\xa5\x1c\xb0\x46\xaa\x1d\x65\xb1\x37\xa3\x0f\x12\x95\xcc\xf2\xf5\x16\x15\x91\xe5\xed\xe4\x1e\x6a\x29\x6d\x34\x75\x68\xc2\x24\xb4\x20\xed\x04\x8c\x46\x00\x34\x02\xa0\x11\x00\x8d\x00\x68\x04\x58\xd7\x46\x80\x3f\xca\x90\x2f\x65\xf4\x2f\x66\x3a\x3f\x2f\x77\xdb\x9f\xcd\x8c\x2a\x8e\xa5\x25\x87\x5a\x01\x95\x4b\x7f\xd2\xf7\x4a\x56\x11\xf6\x61\x1e\x1b\x3b\x41\x01\x8a\x86\x3b\xf6\x4c\x65\x03\x3e\x98\xdd\x9b\x35\xa6\xb9\x1c\xe1\x9b\x64\x89\xba\x6c\x9a\xc6\xbb\x08\x35\x3c\xbf\x34\x67\xb9\x11\x25\xc9\x2f\xd3\x81\x59\xcb\x89\xb4\x7f\x93\xdf\x35\x8d\x59\xdb\xb5\x1c\xfb\xc5\x91\xf8\x9e\xa1\x86\x55\x00\x44\xdd\x1b\xe0\x00\x6b\x21\x56\x2d\x79\xe1\x5d\x41\xfc\x12\xd7\xb7\xb3\xc6\x98\x0d\x22\x49\x69\xb8\xe7\x57\x7f\x59\x6c\xc5\x08\xb9\xba\x0f\xda\x9f\x17\xce\x65\xcd\x1b\x79\x7b\x46\xa3\x0f\x49\x72\x9b\x5e\xb3\x81\xfc\xe8\x06\xfd\x95\x1b\x3a\xff\x47\x12\xdc\xbe\xd6\x71\x56\xc8\x41\x36\x45\xe7\xbc\x45\xa3\x68\xf9\x33\x56\x31\x01\x3b\x48\x45\x8d\xfa\xb3\x9e\x3f\xcf\xfa\x22\xb5\xa5\xa7\x2a\x2a\xaf\xdd\x50\x50\x6b\x22\x9d\xa4\xc4\xbf\xca\x66\xfa\x42\xde\x2e\xc4\x8a\x35\xec\x8d\xdc\x91\x3d\xea\x5d\xb6\x39\x89\x9d\x23\xda\xfd\xb2\x4a\x37\x46\x72\x53\x9a\x0e\x24\xad\x2d\x59\x59\xd6\xe0\xe0\x10\xc8\x62\x55\x5b\xed\xe2\xdf\xd0\xc5\xce\x3c\xf0\x2b\x39\x31\x82\x03\x46\x57\xce\xca\x5f\x28\xfa\x5e\xd9\x2d\xb0\xa7\x80\x4f\x06\x0f\x55\x74\x1c\x57\x56\x84\x06\x94\x2c\x24\xfa\x82\x19\x59\xd2\x01\xa3\xeb\xb0\xe7\x53\xa5\x58\x23\x6f\x05\x79\xab\xc0\xbe\x5e\xf4\x0f\x67\x10\x42\x79\x01\x57\xa7\xab\x0a\x9c\x95\x65\x64\xcd\x67\x95\x2a\xe7\x8d\xaa\xdb\xa0\x8d\x0d\x6d\x6c\xeb\xd4\xc6\x36\x51\x24\xc7\x39\x58\x3d\x46\x46\x00\xac\x3e\x48\x0e\x90\xfd\x2d\x80\x97\x3c\x78\x4f\x43\xc3\x55\x73\x28\x71\x7d\x64\x58\xc5\x90\x1f\xef\x4d\x87\xa5\x9f\xad\x3f\x8b\x8b\x87\x58\x74\x93\xab\x31\x4c\xc5\x7f\x67\x48\xa7\xe0\xad\x97\x43\x2f\xc8\x5b\x8e\xed\x16\x07\x16\x76\xcd\xd0\xd0\xda\x35\xa0\xff\x55\x46\xff\x46\x86\x3c\x5b\xb9\x77\x4e\xdc\xab\x1d\x94\xf0\xce\x22\x0d\x87\xe3\x17\xce\xf0\xe7\x31\x26\x61\x7a\x4c\xc2\xa7\x7a\xc9\x43\x8d\xe3\x43\xf2\x1c\xe9\x75\x33\xd7\xf8\x9e\x43\x67\x6c\x97\xed\x62\x32\x45\xf7\x17\x7b\xf4\xf7\x6e\x24\x37\xa7\x05\x95\xe4\x89\xd5\x3f\xde\x4c\xba\xee\x29\xcf\xa1\x39\x5e\xf2\x9a\x48\xd9\xbd\x0b\xaa\x4b\x89\x7b\x09\xdf\x14\x5b\x64\x94\x76\xaf\x71\x77\x88\x2b\x12\xb2\x
c4\x6e\x2c\x2a\x0f\xeb\xa3\x42\x18\xd6\x9c\x9c\xc2\xca\xa6\xf4\xb5\x2a\x4b\x61\x44\xea\x85\x04\xe2\xd9\xc2\x31\x8e\x09\xba\x83\x60\x1c\x13\xb4\x48\xa2\x45\x12\x2d\x92\xeb\xc8\x22\x89\x71\x4c\x30\x8e\x09\x5a\x82\xd0\x12\x84\x96\x20\xb4\x04\xad\x09\x4b\xd0\xc4\x3e\x72\xaf\xbe\xc7\xdc\x25\x11\xac\x9b\xd4\xd0\xac\xca\x19\xef\x99\x10\xa2\x15\x63\xac\x20\x2e\x8f\x31\x56\xae\x1e\xdf\x17\xfd\x17\xbb\xc9\x5e\x0e\x95\x06\xa1\xe7\x5b\x45\x9a\x8e\x8f\xe6\xd9\x46\x00\xf7\xf3\x56\xc9\xca\xdb\xa1\x4d\x03\xfd\xfb\x5d\xfa\x3f\x6d\x20\x37\x88\x1b\x12\xff\xfc\xe1\xe6\x82\x2e\x8f\x4c\x8f\x4f\xf3\x37\x47\x78\x91\xed\x4a\xe7\xbd\x1f\x5e\x10\x65\x0b\x00\xb3\xba\x32\x0c\xc2\xdc\x1c\xa2\x49\x1b\x23\x9a\x39\xfd\x01\x81\x68\x26\xe7\x90\x80\x31\xab\xfb\x3e\x05\xcd\xc4\x14\xdf\x88\x68\x22\xa2\x89\x88\x26\x22\x9a\x88\x68\x22\xa2\x89\x88\x26\x22\x9a\x88\x68\x22\xa2\x89\x88\x26\x22\x9a\x6b\x10\xd1\x44\xd4\x10\x51\x43\x44\x0d\xd7\x31\x6a\x78\x94\x6c\x1a\x70\xbc\x62\x30\xa0\xdf\xaf\xdf\x47\x36\xb0\x9f\xbd\xba\xe3\x15\x0f\xdb\x0e\x30\x34\x8f\x5a\x6e\xc1\xa1\xfe\xc4\xcd\x0d\xa9\x9a\xff\xb3\x83\x9c\xaa\x8b\x3f\xd6\x60\x66\xa6\x01\x92\x11\x43\xf3\xe7\x77\xe8\x1f\xe9\xa8\xc6\x25\xef\x86\x24\xd3\xc9\x83\x6e\x35\x36\xd5\x9b\x65\x8f\x25\xd1\xc3\x98\xfe\x58\xfd\x7c\x9b\x53\x4f\xdb\xe4\x24\x87\xf8\x8e\x90\x31\x80\xf8\x0e\x91\x83\xe4\x40\xcd\xd8\x1f\x51\xa7\x89\x6f\xcc\xa6\x60\x6d\x97\x99\x91\x7a\x75\xf0\xbf\xce\xb7\x6f\xad\x1e\xb0\xee\x28\x73\x75\xc3\x31\x1b\x14\x4f\x5e\xa9\x61\xcb\x3d\x9f\xb0\x3d\x31\x19\xd7\xe8\xf2\xc6\x0d\x3d\xeb\x31\xcc\x51\x8b\x61\x8e\x3e\xa3\xb5\x5d\x88\x4c\xf3\xe0\x46\xc7\xc9\x44\x1c\xdc\xe8\x0a\x4a\x26\x48\x8a\xdd\x9c\x64\x6a\x5e\x06\xa5\xcb\xb0\xde\x4f\xde\x58\x2d\x99\x6e\x11\x6e\x71\x56\x9a\x30\xda\xc9\x6f\x5e\x31\x59\xb4\x32\xa1\x8d\x50\x1c\x35\x10\x47\x18\x5e\x63\x55\xc2\x6b\xa0\x5f\x35\xfa\x55\xa3\x5f\xf5\x4a\xf9\x55\x4f\x7c\x5a\x6b\xb3\x73\xe9\x14\x0f\x68\x78\x8c\x8c\x2b\x01\x0d\x57\xd8\x61\xd5\x48\x57\x1c\xb6\xea\x9b\x79\x37\x35\xab\x3b\x34\xa9\x22\xd4\x54\x31\x72\x1f\xbe\xb6\x5a\x77\x18\x2c\x59\x7e\x68\x83\x7d\x94\x1f\xca\x1b\x1f\x6f\x06\x4a\xd5\x8c\x96\xd5\x51\x28\x6e\xa9\x2c\xa8\x1f\x9a\xb2\x03\xdc\x42\x6f\x55\x6f\xce\x53\xbf\x48\xd5\xbb\xdb\xd5\xbb\x41\xe8\x5b\x21\x2d\xda\xf9\xfe\xaa\xe7\x12\xa5\x40\xca\x7e\x71\x97\xb5\x32\x77\x92\x1c\x27\x13\x15\x07\xab\x21\xb2\xaf\x85\xb9\x33\x09\x94\x02\x54\x63\x1a\xa8\x31\x1f\xed\x20\x1f\xea\xd0\x3f\xd0\xd1\xf9\xa4\xdc\x0b\x9e\xe8\x58\x3f\xa7\xaa\x0a\x2b\x0c\xeb\x67\x60\x15\x80\xa5\x90\x4d\xce\x98\x5b\xd4\x5d\x63\xe2\xf6\xc0\x96\xc6\x49\x28\x96\x03\x6f\x32\xdd\x8d\xbf\x0d\x4f\x80\x1e\x15\x18\xdd\x13\x81\xe7\x4e\x72\x92\xcd\x09\xb6\x2c\xc4\xef\xe9\x68\xb1\xc4\x17\x7b\x6a\x1f\xf7\x72\x5f\xd2\xc8\x17\x35\xfd\xf3\x5a\xe7\xe7\x24\x80\xf8\x94\x76\xd8\xf3\xf3\xa0\xe2\x15\x3d\xe8\x76\xcf\x30\x67\xd9\x25\xd3\x18\x4e\x7c\x05\x40\xc3\x5c\x1b\x2b\x07\x31\x9d\xa0\xdf\xca\xc3\x87\x03\x49\xc3\xb1\xf3\x62\xb7\xa5\x4e\x21\x30\xbc\x45\xd1\xa7\x9c\xc4\x50\xa2\x5e\xc9\xa1\x59\x83\xd7\x08\xac\xa8\x68\x2c\x01\xa2\x4d\xed\x00\x59\xbd\xb9\x11\x9a\x95\x60\xf8\xac\x3e\x06\xb6\x3a\xdb\x41\x1d\x97\xdd\x12\x77\xd9\x3d\x44\x0e\xea\x07\xcc\xfd\x92\xf0\x7d\xbb\x4a\x15\xaf\x2e\xf1\x99\xc0\x18\xd7\xff\xf4\x51\x92\x1d\xb0\x4a\x76\xbd\xfc\x7e\xd4\x5f\xb0\xf3\xd4\xca\xe7\xbd\xb2\x1b\x06\xfa\x7b\x1f\xd5\xdf\xb8\x9d\x6c\xce\x7b\x3e\xdb\x56\x3b\xfb\x1a\xa4\xf8\xe3\x6f\x0f\xf3\xb7\x7b\x4d\xf6\xf4\x88\xe7\xd3\x44\xc4\xf3\xc4\x33\x6b\x9c\x07\x8a\x89\xfe\x90\x07\x89\x89\xfe\x90\x07\x89\x3c\x48\xe4\x41\xae\x23\x1e\xe4\x1a\x0a\xa5\xbc\x66\x78\x90\x18\xe3\x17\x79\x90\xc8\x83\x44\x1e\x24\xf2\x20\x31\xd1\x1f\
x06\x21\x7d\xe6\xd0\x16\xd7\x7c\x10\xd2\xab\x32\xd1\x9f\x55\x3b\xfd\xd7\x61\x8e\x38\x1e\x22\x07\x01\x71\xdc\x4b\xee\x21\xbb\x6b\x22\x8e\x79\xcf\xa7\x60\x8e\x4a\xa0\x46\xc7\xed\x40\x26\xde\x9a\x6c\x0c\x28\xf6\xeb\x3b\x52\x02\xa4\x9a\x66\x94\x62\x2b\x51\x36\x69\x94\x3d\xd0\x7c\x62\x6b\x0c\x88\xdd\x14\x27\xfb\x4b\x62\x5f\x77\xf1\x1b\xab\x84\x7e\x71\xb0\x6a\x8c\x8c\x90\xe1\x0a\x0b\xce\x20\x19\x58\x66\xe7\xa2\xe1\x06\xe9\x70\x2d\xd2\xe1\xbe\xab\x91\x1c\x5f\xdd\x07\xc8\x7e\x58\xdd\xbb\xc9\xf2\x27\x20\x39\xc2\x29\x70\x0f\x90\xfb\x63\x0a\x5c\x4b\x05\x1d\xe5\x16\xf1\x61\x72\x48\xb1\x88\xb7\x54\x52\xeb\x19\x47\x79\xfa\xbf\xe6\xa4\x54\xaa\x3c\x4a\x17\x5d\xbd\x7f\xda\x17\x4b\xa1\x6d\xe9\xf9\xff\x92\x12\x29\xcb\x1f\xe2\x12\x29\x35\x05\xe0\x4a\xca\x26\xcc\x02\x88\xc8\x2c\x22\xb3\x88\xcc\x22\x32\x8b\x59\x00\x31\x0b\x20\xd2\x94\x31\x0b\x20\x5a\x08\xd0\x42\x80\x16\x02\xb4\x10\xa0\x85\xa0\x2d\x16\x02\xf4\x56\x41\x6f\x15\xf4\x56\xc1\x2c\x80\x68\x80\x43\x03\xdc\x55\x9e\x05\x70\x19\x49\xfb\x9a\x04\x96\x1b\x98\xbc\x54\x88\xf9\x6a\x24\xb6\xff\xd6\x8d\x3c\x0d\x20\x10\xdb\xbd\x82\x8c\x4f\x32\x50\xf2\xbd\x8b\x4b\xfa\x7b\x6e\xd4\x5f\x9f\x89\xd1\xf2\xbb\xf3\x9e\xeb\xb2\x2f\x3a\x32\x76\x3a\xe6\x6d\x87\x9e\x01\x4f\xb3\x89\x72\xd2\x2b\xd0\xde\x9b\xc5\x63\x1c\x30\x3f\x42\x43\x76\x75\x92\x3d\xb2\xab\xa3\x77\xa0\x97\x07\x11\xd9\x4b\x6e\xe5\x53\xe3\x87\xc8\xb3\x61\x6a\x5c\x47\xae\xf9\xb8\xb6\x85\x88\xc6\x35\x1c\xea\x3b\xd3\x87\x9a\xe8\x5b\x44\xed\xe4\xf1\xa9\xc6\x23\x3c\xa0\xf7\xf3\x71\x95\x4d\x14\x00\x79\x0d\xe3\x41\x67\x6a\x77\x4c\x3e\xd8\x7c\x77\x4c\x96\xd7\x76\x77\x54\x4e\xf8\xba\x1d\x63\xbe\x51\xe9\x8e\xed\xb2\x3b\x4e\x4d\xd7\xeb\x8f\xce\x64\x7f\x78\xc1\x33\xa8\x43\x7a\xdf\xa2\x74\x48\x77\xd4\x21\xa3\x63\xc7\xc7\x4e\x8f\xd5\xe9\x92\x5b\x13\x5d\xc2\x2d\x35\x2b\xd1\x29\x2b\xf0\xc5\x4d\x74\xf4\xae\x9f\x54\x3a\xa5\x27\xea\x94\x53\x93\xa7\xc7\x4f\x9d\x9c\xae\xd3\x2b\xb7\x25\x7a\x45\x54\xfe\xcc\xe9\x96\xa1\xd4\xc5\x73\x74\x6c\x78\xb4\xe9\xc5\x73\x94\x5a\x85\x67\x4e\x87\xe4\xde\xac\x74\x48\x97\x94\x26\xc3\xa7\x47\x8e\xd6\xe9\x91\x5b\x92\xe2\x84\x69\x3a\x57\xae\x4b\x96\xb5\x99\x34\xd1\x25\x13\x07\xc9\x01\x7d\xbf\xb9\x57\xea\x14\xb7\xaa\xde\x76\x95\xf5\x54\xfb\xda\xcd\x90\xf3\xfa\x63\x9d\x8f\x44\x0a\x43\x6e\xd2\x0a\x39\xea\x34\x47\x8d\x07\xa7\x8e\x1b\xac\x4c\x50\x38\x03\x2a\x35\xc6\x7c\xd9\xf7\xd9\xc9\x9c\x77\x73\x74\xd6\x08\x3d\x83\x29\x08\x59\x13\xda\xa1\xea\x10\x1f\x3e\x4f\x0e\xf3\xf0\x68\xb3\x8e\xb7\x98\xf7\xdc\xd0\xf7\x9c\xac\xf4\x1e\xaa\x0c\x96\x56\xf2\x6d\xcf\xb7\xc3\x25\x87\x2e\x50\x27\x61\xbd\x0c\xf4\xff\xff\x39\xfd\xb5\x5d\xe4\x56\xa5\x9c\xe1\xa8\x18\xe9\x9b\xbe\xaf\xbe\x27\xdd\xa4\x28\xfe\x38\x2b\x7e\x44\x2d\xbe\x77\x3f\x7b\xf3\x70\x4a\xd9\xc2\x5d\xbd\xf6\xab\xe8\x6c\x87\xce\x76\xe8\x6c\x87\x94\x0e\xa4\x74\x20\xa5\x03\x29\x1d\x6b\x86\xd2\xb1\x76\x18\x0b\x68\x4a\x47\x53\x3a\x9a\xd2\xd1\x94\x8e\xa6\xf4\x75\x6d\x4a\x47\x5b\x1f\xda\xfa\xd6\xa9\xad\xef\xaa\x74\xb6\xab\x1d\x75\x8b\x3c\xc6\xd1\xc2\xb3\xe4\x41\x40\x0b\x4f\x91\x13\xe4\x58\x4d\xdf\x17\x15\xf8\x8a\x42\x7c\xd5\x46\x93\x54\x27\xbc\x97\x34\x86\x15\x1f\xd2\xcf\x08\xcc\xb0\x1e\xbe\x26\x30\xd8\xda\xb5\xa6\x24\xb4\x6c\xe4\xaf\xf7\x3b\xa4\x01\x16\xb7\x4d\x3a\xf1\xd5\x81\xdd\x0e\xf0\x87\xae\x2c\xf0\xc6\x71\xb2\x47\xc9\x0b\xc9\x0b\x2a\xfc\xfc\xc6\xc9\x91\x36\x8d\x2b\x52\xcc\xd1\xff\xaf\x45\xff\xbf\x5f\xcd\x90\x87\xb9\xc0\x99\x26\xcf\x07\x81\x73\x8c\xb4\x6f\x62\x92\x73\xdc\x2f\xf0\x21\x72\x26\xf6\x0b\x6c\x6b\x05\xe7\xb9\xbf\xe0\x0b\xc8\x59\xc5\x5f\xb0\xad\x35\x5c\xae\x1f\x61\x73\x82\xb6\x56\xf8\xc4\xcb\x93\xbc\xbd\x9f\xec\x6f\x20\x48\x07\x52\xfd\x10\xeb\x08\xd5\x31\xfe\x42
\x1d\xa1\x1a\x3b\x2a\xae\x9a\x78\x45\x57\x45\xc4\xb5\x11\xd7\x46\x5c\x1b\x71\x6d\x74\x55\x44\x57\x45\x74\x55\x44\x57\x45\xb4\xaf\xa0\x7d\x05\xed\x2b\x68\x5f\x41\xfb\x0a\xba\x2a\xa2\xab\x22\xba\x2a\xa2\xab\x22\x9a\x2f\xd1\x7c\xb9\x06\xcd\x97\x6b\xda\x55\xf1\x4a\x1a\x09\xdb\xef\x73\xf8\xbe\x6e\x32\xc0\xfd\x05\xb8\x0c\x93\x9e\x01\x30\xf4\x03\x25\xaf\x50\xb0\x03\xbf\x0c\x98\xee\x4c\xb9\x50\xa4\x61\xa0\x7f\xb7\x4b\xff\xdb\x0d\xe4\x7a\xfe\x82\x44\xcd\x7f\x44\xa8\xa6\x6e\xc1\x5e\xb0\x0b\x65\xcb\x51\xad\xcc\x96\x3c\x06\x4e\x7a\x85\x51\x59\x62\x0e\x4a\xcc\x2a\x9b\xfe\x90\x54\x17\xba\xa0\xbc\xae\x18\x8e\x4d\xe0\x6b\x5e\x89\xca\x23\x7d\x10\x52\xab\x90\xed\xdd\x0f\x2f\x70\x41\x1a\xd9\x32\xab\x2b\x3b\x6e\x07\xe1\x61\xcf\x1f\x76\x1c\x19\x32\x30\x58\xe3\x1e\x05\x13\x17\xc8\x24\x5f\x0e\xe3\xe4\x08\x2c\x87\x61\x72\x88\x1c\x6c\x61\x39\x00\x03\x60\x8c\x9d\xe7\x1a\x2e\x89\x6d\xe9\x4b\xe2\x5a\x9d\x40\xc3\xc0\x26\xfd\xf8\x63\x8d\xd7\xc2\x01\x7d\xbf\x58\x0b\x7c\xbe\x44\xb3\xbe\x7a\x64\x52\xa6\x3b\xba\x52\xa0\x2b\xc5\x04\x9a\x9c\xd0\xe4\x84\x26\x27\x34\x39\xad\x1b\x93\xd3\xc4\x9a\xb1\xa8\xb4\xbd\x25\x2d\x43\xfd\x13\x08\xf5\x23\xd4\x8f\x50\x3f\x42\xfd\x08\xf5\xaf\x3e\xd4\xdf\x76\xd8\x61\x62\x5d\x83\x9b\x13\x08\x6e\xae\x1c\xb8\x39\xb1\xe6\xc1\xcd\xab\xd0\x37\x43\xff\xce\xa3\xe4\x10\x87\x09\xfd\x19\x2b\x9f\x8d\xe0\x22\x28\x2a\x8e\x29\x52\x2b\x1d\xb7\xef\x39\x34\xd0\x3f\xf8\xa8\xfe\xf6\xed\xe4\x39\xac\x80\x61\xf5\xfd\x73\x0b\x83\x9d\x66\xfd\x38\x22\x53\x9e\x43\x7b\xef\x66\xcf\x4c\x55\xbe\xad\xa6\x25\x62\x8f\xad\x71\x2c\x0f\xa3\x83\x20\xa4\x85\xd1\x41\x10\xd2\x42\x48\x0b\x21\xad\x75\x04\x69\xad\x21\x92\xf0\x9a\x81\xb4\x90\xbd\x8a\x90\x16\x42\x5a\x08\x69\x21\xa4\x85\xd1\x41\x90\x5e\xf7\xcc\x41\xa0\xd6\x3c\xbd\xee\xaa\x8c\x0e\x72\xb6\x76\x74\x90\xfb\x38\x39\xea\x1e\xb2\x1b\xc8\x51\xfd\x64\x07\xe9\xa9\xe9\xe9\x0e\xf8\xd5\xc2\x60\x76\xca\x73\xa8\x1a\xfb\xe3\xd1\xc6\x54\xa6\x21\x7d\x9f\xa0\x32\xd5\x04\xc1\x04\xbb\x89\x15\x9e\x0c\xb8\xdc\x28\xba\xc7\x77\xb6\xd4\x40\xc6\xae\x93\x51\x3d\x00\x04\xeb\xe2\x7f\xae\x1e\x0c\xc6\x51\xab\x03\x64\x3f\xd9\x5b\x11\xab\xa3\x8b\xdc\xdd\x54\x2f\xa3\x07\x25\x46\xe2\x68\x31\x12\xc7\x57\xb4\xda\xab\x7e\x1f\x5f\xf5\x83\x64\x00\x56\x7d\x0f\x69\x76\x3e\x92\x83\x3c\x00\xc7\xbd\x64\x4f\x1c\x80\x63\x19\xaf\xdf\xcf\xc3\x6b\xec\x25\xf7\x28\xe1\x35\x9a\x7f\xbf\xd9\x18\x19\x2b\x2b\x90\x7a\xbf\xdd\x57\x43\xe0\xdc\x9c\x1a\xfd\x02\x84\xcf\x20\xbf\x95\x22\x7c\xd2\x12\x71\xaf\x80\x18\xc2\x98\x16\x88\xc6\x22\x1a\x8b\x68\x2c\xa2\xb1\x18\xd3\x02\x63\x5a\x60\x4c\x0b\x8c\x69\x81\x56\x01\xb4\x0a\xa0\x55\x00\xad\x02\x68\x15\x68\x8b\x55\x00\x63\x5a\x60\x4c\x0b\x8c\x69\x81\x31\x2d\xd0\xe8\x86\x46\xb7\xab\x3c\xa6\xc5\x0a\x1b\xb4\x96\x11\x32\xe3\x6a\xcc\xc5\xfd\x96\x6e\x92\xe5\x84\xf7\x82\x1d\xe4\x3d\xa6\xfc\x28\x3c\x77\x1e\x1c\x83\xba\x85\x92\x67\xbb\x61\xe0\xd8\x79\x1a\xe8\x7f\xd5\xa5\x7f\x75\x03\xb9\x56\x3e\x7f\x6e\x61\xb0\x73\xa9\xb9\xa0\x18\x63\xa2\xa4\x69\x56\x52\x9b\xc2\x61\x0c\xc2\x0b\xa3\x51\x6b\xce\x0c\x26\x2a\xc1\x30\x18\x6d\x0c\x83\xb1\xac\xa5\x56\x39\x9f\xc4\x0a\x4b\x8c\x4e\x32\x95\x3d\xba\x0c\xa0\xcb\x00\x46\xc1\x40\x23\x15\x1a\xa9\xd0\x48\xb5\x7e\x8c\x54\x18\x05\x03\xa3\x60\xa0\x71\x00\x8d\x03\x68\x1c\x40\xe3\xc0\x9a\x30\x0e\x60\x14\x0c\x8c\x82\x71\xb5\xc0\xa1\x18\x05\x63\x25\xa2\x60\xfc\xef\x47\x49\x0f\x07\x05\x5d\x1a\x2e\x7a\xfe\x05\xa6\xad\xc5\xa8\xa0\xed\x16\x7d\x1a\x04\x79\xc7\x0a\x02\x1a\xe8\xdf\x7f\x44\xff\x5f\xdb\xc9\x75\xf1\xa3\xe7\x16\x06\x3b\x7b\xeb\x07\xba\x18\xe7\x45\x8c\xb0\x22\x7a\x6f\x65\xcf\x9e\x94\xaf\x9f\x19\x54\xef\xae\x71\xb0\x0e\xe3\x5c\x20\x68\x85\x71\x2e\x10\xb4\x42\xd0\x0a\x41\xa
b\x75\x04\x5a\xad\x21\xe2\xf0\x9a\x01\xad\x90\xd1\x8a\xa0\x15\x82\x56\x08\x5a\x21\x68\x85\x71\x2e\x90\x72\xf7\xcc\xc1\x98\xd6\x3c\xe5\xee\xaa\x8c\x73\x41\xc9\x04\x27\x39\x8d\x90\x61\x20\x39\x41\xe8\x85\x9a\x9e\xe5\x0a\x10\xb5\x30\x98\x55\x01\xa2\xe3\x76\xd0\x98\xde\xd4\x20\x2c\xc5\xf2\x98\x4d\x55\xa0\x98\xa0\x36\xa9\xad\x4a\x30\x9b\xcc\x9f\xd9\x5a\x89\x8f\x3d\x37\x0a\x77\xe1\x26\xb1\xb0\xdb\xf9\xf5\x15\x47\xc3\x38\x78\x35\x4e\x8e\x90\xb1\x8a\x70\x17\xf7\x90\xdd\x2d\x8c\x02\xba\x5a\x62\xf0\x8b\x16\x83\x5f\xfc\x97\x46\x0e\x73\x51\x70\x88\x1c\x04\x51\xb0\x97\xb4\x36\x09\xc9\x31\x1e\xf1\x62\x94\xe4\xe2\x88\x17\x2d\x17\x76\x9c\xc7\xbf\x18\x23\x23\x4a\xfc\x8b\x96\x4b\x6b\x2b\x8d\x79\xd9\x12\xa8\x51\x14\x8e\xde\xbf\xe9\xab\x94\x50\x77\xa6\xc6\xc7\x48\x08\xab\x6e\xfe\x88\x2a\xac\xe2\x00\x19\x2b\x28\xb6\x30\x3c\x06\x82\xb8\x08\xe2\x22\x88\x8b\x20\x2e\x86\xc7\xc0\xf0\x18\x18\x1e\x03\xc3\x63\xa0\x31\x01\x8d\x09\x68\x4c\x40\x63\x02\x1a\x13\xda\x62\x4c\xc0\xf0\x18\x18\x1e\x03\xc3\x63\x60\x78\x0c\xb4\xd5\xa1\xad\x6e\x3d\x85\xc7\x58\x3e\xae\xbc\x9c\xf0\x18\x6d\x0f\x5e\xf1\xee\x1e\xb2\x8b\x4d\xa9\x3a\x29\x19\x4b\x5e\x21\xa4\xf3\x25\x80\xa2\xf8\x8d\x97\xe9\x7f\xdf\xad\x7f\x3b\x43\x36\xe7\x3d\x9f\x9e\x5b\x18\xec\xbc\xdd\xa7\x56\xa1\xe2\x28\x3a\xe9\x15\x4e\x8b\xd7\x7a\xef\x60\xf7\x47\x3c\x9f\xaa\xb1\xe7\x95\x07\xda\x0b\x6e\x4f\x3c\x4c\x0e\xf1\x09\xb2\x8f\xdc\x0b\x13\x64\x27\xc9\x92\xbe\x9a\x26\x07\xf6\x15\x6c\x3a\x28\x0d\x6a\x38\x27\x6e\x4b\x1f\xb4\x4d\xfa\x86\x22\x0d\xc9\xe3\x27\x1a\x4f\x99\x5e\xbd\x5b\x9d\x06\x62\xfa\x98\xa6\x98\x2f\x6a\x63\x3a\x7f\x69\x4b\xdc\xd7\x77\xfa\xb4\xe4\x58\x79\x5a\xa7\xbb\x4d\xf1\xc8\xaa\xf5\x78\x2e\x47\x1e\x20\xf7\x57\x58\x41\x97\xd9\xe5\x08\xa5\xa1\xf9\xb3\x45\xf3\xe7\xcf\x6a\x97\xbf\xe0\x47\xb9\xdd\x93\x6d\x25\xd2\xee\xb9\x9a\x62\xa3\x54\x6e\x5a\x6c\xd4\x13\x15\xc9\xc0\xfd\x37\xc4\x62\xe3\x39\x42\x93\xb5\x12\x92\x42\x58\x28\x57\x4f\x50\xac\x8c\xdd\x11\x45\x47\x03\xd1\x81\xd8\xf7\xaa\x60\xdf\x08\x7a\x20\xe8\x81\xa0\xc7\x4a\x81\x1e\x13\x3f\xa7\x91\x31\xce\x28\xba\x9f\xdc\xa7\x30\x8a\xda\xb7\x4f\x5f\xb6\x1e\x11\x6d\xf4\x46\xfa\x46\xbf\x55\xdf\xcc\x3b\xa3\xdd\x7b\x7d\xee\x1f\xae\x89\xf7\xfa\xde\x92\xe5\x87\x36\xd0\x3c\x38\x40\x5c\xe7\xac\x60\x94\xd8\x29\x7e\x55\x14\x80\x5b\x2a\x0b\xea\x87\xba\x77\xb0\x9f\x43\xb7\xaa\x37\xe7\xa9\x5f\xa4\xea\xdd\xed\xea\xdd\x20\xf4\xad\x90\x16\xed\x7c\x7f\xd5\x73\x89\x52\xd8\xef\x25\x71\x97\xb5\x32\x77\x92\x1c\x27\x13\x15\xa7\x94\x21\xb2\xaf\x05\xf5\x63\x12\x1c\x59\x51\xed\x68\xa0\x76\x7c\xb4\x83\x7c\xa8\x43\xff\x40\x47\xe7\x93\x52\x76\x3f\xd1\xb1\x7e\x4e\x2c\x15\xe6\x3a\xd6\xcf\x40\x83\x02\x93\x32\x9b\x9c\xb1\x47\x7b\x77\x8d\x89\xdb\x03\x5b\x10\x67\xcd\x59\x0e\xbc\xc9\x74\x2d\xfe\x36\x3c\x01\x7a\x4f\x60\x74\x4f\x04\x9e\x3b\xc9\x5d\xbb\x4f\xb0\x65\x21\x7e\x4f\x47\x8b\x25\xbe\xd8\x53\xfb\x28\x95\xfb\x92\x46\xbe\xa8\xe9\x9f\xd7\x3a\x3f\x27\xc1\xbf\xa7\xb4\xc3\x9e\x9f\x07\x95\xac\xe8\x41\xb7\x7b\x86\x39\xcb\x2e\x99\xc6\x70\xe2\x2b\x00\x35\xe5\xda\x53\x39\x88\xf9\x4f\xfd\x56\x1e\x3e\x1c\x58\x65\x8e\x9d\x17\xbb\x23\x75\x0a\x81\xe1\x2d\x8a\x3e\xe5\xac\xab\x12\xf5\x4a\x0e\xcd\x1a\xbc\x46\xf0\xc5\x8f\xc6\x12\xd0\xcb\xd4\x0e\x90\xd5\x9b\x1b\xa1\x59\x09\x92\xfc\x2a\x60\x41\xed\x05\x7b\x1e\xbf\x23\x7d\xeb\xd8\xa2\x6f\x82\xcf\x25\x13\xfb\xc8\xbd\xfa\x1e\x73\x97\x8c\x91\x7b\x13\xac\x1f\xb1\x98\x95\xa2\xcc\x0d\xec\x86\x1a\xfb\xf6\x2a\x8c\xae\xfb\xe7\xdd\xa4\xbf\x01\x40\x19\xd0\xbc\x4f\x43\x89\x4d\xbe\xbf\x5b\xff\x13\x05\x9b\xbc\x39\x05\x9b\x9c\x86\x37\x7a\x3b\xd3\x60\x49\x7e\xaf\xcd\x88\xe4\x83\x64\x88\xcf\xc2\xdd\x64\x10\x66\xe1\x0e\xd2\x43\xba\x1a\xce\x42\xde\x
96\xcb\x05\x23\x8f\x36\x9e\x9f\x77\xeb\xdb\xf8\x4c\x14\x35\xa6\x4f\xd6\xce\xb7\x29\x38\xe4\xad\xe9\x38\xa4\xe8\xda\xdb\x6a\x40\x90\x2b\xd1\xbb\x39\xc0\xff\x2b\xf6\xf5\xe6\xbb\x17\xb7\x71\x04\x1e\x5b\x04\x1e\xdf\xa6\x5d\xd6\xba\x3e\xc4\x31\x47\xb6\x33\x49\xcc\x71\x95\x04\x03\xc0\x8d\xcd\x09\x86\xca\xcd\x2a\x45\x44\xf4\xfe\xab\x82\x34\xde\x20\x91\x46\x21\x0b\x6e\x4d\x07\x19\x57\x44\x14\x20\xbe\x88\xf8\x22\xe2\x8b\x88\x2f\x22\xbe\x88\xf8\xe2\xb2\xf1\xc5\x4f\x6b\x6d\x66\x96\x4c\x71\xb4\xf2\x18\x19\x57\xd0\xca\x15\x66\xab\x34\x46\x1e\x9b\xdb\xf6\xeb\x9d\x57\x85\x0a\x90\xfb\x63\x05\x74\xbc\xbb\x01\xe8\x28\x94\x81\x5b\x52\xf1\xc6\x76\xeb\x02\x08\x35\xae\x37\x0d\x04\xa1\x46\x84\x1a\xaf\x30\xd4\xb8\xb2\x20\x4f\x23\x58\xb0\x7d\xa7\xb9\x89\xdd\x64\x50\x1f\x30\xfb\x25\x4c\xa8\xab\x00\x23\x7f\xe1\x19\x81\x2d\xbe\xb6\x9b\xdc\x15\x61\x8b\x3c\x4f\x57\x24\x18\xdd\x70\xc1\x73\xca\xf3\x34\xef\x58\xf6\x7c\xa0\xff\x75\x97\xfe\xc7\x1b\xe2\xad\xee\x15\x5a\x73\xb9\xba\x26\x65\x69\x67\xa0\xb4\x11\x56\x5a\x9b\x72\x76\xed\x59\x8c\xf7\xd1\xd4\x7a\x30\x6d\x57\x1b\xd3\x76\x9d\x6d\xbc\xb6\xf6\xe8\xbb\xaa\x60\xfd\xb4\x71\xc1\x84\x5d\x18\xfb\x18\x13\x76\x61\xd8\x0c\x0c\x9b\x81\x61\x33\xd6\x6b\xd8\x0c\x4c\xd8\x85\x09\xbb\x30\x5c\x01\x86\x2b\xc0\x70\x05\x18\xae\x60\x4d\x84\x2b\xc0\x84\x5d\x98\xb0\xeb\x6a\x71\xd0\xc6\x84\x5d\x2b\x91\xb0\xeb\xb3\xb7\x90\xfb\x79\xc2\x2e\x8b\x1d\xba\x8a\x36\x98\x7e\xd8\xd3\x32\x6b\xd7\x0c\x0d\xad\xc1\x01\x71\xde\xb7\x25\x0b\xf1\x65\x03\x01\x58\xe6\xf4\x97\xdf\xa2\x7f\xa1\x83\xdc\x54\xf1\xfe\x39\xf1\x62\x67\x17\x10\x11\xf9\xb3\x51\xaf\xc7\x5b\xf0\xf0\xe4\xf8\x34\x2f\xb6\xb7\x97\x3d\x38\x9c\x2c\xe5\x0c\x2f\x24\x7e\x8a\x5b\x03\xdb\x4c\x53\xfc\x61\x92\xe7\xf8\xdb\x23\xe4\x61\xc0\xdf\x80\x4e\x93\x8a\xbf\xb1\x32\xfa\xad\x62\x91\xcd\x98\xd0\xf3\x63\x08\xae\xb2\xfb\xc4\xe7\x67\xe3\xa6\x37\x84\xe4\x68\x63\xb4\x2d\xa7\x3f\x20\xd0\xb6\xf4\xe1\x52\x51\x36\xa8\x5f\x40\x72\x4a\x2b\x1a\xd0\x26\x3b\xbf\xb1\xb5\xf6\x58\xf6\x46\xe4\xc7\x26\x86\xb3\x5f\x3c\x7b\x25\x46\x34\x37\x4b\x0a\x64\xa6\xc2\x0e\xb9\x02\x43\x8a\xf6\x49\xe4\x50\xb6\xc8\xa1\xfc\x07\x6d\x75\x84\xce\x1c\x27\x5b\x5a\xe4\x5c\x4c\xb6\x5c\xa7\xe2\xad\x54\x0e\x49\xee\xf3\xd7\xd6\x16\x6f\xbb\xab\xb8\x1c\x4d\xc8\xb9\x1d\x60\x87\x5c\x6d\x29\x87\x4c\x8f\xf5\x26\x49\x91\xe9\x81\x4c\x8f\x2b\xcc\xf4\x58\x5f\x7a\x72\x43\x87\xb4\xbd\xe4\x1e\x7d\xb7\x39\x28\xa9\x1f\xcf\x55\xf9\x22\x71\x49\xd5\x9c\x91\xb6\x33\x3a\xbe\xb1\x9d\xdc\x10\x31\x3a\x84\x5b\x98\xfe\xf9\xed\xfa\xaf\x75\xc4\xe4\x8d\xbb\xea\xa7\x55\x16\x34\x45\x38\x40\x73\x76\x05\xbf\x72\xb5\x51\x29\x5e\x40\xee\xe7\x53\x74\x2f\xb9\x07\xa6\xe8\x00\xe9\x27\x3b\x9a\x24\x23\xb5\x25\x21\x50\xdb\xdc\xce\x90\x24\x81\x24\x09\x24\x49\x20\x49\x02\x49\x12\x48\x92\x40\x92\x04\x92\x24\x90\x24\x81\x24\x09\x24\x49\x20\x49\x02\x49\x12\x48\x92\x40\x92\x04\x92\x24\x90\x24\xb1\x5a\x24\x89\xdf\x7b\x8c\x0c\x73\x92\x44\x55\x98\xfb\x3a\xf1\x99\xc4\xb3\xe0\xb1\x6d\xd3\x40\x7f\xd5\x63\xfa\xcf\x6f\xaf\xcc\x95\xba\xa3\x3e\x2c\x27\x12\xa4\x72\x9f\xed\xde\x2e\xf6\xb0\x9a\x33\x35\xf6\x24\x4e\x3c\xb8\xc6\x41\xba\x1c\x02\x5b\x08\x6c\x61\xd2\x5c\x04\xb6\x10\xd8\x42\x60\x6b\xfd\x00\x5b\x6b\x28\x27\xec\x9a\x01\xb6\x30\x59\x29\x02\x5b\x08\x6c\x21\xb0\x85\xc0\xd6\x95\x48\x56\xba\xae\x71\x28\xcc\xa6\xb8\x9e\xb3\x29\xe6\xae\x42\x1c\x6a\x62\x96\x1c\xe3\xe4\xa6\x51\x92\x03\x72\xd3\x7d\xa4\x0e\xe5\x36\xab\x80\x55\x0b\x83\xd9\x04\x42\xd4\x16\xa6\xd3\x63\x8d\x99\x4e\x07\xf4\xfd\x9c\xe9\x94\xa8\x3d\x8d\xf0\x54\x9d\x40\xd2\xfc\xe8\xd6\x4a\xb4\xec\xb9\x7c\x4b\x32\xac\x0a\x60\xac\x87\x5f\x5f\x4d\x68\x8c\x23\x59\x13\xe4\x28\x39\x5c\xc1\x82\xbe\x97\xec\x69\
x65\x48\x90\x01\x8d\xbe\x24\x2d\xfa\x92\xfc\x8f\x46\x8e\x70\xc1\xf0\x00\xb9\x1f\x04\xc3\x3e\xd2\xe2\x2c\xe4\x91\x40\x07\x21\x12\xa8\xf4\x17\x69\xbd\xb4\x13\x3c\x12\xe8\x61\x32\xaa\x44\x02\x6d\xbd\xb8\xd6\x25\x56\xc9\x6b\x5e\x62\x35\x48\x69\x5b\x5b\x94\xf5\x7e\xa7\xaf\x52\x62\x99\x22\xe8\xab\x12\x41\xd7\x9b\xad\x90\x5e\xbb\xf9\x33\xaa\xf4\x1a\x91\x8f\xaf\x8e\x1c\x5b\x99\xf8\xe1\x08\xf1\x22\xc4\x8b\x10\x2f\x42\xbc\xeb\x07\xe2\x45\x1d\xae\x81\x0e\xb7\x76\x30\x70\xcc\xdd\xb0\x2a\xb9\x1b\xd0\xd4\x80\xa6\x06\x34\x35\xa0\xa9\x01\x4d\x0d\xeb\xda\xd4\x80\x29\x7c\x30\x85\x0f\xa6\xf0\x59\xa9\x14\x3e\x68\xc9\x43\x4b\xde\x7a\xb5\xe4\x4d\x14\xdb\x9c\xbd\xaa\x11\xc8\xbc\xc2\x20\xf2\xe3\xbd\xe9\x20\xf6\xb3\xf5\x67\x71\x69\x11\x4b\x72\x72\x35\x66\x1f\x79\xcf\xcd\xa4\x2b\x8a\x55\x51\x99\x77\xa4\x32\xb4\xe0\xbf\x3f\x4f\x7f\x42\x89\x61\xd1\x5f\x37\x94\x60\x65\xda\x89\x5e\x23\xce\x73\x5c\x79\x6f\x45\xc2\x08\x9e\x23\xa3\x7c\x22\xb2\x09\xc8\x26\xe2\x3d\x64\x37\x19\x6c\x9c\x73\xbb\xa2\x6d\x97\x9b\xf7\x78\xaa\xf1\xfc\x1c\xd0\xfb\x1b\x25\xed\x48\xcc\xc9\xce\x3f\x53\x32\x20\xef\x6c\x14\x04\xb0\x6a\x20\xb6\x25\xb2\x22\xaf\xc6\x58\xe4\x8e\x90\x31\x32\x52\x61\x98\x6d\x65\x30\x10\xd1\x43\xab\x6c\x8b\x56\xd9\x0f\x6a\x6d\x92\x07\xe3\xdc\x24\x9b\x23\x0f\xc4\x26\xd9\x15\x12\x2d\x2b\x20\x3b\x1a\x05\xe4\x7b\xe5\xb5\xb1\x68\xd9\xdf\x74\x00\xbe\x2a\x19\x73\x67\xa9\x76\x62\x28\x0c\xbe\x87\xc1\xf7\x30\xf8\x1e\x06\xdf\xbb\xaa\x83\xef\xad\x96\x76\xd9\x30\xe1\x62\x73\x7b\x44\x13\x3b\x83\x12\xea\xec\x20\x39\xa0\xef\x37\xf7\xca\x73\xcc\xad\x6a\x28\xbd\xca\x42\x56\x21\xa0\xde\x97\x7b\xc9\x01\xee\xf1\xeb\xcf\x58\xf9\x6c\xd4\x4b\xa9\x91\xd1\xf3\x4e\x39\x08\x99\xa8\x72\xe4\xf9\x45\x7f\x73\xaf\xfe\xbe\x0e\x72\x33\x7b\x79\x58\x7d\x57\x86\x9a\xbd\x1d\x8e\x32\xc9\x3d\x6d\x84\x17\x34\xe5\x39\xb4\x17\xa2\xa6\x4f\x55\xbe\x2e\xc2\xca\x2a\x0f\xb6\xfd\x08\x53\x6b\x7a\x2c\x6f\xf6\x41\xaf\x45\xa1\x1d\x95\xe6\x36\x79\x84\xb1\x1b\x4f\xb1\xc3\xfa\xa8\x98\x3f\x35\x47\x48\x68\x27\x4a\xf5\xd5\x61\x1f\x3b\x7f\xb0\xa5\xde\x30\xdd\x19\x9d\x75\x6a\x8f\x54\x14\x13\x7d\xd5\x07\x6b\xd9\x67\x9c\x5a\x83\x82\xdb\x3f\x9e\x71\x56\xe9\x8c\x53\x6b\x0a\xb6\x70\xc6\xa9\x59\x54\xeb\xf0\x09\x3b\x8f\xac\xa2\xec\xe9\xfd\xe4\x8d\xf5\x64\xcf\x73\x84\xb1\xc1\x4a\x88\x9b\x1e\x7e\x75\xf5\xa5\xcd\xca\x50\x44\x51\xf4\x34\x10\x3d\x48\x53\x5a\x15\x9a\x12\xda\xa7\xd1\x3e\x8d\xf6\xe9\x95\xb2\x4f\x4f\x7c\x5a\x6b\xb3\x91\x6e\x8a\x3b\x96\x1c\x23\xe3\x8a\x63\xc9\x0a\x1b\xfe\x8c\x74\xc5\x61\xab\xbe\x99\x77\xd3\x6a\xea\x0e\xb9\xf7\x5e\x5b\x4f\x77\xe8\xad\x02\x52\x6b\x1f\x60\xba\xe1\x5c\xbf\xaa\x0a\x05\x02\xa8\xeb\x4d\x8d\x41\x00\x15\x01\xd4\xab\x0b\x40\x6d\xf9\x7c\xb9\x7a\x9b\x40\xe3\x14\x25\xfb\xc8\xbd\xfa\x1e\x73\x97\xc4\x55\x6f\x52\x71\x55\xa5\xec\x55\x80\x54\xbf\xda\x43\x76\x72\x48\x35\x08\x3d\xdf\x2a\x52\x25\x82\x22\x67\x7f\x58\x61\x68\xe5\xe7\xe6\x99\x86\x11\xe1\xa8\xef\xee\xd1\x5f\xd3\x41\x88\x78\xe3\xdc\xc2\x60\xe7\xb6\x14\xe0\x94\xc3\xc2\xc3\xf2\xf5\xde\xdb\xd8\x43\xd3\xfc\xa5\x33\x83\x95\xb7\xdb\x8c\x99\xce\xd4\xc6\x4c\x97\xe5\x95\x1b\x75\xcb\xc2\x60\xb6\xb2\xc5\x2d\x51\x8f\x92\xdd\x2c\xe6\x53\x65\xc9\xcd\x9a\x6a\x21\x35\xe4\xb7\xb6\x24\x86\x62\x7b\x3a\x38\x5a\x35\x1a\x86\x78\x6e\x95\x06\x64\xd9\x4e\xf9\x75\x3a\x1e\x37\x76\x84\x46\x5b\x84\x46\x7f\x41\x6b\xc1\x97\xbe\xce\x54\x5c\x3d\x29\xd3\x00\x20\x5d\x61\x21\xd4\xfb\xe6\x1b\x13\x52\xe6\x66\x09\x83\x56\x09\x96\x3b\xf8\xad\xd5\x92\x2b\x88\x80\x22\x02\x8a\x08\x28\x22\xa0\x88\x80\x22\x02\xba\x6c\x04\xf4\x63\xcb\x8c\xd1\x53\x4f\x13\x68\x21\xaa\x4e\x2b\x8a\x45\xf3\xb8\x67\x73\x2a\x41\x0a\xf7\xa7\x39\xf5\x20\xf7\x63\xd7\x26\x54\x82\x81\x06
\xe8\x66\x95\xa2\x70\x3b\x1c\x87\x57\x41\x4f\x40\x60\x73\xbd\x69\x27\x08\x6c\x22\xb0\x79\x85\x81\xcd\x99\xf6\xed\x2c\x97\x4d\x0e\x5d\xe1\xc3\x61\x7d\xa2\x68\xe5\xab\xab\x80\x6a\xbe\xbf\x8b\xdc\x1e\x79\xb3\xf9\x54\xce\x6f\xb6\xd4\x7c\xa6\x96\xf8\x81\xfe\xaa\x2e\xfd\x3f\x14\x27\xb6\xdd\xf5\x33\xbe\x4c\xc5\x85\x8c\xc8\x42\x7a\x07\xe2\xbc\xcc\xa9\x0f\x5c\x6d\x69\x9a\xe7\xb8\x0e\xb3\x13\x74\x18\x36\x65\xef\x27\xf7\x91\xa1\x86\x64\xe6\xd4\x6f\x6f\x4b\x2c\xd3\xb3\x8d\xe7\xed\x1e\x7d\x57\xa5\xe3\x4b\x6a\x7b\x92\x13\x16\x73\xdd\x60\xae\x1b\x4c\xe2\x8c\x81\x10\x31\x10\x22\x06\x42\x5c\x3f\x81\x10\x31\x89\x33\x26\x71\xc6\x00\x74\x18\x80\x0e\x03\xd0\x61\x00\xba\x35\x11\x80\x0e\x93\x38\x63\x12\xe7\xab\x25\xe4\x16\x26\x71\x5e\x89\x24\xce\xff\xfe\x08\xb9\xbb\x06\xff\x50\xf8\x71\x07\xb6\xeb\x15\x68\xa0\xff\xfe\x23\xfa\x4f\x6c\x27\x37\xc4\x06\x27\xce\xa9\xbf\xbb\x3e\x70\x37\x32\x3d\x7e\xd2\x2b\xd0\xde\x9b\xd9\x63\xd2\xd4\xc4\x39\xf4\xfc\xd6\x1a\x07\xe5\x30\x2d\x33\x42\x55\x98\x96\x19\xa1\x2a\x84\xaa\x10\xaa\x5a\x47\x50\xd5\x1a\x4a\x49\xb1\x66\xa0\x2a\xcc\x95\x80\x50\x15\x42\x55\x08\x55\x21\x54\x85\x69\x99\x31\x98\xfb\x33\x07\x59\x5a\xf3\xc1\xdc\xaf\xca\xb4\xcc\xed\x77\x00\x15\x4e\xc7\x1c\x36\x3a\x6e\x07\x61\xb3\x34\xa6\x87\x1b\xd3\x98\xf6\xea\xf7\xd4\xa3\xdf\x89\x4a\xab\x9d\x8d\xcd\xd7\x6e\xad\xc6\xc4\x6e\x94\x09\x99\x23\xf8\xeb\x16\x7e\x65\x65\x01\x30\x8e\x57\x1d\x26\xa3\x24\x57\xc1\x75\xde\x45\x76\x2e\xb7\x7f\x91\xe3\x8c\x3e\x9e\x2d\xfa\x78\x7e\x4f\xab\xbd\xf4\x47\xf8\xd2\xbf\x8f\x0c\xc1\xd2\xdf\x43\x5a\x98\x9a\xe4\x28\xf7\x20\x1d\x26\x87\x62\x0f\xd2\xd6\x4a\x1a\xe7\x3e\x23\x39\xf2\x80\xe2\x33\xd2\x52\x51\xcd\x26\x5b\x6e\x4e\x16\x55\xc9\x99\x66\x84\x53\xef\x9f\xf5\x55\xcb\xa2\x5b\x53\x53\x2d\x47\x72\xe9\xee\x0a\x5f\x51\x2e\x97\xe4\xa3\x2b\x23\xa1\x30\xad\x32\x42\xb4\x08\xd1\x22\x44\x8b\x10\x2d\xa6\x55\xc6\xb4\xca\xe8\xad\x8f\x69\x95\xd1\x54\x80\xa6\x02\x34\x15\xa0\xa9\x00\x4d\x05\x6d\x31\x15\x60\xd0\x16\x0c\xda\x82\x41\x5b\x30\xad\x32\x5a\xe2\xd0\x12\x77\x95\xa7\x55\x5e\x46\xda\xe3\x95\x34\x73\xb5\x3f\x44\xc4\x3f\x66\xc8\xf3\x6a\x05\xbe\xd5\xff\x20\xa3\xff\x5e\x26\x11\xdc\xe8\xa6\x22\x0d\x93\x2a\x30\x37\x6c\xf6\x3e\xaf\x48\x63\x5e\xf9\xf0\xe4\x78\x04\xfb\xb5\x31\xb1\x65\x55\x41\x43\x55\x05\x0d\x35\x53\xd0\x44\x89\x3c\xc8\x27\xcf\x49\x72\x1c\x26\x0f\xd8\xea\x5a\x98\x3c\xca\x77\x36\x13\xe7\x41\xff\xe3\x1e\x32\xce\x3b\xdb\x2a\xd9\xf4\x62\x48\x5d\x58\xaf\x95\x5c\x7f\x58\x54\x03\xf9\x72\x10\x7a\xf3\x51\x0f\xab\x6d\x13\xe1\x87\x5f\xdd\xa3\xff\xe2\x46\xf2\x43\x89\xa2\xa4\xbd\xe1\x37\x34\xb1\x34\x63\x9b\xb4\xe5\x46\xaa\x9f\xf4\x0a\x80\x2a\xa2\x4f\x18\x95\x55\x64\x15\x25\x6b\x48\xaa\x67\x5d\x50\x62\x57\x0c\x7f\x27\xf0\x4c\xaf\x44\x25\x84\x12\x84\xd4\x2a\xf4\xc5\xe7\x3e\x38\xdb\x8a\x53\x02\x3b\x3a\xc6\x07\xda\xae\x04\x54\xa3\x94\x9d\xed\x1d\x84\xea\x86\xd5\xaf\x8b\x0c\x23\x35\x9a\xbd\xc6\x5d\x18\x26\x2e\xd4\xb6\xc7\x4d\xf2\x09\x39\x4e\x8e\xc0\x84\x1c\x26\x87\xc8\xc1\x16\x26\x24\x70\x11\xc6\x16\x94\x70\xa9\xb5\x63\xe2\x2c\xf2\x98\x38\x2f\x6a\x2c\xad\x4e\xea\xc7\x6b\x19\xc2\xd2\xe6\x71\x24\xc4\x6a\x8c\x12\x46\x1d\x41\x57\x0e\x8c\x3a\x82\x76\x42\xb4\x13\xa2\x9d\x70\x3d\xd9\x09\x31\xea\x08\x46\x1d\x41\xfb\x0c\xda\x67\xd0\x3e\x83\xf6\x99\x35\x61\x9f\x99\x18\x23\x23\xfa\xb0\x79\x48\x86\x2b\xbd\x2b\x91\x7f\xa9\xc6\xf1\x6d\xe5\xc3\x96\x62\xf0\x12\x04\xb6\x31\x78\xc9\xd5\xe3\x62\xa2\xbf\xfa\x5c\x94\x3c\xcd\x62\x67\xb7\xa2\x0d\x01\xe2\xe3\x6c\x71\x12\xd8\x14\xb0\x81\x9d\xa7\x81\xfe\x47\x8f\xe9\x5f\xdd\x4e\x6e\xaa\x78\x43\xe2\x97\xdd\xf5\xe3\x99\x0c\x4f\x8e\x4f\xf3\x92\x7a\xef\x62\x4f\x0e\x27\x8b\x11\x40\x61\xfc\xd4\x1a\x87\x0
6\x31\xba\x09\x42\x62\x18\xdd\x04\x21\x31\x84\xc4\x10\x12\x5b\x47\x90\xd8\x1a\x62\x86\xaf\x19\x48\x0c\x29\xcb\x08\x89\x21\x24\x86\x90\x18\x42\x62\x18\xdd\x04\x39\x95\xcf\x1c\xe8\x69\xcd\x73\x2a\xaf\xca\xe8\x26\xaf\xd4\x48\x91\x53\xa7\xce\x93\xc7\x80\x3a\xf5\x10\x39\x43\x4e\xa7\x52\xa7\xd8\x44\xe8\xb7\x8a\x45\xd6\x5b\xa1\xe7\xc7\xec\xa9\x4a\xd4\x2a\x0a\x29\x10\xc3\x47\x6d\xc9\xe2\x44\x1b\x33\xad\x72\xfa\x03\x31\xb1\x2a\x05\x49\xab\x66\x84\x72\xae\x55\xdc\x52\x62\xfe\x3c\xa9\x8d\xaa\x3d\x27\x8a\x88\xe2\xaa\x08\xda\x76\x7e\x75\xd5\x30\x34\x0e\x79\xcd\x92\x02\x99\xa9\x08\x90\x32\x45\x26\xdb\x3d\x74\xe8\x9e\x8b\x01\x54\x5a\x0c\xa0\xf2\xd5\x4c\x6d\xc2\x66\x9e\x4b\x9d\x47\xc8\xc3\x20\x75\x20\x96\x46\xbb\xa7\x2e\x99\xe3\x01\x56\x2c\x72\x2e\x0e\xb0\xb2\x32\x35\xd9\x3c\x00\xcb\x0c\x39\xaf\x04\x60\x59\x91\xaa\x22\x69\xd9\x9c\x34\x6c\x2c\xef\xea\xcb\xcb\x46\x71\x60\x7a\x7f\xd0\x57\x5b\x5a\xde\x91\x1a\xb3\x45\x11\x9c\x3b\xf9\x03\xe9\x82\x33\x0e\xdf\xb2\x62\x22\x14\x23\xb8\x20\x0c\x8d\x30\x34\xc2\xd0\x08\x43\x63\x04\x17\x8c\xe0\x82\x11\x5c\x30\x82\x0b\x9a\x43\xd0\x1c\x82\xe6\x10\x34\x87\xa0\x39\xa4\x2d\xe6\x10\x8c\xe0\x82\x11\x5c\x30\x82\x0b\x46\x70\x41\x6b\x23\x5a\x1b\xdb\x1e\xc1\xa5\x26\xb6\xde\xd6\xd0\x2e\xad\x44\x70\x59\x1d\x4b\x5d\xfb\x83\xb9\xfc\x75\x17\xb9\x3d\x22\xe2\x97\x82\x81\x85\x28\x94\x48\xc1\xa2\xf3\xec\x44\x14\x06\xfa\xaf\x77\xe9\x1f\xdb\x40\x36\xb3\xfb\xe7\x16\x06\x3b\xcb\x42\xd9\x74\x0b\xf6\x82\x5d\x28\x5b\x4e\x22\x60\x88\x3c\xd8\x8d\x42\x01\xd3\x34\x6c\x4b\x64\x90\x6c\x6f\xb7\x88\xed\x51\x0a\xce\x0c\xca\xb2\x8f\xdb\x41\x78\xd8\xf3\x87\x1d\xe7\xa4\x35\x4f\x83\x92\xd5\xce\xe8\x31\x2b\x16\xd2\xa3\xed\x91\x3b\x1a\xda\x99\xb7\xa5\xcf\xe4\x6b\x75\x02\x0d\xe3\xc6\xe6\x13\x8d\xa7\x70\xaf\xde\x2d\xa7\x70\x29\x10\xf3\x53\x8e\x86\x3a\x81\x31\x64\x07\xfa\x27\x60\xc8\x0e\x34\x0c\xa1\x61\x08\x0d\x43\xeb\xc9\x30\x84\x21\x3b\x30\x64\x07\x02\xf2\x08\xc8\x23\x20\x8f\x80\xfc\x9a\x00\xe4\x31\xd6\x06\xc6\xda\xb8\x5a\x20\x48\x8c\xb5\xb1\x12\xb1\x36\x2e\x5d\x4b\x4e\x0a\x88\x4f\x00\x43\x15\x91\x36\x06\x5c\x09\x9e\xf1\x40\xc1\xf0\xfb\x65\x03\x8e\x97\xb7\x9c\xa0\x0c\x82\xca\xca\xe7\x69\x10\xb0\xc3\x04\x5d\x0c\xf4\xdf\xbd\xc6\xfc\xda\x46\x72\x63\xa2\xbc\x73\x0b\x83\x9d\x77\xca\xf4\xa9\xc7\xd9\xbb\xd3\xfc\xdd\x61\x78\x77\x0a\xde\xed\xdd\x2b\x3c\x07\xd4\x57\xcf\x0c\x4a\xf8\xae\x50\xeb\xc5\x36\xf3\x60\xc1\xe7\xa3\xc2\x95\xa0\x4e\x44\xe7\x6c\xb2\xeb\x16\x06\xb3\xb5\xda\x39\xf1\x81\x0c\x79\x21\xa7\x46\x9f\x26\x53\x0a\x35\xba\x6e\xc4\xe8\xe6\xcb\xaf\x8d\x81\x2f\x27\x42\xf5\x32\xea\x7b\x98\x33\xca\xa7\xc9\xf3\x63\x46\x79\x9b\xca\x6e\x36\xf3\xa6\xd3\x18\x99\x1c\xd7\x8f\x44\xc8\x64\xca\x24\x17\x48\x65\xcd\x6f\x4c\x00\x97\x48\xed\xab\x4f\xed\x9b\x40\xef\x8f\x1a\xde\x1f\xe7\xc9\x63\xfa\x23\xe6\xc3\x32\x3a\xda\x90\x50\xf2\x78\x4f\xb8\x05\x98\x9b\x46\x90\xf7\x4a\xb4\xcf\x08\xca\xf9\x39\xd6\x02\xd8\xd7\xa8\x35\xcf\xb7\x86\x92\xef\x81\x52\x69\x6e\x95\x82\x78\x25\x03\xa7\xe9\xff\x92\x21\xb7\xd5\x0d\xc4\xa4\x7f\x39\xa3\xff\x41\x86\xdc\x50\x71\xbb\xb3\xb3\x08\x16\xfd\x59\xcf\x9f\x97\xbb\xad\x65\xc0\x1a\xec\x65\xf7\x2a\x3c\x1c\x86\x27\xc7\x8f\xb0\x7b\x6b\x30\xb6\xbf\x4d\x4e\x72\xc9\x79\x84\x8c\x81\xe4\x3c\x44\x0e\x92\x03\xad\xc5\xf6\x87\x6f\x6c\x18\xd4\xff\x9f\x33\xe4\x39\xbc\xd3\x67\xc0\xd8\x16\x05\xbb\xd2\xbf\x9a\xd1\xbf\x9c\x21\xd7\xc1\x55\xe9\x53\x52\x33\x81\xc2\x2d\x45\x1a\xe6\xd8\xa3\xb1\xdf\x1d\xe6\x50\x48\xe9\xee\xff\xe8\x24\xfb\x1b\x04\x1b\x53\xe3\x8c\x89\x64\x09\x03\x01\x18\x83\xf5\x4f\x75\xea\xbf\xdc\x41\xf4\x6a\x8f\x9f\xce\x2e\x9f\x5a\x05\x83\x3f\x16\x49\xdd\x18\x79\x50\x9c\x7e\xee\x66\x0f\x56\xb9\xfc\xc4\x0f\x70\xb3\x
\xdb\x39\xab\x0e\x0c\x23\x89\x6d\x52\xad\x27\xf5\x81\xe8\xd0\xae\xee\x5a\x47\xc9\x61\x32\x5a\xdb\x26\x51\xeb\x44\xc3\x06\xee\xac\x1d\xce\x9d\x92\x93\x87\xfc\xb4\x46\x12\x07\x0d\xfd\x35\x1a\x19\x5b\x7e\xc9\x51\xcb\x95\x92\xcc\x43\xea\x5f\x89\x4d\x33\xf4\x0c\x71\x54\x84\xc4\xee\x52\x49\xe6\x7b\x54\x24\xff\xc9\xbb\x36\x93\x6b\x60\xe6\x89\xad\xf8\xf5\x9b\xcd\xff\xde\xa4\x5c\x48\x96\x39\x47\x0d\x13\x56\x8d\xc9\x75\x1c\x9e\x00\x41\x95\x83\x6e\xde\x9b\xe7\x4e\xce\x7c\x78\xb2\xc6\x70\xf5\xfe\x6b\x8e\x5d\xb4\xf2\x21\x07\xe1\xc7\x5e\x54\xb6\x17\x2c\x87\xba\xa1\x99\x25\xa4\xdf\x80\x5b\x43\x52\x39\x8f\xce\x15\x91\x50\xb1\x43\x83\xb2\x27\x9c\x25\xb9\x60\x2c\x45\x03\xf3\x61\xd9\x24\x58\xa3\x36\x13\x95\x91\xd4\x0b\xe2\xac\x16\x05\xfe\x06\x3b\x32\x59\xa5\x52\x30\xb0\x30\xd8\x17\xfd\x80\x03\x54\x9f\x48\xbe\x14\x52\x17\xf4\xa5\xf8\xfa\x4c\x39\x94\xdd\xc0\xdb\x15\x25\x02\x3a\x1f\x39\xa7\x07\x43\x2f\x34\x59\x61\xe6\xa3\xac\xd0\x08\x7b\x63\x57\x17\x06\xd9\x35\x29\x3a\x86\x8c\x17\x9a\x4a\xf3\xcc\x47\xcf\xf7\x29\x5f\x1d\x7a\x89\x26\x19\xf0\x5d\x95\x0d\x12\xbb\x96\x58\xa7\x29\x3b\x17\xef\x57\xd9\xcf\xd5\x9d\x6b\xcf\x46\xfd\xc1\x7d\x0f\x05\xce\xe3\x70\xd5\xd3\x76\xa1\x5b\x83\x3e\xa0\xb7\xf3\x0e\xe3\x10\x2f\x68\x8d\xdc\xe7\xde\xf3\x23\xc5\x72\xc5\xbb\x1f\x2c\x2a\x6b\xae\xfb\x79\xa4\xb2\x05\xea\x8b\xb4\x74\xe2\x45\x6e\x08\x49\x1d\x93\x44\x40\x24\x65\x19\xa8\x2a\xd9\x37\xb6\x92\x67\x31\x2d\x2e\x28\x59\x79\x99\x19\x5b\xff\x9d\xad\x6d\xd0\xca\x5e\xb1\xf5\x64\x65\xc1\xcd\x28\x66\x20\x4e\xa3\x3d\x5f\x6a\x69\x31\xb6\xc1\xb5\x4e\x28\x56\x1c\x6a\xac\xb0\xbe\x52\x34\x9e\xd0\xf0\xec\x30\xa0\xce\x2c\x07\x25\x65\x49\xfc\xf0\x2b\x15\x3f\x9e\x48\x67\xd6\xf3\xe7\x79\xed\xe2\xcc\x24\x21\xb4\x48\x45\xa8\x28\x39\x90\xf3\x36\xda\xef\x82\xbc\x57\x52\xa8\x2d\x7d\x4c\xbc\xf0\x2d\x87\xe9\x23\x41\xc5\x80\x25\xe6\x75\xad\xae\x89\x7c\xbb\x58\x27\x7a\x81\xda\x1b\x42\x97\xaa\xb4\x4b\x9b\x7e\xd9\x85\x00\x87\x26\xd3\xea\xcc\x9d\x5c\x2c\x0e\x9a\x07\x0c\x20\xbc\xf3\xf4\xb6\xb4\x32\x99\x75\x20\x22\x89\x04\x43\x86\x59\x35\x41\xcc\x21\xe3\x25\xc4\x30\x4c\xe8\xb0\xb1\x8b\x25\x9f\xc2\x86\x12\x98\x43\xc6\x0b\x89\x61\x18\x70\x97\xfd\x67\x5e\xa0\x4b\xe6\x90\xd2\x84\xbe\xe8\x46\x94\xcb\x9a\xdd\x85\x2c\xd6\xf1\x2d\x2e\xc8\x65\x59\x70\x6d\xa7\xbc\x6d\xb0\xb6\x8b\xdf\x8f\xc2\xff\x5f\x46\xd8\xaf\x97\x11\x32\x3e\x0b\x7e\x63\xd4\x2a\xf0\x2f\xb3\xf8\xaa\x80\x35\xbc\xfc\xbe\x4c\xb3\xef\x9b\xd4\x5d\xb0\x7d\xcf\x65\x8b\x99\x77\x67\xc9\xf7\x0a\xbc\x47\x83\x10\x78\x65\xe6\x81\xd5\xeb\x56\xb5\x35\xa9\x3d\xdb\xa8\x5b\xa1\xf5\x4a\xcf\x46\xdf\x90\xde\xbf\xd3\x94\x36\x65\x6a\x60\x7a\xdc\x82\x4d\x17\x07\x84\xe3\x40\x3f\xeb\xbf\x7e\xd1\xd5\x03\xe2\xc4\x20\x5d\x13\xa3\x10\x42\xac\x3b\x79\x30\xca\xa8\xbb\x82\x58\x88\xb5\xa8\x85\xff\xdf\x2d\x89\xf0\x32\x33\x70\xb0\x58\x18\xcc\x8e\xf8\x9e\x3b\xe1\xcd\x80\x1d\xec\x8b\x5b\xcc\x1e\xe5\xef\xd4\x3c\x13\xbe\xe7\x1a\x8f\x7b\x33\x35\x6c\x60\xbf\xb0\x19\x6d\x60\x97\x6b\x03\xcb\x47\x26\xb0\x87\xcd\xdb\xed\xc8\x00\x26\xd1\x32\x6f\xd6\x10\x43\x94\xd4\xa1\xfb\xc9\x0e\xd2\x53\x53\xd3\xad\x1c\x6e\x34\x4e\x5d\x86\x71\xea\x8b\xaa\x71\xea\x33\x97\x69\x9c\xfa\x91\x2b\x64\x9b\x8a\x2a\x5a\xb6\x61\x0a\xa6\x52\x1c\x9c\x23\x16\x17\x95\x86\xa9\x5f\xd9\x48\x76\x29\x53\x70\xd6\xf1\x16\x85\x4b\x92\xe4\x95\x1d\x76\xbc\xc5\x69\x58\xac\x31\xcd\xeb\xe5\x1b\xcd\xa1\x94\xeb\x0a\x62\xa0\x10\x40\x38\x4b\x3e\x7e\x3c\x49\x1e\xfd\xe5\x0d\xe4\x23\xe9\xe1\xe0\xdf\x7e\x19\xfe\xb2\x8f\x9c\xaf\x2e\xf0\xbc\x5c\xa3\x97\x1b\x2b\xbe\x18\x73\xcc\x1e\x31\x4f\x9d\x17\xbf\xcf\xf3\xd5\x39\xd7\xc6\x44\x0a\x8e\x24\x99\xcd\x98\x0f\x9e\xe7\x3f\x45\x35\xe5\xf6\xa7\x55\x98\x95\x1c\x34\xf
6\x55\xfc\xe7\xf9\xcb\xcb\xaa\x90\x0a\xac\x8e\x08\x4a\xd9\x01\x33\x7b\x9e\xfd\x38\xdf\x20\x09\x42\x6a\x21\x3f\xba\x91\xf4\x28\x33\x57\x40\xc7\xd1\xa4\x15\xa7\xec\x28\xd3\xa1\xfe\xb5\x0d\xe6\x9b\xb4\x8a\x8b\x89\x6d\x0e\x52\x51\x45\x99\x1e\xe3\xb9\xcb\x61\x50\x61\x58\x8a\x5c\x09\xc0\x88\xc7\x2b\x84\x3e\xe6\x29\xd7\xc1\x84\x92\x35\xc6\xa5\x03\x9f\x12\x45\x92\x7b\x4f\x0a\x8b\x1d\xaf\xb6\x82\x80\xfe\x89\x0e\xf2\x61\x70\x73\x0c\xe7\x26\xe1\x61\xfd\xdd\x19\xf3\xcd\x99\xf8\xef\x44\x54\xca\xa8\xbc\x88\x5b\xaa\xb6\x1e\xac\x55\x02\x93\x1c\x0f\x63\xa3\x47\x14\x2d\xf5\x7c\xef\xf9\xac\x71\xda\xb7\x6c\xc0\xa6\x02\xc7\x0a\xe6\x04\x0e\x11\xfa\xf6\xfc\x7c\x94\xd1\x77\x41\x62\x48\x55\xd5\x0a\x36\x2c\x54\xca\xae\x67\x09\x19\x13\x5a\xd1\x90\x71\x7e\x60\xd6\xf3\xce\x8b\x53\x20\x18\x1b\xc4\xa5\x3e\xfe\xff\x81\xf3\xb0\xa9\xf3\xdf\x33\x96\x7f\x3e\xf9\x06\x1c\x09\xe2\xb7\x0a\xe7\x99\xb2\x7a\x1e\x82\x6c\xb2\xa7\xd4\x19\xf0\x84\x1a\xa0\x74\xd1\xb4\xa1\xd9\x8a\x7b\x4e\x1f\x1f\x38\x5e\x9a\xd2\x43\x0a\x64\x1e\x7d\x99\xe8\xe2\x08\x55\xb1\x1c\x47\xf6\x26\x84\x24\x85\xee\x89\xea\x4a\x72\x7f\xbf\xb5\x95\x0c\xd6\x9e\x87\x93\x5e\x61\xd4\x0e\xfc\x32\xe0\xc8\xb9\x72\xa1\x48\x43\xf0\x69\xfe\xf0\x56\x33\x57\xe3\x1e\x5f\xde\x15\xc1\x6d\x00\x92\xae\x7c\xba\x22\xc6\xf8\x16\xf2\xdd\x0c\xb9\x7e\xde\xba\xf8\xa0\x6b\x2d\x58\xb6\xc3\x26\xab\xfe\xcd\x4c\xb3\xf1\x6d\xca\xa1\xed\x64\x6d\x37\x0c\x42\x3f\x3b\xee\x86\xa7\xfc\x69\xe8\x66\xf3\x9d\x99\x61\xd7\xa0\x0b\x76\x5e\x52\x11\x05\x82\xc5\x3a\x2a\xe4\xee\x25\x66\xb2\x56\x93\x2f\x09\xae\x18\xf3\x05\x63\x46\x5a\xb2\x29\x22\xd8\xc9\x87\x0d\x4b\x9a\x78\xa3\x5a\xfa\x0c\x3b\x4b\xb3\x1c\x5f\xb1\x5d\xc3\x9a\x09\x54\x1e\x38\x3c\x45\x0b\x3c\xa5\x50\xe2\x10\x0a\x34\x42\xcb\x95\x90\x70\x34\x8e\x6e\x68\xf9\x4b\xb2\xf4\x00\x20\x74\xce\x02\x60\x93\x60\x67\xec\x9e\x6b\xa5\x30\x1d\x25\xda\xce\x8f\xa7\xf3\xb6\x3b\x2c\xbf\x33\x4b\x7e\x37\x43\xae\x55\x2f\xe9\x9f\xb8\xec\x0e\xff\x57\xad\x6e\x87\x3b\xd4\x82\x1e\x57\xdb\x51\xb7\xbf\xf9\xd9\x2e\x8c\x8c\xff\xcd\xf7\x3b\xd8\x1f\xea\xf4\xbd\x31\xed\x81\xfc\x13\xfd\x0f\x07\xc9\xe5\x77\xbf\x39\xb8\x73\xe7\xdd\x66\x96\x5c\xea\x20\x5b\xa2\x46\xeb\x6f\xe8\x68\x03\xa4\xf3\xae\x0c\xfc\x6d\xbc\xa8\x4c\xfd\x25\xf0\xd3\xe1\xfd\xc4\x4f\xcf\x71\x7b\x80\xe1\x0a\x0c\x33\xc9\xc7\x2d\xc8\xa5\x66\xcc\xf0\xb5\x16\x19\xab\xe4\xe1\x98\xff\x60\xb2\x95\xe7\xdc\x30\x86\xa5\x4f\x44\xf4\x48\xf7\x4b\x5e\xd6\x23\x48\x27\xc9\x87\xa3\x43\x20\xf7\xda\x17\x6a\x77\x10\x29\x99\x6a\x42\x30\xfe\x22\xb8\x11\x39\x8e\xa8\x68\xdc\x15\xbb\x0f\x47\x09\xab\xaa\x15\x87\x79\x07\xac\x7a\xe2\xad\x68\x3c\xe5\x29\x3e\xfb\xb4\xd6\x53\x3f\xb4\x23\xd1\xb7\xf8\xb4\xe4\x58\x79\x4a\xc8\x87\x37\x54\x66\xc7\xa9\x91\xb1\x65\x5c\xfa\xa7\xe8\x3f\xe8\x30\x0f\x57\x5e\x4c\xec\xb9\x60\x8f\xe5\xcc\x24\xee\x15\xef\x28\x1f\xa1\x26\x70\x49\x8b\x7e\xfa\xce\x0e\xb2\x5f\x58\xa2\x07\xcd\xbb\xdc\x94\x38\xa3\xec\x37\x67\x11\x2b\x69\x60\xc4\xde\xf1\xcd\x8c\x32\xd9\xbe\x9c\x69\xc3\x64\x7b\x5d\x26\x50\x2c\xae\x5c\x63\x62\x55\xf5\x53\x37\xef\x15\x98\x42\xe9\xf9\xf3\x5c\x94\xcb\x81\x8e\xfb\xbe\x02\x4a\x90\xaa\x9b\xda\x7e\xe3\xac\xd8\xe0\xfa\x84\x67\x2b\x30\x61\x78\x98\x06\x57\x0d\x47\x20\x43\x06\x44\x87\x6b\xfe\x7e\x10\xd1\x3e\x24\x96\x21\xe3\x12\xcb\x07\xf2\x5e\x09\xfc\x28\xa1\x2a\xc1\x56\x7e\xbc\x2c\x1c\x72\xf8\x53\x60\xcf\x8f\x54\xa2\xc8\xdc\x51\xe4\x41\xc6\x45\x39\x59\xf2\x9a\x2d\x89\x73\x45\xc4\xbc\x3c\xc2\x81\xc6\xd9\xa0\x46\xc0\xf8\x3f\xda\x6c\xbe\x2e\x19\x30\x5e\xbe\xd1\x64\xa8\xf8\xac\xf2\x46\x3b\xc2\xc5\xff\x7f\xec\xbd\x0b\x94\x25\x59\x59\x26\xba\xe2\x64\x56\x55\xd6\xee\x87\x74\xc0\xc0\x2c\x70\x60\x1b\xcd\x58\x99\x90\x27\x2a\xb3\xb2\x
aa\xba\x3b\x9b\xa6\xc9\xce\xac\xea\x4e\xba\x1e\x49\x66\x56\x37\x74\xd3\x90\x71\xce\xd9\x79\x32\xa8\x38\x11\x87\x88\x38\x95\x75\x9a\xee\xb9\x02\x32\x20\xf2\x94\x77\x31\x88\x30\x5e\xd4\x06\x01\x75\x68\x75\x8d\xa8\x3c\xbc\x3a\xea\x28\x17\x14\xf5\xaa\x80\x20\xea\x20\x73\xf5\xfa\x40\x6d\x07\xbd\x6b\xff\xff\xbf\x77\xec\x1d\x27\x4e\x66\x76\x67\x75\xb7\xcb\xc9\x5e\x0b\x2a\x4f\x3c\x76\xec\xe7\xbf\xff\xfd\x3f\xbe\xef\xa0\xce\x59\xbc\xe4\x00\xec\xba\x35\x09\x3f\xb0\x9f\xbd\xa3\xc6\x5c\xfd\x8c\x36\x21\xbb\xff\xe4\x78\x7f\xe9\x9c\x18\xb8\x6e\xce\x51\xbc\x80\x2e\x6a\x95\x9e\x52\x54\xbe\xc8\xf5\x5c\x24\x83\x8b\x46\xc9\x80\xe5\x8e\x36\x60\xf5\x85\xe2\x03\x2d\xc3\x8c\x9e\x2b\xec\x8f\xe2\x73\x04\x11\xdf\x48\x7a\x71\x8b\x2f\xdd\x31\xff\xb0\x60\xbb\xdb\xaa\x76\x06\x52\x36\xfa\xe6\xeb\x41\xbd\x9b\xb4\x4a\xda\x19\xc2\xd4\xbf\xc2\x8b\x41\xdb\xa6\x96\x97\x87\x07\x35\xc8\x47\xab\x16\x9f\x37\x75\xc4\x5f\x72\xbc\x07\x9d\x2d\xa1\xe9\x07\x2a\x87\x1a\xbb\x02\xd4\xc7\xb4\x4e\x45\x45\xc8\xbb\x22\x25\xdf\x64\xe6\x0f\x42\xe2\x5f\xe6\x36\x19\x2a\xe7\x87\x1d\x56\x4c\x4b\xf7\x7d\x8e\xf7\x66\x7b\xa6\xa9\xae\xd6\x36\x1c\x90\x88\x30\xd1\xd4\xd1\xb3\x68\xa8\x62\x8e\x79\xd4\x86\xe0\x8d\x57\xb3\x53\x85\x28\x28\x1c\x46\x75\x29\x4c\x31\x87\x4f\x8b\x56\xeb\x81\x21\x4e\x5e\xf7\xe7\xae\xf2\xee\xac\xb8\x5e\x2c\xa2\x7c\x43\xd8\x80\xdb\x09\xef\x04\x10\xef\xb1\x7a\x6a\x45\x3e\x16\x93\xad\x54\x1b\xca\x95\x53\xc3\x5a\xdc\xbf\x75\x25\xfb\xab\x31\x36\xd2\x4b\x23\xf7\x1b\x63\xde\x1f\x8c\xf5\xd2\xc8\xc0\x09\x8b\x12\x9b\xcd\x53\x87\xc5\x84\x71\x21\xe0\xcf\x2d\x9f\x42\xb9\x3f\xbe\x06\xf6\x4e\x31\x7b\xf8\xb0\x3c\x89\xcc\x4a\x41\x74\x58\xce\xfc\xb5\x09\x1f\x7d\xbc\x05\x2e\xfa\x5a\x2f\x8d\xf0\xd8\x93\x21\x69\xf2\x5a\xc1\xee\xad\xa1\xd1\x19\x5b\xdd\x10\x7c\x4d\x16\xb6\x66\x43\x91\x12\x9e\x40\xc0\xe9\x6d\x9d\x87\x6c\xe3\xa4\xde\x08\x11\x7f\xf2\x42\xf1\x19\x92\x30\xe8\x94\xc0\x68\x0d\xe4\x8e\x0e\xdb\x1b\x14\xf5\x90\x01\x52\x18\xf8\x29\x35\x51\xf2\xc2\x99\x15\x68\x75\x22\xb7\x73\x35\xa8\x19\x1f\xc7\xc8\x2e\x00\xc3\x2b\x06\x7b\x4d\x05\x51\x50\x59\x3c\x8c\xeb\xca\xff\x24\x0b\x0a\xb2\x52\xec\x51\xc0\xa3\xa0\x2f\xd0\xe2\x19\x26\x11\x74\xfa\x84\xaf\x9a\xae\xc3\x7a\xe5\x93\x31\x5f\x5c\x92\x1b\x21\xc0\x3d\x31\xb6\x04\xc4\xee\xb2\x5b\x68\xf6\x23\xad\xfc\x9a\x1c\xba\x08\x5f\x97\xbd\x3c\x7d\xe4\x3a\x7f\xca\x9f\xf2\xa7\xd7\x10\xf1\x88\x4a\x0e\x33\x9e\x86\xd9\xf9\x3e\xef\xc5\x00\x1c\x21\xb5\xdb\x5c\x4e\xa3\xb6\x9c\xe8\x10\xbe\x52\x78\xbd\x8c\x10\xe6\x24\x06\xa5\x0b\x28\x99\x49\xcd\x93\xcf\xc8\x2d\x5a\xf5\x81\x72\x01\x40\xb7\xaa\x58\x5e\x98\xa2\x46\xac\xa1\x19\x05\xbb\xd2\x83\x60\x02\xc0\xfc\x46\x7d\x35\x0a\xcf\x8b\xa8\x4f\xf2\x29\x4e\xe2\xba\x9c\x51\x52\xad\x47\x1d\x7e\x12\x26\x83\x08\x32\x78\x24\xef\xa5\x31\xef\x75\x31\x9e\x05\xb2\x8e\x08\x2b\x17\x67\x11\x4e\x4d\x3d\xc9\x3c\x58\xfd\xde\x8d\x98\x3f\xba\x7c\x4a\xdd\x68\x87\xb4\x62\x3c\x25\x1e\x20\x83\x0f\x4d\x03\x06\xb6\x11\x45\x86\x41\x8c\x2a\xb8\x7a\x29\x08\x3a\x88\xfb\xca\x74\xad\x24\xa7\x3c\x86\x40\xa5\xce\x2d\x9f\xf2\xf9\x8b\x08\xd0\x4a\x4d\x4c\x45\xc0\x21\xf5\x1c\xe8\xbf\xb4\x11\xe6\xa9\x3c\x4b\x28\xe8\xbe\x52\x44\xda\xba\x79\x18\x0c\x8a\x30\x1e\xad\x7e\xca\xfa\xe6\xb9\xdc\x3f\xe9\x75\xf9\xa9\x00\x59\x24\x92\x94\x37\x82\x2c\x6c\x42\x1e\x3a\x41\x57\xc9\x1b\xb3\xf2\xf3\x9b\x49\xda\x7a\x9e\xa7\x7d\x96\x94\x8d\xc6\x4f\xa6\x41\x1b\x1d\xf9\xe3\xde\xb5\xbe\xef\x7b\x13\xd0\x76\x3c\x70\x14\xc8\x4d\x7c\xdc\xbb\x99\xee\xa6\xc2\x2c\x61\x92\xc2\xe4\x2c\xcb\xd6\x67\x1c\x36\xd6\x0c\x6e\xe9\xc5\x2d\x79\x9c\x74\xdc\xd1\x46\x3f\x17\xde\x07\x1d\x75\x0d\x0f\xaa\x4b\x27\x4e\x73\xa5\x5a\xce\xcf\xf1\x06\xde\xc2\x89\x55\xd6\xce\x8c\xc4\
x72\xdd\x5b\x87\xb4\x3a\x68\x66\x03\x52\x92\x9a\x12\x33\x93\x0a\x89\x22\x4f\xe5\x1c\x48\x93\x24\xcf\x54\xdc\x74\x31\x9b\xe1\x40\x9f\x95\xec\x73\xef\x54\xb8\x2d\x4d\xe1\xbe\xbe\xc6\xee\x1c\xa6\x60\x3f\xcc\x0d\x81\x78\xe4\x0b\xf4\xae\x8f\x3a\x4a\xce\x11\x52\x74\x89\xc7\x44\xdd\x1d\x0c\x2d\x3f\x81\x11\x8a\xea\x81\x24\xe5\x52\xc2\x57\x89\xda\xc5\x81\xe8\x3d\x25\x53\xe5\x7a\xb0\xe5\x2a\xe8\x65\x31\x48\x0b\x12\xcb\x72\x8e\x69\x19\xeb\xb3\x87\x6a\xec\x8e\x47\xda\x7c\x82\xa6\x07\x50\xb2\x65\x72\xf1\xdc\xa1\xe3\xa7\xdd\xf7\xd7\xbc\xd3\xc3\x6e\xda\x19\x5a\x46\xd0\x75\x47\xe4\x1b\xf2\x80\x28\x3b\xc8\x7e\xb9\x04\x99\xf7\x29\x87\xfd\xbc\xc3\xbe\x23\xe9\x8a\x78\x6e\x69\xf1\x8e\x19\xb4\xe6\xbb\x0f\x38\xec\xee\xcb\x33\xb8\xd8\xba\xe7\xaf\x9c\x3d\x83\x45\x2f\xa5\x49\x37\xf3\x6e\x2f\x7d\x50\x69\x37\x67\xf1\x32\xbf\x30\xa3\x3c\x93\x2a\x8a\x3d\x49\xcd\xf6\xc9\x05\xd9\x4d\x7b\x31\x78\x58\x2f\x3d\xc1\x64\x1e\xdd\xe2\x70\x37\xb7\xb4\xa8\xba\xc1\xfd\xfa\x77\x78\xcf\x37\x7e\x97\x72\x17\xd4\x71\xd3\x88\x14\x92\x9f\x54\x21\x20\x78\x54\xd3\x0a\x78\xcb\xc8\xb6\x91\xbb\x51\x2f\x0a\xd2\x33\xf0\x93\x15\xcf\x68\xd2\x8f\x7d\x17\x44\xda\xb0\xbd\xb7\xbf\x7b\x35\xdb\x60\x0c\x90\x4d\x41\xe9\x77\xef\xf2\x9e\x5b\xfc\xb2\x47\x39\xeb\xb5\xdb\xc8\x19\x8a\x88\xcd\xf0\x85\x32\x73\xe8\x56\xb1\xe9\xbf\x3e\xc2\x5c\xe2\x87\x20\xdf\xf0\x6d\x41\xb6\xe1\x7e\x62\xc4\xfb\x91\x11\xd0\x0e\x82\x6c\xc3\xa6\xd1\xa4\xa7\x95\x97\x69\xd2\xf4\x21\x2b\xbf\x31\xf5\x12\xc6\x0e\x17\x21\x42\x60\xbf\xdd\x4c\xc3\x3c\x17\x3a\x0e\x1f\xc8\xc2\x65\x99\xc2\xe7\x77\x58\xc9\x18\x06\x96\xb9\xa2\x9b\xed\x93\xa3\x32\xf3\x39\x68\xf8\x02\xa2\xcc\xf3\x3e\x02\x69\xa4\x61\x26\xb5\x35\x5c\xac\x9a\x0f\x14\x26\x8a\x61\x00\x94\x9b\x77\x77\x23\xd0\x18\x40\xca\x01\x8d\xa9\xb6\xb0\x49\x14\x89\xcc\x16\x32\x2d\xea\x4b\xfa\x90\xd6\x2d\x53\x99\x17\xf2\xd2\x8c\x08\x5e\x19\xe8\x5d\x0b\x7e\x08\x8e\x7b\xe8\x76\xb0\xc0\x8d\x40\xc2\xa7\xa2\x13\x84\x71\x81\x29\x88\x76\x3b\x08\x0a\x6c\xa7\x41\xab\x07\xb8\x0d\xe6\x70\x7e\xd8\x61\x38\xa3\xdc\x0f\x38\xde\xf7\x3a\xf0\x67\x79\xc6\xa8\x2c\x0d\xa9\xb5\x71\x7c\x62\x1c\xc6\x8d\xc2\xca\x32\xde\x16\xf9\x24\xbc\x30\xc9\x37\x83\xbc\xb9\x31\xa9\xe3\xb3\xd1\x03\x3c\xc9\xbb\x78\x19\x43\xb2\xd5\xbf\x45\x8c\xc2\x24\xad\xc9\xe4\x62\x7f\x62\x8b\xc9\x77\x13\x99\x77\x8e\x79\xe3\xa6\x79\xa7\x1b\xf5\xd2\x20\xb2\xac\x3c\xc6\x54\x2e\x5e\x5f\x65\xc6\x8a\x72\x4f\x7a\xd7\x17\xbf\x0c\xa0\x86\xd0\x5a\xb8\xd6\x4a\x85\x70\xf3\x24\xb7\xcd\xfd\x2d\x72\xc9\xbf\xd8\x3b\x7b\xde\xe0\xc7\x81\xbf\x07\x88\x76\x40\x03\xe6\x87\x4e\x26\xc9\xa1\x81\x07\x8d\xaf\x1e\x5a\x4f\x92\x43\x13\x66\xdd\x7f\xb0\xc6\x2c\xe9\xe0\xbe\xb1\xe6\xfd\xb3\x63\x5e\xd1\x46\x26\xba\x56\xdd\x1f\xdc\x42\x20\xa5\xd5\x21\xd7\x16\x61\x95\x52\x5f\x42\x50\x9e\x2a\x08\x57\x53\x44\x99\x50\xe5\x6f\x12\x3c\x77\x9a\x2a\xbc\xee\x02\x2d\x5e\x79\x08\x63\x0d\x33\x0d\x99\xd8\x98\x20\x93\x40\x32\x19\x7d\x02\x86\x9f\x3e\x2d\xd7\x18\xd9\xb7\x75\xf0\x81\x9c\x7b\xcd\x3c\xe2\xf3\xa7\x16\x31\x22\x64\x3d\x28\x0d\xee\xf7\xd5\xd8\x01\x92\x2a\xee\xb7\x1d\xef\x2f\x1c\x9d\xb4\x44\x93\x44\xa7\x47\xa9\x1b\x83\x5d\x73\x02\xac\xa6\x61\x87\xf2\x20\x0d\x39\x65\xe0\xbc\x12\xd8\xa7\x15\x04\x0a\x4e\x07\x33\xe0\x9d\x48\x78\x34\x18\x6b\x60\xc0\xb1\x52\x20\x8b\xa1\x9a\xce\xf2\x0b\xd3\x7c\x7c\x73\x03\xa8\xe6\x20\x91\x80\x17\x69\x8a\x03\x55\x48\x8b\x3a\x1f\xca\xd0\xbf\x3f\x61\x43\x74\xdc\xcf\x98\x9c\xcb\xed\x04\x50\xc5\x13\x6f\xa5\xf8\x65\x2f\xee\x5c\x51\x42\x19\x31\x7e\x59\x49\x28\x37\x44\x94\xc4\x6d\x98\x23\x34\x7b\x83\x28\x3a\xb4\xd5\x32\xfd\x65\x87\xed\x83\x52\xdd\x4f\x3a\xde\xc7\x9c\xb6\x49\x40\x55\x0c\x03\x85\xc5\xee\x60\x10\xac\x27\x87\x0d\x81\xbf
\xeb\x31\x58\x69\x06\x51\x89\x4c\xf3\xfd\xfb\x4d\xf8\x9b\x9d\x5a\x7e\xc9\x3b\x2d\x8f\x01\xee\x1f\xee\xf3\x2e\xce\x95\x6d\xb9\x69\xf1\x00\xc5\xef\xa8\x3b\xb6\xed\x0f\xe3\x99\xe4\xc1\xe5\xbc\xe8\xa3\x8c\xd4\x79\x11\xea\x61\x04\xde\x27\x51\x22\xfa\xf0\x10\xc5\x41\x5d\x72\x46\xce\x8b\xfe\x25\x67\x4c\xbd\x61\xa9\x0d\x9f\x19\x65\x97\x1c\x26\x9f\x70\x7f\xc0\xf1\x8e\xc8\x77\x75\x34\x84\xac\xad\xbc\xa0\xdd\xc9\x45\x00\x60\x91\x24\x6c\x31\xc7\xec\x9c\x14\xea\xbb\xb7\x21\x85\xda\xef\x8e\x9e\x17\x7d\xc6\xde\xe4\x30\x5d\x71\xf7\x95\x8e\x97\xeb\x76\xa7\xa6\x39\xf8\xbc\xe8\x1f\xca\xb0\x17\xa4\xd2\xb8\x11\x76\x95\xad\x43\xa9\xb5\x10\x12\x06\x6a\xaf\xee\x3a\xca\x72\x8b\x27\x39\x84\x6e\x4e\xf2\x13\x17\x43\x79\x38\x07\xbc\x93\x44\x64\x67\x92\x1c\xae\x58\x93\xe1\x07\x6a\x6c\x3f\x16\xe7\xbe\xb6\xe6\x7d\xc3\xa1\x78\x7d\xd2\x11\xe4\x4a\x80\xfd\xd2\x08\xc5\x32\x22\x6c\x55\xdd\xc3\x8c\x2f\x42\xb6\x2c\x7d\x58\xab\x1e\x19\x15\xa1\xb4\x19\x79\x82\x27\x1a\xc5\x8a\x32\xa8\xbe\x49\x6a\x55\x77\x8b\xe2\xa8\x28\x14\xfe\x70\x07\x03\xf6\x22\xd8\xd7\x5a\xbd\x14\x71\xe9\x68\xc0\xc0\x01\x90\xb6\x05\xee\xdb\x5b\xe9\x83\x9f\xa8\xb1\xe3\x25\x24\xaa\x24\x55\x59\x74\x4a\x81\x3f\x93\xc4\x4a\x55\x9e\x53\xb9\xc7\x99\xfb\x90\xe3\xbd\xa2\xf2\x4e\xa1\x58\xe4\x04\x02\xa7\x8b\x2c\x72\x97\x33\xc3\x6f\xb9\x0e\xfb\x72\xac\xd1\x20\xcc\x78\x34\x28\x63\x8e\xca\x80\x0c\x1e\xda\x3d\xac\xc5\x90\xb0\xeb\xc9\x38\x3d\xe5\x5d\x6b\x1a\xa7\xcf\x2d\x9f\x42\xab\x83\x4d\xba\x6f\x76\xc1\x0c\x1b\x95\x5a\x91\xfb\x6c\xef\xe9\x77\x88\xb4\x61\x84\xc7\xa0\xc9\xef\xb6\xd5\xd5\x25\xd0\x9b\xcc\x97\x3e\x3e\xca\x5e\xf8\x88\x8f\xbc\xf6\xa1\x6c\xa5\x90\x79\x84\xe9\xf7\x85\x11\xef\x97\x46\xb6\x79\xa8\x9c\x0b\x03\x1f\x35\xe3\x7a\x0c\x51\x5a\x79\x12\xe4\x05\x54\xa0\x5e\x90\x85\x6e\xbb\xe6\xab\x68\x21\x79\x7a\x23\x8b\x10\x6e\x6d\x08\xea\x65\x95\xe6\x1b\x9e\xac\x67\x71\x71\xb1\x9b\x64\x90\x69\x51\x81\x6a\x54\xc4\x30\x95\x80\xb8\x9f\xc5\x97\xce\xad\x0e\x8c\x7c\x55\x09\x39\x9a\x7e\xcb\x05\xe0\x5c\x20\x5b\x15\x26\xc0\xa2\x8e\x4f\xa1\x5e\x18\x93\x5b\xe0\xc9\x16\x3d\x95\x07\xf1\xbd\x01\x56\xe0\xf0\xd2\xd9\x95\xd5\xc3\x4b\x73\xab\xf3\xb7\x0d\xd4\xa5\xfc\xc1\xc1\x8f\x0c\x14\x6a\x22\x3c\x7e\x8e\x59\x40\xf9\x55\x1e\xd7\xdb\x96\xe6\x56\xf0\x12\xe4\xeb\xb9\xef\x63\xde\xdb\x47\x4a\x17\x75\x86\xb4\xca\x33\xc0\x7b\x85\x8f\x79\x1d\x52\x14\x05\x6f\x85\x29\x6a\xe7\xa0\xf7\x65\x82\xe3\xeb\xa0\x9f\xc1\x2e\xd0\xa2\x80\x81\x66\x10\x35\xe5\xc9\x46\x96\xb2\x20\xb2\x30\x15\xad\x65\xd1\x8d\xc2\x66\x40\x7e\x6c\xe5\x4c\x54\x43\x77\xdb\xd2\x1c\x94\xd9\x87\xe0\x00\xa0\xe3\xb2\xaa\x72\x41\x44\x49\x53\x1e\xd3\xec\xb8\x00\x75\x1b\x5c\xdd\xa1\xc8\x8c\x32\x54\x80\xc1\x7a\x14\x74\xbb\xd0\x1a\xeb\x4d\xea\xd7\x46\x18\x29\x39\xb2\x19\xc6\xad\x64\x73\x92\x67\x49\xb1\xc9\x15\x94\x54\xa9\xaa\x3e\x99\xf7\xe4\x66\x82\x8c\x2e\x79\xd4\x9f\x54\xf6\x70\x42\x59\x0a\xd6\x45\xa6\x80\x01\xb4\xc2\x5a\xf5\x35\x04\x68\x4a\x32\x11\xdb\x66\x94\xbf\xdb\x2f\x8f\xd5\x4f\xb5\x5e\xb9\x13\xde\x50\xb0\x09\x3f\x3e\xa2\x60\x13\xde\x35\xb2\x32\xf4\x31\xed\x4a\xd4\xed\x20\xdc\x01\xe8\x78\x34\x04\x76\x03\x22\x05\xe9\x74\x44\xdc\xa2\xe4\xcd\x22\x17\xdb\xc8\x63\x45\x75\x54\xf5\x39\x66\x30\xa9\x5f\x2d\x08\xe5\xdb\xa2\x26\x6a\xdb\x01\xcb\xb8\xe2\x9a\x96\xca\x96\x3c\x80\xcb\x99\x7e\xaf\x48\x13\x58\x69\x60\x49\x1f\xb8\x3d\x73\x7c\x6a\x8a\x8f\xcb\x59\xb8\x91\xf4\xd2\x09\xc2\x16\xc8\x51\x42\x28\x6b\xb0\xe2\xf7\xc5\x0d\x6f\x96\xd7\x51\x05\x94\x6a\x1c\xef\x75\x67\xf9\x14\x1f\x87\xe8\x95\x38\x29\x0d\x47\x98\xf1\x56\x12\x8b\x09\xdf\x7a\x45\x36\x6a\x96\xcf\x4c\xa9\xd7\xb6\x1a\x46\xf9\x94\xea\x5c\xa9\x1e\x4f\xd8\xd0\x0e\x6f\xad\xb1\x31\x35\x4b\xdd\xef\xad\x7
9\x3f\xe5\xa8\x5f\xb6\xfe\xdd\x4d\x80\x81\x2e\x88\xec\xb9\x2d\xb2\x22\xbb\x54\x5b\x6d\x69\x8f\x56\x4b\x9e\xcf\xa9\xc0\x1f\xd9\x4b\x14\x7b\x38\x60\xaa\x9c\xe4\x10\xa0\xba\x19\x52\x97\x95\x65\x81\xb2\x0b\xb7\xc2\xac\x19\xa4\x2d\xc5\x04\x07\x36\x10\x73\xd7\x9f\x63\x37\x0f\x0f\x62\xda\x46\x14\x61\x9e\x24\x7b\xab\xc3\xae\x44\x35\x92\x32\x29\xbf\xc7\xf1\x32\xf3\x82\x99\x2c\xa9\x20\x59\x68\xd2\xe2\xfd\x62\x9a\x82\x6d\xd9\x9a\x13\x03\xf3\x81\x9f\x0e\x2e\x62\xb9\xa8\x96\xab\xd2\x6d\xe2\xb7\x7d\xec\x68\x15\x47\x36\x64\x3d\xe9\xad\xb3\x42\xad\x7f\xcf\x3e\xef\x2d\xce\x1c\xa5\x47\x15\x7a\xc7\x2e\x34\x7c\x28\x8a\x88\xab\xb7\x55\xf4\x8b\x87\x6d\x7d\xff\x20\xdc\x40\xeb\x61\xb5\xd6\xff\x95\x11\x88\x92\xd4\x8a\xf5\xa6\xf7\x32\x2b\xba\x02\x0a\xb8\x6c\x0a\xf5\xe4\x70\x6d\xfa\x34\x2b\x2a\xeb\x3e\xcf\x9b\x29\xe3\x46\x60\x13\x77\x7a\x00\x61\xaf\x2b\x94\xf3\x6f\x3b\xde\x57\x9c\xb9\xff\x7d\x55\xf2\x5f\xbc\x8a\xdd\xf9\x28\xa8\x96\x52\x46\xba\x7f\x7c\xa5\xf7\x92\xad\x1f\xd9\x42\xaf\x84\xdb\xdb\xa9\x95\x97\x9c\x27\xc8\xc5\xaf\x14\x08\xa9\x8b\x5f\x72\x5c\x54\x8a\xcc\x8b\xd6\xac\xfe\xef\x57\xb0\x3f\xda\xc7\xae\x89\xcc\x53\x38\x44\x79\xff\xda\x3e\xef\x93\xfb\x06\x2e\x5b\x31\x67\xd5\xfa\x69\x59\x57\xa3\x65\x9b\xa6\x22\xeb\x82\xd4\xcf\x13\x34\x18\x70\x0a\x8b\xf7\xd5\x0c\x5d\x23\x5b\xb3\x2e\x37\xd3\x58\x00\x70\x9c\x81\x91\x8e\x13\x4a\x92\x32\xac\x5d\x3e\x3f\x4d\x33\x23\xc0\x97\xa1\xaa\x40\x51\x66\xa8\xd3\x09\xfc\xe8\x8a\xe6\x5a\xf1\x3c\x85\x38\x6f\x26\x29\x81\x34\xdd\x26\x0f\x3b\x52\xc6\x44\x4b\x49\x6b\x8e\xa4\x33\xb2\xce\x28\x83\x34\xc4\x81\x28\x7d\x3d\xcc\x8c\x6e\xe8\xe8\x5a\xd0\xc2\xc1\x17\xc6\xc1\x4b\xa9\x89\xf8\x8a\xa8\x47\x80\x5f\x9f\x50\x3b\x56\x01\xab\x9b\x89\x34\x0c\x22\x48\xcc\x2a\xd9\x3f\x20\x20\x02\x8b\x4e\xd2\xce\x4e\x91\x00\xf3\x20\x3b\x9f\x1d\x0e\x9a\x4d\x91\x65\xa6\x01\x21\xe8\x86\x87\x71\xb8\x0a\x38\xc0\xf2\x85\xba\xb1\x73\x5d\x0b\x9d\x51\x37\x27\x22\x2e\xe7\x94\x52\x50\x69\x03\xc1\x9e\x2f\x62\xee\x68\x8e\x54\xa9\xf2\x93\x14\x36\x51\x9a\x09\x1a\x8e\x1f\x6f\x1f\x86\x0f\xaf\x59\x4b\x00\x36\xe0\x56\x55\x8e\x20\xb1\x66\x9a\x2b\xfb\x6b\x35\x36\xb0\x38\xdc\xdf\xa8\x79\xbf\x58\x2b\x5f\xbd\xcc\x33\xbc\x2b\x9a\xbe\xd2\x89\x1f\xad\xf9\x8d\x53\xfa\x32\x8d\xc4\xd0\xae\x46\xb8\x72\x08\x91\x05\x58\xee\x24\xe6\xb7\x9e\x58\xb5\x7a\xf9\x7b\xc0\xc5\x55\x96\x37\xee\xff\xa8\x79\xbf\x5f\x1b\xbc\xfe\xa8\xc8\x92\x47\xbb\xaf\x51\x96\x5c\xb6\xde\x2e\xd7\xfa\x11\xcc\xfb\x29\x6b\x08\xbe\x39\x6a\xd1\x5a\x6d\x83\x6f\xee\x7e\x72\xd4\xbb\x4b\xc3\x9a\x23\xd9\x7f\x39\xe2\xab\xc0\xef\x86\xd0\x8e\x58\x88\x16\x61\x20\x6b\x08\x3f\x75\xd3\x87\x62\x4a\x5e\x06\xb5\xd3\x7c\x7e\x84\xc5\x6c\x1f\x10\x71\xb9\xc2\xbb\x61\x2e\xee\x9b\x71\xb6\xe6\x17\x35\x94\x7a\x15\x99\x81\xff\x80\xf3\x9d\xec\xa9\xc3\x37\x71\xf3\xdc\xff\x02\xb6\x1f\x2c\xe0\x99\x7b\xab\xf7\x6c\xa5\x26\x81\xd3\x16\x2f\x13\x53\x07\x41\xbc\x03\x3f\x98\xd4\xd4\xb6\x52\x11\x3e\xea\xb0\x91\x5e\xd8\x72\x3f\xe4\x78\xef\x76\xe6\x28\x73\x8c\x46\x0d\xd9\x6a\x54\x80\x8c\x59\x7a\xd0\x4c\x13\x79\x58\x03\xf6\x6b\x15\x83\xaa\xbe\x8b\x2e\xbd\x16\xe9\xae\x88\x22\x00\xf7\x4c\xe6\x3a\xe5\xb5\x03\x44\x63\x98\x3c\x7d\x9c\x09\x25\xc6\xba\x73\x8b\x0b\xb6\x9f\xf2\xc5\x6c\x4c\x16\x06\xde\xbf\x25\x6f\x5e\x2b\x8b\x18\xc4\x05\xd5\x8f\xfa\x43\x6a\xdd\x49\x28\x56\x3e\x68\xe6\xe1\x05\x1c\x63\xbb\xf4\xff\x35\x62\x83\x71\x9a\xc7\x19\x48\xbe\xcd\x32\x72\xc7\x96\x58\x2e\xdd\x4f\x8f\x78\x8d\x2d\xee\x17\xdb\xa1\x88\x93\x5e\x7b\xa3\x1c\x84\x18\x89\x1c\xe2\x42\xa8\xe2\x7d\x32\x31\x92\x87\x44\x3b\x45\xb4\xf7\x7f\x30\xe8\xfd\x53\x35\xf6\x6e\x87\x3c\x90\x6f\x76\xbc\x57\x3a\x90\x14\xac\x8d\x95\xd8\x9f\x37\x3e\x86\x29\xb7\x9e\x9d\x
ad\x88\x0e\xdb\x96\x77\xe7\x19\xcb\x1b\x39\xa4\x5e\xd5\x1b\xbf\x1c\xb0\x7a\xbb\x17\xb6\xc4\x61\x83\x2b\xe4\x5a\x58\x06\xe6\xd7\x8e\x5b\x29\xec\xe3\xde\xd3\x20\x18\xa4\xec\xed\xc3\x4f\x5b\x18\x22\x07\x98\x67\x0e\x7f\xb7\x8b\x81\x45\x79\x90\x8b\xf5\x5e\xb4\x22\x80\xb3\xde\xfd\xc4\x01\x6f\xaa\x74\xad\x2a\xd7\xde\x78\x64\x48\xba\xfd\x5b\xf6\xef\xa5\xdb\xef\x36\xdd\xfe\xa4\x4a\xb7\xbf\xc9\x14\x73\x53\xcc\xdf\x82\x41\xb9\x62\x60\xf7\x32\xea\x77\x91\x51\xbf\x68\x24\xd4\x3f\x1c\x22\xa3\xc1\x7c\xfa\x87\x9c\x17\x6d\x9f\xce\x7e\xdc\x3d\xaa\xd3\xd9\xe5\x50\xea\x6c\xf6\xd2\xa2\x2c\x67\xb4\x7f\xed\x2a\x0b\x43\x83\xe2\x91\x40\xb2\xaf\x2c\x2e\xa4\xe1\x05\x91\xba\xbf\x70\x95\xf7\x1b\x35\xfd\x93\x37\x83\x6e\x0e\x06\xf2\x41\x15\x22\xe0\xf3\x0a\x69\x5c\x85\xea\xf0\x45\xa5\x2b\xf0\xf1\xf9\x95\xc5\x09\x95\x2a\xd0\xc2\xc2\x10\x3e\xa9\x80\x97\x55\xa1\xb6\x26\x7d\x4d\x90\xe7\x41\x13\xd9\x46\xe8\x10\x45\x1c\xa0\xbd\x4c\x6d\x66\x89\x46\x13\x6d\x09\xa4\xd0\x12\x3a\xa4\x8c\x5e\x0f\x33\x65\x78\x6a\x61\xe9\x72\x7f\xd9\x59\x09\xdd\xa4\x65\x35\x56\x2a\x48\x19\x05\x13\x53\x56\x51\x12\x63\xda\x83\xcf\x8b\x8e\x52\xf0\x33\x18\xc5\x1a\xd7\xed\xa8\x36\xa9\xd3\x5b\x92\xef\xef\x0f\xee\x49\xbe\xdd\x4a\xbe\x3d\x89\xf5\xc8\x25\xd6\x7b\x46\x0d\x91\xf5\x86\xd1\x9d\xe6\xe5\x5a\x32\x0b\x55\x3c\x40\x01\xf9\xd5\x11\x0d\x03\xa2\x00\xbc\x34\x10\x88\xfe\x0b\xe3\xa4\x0c\xf6\x27\xdb\xce\x39\xbf\xb2\xa8\x04\x45\x41\x16\xa8\x87\x86\x08\xa7\x6f\xe4\x61\x8e\x88\xe3\x0d\x51\xd2\xa6\x35\xe1\x17\xe9\xd9\xb2\xbc\x5b\x45\xbe\x04\xe8\x92\xf2\xdb\xe3\x13\x08\x76\xad\xb1\xc6\xf0\x6b\x68\x0b\xa2\x2f\x43\x49\xca\xee\x73\x7c\xc6\x24\x6f\x4e\x52\x70\xd1\x4c\x62\xb8\x3f\xd2\xd7\xc5\x2d\x39\x13\x74\x06\xb4\x0a\x91\x8c\x7b\x1d\x91\x86\xcd\xe2\x6d\x3e\x7e\x77\x50\xbf\x77\xaa\x7e\xc3\x5c\xfd\xae\x7b\x26\x88\x50\x09\xd1\x05\xc6\xeb\x13\x93\xbc\x95\xe4\x19\x1f\xf7\x27\xc8\xe2\x6d\x94\x91\x29\x68\xed\xc7\x0c\x51\x85\xdd\xc3\x40\x5e\xb9\xe7\xd8\xf4\x50\x0d\xa2\x6a\xef\x58\xe9\x8a\xa6\xc7\x57\x28\x71\xd2\xca\x00\x92\x63\x81\x0f\xf9\x0f\x39\xf7\x6c\xbf\xc3\xcd\xba\xd7\xeb\x1d\x4e\x7d\x0a\xdb\x59\x20\xb7\xa8\xcf\x96\x77\xb9\x4f\x5e\xc9\xea\x46\x4d\x2b\xc2\xa8\x6f\x5b\x5d\x5d\x22\xe6\x6b\x30\x68\x7c\xff\x95\x5e\x52\xba\x56\x60\x86\xe1\x41\x32\xdf\x50\xd0\x0e\x8d\xa0\x79\x5e\xc4\xb0\x8c\x09\xc0\xb2\x97\x46\x15\xb8\x09\xb0\x13\xac\x27\xe9\x26\x7a\x93\x48\xc4\xaa\xb7\x2f\x39\x07\xe8\x4f\x6b\x5b\x78\xe5\x15\xec\x87\x1d\xa6\x6e\xb9\xef\x76\xd8\xec\xd0\x21\xa8\x68\x18\x35\xe0\x16\x7c\xdd\xbb\x8b\xfe\xb0\xf6\x12\x9d\x05\xd0\xd2\xe1\xfd\x85\x30\x4b\xc8\x78\x29\x1f\x24\x1e\x4e\xed\x19\x33\xdb\xe2\xb3\x1f\xac\x51\x98\xc8\x3b\x6a\xde\x6b\x6b\x2a\x50\x04\x91\xbb\x6c\xae\x3a\x15\x34\x12\xc4\x03\x98\x9f\x3e\x9f\x47\xf6\x1d\x79\x64\x05\x34\x62\x75\x4e\x34\x17\x5e\x2b\xcc\x06\xc2\x20\x8b\xb9\x1b\x44\xdc\x93\x9f\xf0\xd4\x71\x1f\x53\x66\x78\xa0\x02\x3b\x40\x20\x2c\x9f\x9c\xe7\x33\x37\x5c\x7f\xdc\x07\xfb\x4f\x36\x90\xb7\x13\xf0\x43\x87\x0f\x15\xb9\xbd\x85\xd7\x10\x92\xc4\xe1\x15\xf8\xf6\x00\x6a\x29\x65\xc8\x43\xb3\x6d\x0a\xa6\x03\x6c\x4c\xbe\xb8\xda\xef\x0a\xf7\xc7\x0e\x78\xef\x3d\xb0\x44\xbf\x0a\xa5\x43\x59\x67\x88\x69\xd4\x5a\x32\xd0\xa5\x6a\x56\x61\xb5\xe1\x65\xf2\x84\x1a\xf0\xf2\x88\x43\x57\x38\x76\x66\xf9\xb3\x14\x36\xea\x69\x03\x46\x51\x07\xf0\x10\x2a\xaa\xcf\x9f\xc5\x11\xdc\xa3\x78\x4e\xa3\x34\x06\xc5\xe3\x84\x6c\x92\x75\xa3\x30\x97\x5d\x09\x1d\x75\xba\x00\x58\x64\x1c\x7c\xc8\xf8\x16\x7e\x80\xec\x48\x8d\x7e\xf1\x67\x90\x85\x40\x08\x69\x3d\x40\x62\x1d\xbd\xf5\x8c\x6b\x6f\x30\x01\xcb\x91\xd9\x0c\x5e\xd1\x9f\x97\x57\x0e\x1d\x3e\xc4\x33\xd1\x0d\xc0\xe5\x24\x4b\xd5\xf0\xa4\x19\x0f\x18\x27\
xd4\x52\xc0\x9f\x81\x16\x00\xe8\xf6\x05\x48\x3c\xa2\x28\x35\xaa\x41\x1d\xfc\xc1\xd4\xc2\x64\x9d\xab\xe0\x4a\xc6\x75\x91\x98\x83\x7b\x46\x27\xca\x85\x06\x6f\x93\x6a\x07\x0d\x84\xca\xf6\x0a\xa4\x2a\x40\xd3\x80\x5b\x44\x4f\xea\x85\x30\xb6\xca\x57\x59\xeb\xe8\x58\xc0\xda\x63\x88\xa9\x82\x82\x51\x8d\x12\x99\xbe\x74\xb8\x11\xdc\x8b\x08\xb3\x1a\xc7\x06\xdf\x54\x0f\x34\x82\x7b\x27\x7c\xf6\x2c\xbe\xa8\x0c\x7b\x30\xbf\x94\x7c\x9e\x45\x65\x7d\xeb\x79\x07\xbe\xe2\x2e\xcf\xa5\xd8\x95\x37\x49\xc0\xcc\x47\x41\x96\xf9\xa5\x82\x11\xb1\x15\x72\x0f\x70\xef\x0e\xd0\xf7\x01\xc3\x24\xf4\x04\x96\x3d\x92\xd2\x63\x61\x5e\xf0\x54\x63\xc6\x9e\x42\x9b\x49\x71\x06\x53\x9e\x9b\xd4\x61\x7c\x56\xfe\x1c\x02\xcf\xa0\x96\x0f\x0e\x36\x4a\x62\x57\x4b\x96\xde\xb3\xd2\x8f\xab\xfb\xc2\x5a\xb8\x3f\xcc\x2c\xa6\xc7\x2a\xdc\xaf\xa5\x34\x4c\xd2\x30\xef\x9f\x92\x9b\xab\x85\x0f\x0d\xf6\x91\xaf\x1e\xf4\xce\x6e\xfd\x88\x1d\xfc\x30\xfc\x59\x0d\x37\x5d\x69\x3e\xf9\xff\x0e\xb0\xaf\x9a\x58\x6e\xbf\xbd\x4b\x2c\xb7\x77\x38\x6b\xaa\xac\xb5\x81\xe0\x3d\xfc\xe8\xa1\xec\xb1\x87\x78\xdb\x3b\x29\xed\xf6\xa4\xf4\x06\x4d\x4b\xf6\x2a\xc7\x1b\x5f\x83\x3f\xd7\xec\x39\x48\xe2\xa8\xde\xc5\xb9\x18\x0a\x1b\x9e\xf1\x76\xb6\xc8\x6e\x1d\xaa\x8a\x3c\xbc\x35\xb2\x77\x70\x7b\xe4\x07\xb7\x87\x9c\xd7\x38\xdb\xab\xcf\x0d\x77\x4d\xab\xcf\xe6\xd8\x14\x71\x0f\x25\x65\x7a\x6b\x69\x65\x6b\xd8\x30\xbc\x8c\xbd\xa5\xc6\xc6\x2b\x22\x86\xb4\x4d\x08\x6c\x51\x77\x06\x21\xb0\xe7\x7f\xcd\xf1\xae\xaf\xbc\x83\x03\xbf\x49\x3f\x90\x4f\x00\x5d\x85\xea\x69\xdb\xeb\x74\xc9\x61\x67\x0b\x84\xc1\x05\xef\xba\xd3\x84\x21\x98\x8a\x76\x80\x64\xf1\x9b\x1b\x7d\x33\x3f\x02\x9d\x32\x72\x28\xfb\x22\x57\xc9\xa9\x96\xc0\xbf\x55\x23\x09\xde\xe4\x4d\x8d\x03\x39\xed\x84\xc2\x09\x7c\x58\x05\xfd\xd7\xab\xd8\xb5\x15\x5d\x52\x06\x74\x71\xdf\x76\x95\xf7\x13\x4e\xf9\x2a\x1f\x5f\xba\x63\x42\xad\x03\xb4\xa6\xe9\xc9\x0e\x1e\x34\x92\x6b\x8d\xbe\x22\x36\x8b\x11\xf3\x1f\x80\xb1\x09\xb3\x3a\x88\x92\x76\x42\x88\x8c\x1c\xc8\xe2\x1f\x2e\xb9\x20\x7d\xfb\x70\x57\x57\xaf\x4e\xc8\x16\xd6\x38\xfc\x2d\xdb\x93\xca\x7b\xf6\xab\xc7\xcf\x7e\xf5\x79\x53\xef\xf9\x25\x67\xb7\xf6\xab\x57\x3b\x2b\xff\x02\xf4\x9c\xd7\xd7\xc8\xf2\xf2\xca\x1a\x3b\xb6\x2d\x55\xe7\x00\x4e\x54\x57\x34\xbd\x5f\x76\x00\x27\x51\x2d\x3e\x4d\x78\x51\x98\x63\x0a\xaa\xb0\x58\xe3\x62\x26\x9b\x86\xdd\x4c\x5b\xe5\x97\xb6\x93\x3a\x97\x4d\xb4\x5c\x3b\x78\x89\xbd\xad\xa6\x21\x57\x5f\xb7\x05\x6d\xf1\xf0\xee\x80\x77\xbd\x5f\x77\x28\x3b\xc3\xc8\x95\xca\x0d\xda\x5f\xc3\xd4\xaf\x32\x24\x54\x70\xfe\x40\x3f\xf9\x7c\xa9\x9c\xb9\x8c\x18\x10\x3e\x5f\x56\xc8\x47\x8f\x6e\xaf\x3c\xe4\xbc\x70\xfb\xdd\xff\x98\x3b\x53\x45\xc3\x59\xee\xa0\xb2\xdd\xec\x53\xfb\xd9\x73\xb7\x0b\x65\xae\x8e\xa6\xbb\x85\x12\x26\xdc\x7f\xda\xe7\xbd\xc7\xd9\xe6\xa1\x6d\xb3\x2e\xe8\x1c\x9a\x07\x69\x1b\x92\x0d\x30\x41\xf6\x5c\x97\x72\xd2\x36\xe3\x22\x1f\x23\xe3\xe3\x50\x38\xdd\x84\xbf\xe1\x09\x08\xd4\x83\xd4\xcd\xae\x80\x58\x87\xa8\x3f\x61\x2b\x12\x5f\x18\x65\x1f\xaf\xb1\x83\xfa\x15\xf7\x83\x35\xf6\x9c\x47\x18\xd7\x0d\x11\xe4\xde\x1f\x39\xc5\xf7\xc3\xcc\x8e\x63\x47\x30\x5c\x75\x69\x01\x92\x06\xb6\x0e\xda\x46\x82\x6a\x04\x68\x95\x67\x5b\x1d\x98\x0f\x78\x38\x61\xac\xf3\x4a\x08\x33\x11\x8d\x58\x45\x34\x7e\x75\xc0\xfe\x38\x02\xe0\xc8\xcf\x6d\x84\xed\x0d\x31\x90\x02\xa1\x27\x3f\x98\x2b\x66\xa6\xa6\x32\x44\x5d\xed\x65\xa2\x35\xe1\xb3\xf7\xd5\xd8\x01\xea\x71\xf7\xad\xbb\xed\xb0\xcf\x39\x6a\xf0\xb6\xee\xae\x73\xdd\x9d\x74\x96\x6a\x92\x9c\x40\xb3\x8c\xf3\x67\xf1\x30\x6e\xa6\x84\x72\x84\xb9\xd9\x90\x5c\x71\x14\x31\x1f\xbb\x22\xe5\xc7\x75\xea\x02\x3c\xdf\x4a\x7a\x8d\x48\x94\x12\x47\xaa\x1e\x3e\x53\x91\x46\x01\x11\xf5\xec\x9f
\xc6\xd8\x33\x2b\x60\xeb\x09\x9b\xc4\xa0\x2a\xf8\xed\x5d\x53\x15\x7c\xe2\xc0\xbf\x2e\x20\xf7\x3d\x7d\x72\xb7\xfa\xe4\x86\x3a\xe4\xbf\xf4\xe1\x10\x2f\x1c\x61\x53\xcc\xdf\x96\x78\xc1\x9a\xc1\x7b\x9a\xeb\x6e\x0e\xf0\x77\x6f\xbf\x83\x5f\xef\x1e\x7f\xb8\x7c\x05\x74\x2a\xff\xed\x83\xec\xbb\xb6\xa4\x75\x5f\xcc\x45\xc7\xfd\xd8\x41\x6f\xc5\xbe\x64\x68\x8c\x9d\x30\x3e\xdc\x09\x2e\xf2\x1e\x9c\xab\x31\x27\x11\x50\x39\xe2\x7e\x29\x72\x57\x59\xa4\x93\x18\x6a\xe7\x5f\x72\x00\xbc\xde\x12\x53\x9f\x1d\x63\x3f\xe9\xb0\x03\x24\xaf\xdd\x0f\x39\xde\x9d\x8a\x08\xc6\x4a\x52\x56\x39\x42\xf8\x3d\x9c\x41\x0d\xe3\x83\x18\x30\xb9\x6e\x42\x1d\x74\xd0\x68\x9e\x74\xc2\x5c\x8e\xee\x03\xce\xcd\xec\xe1\x04\xdf\xf8\x3a\xaa\xf0\x05\xbd\x20\xce\xc3\xbc\x6f\xc6\x9a\x7e\xc1\x61\x57\x53\xa5\x89\x3e\xda\xfd\x45\xc7\x7b\xf9\x82\x75\x49\x2d\xb0\xd6\x56\x2d\x52\x86\xfe\x1d\xb4\xc9\x70\x63\x3c\x3a\xad\xfa\x7e\x87\x8d\x74\x82\x8b\xee\xab\x1d\xef\xe8\x69\x3d\xc6\xcd\x24\x96\x3a\x7d\x18\x2b\x04\xb3\x30\x43\x30\x96\x72\x65\x2f\x73\x6d\x7e\xa7\xc6\x9e\xd4\x09\x2e\xe2\x4c\xc4\xa6\x2f\x4b\xc1\xed\xfe\x7c\xcd\xfb\x7f\x9d\xd3\x15\x77\x64\x6f\x19\xce\x39\x15\x42\x50\x44\x88\xa2\x63\x8f\x50\x2e\x54\x77\x42\x76\x25\xe5\xd6\x06\x48\xfa\x0b\x3a\x65\x9c\xc4\x75\x48\xbf\x44\x1a\x5d\x7c\xa4\x15\xea\x60\x65\x63\x38\xaa\x93\x33\x51\x1e\xf5\x3a\x22\x45\x4e\x75\x39\xc2\x37\x96\x76\x22\x9c\x21\x72\x41\x35\x7a\x69\x96\x6b\x05\xcb\xae\xf7\xa3\x31\xce\x61\x8c\xe3\x1c\xc6\x8f\xf3\x38\xcf\x11\xa9\xc5\x0d\xde\xe4\x2a\xd1\x58\xd8\x82\x84\xa8\xb0\x3b\x36\xf7\xbe\xb9\xb7\x7d\xf6\x49\x56\x64\xaa\x11\x7e\x40\xa1\x67\xe0\x92\x72\xdf\xf7\x24\xef\x6b\x8e\x79\xc5\xa0\x5d\x41\x0f\x9d\xc6\x1d\x44\xa0\xa1\x26\x3c\x04\xa9\x71\x68\x73\x2b\x12\x84\xcb\xe7\x25\xcd\x48\xd8\xea\xc7\x41\x87\x5c\x56\x86\x65\xce\x67\xcc\xfc\xb2\xa8\x8a\x05\xbb\x71\x20\xea\x45\x7d\x17\x2b\x12\x34\x9b\x09\x5a\x31\xf3\x84\x8b\xbc\x09\xfb\x6a\x18\xf3\xc2\x54\x01\x51\x34\xfe\x25\xe7\x8a\xe2\xcb\x76\x5e\xe3\x4f\x5f\xc3\x62\xf6\x24\x38\x38\x60\xbd\x4f\x5c\xec\x06\x10\x27\xe0\xde\xe1\xdd\x36\x57\x71\x5d\x2a\x44\x9b\x99\x45\xcc\x57\xaa\x15\x9c\x42\xc8\x56\x20\xe4\x6b\x36\xe8\xee\x9e\xe6\xb6\x4b\xcd\xed\x9b\x0e\xbb\x12\x22\x0a\x89\x54\xd9\xfd\x7d\xc7\xfb\x79\x67\xa1\x7a\x9e\x55\x4c\x4c\xd5\x89\xa5\x61\x4b\x05\xc1\x8f\x15\xbc\x77\x19\x21\x36\xd3\x87\x90\x62\x9c\xdf\xed\xa5\x89\x37\xc9\xbd\x2c\x59\xcf\xbd\x7b\xc0\xdb\xad\x81\x31\x5b\xbc\x4e\xf8\xe2\xca\x41\x7c\x07\x65\x4b\x67\x61\xa7\x1b\xf5\x91\xe9\x3d\x5c\x87\x88\x84\x50\xe7\x4c\x6f\x95\x74\xf1\xf7\x0e\xbb\x06\x67\xd3\x2d\x21\x44\x4f\x9d\x4e\x5a\xc2\xfd\xaa\xe3\xfd\x96\x73\x47\xf9\xb2\x11\x30\xb6\x91\x6c\x0e\xb4\x7d\x3e\x0a\xc2\x8e\x99\xb7\x6f\x76\x14\xa2\x6b\xf5\xe2\x96\xcf\x2d\xd4\x76\xeb\x23\x8b\x9d\x8e\x68\x85\x84\x27\x87\xe9\xd4\xab\x16\xa4\x38\x40\xd2\x6d\x24\x71\x92\xe2\xae\xa0\x30\x7c\x41\x72\x21\x00\x1d\xf4\x0b\x99\x9d\x9a\x1b\xa2\xd5\x83\x73\x2b\x81\xd5\x59\x72\xec\xf5\x23\xec\x1a\x0a\x66\x59\x45\x74\xe9\x50\x64\xee\xdf\xd4\xbc\x0f\xd7\x96\x85\x7c\xa6\x49\x08\x0c\xb2\xe9\xb9\x7e\x82\x76\xa8\x0b\x3b\x13\x44\xfc\x44\xd0\xdc\xd0\x18\xe2\x10\xff\xa6\x17\xa1\x54\x93\x93\xcd\xd8\x40\xb3\x53\x28\xd7\xb6\xa9\xd0\x24\x6d\xa0\x9a\xf6\x55\xe6\xe8\xaa\x48\x3b\xea\x94\x18\x20\xa8\xb4\xce\x9c\xd2\xa5\xa5\xd4\x1c\x82\xcd\xb8\xcc\x3d\x5a\xcc\xad\x1b\xd8\x75\x3b\x30\x96\x56\x35\x61\xef\x18\xb3\x67\x80\xb7\x4d\x10\x6f\x75\x18\x2b\x54\x04\xf7\x55\x8e\x27\x96\x0a\x8d\xa1\xc8\xa7\x2b\xa9\x11\x70\xa9\xd8\x8e\x71\x1e\xab\xbd\x0a\x04\xb0\x5e\xb9\x95\x92\xda\x7f\x60\x8b\xa4\xb7\x33\xcc\xdc\xea\xdd\x9b\xbd\x23\x4b\xc6\xa7\xec\x70\x5a\x93\x24\xcc\x78\xc9\x92\x40\xef\x72\xd8\x5
5\xa9\x68\x4a\xb9\x49\xf0\x13\xaf\x71\xbc\x8b\x8f\xc2\x5e\x03\x9b\xba\xf1\x1d\x3b\x10\x67\x01\xb2\xe4\x6c\xe8\xb4\xb5\xed\x4f\xc6\x37\xb9\x37\x6e\x17\x18\x6a\x2a\x61\x65\x1b\xf7\x0f\x15\xb1\x3d\x08\xbd\xde\x6e\xa7\xa2\x0d\xde\x0c\x13\x09\xa0\xcc\xa7\x3e\xb7\xb4\x48\xe0\xca\xee\x97\x0e\x7a\x77\x17\x3f\x6d\x04\x36\x83\x43\x04\x93\x0f\xc3\x26\x60\x3a\x02\xd1\xf4\x1d\x8a\xf3\xfa\x8c\x19\x62\xec\x29\x2a\x6c\x68\x93\x67\xdb\xa8\xbf\x6f\x8c\xdd\x6e\xac\xb2\x2d\x80\x47\x76\xb4\xc8\xd8\x07\x1d\xf2\x2f\xbd\xc7\x61\x67\x2b\x8b\x7a\x64\x9d\x02\x9e\xa7\x93\xe0\x78\xd2\x69\x7e\x66\x0a\x85\xec\x10\xe4\x04\xa0\x98\xe9\x82\x6e\xbe\x08\x9d\xa6\xde\x93\xf3\x53\xf9\x7d\xde\xe0\xb0\x17\x5c\xce\x6a\xa2\x47\xe8\x3a\x72\x08\xe9\xaa\xb6\x44\x1a\x22\x64\xed\x40\x8a\x4b\x0c\xbc\xe5\x54\xb3\x3d\x35\x77\xcf\xe1\xfd\xf8\x99\x0d\xd7\xb7\x17\x8e\xf3\xee\x5c\x9d\x84\x60\x31\xeb\x4b\x22\x90\x17\x89\x63\xf6\x52\x21\x29\xca\xbe\xf0\x94\xed\x72\x40\x81\xa5\xef\x23\x4f\xf1\xae\x9f\xe3\xa5\xab\x3a\x68\x70\xd0\xe9\x6c\x3c\xe9\x5f\x72\x34\xc1\xd4\x25\x67\x2c\x17\x9d\xae\x9c\xc8\x97\x9c\x2b\x28\x20\xfd\x4c\x39\xb7\xf7\x17\x9e\xcc\xde\x5c\x63\x63\x2a\xad\xde\x7d\x55\x4d\xa1\x80\x7d\xd3\x31\x11\xca\xd0\x16\x07\xb8\x6b\x55\x18\x66\x16\xf1\xd5\x2a\x7d\x56\x61\xba\x61\x34\xa9\x2a\x0b\x23\x7f\x32\x11\x67\x05\xf4\x4e\x1f\x9e\x21\xf4\xb3\x90\x82\x50\xd5\x31\x5e\x0a\x75\x55\x24\x46\xe5\xca\xad\xf9\x42\xd8\xea\x05\x51\x51\x2e\xd0\x71\x90\x75\x0a\x10\xc6\xd0\xbd\x8c\x41\xb0\x04\xa7\x63\x85\x9f\x9b\x94\x45\xd3\x36\xb4\xd6\x8f\xd7\xd8\x93\x52\x81\x3b\xf5\x6d\xa1\xdc\x0c\xfb\x60\x2f\x73\xdf\xae\x7b\xe7\x9f\x9c\xaa\x07\x54\x4f\x75\x82\x8b\x61\xa7\xd7\xb1\x7a\x0a\x9f\x2e\x91\x7b\x76\x82\x10\x23\xa0\x34\xac\xb7\x31\x9a\x00\x91\x84\xaf\xf1\x0d\xfc\x0a\x66\xd4\x94\xaf\xaa\x06\x43\x9f\x05\x80\x40\xa1\xbe\x86\x9c\x27\x16\x74\x62\xa0\x3c\xf4\x51\x5f\x83\xec\x95\x67\x9b\xda\x3b\x31\x81\xa7\xec\x11\x9c\x9e\xb2\xfb\xeb\x47\x4d\x5e\xb3\xf7\x5e\x0e\x5e\xb3\x2f\x3a\x26\xaf\x59\x40\xe0\x2e\x65\x4a\x3d\x53\x1b\xc4\x00\x6d\x4c\xc3\x80\x29\xc1\x9b\x98\x48\xb8\x98\x1b\xe4\xa4\xa8\xbf\x25\x2d\xae\x96\xc6\x21\x22\x41\xab\x96\xdb\xbb\x25\x70\x3f\x7c\x2d\xfc\x5b\xd7\x64\xed\xec\x1f\x6b\xcc\x5c\x8b\xee\x9f\xd7\xbc\x3f\xac\x19\x17\xaa\xe8\xe4\x54\x32\x09\x34\xb7\x2d\xbf\x1b\x53\xb6\xa5\xb9\xf4\xf1\x24\xa8\x9e\x85\x26\x8b\x8b\x21\xe8\x42\xeb\xe8\xa2\xb5\x26\x17\x61\x50\xa2\x27\x5f\x4e\x1f\x05\x72\x0a\x5f\x17\x39\x00\xfd\xa8\xd5\x53\xd4\x24\xf7\xf9\x92\xec\xfa\xb6\xc8\xf9\xc2\x99\x15\xe0\x15\x42\xac\x08\xa8\x1d\x26\x4c\xa8\x90\x7d\xb9\x77\xce\xca\xee\xae\x2b\xb1\x55\x27\xc4\x19\xa3\xc5\x3e\x4d\x2f\x3f\xbb\xd0\xf4\x55\x84\x0c\x90\xe5\xd0\xd1\xdc\xab\x28\xc0\xc3\x94\x18\x8b\x3d\xd1\x68\x9c\x91\xdd\x6a\x69\xe8\x1f\xa9\x31\x2d\x14\xdd\xf7\xd7\xd8\xd4\xf6\xb1\x2f\x49\x4b\xc9\x1e\xd0\xc5\xbe\xe1\xa8\xf7\xd5\x48\xa9\x8c\x57\x24\xba\xb2\x4c\xa2\x72\x9e\x99\xab\x5d\xa9\xf1\xe1\xba\x94\x75\xbd\xf5\xf5\xb0\x19\xa2\x1f\x43\x09\xb1\x14\x53\x58\x9a\xb9\xb6\x35\xc8\x42\xb2\x3c\xe8\x74\x45\x8b\x4b\xc5\xa9\xa2\xb1\x48\x27\xd6\x8b\xd6\xe5\xbf\x30\x2f\x6c\x71\x49\x52\x91\x10\x38\xf4\x98\xea\x0d\x3c\x15\x05\x52\xb7\x39\xa3\xd8\xd7\x1d\x76\x35\x6e\xd5\x2b\x0a\x64\xf9\xf3\x5b\xe5\x4d\x55\xec\x68\xe7\xac\xd7\xbd\xb7\x39\x76\x79\xa5\x53\xd6\xd0\x17\x4b\x94\xc8\x1d\xca\x76\xce\x13\x52\x25\x70\x52\x0e\x8a\x50\xa4\x77\x08\x0a\x89\x09\xd3\x06\x0c\x40\xc5\x36\xc5\xfe\x69\x84\x3d\xe9\x42\x61\xf4\x52\x37\x32\xf7\xeb\x23\xde\x7b\x47\xaa\xee\xd8\x11\xde\x4d\x34\x95\x41\x1d\x41\x30\x99\xf8\xf2\x79\x52\x24\x86\xa1\x3c\xad\x9e\xa8\x55\x6b\xb1\x43\x48\xa3\xa5\xf5\x88\x56\x7c\xf5\x59\xa0\x32\xda\x0c\xfa\xca\x
73\x18\x9a\x84\x64\xc6\x02\x26\xa6\xc2\x13\x90\xad\x03\xef\x62\x87\x81\x83\x20\xcb\x4d\xff\x8e\x09\xfb\xa8\x13\x56\xc6\x1b\x7d\x10\x4b\x2a\xf7\xfc\x34\x98\x2e\xc3\x18\x09\x70\x8b\x48\x5e\x1c\x84\x5c\x2b\x01\x73\x55\x1f\xcb\x83\xf3\x22\xe3\xdd\x54\x34\x45\x0b\xa9\x5e\x81\xd3\x27\xee\xeb\xf3\x7c\xa9\x9c\xc9\x82\x37\x4d\x27\xa6\x5a\xd6\xaa\x59\x76\x3d\x3b\xfe\xb0\x63\xd9\x60\x58\xd9\xcf\x8e\xb2\x27\x76\x93\xd6\x69\xcd\xcc\x48\xe7\xf7\x0f\x8c\x7a\x6f\x1b\xad\xb8\xa1\x86\x0d\x2d\xa7\x7a\xc8\xd5\xfa\x26\x5c\x3d\xa8\x07\xe1\x6e\x4a\x9d\x77\x12\x67\x23\xc2\xef\x61\x44\x4e\x0b\xdc\x45\x71\xd2\x12\xd9\x24\x07\xff\x88\x88\x4b\x08\xa8\xe6\x0e\xdc\xd5\x20\x96\x6b\x67\x53\x80\x50\x5d\x16\x41\xab\xbf\x36\x49\xa2\x72\xa0\x26\x61\xac\x62\x75\x64\x81\x89\x7c\x87\x8f\x4b\x79\x3a\x45\x8c\x41\xf2\xef\xe9\x49\x2e\xf2\x26\x92\x48\xa9\xa8\x6c\x9a\x93\xb0\xe2\x36\x83\x50\x1e\x75\xf2\x30\xe2\x42\x89\x24\x98\xac\x41\xab\xaf\x76\x17\xf9\x4e\x18\xf7\x0a\x1a\x4f\xb3\x11\x93\x4a\x1c\xaa\xcc\x22\x8b\xc8\x24\xe9\x76\x93\x2c\xcc\x05\x56\x0f\x1b\x1c\x44\x70\xee\x02\x2c\x1d\xa3\xd1\x4b\x41\x1a\x44\x91\x88\xd6\x4c\xca\x29\xb2\x03\x29\xbe\xd9\x2e\x3d\x83\xec\x66\x6a\xcf\x57\xba\x2b\x8e\x85\x02\xb8\xa2\xe8\x78\xdc\x0b\x93\xd8\x8c\x05\x23\xf0\xa8\x48\xe4\xa2\xa0\xb3\x0d\xe4\x92\x28\x91\x42\x3c\x38\xca\x26\xb7\xc2\x91\xf2\x57\x93\xf3\x22\x5e\x16\x52\x57\x00\xfd\xfe\x35\xa3\xde\x89\xd2\xb5\x4a\xf6\x6d\x98\xfd\xf2\x39\x6e\x97\xa9\x53\x3c\x2d\x45\xfe\x63\x23\xec\xaf\x6b\xec\x60\xd0\x6b\x85\x72\x3d\x65\xee\xd7\x6b\xde\xaf\xd4\xe6\xd4\xcf\x41\x4e\x06\x03\xe6\xa6\x80\xc0\x34\x50\x46\xc1\xe0\x52\x28\x8e\x7a\xf5\x61\x95\x0c\x18\xa4\x20\xf3\xb9\xfa\x4e\x3d\xd8\x44\x86\xba\x52\xb5\x93\x94\x3c\x1b\x17\x44\x8a\x18\x44\xf4\x41\x7c\x72\x13\x0e\x06\xb9\x88\x89\xb2\xd6\x96\x40\x54\xe1\xa0\x68\x8b\x21\x49\x28\x80\xcd\xb8\x2b\x2b\xa0\x80\xb1\x26\xad\x37\x2b\x91\xf0\xf4\x4d\xfa\x8c\x89\x98\xa1\x93\x3a\xb6\x70\xb9\x5c\xc7\xf6\x41\x23\x5c\xdf\xfb\xae\x55\xec\x1b\x52\x0c\x88\x20\x48\x04\x29\x90\x23\x9e\x57\x98\xcc\xf4\xe2\x7f\x74\xd9\xd3\xab\x80\x61\x51\x35\x5a\x4a\xd2\xdc\xfd\xda\x35\xde\x8c\xf1\xbb\xda\x06\x24\xe7\x2d\x3e\x73\x28\xe3\xdd\x24\x95\xa7\xc0\x51\xf9\xaf\x35\x3f\xde\x7d\x0d\xfb\xea\x28\x63\x18\xfb\x09\x85\x7f\x61\xc7\xd8\x04\xc3\x38\xc3\xdf\x3a\x7a\x86\x0e\x38\x36\x13\x0c\xe4\x0a\xe6\x09\x47\x1c\x45\x85\x49\x82\x2a\x3b\x7c\xdf\x88\xf2\xc5\x9a\xfb\x9c\x4a\x52\xd6\x3b\x92\x0d\x29\xf0\x1f\x4d\xcb\xc2\x8e\x1f\x3b\x36\x73\xac\x64\xe3\x0b\x62\xbe\x38\x77\x66\xee\xa5\x2b\x77\xcc\xbf\xf4\xcc\xdc\xe9\x13\x05\x64\x98\x61\xfd\x80\x14\x50\xa5\x3a\x44\x49\x72\x5e\xb4\x78\xaf\x8b\x39\x94\x18\x2a\x00\xf5\x55\xfb\x0d\x06\xc7\x2e\x25\xad\x43\x99\xb1\xab\xc9\x47\x32\xab\x78\x08\x9a\xb4\x03\x26\x2c\x4e\xaa\x43\xf2\x95\x43\x85\x6b\x06\x60\x87\xc7\x83\xb8\xd8\x93\x3b\x41\x77\xa2\xec\xc0\x41\xbc\x76\x5c\x04\xd4\x37\x88\xc7\xa7\x82\xc7\x17\x97\x6e\x3a\x93\xc4\x64\x8a\x2a\x9c\x73\x14\xd4\x02\x00\xda\x22\xb7\xe3\x28\xcc\x9a\x3c\xfc\x20\x6a\xaa\x44\x9d\x74\x90\x30\x6e\xab\x6b\x87\xaf\xc5\xc9\x13\xb7\xeb\x41\x9d\xae\xb1\x4f\x8e\xb0\x2b\x82\x6e\x77\x29\x4d\xf2\xa4\x99\x44\xee\x8f\x8f\x78\xef\x07\xd6\x2e\x38\x6a\x36\x35\x6e\x1d\xdc\x2d\xf8\xf1\x60\xde\x9a\x7d\x81\x27\x89\xac\xc8\x92\xbc\x7d\x80\xb6\xba\x1f\xe7\xc1\x45\x9f\x9f\x8b\xeb\x98\x66\x2c\x5a\x04\x5b\x87\x9b\x0c\x2c\x5d\xec\x49\x39\x49\x8a\x92\xd4\x19\x09\x1f\x1e\x0f\x30\x0a\x75\xf9\xe4\x7c\xfd\xf8\xcc\xcc\x31\xe8\x58\x02\x0a\xdb\xdc\xdc\xf4\xc3\x20\x0e\xfc\x24\x6d\x1f\x0e\xb2\x2c\x6c\xc7\x40\xbc\xa8\x7a\x00\xe3\x0f\x26\x7c\x7e\x26\x89\xeb\xba\x7c\xd5\xba\xcc\xe4\xe2\x2b\x55\x31\xeb\x35\x37\xe4\x14\xec\xf4\x81\
x26\x2c\xee\xfb\xcd\xa4\x73\xb8\xd3\xaf\x13\xa8\xa8\x2a\xc3\x20\x09\xe3\x0d\x91\x07\xd4\x3b\x98\x2f\x9d\xf1\x76\x0f\x71\x03\xd4\xa9\x00\xab\x35\x57\xf4\xbf\xcd\xeb\x85\xa8\x1a\x40\xec\x25\x5f\x51\x27\x2f\x53\x2a\xfd\x64\x8d\xf0\xd4\x7e\xac\xe6\xbd\xb7\x66\x23\x26\xd3\x38\x99\xfc\x83\x7a\xfd\x42\x35\x0b\x58\xd7\x85\x33\x2b\x2f\x3d\x35\x77\xcb\x89\x53\x3e\x9f\x83\xdd\x33\xcd\x33\xf5\x5e\xc0\x0d\xeb\xb6\xa1\x76\xd2\x01\x05\x3a\x88\xb4\x08\x85\x15\xaf\x2c\x96\x9a\x80\x98\x5c\x01\x54\x90\xa2\xe1\xb1\xcf\xf9\x87\x64\x49\x7a\x05\x62\x7d\x15\x65\xf1\x12\xcc\xb7\xb3\x8a\xc4\x0c\x9c\xec\x44\x86\x6b\x0a\xdc\xb0\x80\x3a\x50\x61\x3d\xaa\xc5\x66\xa7\xbd\x6a\x1f\x1b\x93\x4a\x1c\xc8\xd5\xbf\x1d\x55\x46\xa2\x2f\x8f\xae\x2a\x69\x98\xc4\xa8\x3b\x81\xf7\x39\x89\x35\x18\x84\x71\x64\x0f\x33\x62\xa5\x68\xa1\x1a\x08\x2e\xa7\x30\xe3\x67\xa8\x60\xb9\xb6\x4f\x25\x41\xeb\x96\x20\x0a\xe2\xa6\xd4\x94\xf8\xb9\xac\x07\x7e\x25\x9c\x9a\x83\xb9\x13\x8b\x52\xeb\xd7\x66\x1b\x43\x5a\x85\x71\x1d\x44\x2b\x8a\x11\x29\xca\x20\x94\x49\x98\xa2\x52\x0a\xac\x32\xb6\x3b\x82\x51\x63\xd8\xbb\x3c\x74\x06\x61\xe4\x73\x1d\x42\x6e\x60\x3b\xa8\x89\x82\x45\xc9\xb3\x50\x53\x1d\x7d\xd1\x72\xa1\xbd\x3c\x10\xce\x27\x37\x09\x81\x25\xd9\xac\xe3\xba\x4c\xec\x13\xd0\xf5\x10\xb3\x5a\x95\x80\x3d\xa9\x13\xf5\x81\xd8\x36\xcc\x89\x1e\xae\x54\xd3\x32\x97\x5d\x43\xaa\x04\x5d\x55\x38\x1c\x25\xed\xc2\xf3\x44\xea\x16\x51\x12\xb7\x09\xd0\x53\x76\x10\xe2\x07\x00\xa5\x06\xcc\x4b\x39\x4e\x70\x98\xd6\x23\x95\x27\x7c\x5e\x89\xeb\x89\xcb\x2b\x72\xe5\xd7\xea\x72\x16\xe1\x0e\x6f\x58\xe1\x6e\x67\xb0\xeb\xbb\xf3\x6a\xfa\xf9\x7a\xf6\xd9\xa7\x67\x9a\x64\x0a\x95\xb9\x98\xd0\x46\x61\x2f\x63\x63\x4a\x04\xb9\x2f\xf1\x5e\x20\x4b\x5a\x5c\x1a\x2a\xb7\x57\x30\xe4\x21\xe3\xde\xea\xfc\x92\x37\xc9\xbd\x73\x0b\xf2\x1f\x39\xb7\xbc\x95\xf9\xd5\x25\x4f\x7b\x28\xe5\x98\xae\xce\x2f\x59\xab\xe7\x4f\x1d\x0b\x89\xb5\x2a\xe5\xf9\x5c\x26\xd2\x95\x1e\xa8\x32\xee\xcf\x38\xde\x4d\xc6\x6f\x72\x1e\x23\xc7\x78\xc9\xd7\x24\xab\x09\xc8\x8e\x10\x8b\x97\xe1\x0b\x7e\x15\xc6\x65\x8b\x35\x48\xee\xdd\xe5\x9d\x5e\x93\x7f\xe8\x3c\x7d\x05\x0a\x6a\x45\xe6\xc2\x69\xcd\x7b\x96\x57\x9c\x32\xe4\x21\x41\x3d\x9a\xf9\x7c\x59\x81\xa4\x59\x5c\x5f\xff\xc6\x4a\x62\x80\x34\x52\xd5\xc4\x65\x79\xc2\xea\x50\xb4\xdd\xb7\x9e\xe4\xfd\xee\x88\x79\xc5\x88\x22\xd6\x81\x75\x85\x76\x92\xe2\x93\x46\xe8\x49\x58\xc2\x81\x93\x03\x68\x95\x67\x10\x21\x98\x48\x6d\x06\xbe\xb6\x51\xae\xf1\x30\xb0\x4a\xcb\x53\x97\x7a\x88\x0c\x10\x60\x5b\x30\xbf\x40\xfb\xf0\xb8\x36\x36\x4f\xf0\x4e\x10\xa3\xbc\x32\xe0\x63\x0a\x1e\x64\x45\x9c\x6c\xb8\xd7\x71\x0a\x19\xb8\x3a\xd4\xac\xa5\xa4\x25\xf7\x0e\x6c\x96\x02\xa1\xab\xb0\x9f\x20\xdb\xb7\x76\x77\x19\xb5\x3b\x83\xb8\x56\x0a\x43\x95\x4e\xb1\x8a\x60\x97\xd4\x55\x9f\x03\x75\x07\x64\xb8\x10\x87\xfd\x24\xcf\x84\xa8\xf2\x4e\x89\x78\x43\x4a\x65\x54\x0f\xce\x8b\x6e\x06\x2e\x29\x39\xc2\x87\xa9\x17\xeb\x18\x91\xd0\x69\x5d\x72\x0e\x20\x13\xa2\x1d\x5e\xf8\xbe\x27\xec\xf9\x41\x77\xeb\x07\xfd\xdc\x28\x53\x7d\xeb\x7e\x76\xd4\x7b\x70\xf4\x36\xfc\x51\x62\xcf\x05\xf8\xe9\x08\x68\x7b\xd4\x14\x47\xe7\xb9\x09\x10\xa2\xcf\xa8\xf3\xcb\x8b\x05\x84\xb3\xb1\xa7\x00\x47\x4c\xc1\x6a\x09\xc6\x1c\x1a\x04\x1c\x6a\x8e\x02\x98\x28\xc7\x35\x41\x43\xe1\xc5\x53\xbd\x0c\x5a\xc1\x77\xc3\x87\xac\x3a\xc8\x1d\x11\x33\x98\xb2\xac\x27\x4f\x2a\x18\x65\x0d\x60\xc6\xd0\x2e\x32\x35\x6a\x96\x32\xa9\x66\x80\x7d\x4f\x96\x88\xc3\xa4\x9f\x34\xfd\x69\x48\xeb\x91\x0a\x79\x50\x08\x2f\x04\x11\x40\xed\x98\xef\x22\xd5\xa1\x41\x6b\x4e\xc5\x00\xce\x9b\x68\x71\x2f\xed\xc5\x4d\x8f\xc8\xe4\x15\x93\x4a\x61\x46\xe8\xc5\x4d\x7e\x76\x7e\x51\xf7\xee\x38\x92\xe0\x93\x45\xe7\x54\x18
\xf7\x2e\x1a\x12\x64\x62\x80\x3b\x1c\xf9\xed\x45\xb5\x94\x91\xbd\xaa\xc6\x55\x69\x9c\x51\xb2\x29\xd2\x66\x90\x89\x49\xe8\xc1\x24\xed\xa8\xbe\x5d\x38\xb3\xc2\xc1\xb9\xc4\xc7\x97\x4f\xce\xf3\xe9\xe9\x23\x33\x13\x66\x16\x41\xa6\x3d\x21\x61\xa7\xd3\x03\x36\x7b\x7f\xcf\xb5\x7e\x79\x42\xd9\x3e\x61\x86\xb2\xfd\xe8\xae\x43\xd9\xda\x8f\x55\xb4\xda\xdf\xd4\xd8\x58\x72\x41\xa4\x1b\x22\x68\xb9\x7f\x5a\xdb\x22\x8b\xcb\xda\xc2\xcf\xd2\x2b\xde\x7f\xa9\xa9\x3f\xcb\xd9\x0a\x05\x13\x9c\x7a\x40\xc3\xe1\x91\x8d\x4d\xed\x41\x30\xd9\xe9\x98\x83\x7e\x75\x73\xff\xf2\x2f\xcb\xce\x74\x64\x6a\xfa\x86\xa9\x23\x47\x8e\xd7\xbb\x49\xab\xae\x6a\xe4\x77\x5a\x25\xa3\x04\xa0\x26\xd6\x23\xd9\x8f\x1c\x9d\xfd\xc6\x61\xfc\xc2\xb4\x3f\x7d\x4c\xaf\xa1\x9d\x06\xa0\x2e\x25\x2d\xdd\x45\x2a\xf6\x94\xbd\xb7\xc6\x58\xa6\x63\x52\xdd\x37\x6c\xe5\xa0\xb3\x3a\xbe\x08\x64\xf5\x7e\xc7\x31\xa2\x5a\x8b\xb8\xc2\xa2\x58\x2b\x57\x23\x4f\xb8\x88\xb3\x5e\x2a\x0c\xe7\x8d\xc9\xb2\x8f\xeb\xd1\x52\x9c\x40\x7e\x63\x69\x28\xab\xc0\x7a\x4f\xbe\x68\xc2\xc9\x0a\xd1\x2c\x69\x7c\x34\xcc\x78\x1c\x46\x93\x15\xe5\x99\xd2\x1d\xa0\x70\x0b\x25\x4e\xaa\x47\x51\x84\x1f\xf0\x1f\x72\x1a\xdb\xc7\xa9\xdc\xec\xde\xa4\x83\xf8\xa0\x87\x4a\x11\x7c\xe6\x97\x2b\xb3\xdc\x7e\x77\x8c\x3d\xc5\xe8\xe5\xb4\x11\x34\xe5\x22\x5c\x4e\x22\xe1\xfe\xdc\x98\xb7\x21\xff\x40\xf9\x57\xa4\x5e\x4c\xf2\x28\x69\x87\xcd\x20\xc2\x2f\x83\xa7\x61\x9d\xa3\xa7\x04\x29\xc0\x90\x8d\x02\x43\xab\x0d\x6d\x0e\x4c\x70\x72\xc5\xa2\x22\x28\xcb\xa6\xd0\x71\xdb\xb6\xfd\x87\x7b\x60\xe6\x7b\x11\x62\x8f\xe3\x36\xb6\x61\xec\x62\x2f\xde\xed\x26\xf6\xb4\x2d\xc2\xb1\x59\xce\xf6\xa5\xc0\xaa\x79\xde\xab\xe3\xd2\x41\x11\x16\x80\x67\x5f\x58\x8b\x4a\x9f\xc1\xe5\xba\x31\x1d\x14\x87\x59\x9d\x3d\x7b\xa8\xe0\x54\x4b\xba\x28\xea\x21\xa7\xb9\xbd\x64\x79\x9e\xfb\xdc\x7a\x75\xb8\x1b\x14\x68\x93\xf2\x96\xa5\x4e\x12\x09\xc6\x3e\x31\xca\xee\x79\xa4\x9c\x61\x94\x24\xbd\xc7\x49\xfb\xbf\x21\x27\xed\x67\xf6\xb1\x9b\xb7\xc4\xbf\x98\x1e\x86\x9e\x32\x9f\xc4\x48\x9b\xe3\xfe\xf3\xa8\x27\xb6\x7b\xa8\x14\xbd\x63\x20\xa6\x0d\x79\x93\x07\x40\x56\x26\x52\x80\xd5\x05\x09\xa7\x12\xb5\x2f\x39\x14\x69\x6d\xed\x63\xff\x30\xc2\x3e\xec\x30\x37\x0a\xb2\x7c\x35\x0d\xe2\x0c\x3e\xbb\x1a\x76\x84\xfb\x76\x67\x38\x0e\xcf\x16\x62\x45\xbe\xeb\xbd\x68\xb0\xbc\x82\x65\x3d\xcb\x81\x4b\x47\x1d\xa6\xa8\xa5\xb9\x7e\x5a\x21\xff\x26\xb1\x1e\x01\x18\x74\x30\xfb\xb2\x76\x01\x08\xf7\x62\xef\x2c\xfd\x89\xfb\xc6\x46\xaf\x13\xc4\xf5\x54\x04\x2d\xd0\xe9\xc4\xc5\x6e\x14\xc4\xb8\xb1\x1a\xf4\xf9\xa4\x9b\x1a\xd4\x48\xc5\xa7\x4d\x09\xbb\xa4\x81\xe2\x4e\x7a\x37\x10\x40\x5c\xa8\x34\x66\xf8\xa5\xa7\xbf\x6a\x04\x44\xee\xc9\xe6\xe9\x02\xad\x63\xdb\x8a\x8e\x75\x5f\xf4\x9e\x93\xe9\x25\x6b\xcc\xb4\x82\xec\x9f\x7a\x65\x7c\x35\x95\x3b\xf8\xc9\x20\x92\xc7\xc8\x73\xf1\xf9\x38\xd9\x8c\x27\xcc\x42\x6f\xa0\x74\xdb\x69\xef\x99\x39\xc2\x0d\x9b\x33\x46\x01\x1f\xe9\x12\xcd\x57\xbf\x72\x85\x15\xfc\x6b\xc4\x97\xcd\xc5\x79\x38\xb7\x0e\x23\xdf\x77\x7f\xf6\x0a\x6f\x71\x29\x69\xf1\x20\xce\x43\x1e\xd0\x55\xec\x71\x10\xb7\xb2\xd6\xa0\x72\xc0\xe9\xc0\x7e\xca\x50\x37\x61\x13\xb1\x95\xa8\x2f\x33\xf6\xb5\x51\xe6\x2b\x58\xd7\x05\x88\x51\x29\x74\xe5\x45\x74\x3c\xe2\xe5\x13\x17\x45\xb3\x07\x0b\xe7\xe3\xa3\xde\x37\x47\x88\x89\x52\x7e\xad\xae\xbf\x66\x1e\xa1\x0d\x33\xbd\xb2\x2a\xe3\xe1\x01\xf3\x74\x01\xf3\x44\xae\x16\xa3\x86\x72\x56\xea\xb8\x10\x3c\xff\xd3\xd6\x5f\x28\xd7\x49\x6c\xd8\x47\x34\x21\xe6\x23\xa8\x46\x13\xc0\x6a\x50\xbd\x56\x55\x49\x3a\x02\xd7\xac\x0a\xd7\x91\xf5\x10\xaa\xe1\x64\xe4\x6f\xf5\x04\x22\xfe\xc9\x9b\xe8\x76\x44\xcd\x64\x62\xd2\x70\xb4\x80\xe2\x03\xe1\x5a\xc0\x94\xc6\xf3\x14\x10\x77\x81\x2e\x1a\x0d\x9
e\xe2\x82\x4a\x3f\x84\x43\x1d\xe0\x5d\xe7\x19\xb5\x0b\xf6\x00\xcc\xf4\x03\xc8\xeb\x5e\x94\x87\x5d\xb9\xa6\x22\x65\xa0\xc8\x09\x6a\x04\x26\x2d\x9e\x37\x0a\x6e\x37\x95\xd3\x4c\xa1\x39\x6a\x2a\x41\x2a\x21\x86\x55\xe7\x22\xcd\x20\xa6\x70\x92\x03\xeb\x30\x68\x13\x22\xed\x14\x0e\xbb\x2c\xc8\xc3\x4c\x76\x9c\x15\xe9\x70\x94\x1d\xd9\x59\x88\xa4\xf9\x4d\xf6\xad\x7d\xec\x70\x57\x11\x4c\xed\x70\x9a\x7d\x7a\x9f\xf7\x2d\x70\x96\xa9\xc1\xa7\xd0\x23\x2c\x07\x61\x9a\xf0\x06\x85\x11\xd8\xc7\x2e\xa8\x7d\xbf\x62\x76\x88\x8b\x72\x87\xc6\x50\xe8\x21\x93\x83\x42\xca\x73\x18\xbd\xe6\x46\x92\x64\x82\x10\x1e\xb1\xf0\x0b\x61\x82\x21\x7f\x10\x8a\x42\x67\x6e\x92\x1d\x46\xf1\x68\x96\x2a\x5e\x0b\x33\xde\x49\xb2\x9c\xeb\x9e\xd0\x61\x21\xb1\x28\x82\x69\x90\xbb\x3a\x93\xea\x70\x47\x16\xba\x29\xc2\xf6\x86\x1c\x71\x18\x27\x29\xf3\x0a\xaf\x21\xba\x1f\x84\xc8\x51\x1d\xd4\x1c\xb6\xc5\xa2\x37\x97\xc2\x78\x19\xfa\x62\x92\x0f\x5b\xfa\xc3\xfb\x0c\x02\xc3\xfc\x89\x49\x60\xe4\xec\xe5\xc0\xd7\xd9\xeb\xc8\xfe\x0b\x73\x70\x04\xc2\x31\x21\x05\xda\x31\xe8\x0f\x9a\xb1\xfa\xc8\x45\x52\x20\x6e\x01\x83\x5d\xdc\xe6\x1e\xb6\xd0\xd3\xbb\x7e\xaf\xa3\x70\xbf\xa1\x91\x1b\x84\xbb\x45\xae\x88\x8e\x81\xb2\x6e\xcf\xf8\xd2\x4c\xbf\x51\x17\x31\x9e\x4d\x14\xfd\xab\xa0\xb8\xe4\x67\x02\x0a\x82\xb6\x87\xc5\x9a\xf0\xcf\x61\xb3\xec\xfa\x6d\x27\xfc\x9d\xd0\x04\xd1\x2a\x4f\xfc\x9f\xbb\xba\x12\xb7\x95\xf0\xc4\x64\x71\xf3\x3a\xb2\xce\x7d\xf3\xd5\xde\xd9\xca\x3b\x03\x70\x7a\x36\x6a\xf5\x3a\x44\xb3\x16\xa1\x14\xe5\x48\x67\x25\xee\x7f\xf3\xca\xbd\x33\xf3\xde\x99\xf9\xf1\x3b\x33\x7f\xb9\x66\x1c\x9a\x3f\x5f\xdb\xed\xa9\xf9\x2d\x35\xda\xfd\x4f\x21\x81\x01\xac\x83\xea\xf5\x03\x6e\x8d\x4e\x37\xef\x4f\x16\xa9\x45\x14\xe9\xa2\x6d\x6c\xda\x11\x12\x64\xca\x1c\x29\xe5\x86\x11\x07\x59\xb5\xc2\x28\xe3\x00\xcf\x80\x8f\x7b\x4a\xf5\x1b\x14\xa6\xe9\xab\x6b\x5b\xc4\xe4\x6f\x29\x85\x20\xa5\xe1\xb3\x36\xb0\x69\x65\x96\x99\x19\x42\x5b\x86\x93\x1c\x22\x8d\x1e\xed\x7e\x90\x75\xac\x07\x71\xab\x8e\xea\x3c\xfb\x52\x81\x6a\xfa\x85\xad\xf0\x13\xb7\xee\x10\x4c\x64\xfd\x81\xda\x8a\x75\x62\x80\x2d\x23\x15\x4d\x4c\x9a\x4a\x1a\x14\x5a\x66\x1f\x24\x86\xf5\x03\xd8\xd2\xe5\x88\xc1\x5a\x6f\x08\x48\xe3\x48\xd6\x39\xe4\x2d\x34\xfa\xa8\x89\x12\x8a\xa4\x2c\x2a\xdc\x05\x1c\xea\xa3\xd8\xc1\x0f\x9b\x5d\xc8\x00\x48\xad\xec\xeb\x72\x06\xf9\x6b\x6a\x16\xc0\x1a\x86\x7d\x17\x04\x3c\x0b\x80\xa8\xe6\x7e\xd9\xf1\x8e\xd3\xdf\x2a\xca\xd7\x84\x56\xc3\x0d\x4a\xe5\x5e\x60\xf8\x68\xb2\xce\x17\x17\x32\xff\x92\x33\xd2\x09\x63\xf9\xff\xc1\x45\x6b\xab\x7c\x83\xc3\x4e\x20\xa8\xd3\x73\x21\x74\xe6\xf8\x51\x6f\xa2\x13\xc6\xc6\x71\x31\xd5\x51\xd3\x14\x35\x15\xc6\xcd\xa8\x97\x85\x17\x4a\x51\x33\xf3\x88\x01\xf6\x1c\x55\xcc\x21\x59\x25\x2a\x46\x18\x9c\xa9\x5b\x16\xf2\xae\xab\xd9\x0b\x76\x67\x27\xbb\x53\x34\x36\x92\xe4\xfc\x3c\xec\x1a\x88\x39\xef\x7e\xea\x2a\xef\xce\x8a\xeb\x45\x48\x71\x5e\x62\x33\x86\xc0\x16\x30\x18\xad\x9e\x5a\x91\x8f\xc5\x04\xa0\xa9\x95\xab\x4d\x2c\xce\xd6\x3b\x7e\xef\x4a\xf6\x19\x87\x8d\x35\x83\x5b\x7a\x71\x2b\x12\xee\x83\x8e\x3b\xda\xe8\xe7\xc2\xfb\xa0\xa3\xae\xe1\x26\xba\x74\xe2\x34\x17\x71\x33\x69\x89\x16\x9f\x9f\xe3\x0d\xbc\x65\xc4\xf5\x1b\xee\x60\x85\x98\x63\x7e\xf6\x50\xa6\x82\xd4\x9b\x22\xcd\x51\x5a\x89\xc1\x34\x4f\x3a\xa1\xe5\xa9\x3c\xe9\xa4\x49\x92\xeb\x78\x64\xdd\x9d\xb0\x43\x00\x3e\x8d\xb9\x81\xbd\xb7\xc6\x0e\x50\x68\x94\xfb\xe6\x1a\x7b\xf1\x30\x81\xf2\x48\x06\x88\xc2\xcb\x34\x87\xaf\xf7\x51\xc7\x88\x02\x0c\x8c\xc0\x14\xa5\x26\xd3\x5d\x6d\xf3\x55\x7d\xcf\x4f\x84\x00\x2d\xa5\x1e\x48\x52\xde\x4b\xa3\xe2\x5c\xa7\x3a\xc2\x67\x8c\x36\x50\x7a\x13\xec\x97\x86\xa7\xcb\x0e\x18\xa2\xec\x90\x7e\xd2\x33\x63\x58\xd7\x
e8\x2b\x6b\x3e\xfb\xab\x31\x36\xd2\x4b\x23\xf7\x1b\x63\xde\x1f\x8c\xc9\x2f\xb6\xc3\x0b\xb4\x79\x20\x1e\x41\xb1\x6f\xd0\x07\x27\x91\x92\x9f\xf6\xcd\x73\xcb\xa7\x80\x98\x9f\x8f\xaf\x81\x8e\x28\x66\x0f\x43\x2e\xe1\x6c\x37\x49\xf3\xc3\xdd\x20\xdf\x58\x9b\xf0\x91\xa9\x86\xc2\x32\x93\x75\xbe\xd6\x4b\xa3\x35\xd9\x44\x5d\x91\xca\x86\xca\x83\xd8\x9a\x2c\x6c\x4d\x55\x1e\xb3\x60\xe9\x24\x19\xe8\xce\x52\xed\xb7\xdb\x7e\x23\x06\x76\x6c\x18\xed\xd5\x31\xa4\x59\x2e\x02\x8a\x40\x90\x1f\xa0\xf8\x87\x86\xa0\x48\x23\xa9\xaf\x86\x01\x97\x23\x0e\xfa\xed\xc2\x99\x15\x68\xb5\x94\xf0\x7a\x72\x64\x68\x5b\x98\xe4\x6b\x08\xb6\xa0\xae\xaf\xf1\x26\xaa\x72\x54\x16\x0f\xe3\xba\x8a\x89\x92\x05\x05\x2a\x77\x58\xc5\x7c\x07\x3c\x0a\xfa\x18\x1b\x8b\x87\xd4\x30\x89\x27\x7c\xd5\x74\xb9\xd9\x40\x52\x34\xc5\xc9\x2f\xc9\x23\x98\x3c\xd8\xf9\x8c\x2d\x45\x84\xde\xab\xa8\x97\x30\x3c\x63\x0d\xd2\x2e\xf1\x75\xd9\xcb\xd3\x47\xae\xf3\xa7\xfc\x29\x7f\x7a\x0d\x5d\x75\x54\xb2\x9c\x39\x61\x76\xbe\xcf\x7b\x31\x80\xf7\xc9\x59\x02\xb6\x65\x38\xcd\xf2\x26\x66\x80\x50\x0c\x47\x31\x57\x81\xc6\x2a\x8a\xa0\xe3\xd4\xf9\x0e\xa2\xc9\x62\x63\x29\xd2\xb1\x0f\xba\x15\xe2\x2d\x95\x18\x6a\x06\x51\x44\xe7\x08\x73\xfa\xaf\xf4\x9a\x1b\x98\x38\x2e\x6f\x07\x80\x35\x78\x5e\x20\xe3\x51\x03\x31\xe2\xe4\x8c\x0a\x1a\x91\xc0\x43\xf5\x24\x4c\x06\x11\x64\xf0\x08\x10\xf5\xf7\xba\x18\x5c\x12\x8b\x4d\x1d\x30\x87\xb3\x08\xa7\x66\x01\x29\x02\xdb\xac\x87\x67\x4d\x39\x83\xcb\x5c\x67\x9e\xda\x87\x3d\x9f\xb1\x39\xcd\x5a\x95\x50\x98\x31\xd9\xd8\xd7\x55\x5a\x8d\x52\x08\x82\xb8\xaf\xce\x14\x5d\x91\x76\x42\x0a\x18\x0a\x89\x2c\xcc\xe7\x2f\x4a\x7a\xf0\xac\x9a\x98\xc8\xc1\x94\x00\x6b\x2a\xf4\x5f\xda\x08\xf3\x34\x48\x75\x31\x24\x36\xf4\xd2\x5b\xb7\x83\x79\xd4\xb4\x2a\x72\x80\x64\x7d\xf3\x5c\xaa\xcd\xf4\x7a\x0f\xcc\x20\x2a\x36\xaf\x11\x64\x61\x13\x12\x7a\x10\xd6\xcc\x93\x37\x66\xe5\xe7\x37\x93\xb4\xf5\x3c\x4f\x53\x6c\xe1\xc6\xeb\xf3\x93\x69\xd0\x26\x43\x84\x77\xad\xef\xfb\x1e\xa6\x97\x61\x06\xb7\x01\xf2\x33\xee\xdd\x4c\x77\xc9\x52\x48\x25\x4c\x72\x01\x82\xcd\x12\xc9\x7f\xf9\x04\x76\xd4\xd4\xe4\x0a\xb9\x0f\x69\xa7\xf3\xc5\xef\x95\xb0\x2d\x57\xb6\xc2\x17\xfd\xe9\x27\x78\x1f\xd9\x37\xf4\xb6\x66\x98\x25\x7d\x42\xaa\x13\xa2\xb9\x11\xc4\x61\x06\x51\x44\x49\x03\x0c\xfc\x17\x8f\x4d\xdd\x60\xee\x35\x19\x28\x6f\x70\x24\xa3\xa8\x61\xe3\x26\xcf\xf0\x13\x85\xf5\x05\x63\xb1\x20\x18\x31\xcc\x79\x90\xf5\xe3\xe6\x46\x9a\xc4\x49\x2f\xc3\x3c\xfc\x14\x72\xe2\x30\x8c\x22\xeb\x81\x20\xa3\xc8\xc6\x8c\xc6\x3d\xcc\x00\x33\x45\xd7\x68\x96\xf1\x69\x5f\x9d\x0d\xad\x8a\x49\x29\x57\x24\x5f\xc1\x6a\xb4\x65\x0d\x1f\xd7\xbb\xb8\x67\x87\x22\xdb\xcf\xd5\xb1\x74\xd0\xfa\x22\x91\x7b\xd0\x2c\x91\x9e\x09\x3a\x62\xc2\x67\xfc\x88\x8f\xb2\x34\x6e\xdb\xdf\x97\xd3\x4d\x6a\x0d\x45\xb8\x7e\xe9\xf3\xc4\x47\x18\x13\x57\x70\x26\x9a\xbd\x54\xae\xda\x2d\xab\x15\x89\xbc\x4e\x9f\x2b\x55\x84\xad\xaa\xce\xa1\x20\x03\x1d\x47\x46\x23\xfc\x58\x74\x92\x55\x27\x88\x0f\x2e\xe6\x8e\xdd\x3b\xf2\x8c\x4f\x4e\x31\x29\xa0\x8c\x60\x1a\x2c\x21\xab\x24\x36\xfe\xf6\xc1\xcb\x8b\x59\xf4\x61\x75\x7e\xfc\x60\x8d\x9d\x1c\x7e\x5c\xda\xe9\x22\x83\xb3\xe4\xef\x38\x99\x85\x55\x04\xdb\xaa\xb1\x2a\xac\xd5\x60\x86\xdc\xf1\x60\x5d\x0a\x24\x15\xb4\xef\xf3\xb3\x71\xd4\xa7\x63\x15\xbd\x52\xf4\x2e\xbe\x0e\xc8\xab\x99\x22\x24\xa0\x81\xcf\x04\x04\x32\x1a\xe5\x80\x7a\x44\x0f\xa1\x29\x00\x31\x89\x1a\x7d\x2b\xd5\x2f\x6e\xa9\xed\xb7\x21\xcf\x7b\x2d\x6d\x29\x96\x92\x2e\xf3\xd9\xab\x8a\xe3\xe5\xdf\x39\xec\xb6\xcb\xd0\x5f\x78\xd4\x7c\xc0\xc9\x4a\xa0\x49\x83\x60\x49\x26\x7e\xa8\xc9\xc7\xa8\x84\x46\x92\xf2\x96\x88\x31\xf1\x42\xe5\xf0\x1a\x9d\x8e\xf2\x44\x1f\x24\xa1\x1b\x71\x7e\x6e\x08\
x48\x8c\xe8\xa5\xa6\x1b\x8c\x52\xf2\x81\x35\x0a\x9e\x55\xcf\xf8\x7b\x16\xc5\x3d\x8b\xe2\xe3\x88\xd3\x94\x6e\x6f\x7f\x38\xeb\x9e\xd6\xf6\x07\x6b\x25\x96\x29\x8e\x87\xad\xca\xb2\x51\xe2\xa7\x47\x87\xb9\x6d\x4d\x58\x10\xf7\x2d\xa3\xde\x0b\x4a\xd7\x4a\xce\x61\x30\x00\xa1\x1f\x91\x26\x37\xe4\xbc\x15\x59\x4d\xca\x13\x1f\x68\xc8\x03\x4b\xfa\x7f\x74\x84\x7d\x51\x81\xcc\xfd\x77\x87\x8d\xef\xc4\x2d\x07\x22\xf9\x87\x9c\x4a\xde\xe8\x61\xd6\x3c\x88\xb0\x7e\x8c\x4d\x77\xff\x2a\x11\x2f\xbf\xf9\x94\x4a\xca\xba\x33\x49\x4b\xa0\xe9\x85\x82\x96\x3e\xfb\x14\xef\xce\xf2\xc5\x8a\x50\x14\x2b\x60\x61\x3d\x6c\x5b\x39\x7f\xf2\x7d\x1f\x32\x62\xa8\x14\x8c\x2e\xb2\x26\xd0\x1f\x3c\x99\x7d\xaa\xc6\xf6\x07\x40\xda\xe3\x3e\x58\xdb\x82\x82\xbc\xa2\xae\x50\xa0\xf7\x3d\xb5\x39\x78\x5d\xca\x1d\xc8\xf7\x82\xfa\x6c\x88\xe6\x79\x90\x2c\x42\xa5\x50\x14\xfe\x3c\x29\xdd\x88\x26\x08\x0f\x99\x3e\xa7\x22\xc0\xc6\xa3\xe5\x17\xa9\xfb\x56\x1c\x05\x2d\x44\xd5\xee\x39\xd5\x60\xfc\x86\xde\xc1\xd4\xe3\xa7\x82\x2c\xbf\x3d\x4e\x36\xe3\x5b\x93\xa4\x78\xa8\x25\xba\xc4\x23\x0f\x59\x97\xb8\x91\x06\x03\x47\x9d\x8a\x0f\x48\x89\x0b\x00\x61\x21\x9c\x51\x45\x9a\x26\xa9\xcf\xfe\x61\x94\x8d\xa9\xae\x77\xbf\x39\xfa\x48\x7a\xf1\xc1\x51\xfd\xa5\x1d\xf7\x23\xf4\x16\xc5\x15\xf4\x32\x15\x34\x50\x3d\xee\x48\x2d\x0c\x92\x7b\xb2\x28\xa1\x28\x1e\x3f\x67\xc4\x9d\xd3\xc7\xba\x41\x3f\x4a\x02\xd0\x9b\x11\x7f\xa9\x15\x66\xe7\x27\x79\x10\x25\x05\x94\xa4\xdc\x47\xd3\x96\xa9\x23\x68\xdc\x06\x2c\xc5\xf0\x83\x13\x07\xb5\x3a\xb3\xd3\xab\x79\xa2\xbc\xec\x72\xe3\xa1\x4f\x17\x95\x53\x29\x5f\xaa\x5f\x8a\x17\xc3\x58\x8f\x90\x5f\x8c\x15\x04\x9c\x63\x6b\x0b\x70\x39\x5c\x2f\xa8\x52\xa2\xd2\x04\x25\x6c\x04\x19\x6f\x08\x29\x70\xcd\xae\xce\x13\x68\x68\x11\x87\x51\xca\x28\xcb\x83\x14\x43\x27\x72\x9e\xa7\x04\xbd\x03\x26\x89\xaa\x39\x03\xd7\x70\x82\xd3\x95\x46\x9f\xcb\x5e\x55\x00\x9d\x64\xca\x54\x5b\xbb\x55\x13\x35\x00\xfa\x58\x0e\x8b\x5b\x37\x9a\xfd\xd0\x18\xdb\x07\xd3\xd0\x7d\xe7\x98\xf7\xba\xb1\x13\xf2\x4f\x43\x54\x04\x31\x20\xcc\x36\x22\xd1\xc1\x4e\x8b\x9b\x61\xa4\x3e\x34\x38\x4b\x48\x83\xb2\x6a\xeb\x73\x28\x14\x95\x88\xa4\xd9\xec\xa5\x65\xdb\x81\xb5\x72\x8a\xda\x0f\x2f\x1f\xa7\x92\x31\xe3\xe5\x50\x6c\x51\x10\xda\x37\xb0\x23\xca\xb9\x11\x03\x1f\x29\x97\x03\x6f\x25\xa9\x6d\x30\x1e\x10\x1c\x22\x6f\x0e\x36\x94\x07\x39\x6f\x85\xeb\x60\x78\xcd\x39\x2d\x94\xcd\x8d\x30\x12\x5c\x9e\xd5\x29\x73\x00\xfb\x28\x48\xa3\x50\xa4\x28\x12\x32\x15\x2a\x94\x6c\xc6\xea\xf3\x45\x73\x20\x72\x12\x1e\x9b\x28\x82\x9c\x50\xb0\xa0\x7d\x27\x4d\xa2\xa8\x11\x34\xcf\xcb\xda\x5b\x12\xcc\x54\x62\xd1\x1e\x18\x34\xd3\x24\xcb\xf4\xec\x4c\x05\x4c\x47\x9f\x9f\x0a\xf2\x72\x65\xd4\x8c\x2b\xba\x82\x2c\x14\x83\x32\x66\x42\x49\xe2\x1d\xd4\x09\x14\x41\x52\xbb\xe1\xb8\x06\x09\x57\x88\x7b\xd8\xa3\x5c\x74\x9d\xef\x06\xc7\x6f\xac\x39\xa8\x9f\x30\x5b\x1b\x7d\xbe\x1e\x5e\xd4\xb3\xbf\xb4\x95\x85\xf1\xe0\x10\xa3\xf5\x4b\x1e\xef\xd6\x43\x8a\x23\x51\x49\xfb\xa5\x4c\xdf\x96\x68\xf4\xda\x90\x92\x0d\x89\x27\xb0\x53\x2b\xdc\x00\xf8\xb8\x0e\x60\xb4\x97\x79\x94\xa8\x79\x5f\x19\xda\x58\x01\xcb\x83\xc5\x41\x84\xe8\x8d\x9c\x94\x12\x3c\x82\x42\x07\xeb\x7d\x26\x49\x61\xbc\x75\xd9\xe4\x96\x6e\xf4\x72\xdb\x32\x0d\x26\x6e\x25\xba\x1a\x61\x64\x40\xee\xe1\xbb\xb9\xb8\x98\x97\xc7\x9f\xb6\xc7\xcc\x32\x91\x7d\x6b\x3f\xbb\x2a\x32\x87\xcc\xfd\xe3\xfd\x8f\x64\x8f\xfa\xe8\x7e\x7b\x3b\x7d\x78\x1b\xd5\x7a\x10\x45\x5c\xcd\x20\x50\x71\xc3\x1c\x9c\x3d\x3d\x88\x45\xd3\xdb\xe9\xce\x36\x62\xdc\x52\xca\x92\xb6\x21\x9a\x49\x87\x14\xa4\xaa\xad\x9f\x40\x14\x54\xbd\x74\x92\xb3\x81\x76\x54\x2e\x12\xb0\xb7\xd1\x1a\x01\xd9\xa0\x69\x0a\x87\x52\x85\xc0\x51\xc0\x66\xea\x6c\x50\x95
\xc4\x32\x3d\x55\xef\x84\x71\x2f\x17\x3c\x4b\x82\xf3\xbc\x2b\xd2\x10\x71\xfb\x52\xc4\x61\x56\x55\x41\x61\x48\xdb\x51\xb2\x5e\x55\x83\xe2\xcc\x55\xa9\xf6\xc8\x47\x48\x64\x53\x2b\xb4\x63\x31\xcc\xe8\xc3\xb0\x24\x87\xf6\x0f\x90\x50\xa8\x28\xb5\xf5\x0a\xc1\x8d\xdb\x9e\x00\xcb\x5c\x1c\x46\x7c\x5c\x8e\x0b\x69\x04\x04\x9a\x4a\xe1\x8f\x76\xbf\x87\x84\xc2\x1a\x2a\xde\x87\xa8\x6f\x15\x34\x29\xeb\x14\xa8\x41\xb6\x0a\x34\x1a\x17\x44\x9b\x41\xbf\xc8\x89\x6a\x43\x8d\x5f\x54\x38\xa2\x20\x9c\x14\x1c\x93\xf2\x09\x24\xdb\x30\xe2\x8b\xe5\x60\xa3\xbe\xbf\x91\x40\xbf\xa8\x81\x2f\x04\xb8\xb1\xc8\x8c\x71\x8e\x45\x96\x4d\xa2\x93\x25\xcc\x28\x18\x10\xdd\xc7\x29\xf5\x65\x11\x8a\x45\xd6\x79\xb0\x9f\xf4\x30\x77\xed\x1d\x57\xb3\xa9\x1d\x1d\x54\x0a\x7b\x49\xe6\x7e\xe5\x2a\xef\x8d\x8e\x71\x81\x82\x3c\x0d\xfb\x49\x56\x10\x2b\xe9\x2c\xdf\xc9\x82\xbf\x12\x8f\xf1\x19\xe9\x30\x4d\x00\xd1\x83\x49\xb3\xb4\x28\xa7\x87\x3c\xa0\x4c\x92\x4f\x45\x85\x53\x27\x49\x8e\xfe\x03\x12\x30\x91\x68\x07\xcd\x3e\xbf\x30\x2d\x5f\xf2\x2f\x39\x63\xea\xcb\x97\x9c\xa7\xa2\x7d\x73\x0e\xfd\x46\xb7\xf4\xc9\x71\xbc\xb8\xb0\x6c\xc7\xb5\x7f\xe6\x0a\xf6\x22\xa6\xdf\x73\x4f\x7b\xd3\xba\xf6\x2a\xe8\x2e\xe8\x86\x5b\x35\x69\x2b\x80\xad\x3d\xa3\xd3\x9e\xd1\xe9\x71\x0b\x63\xfb\xb3\x7d\x6c\x8b\x55\xe0\xfe\xb7\x7d\xde\xff\x18\x0d\x78\x27\xe8\x22\x0e\x28\xb4\x4f\xde\xd2\xa9\x45\xa9\x72\xbb\xea\xb0\x60\xe5\x31\x81\x8e\x02\x5b\x55\xb1\xc5\xe4\x09\xdf\x10\x51\x57\xaf\xeb\x14\xc2\x7f\x95\xdf\x98\x44\x0e\x44\x23\x11\x36\x4c\x5d\x68\x14\xd9\xcd\xa0\x50\xbe\x7c\x3e\x4f\x25\x48\xa5\x44\x89\x5c\x30\x18\x77\x53\xa0\xe4\x29\x55\x8e\x40\xb0\xe4\x53\x58\x7d\x8d\xcc\x0d\x21\xb8\x30\x18\x52\xdb\x93\xcd\xd4\x01\xea\x1a\x0b\x45\x55\xd7\x88\x15\x40\x11\x1f\xb7\x85\x82\x64\x92\x4d\x96\x45\xe3\x6e\x4e\x9f\x4f\x45\xde\x4b\x63\x4a\x1b\xce\x37\xe4\xa9\x10\x3a\x96\x3a\x0b\xf8\xcb\xe2\xf3\xc6\x96\xad\x1c\x39\x41\xac\x2a\x66\xe0\x14\xcc\x62\xf7\x04\xe0\xdb\x24\xad\x16\x3c\xbc\x41\x5c\x2c\xd6\xc5\x25\x6c\xa1\xfc\xe4\xa4\x8a\x3d\xa6\x62\xa1\xbb\x95\xd1\x05\x6b\x88\x9e\x71\xbb\x00\x35\xed\x01\x4d\x0e\x50\x21\x5f\x58\x3f\x99\xa4\x9b\x80\xc5\x25\xff\xe2\x1b\x22\x68\xa1\xfe\xf7\xc2\xfa\xb2\x08\xa2\xfa\x62\xd7\xb8\xa6\x00\x1c\x97\x45\x27\xc9\x85\x9c\x58\x7c\x3c\x24\xc0\x09\x00\xc1\x9c\x90\x13\xa1\x2d\xac\x26\x2f\x2e\x59\x42\x72\x89\x9d\x61\xa7\x1e\x81\x9d\x6c\x65\xd8\x64\x7e\xc8\x59\xd9\xde\xbe\x3a\xe5\xfa\x55\xf1\x5d\xc6\x16\x56\x36\xa0\xbe\x63\x84\x7d\x77\x25\xbd\x1c\x46\xc2\x10\x61\x1a\x1c\x6a\xbf\x51\xf3\x16\x8c\xdf\xbc\x19\x74\x73\x20\xb3\xd6\xea\xa2\x56\x21\x0b\x90\xd5\x40\x33\xad\x2b\x68\x2a\xdb\xec\xf5\x8e\x1a\xfb\xa8\x53\xa4\x31\x7d\xd0\xf1\xde\xee\x20\x7c\x21\x25\x27\xd9\x47\x84\x8a\x2f\xcc\xe5\xb9\x5c\x84\x49\xca\x17\x04\xfe\xa5\xbf\x44\x98\xdb\x58\x1c\x39\xf3\xa3\xa4\xdd\x86\xc0\xa3\x44\xce\x5f\x43\x69\x21\x07\x0f\x40\xdf\x87\xa0\xc0\x19\x07\x18\x4b\x89\x6f\xb0\xd1\x3c\xec\x08\xf7\xae\x47\x9e\x14\xf6\x8c\x55\x95\xf5\x85\xed\xda\x0c\x32\xb3\x6d\x3e\xfb\x15\x87\xad\xec\x2e\x88\xe9\xf9\x2b\x67\xcf\xac\xc0\xc6\xb8\x94\x26\xdd\xec\x6c\x7a\x4b\x92\x44\xee\xcb\xbd\xa4\xf2\x86\x19\xd6\x5e\x7a\x80\x03\xc6\x01\x51\xde\xe1\x76\x64\x33\xda\xe4\x69\xaf\xc8\x7f\x54\xcf\x49\x51\x26\xd2\xbc\xef\xb3\x37\x8e\x59\x81\x83\x0a\x85\x7a\x41\x74\xa3\xa4\x2f\xb5\x74\xe0\x85\xfe\xd3\x03\xde\xb8\x7d\xc9\xc6\x44\x2d\xee\x0d\xa1\x85\xfe\xb5\xbd\x2c\xf4\xcb\x49\xa4\xfc\x5d\x8b\x55\x44\xca\xe6\x28\xec\x3c\xa7\x79\x70\xc8\xf7\x94\x9e\x5d\x28\x3d\x0d\xc3\x1d\x73\xc7\xee\xb8\xcf\x9f\x32\x84\xfb\xfc\x21\xe7\x85\xdb\xef\x36\xc7\xdc\x99\x7a\x41\xaa\xd2\xcd\xf4\x7e\x63\xaf\xe3\xf2\x96\xf3\xc5\x27\x33\xbf\x2a\x90\xd8\x5f\x4
a\x5a\x0b\x61\x96\xf6\xe0\xd0\x78\x4b\xaf\xd5\x16\xe4\x85\x77\xff\xf3\x93\xbd\xd7\x3b\x43\x6f\x9b\xb2\x6b\xd0\x3b\x6f\x7b\x68\x02\x5e\x51\x8c\x4e\x1a\x97\x33\x26\x4f\x83\x10\xb3\xf9\x83\x66\xde\x0b\x22\x33\xd3\x98\xa2\xbc\x2f\x39\x6e\x4b\x17\x91\xcd\x61\x48\xd4\x25\xe7\x6a\xb2\x40\xdc\x26\x82\x28\xdf\xe8\x5f\x72\xae\x26\x2f\x9e\xbe\x70\xa5\xb8\xd8\x85\x4c\xbf\xa5\xa4\x65\x4b\xaf\x4f\x3d\x89\x7d\xd5\x61\xae\x8a\x5d\xbf\x55\xc4\xb4\x89\xb9\xbf\xe6\xa8\x70\xe5\x4f\x38\xa7\x8b\x40\x77\xde\xd6\x8f\x14\x11\xef\x36\x4e\x22\xcc\xfa\xa5\x85\x5b\xa8\xf9\x3e\x5f\x18\xa8\x34\x22\x6b\x63\x88\x2a\x85\xd4\x1b\x1d\x18\x66\x68\x9e\x44\xfd\x2f\x5c\xe7\x83\xd5\x43\x28\x5b\x90\x77\x4b\x0b\xb7\x1c\xd2\xeb\xb1\xa8\x9d\x1d\x3d\xfd\x37\x63\x8c\xe9\x58\x86\xcc\xfd\xfa\x98\xf7\x37\xfb\x75\x8a\xb7\x8e\xb2\x28\xc2\x1d\x30\x58\x69\x69\xe1\x16\x42\x5a\xd7\x2d\x30\xf3\x3e\x32\x41\xa7\xf2\xa2\x81\xaa\x7d\xba\x20\x7c\x1f\x61\x6b\x41\x31\x49\x05\x87\x0c\x62\x05\xfc\xa5\x36\x30\x95\xd0\x0c\x21\xa5\xe3\x86\x3d\x13\x6f\xc8\x3a\xaa\x00\xcf\x56\xab\xc0\xbc\x43\x43\xc3\xc4\x2c\xaf\xf3\x95\x7e\xdc\x3c\x09\x68\x87\xb3\xf0\x51\xa3\xa6\xa6\xfe\x52\x58\xd9\xe2\x96\x54\x00\xe2\x43\x39\x0f\xc8\x32\x4b\x09\x7d\x8c\x5b\xff\x81\x01\x45\x73\xd6\xa8\x28\x7a\x63\x2a\x42\x23\x53\x04\xcb\x8b\x13\xf3\x8e\x6c\x6f\xa9\xb4\xc0\x98\x02\x95\x6e\x4c\x0a\x38\x51\xd1\xde\x90\x78\xed\xb3\x3a\x5f\x34\xc8\x31\xe4\x44\xc6\x56\x16\x15\xd3\x48\xf2\xe4\x34\x04\xb5\x99\x37\x84\xa2\x1e\xc1\x27\x4b\xb5\xc1\xff\x54\x0e\xa4\x8a\x86\xa9\x5c\xac\x67\xb6\x6b\xd9\x2e\xdb\xb7\x32\xd8\x3a\x95\xfb\x9b\x28\x14\x7f\x60\xc4\xde\x49\x6d\x2b\xaa\xb6\x5a\xf9\x79\xcc\x6e\x57\x95\x1d\x18\xe6\x8a\x72\x4a\x9d\xa0\x51\xd5\x55\x75\x06\x65\x54\xa1\x95\x99\x9b\xf7\xf3\xd8\x73\x87\x27\xe4\x6c\xb1\x8b\xe8\x75\xfb\x90\x73\x88\x3d\xd5\xda\x2d\xe4\x6e\x52\xef\x04\xdd\xfa\x79\xd1\xcf\xdc\x83\xee\x81\x3a\xe0\xb0\xb2\x87\x1c\xce\x9e\x3c\xf8\x24\x64\xed\xef\x77\x47\x3b\x41\x97\xc9\xc2\xbe\xd3\x7a\xa4\x2b\x4f\x92\xf5\x8e\x48\xdb\x42\x96\xe7\x1e\x70\xf7\x51\x61\x87\xd8\xd3\x2a\x9e\xcc\x14\x1b\xca\x98\xbb\x1f\xde\x62\xec\x04\x2b\x09\x67\x77\x46\xc1\xb0\x3e\x5d\x39\xa8\x8b\x2e\xdf\xc0\x67\x60\xa4\x2d\xe9\x75\x9a\x95\x44\xba\x7b\xa3\x2a\xe6\xbb\x3b\x61\x0c\xa4\x52\x83\x34\x5c\x43\x8b\x7b\xe3\x01\x76\x15\x0d\x12\x6e\x09\xee\x3f\xee\xf7\x3e\xb6\x7f\xc1\xbc\xb4\x55\xe0\x19\x65\xe2\xca\x63\x39\x64\xb0\xc3\x74\x0a\x20\x50\xb6\x29\xb2\xac\x98\x09\x05\x97\x5f\xf1\xa0\x09\x3d\xa2\xd0\x03\x91\x8e\x06\x63\x85\xfb\x22\x47\x6f\xac\xde\x5d\x86\xcf\x72\x2b\xa1\x6a\xae\x80\x0c\x50\x48\xf1\x60\xb1\xed\x16\x1a\x18\x60\x5e\x68\xab\xbb\x51\xbb\xa2\xe6\xa0\xa8\xa9\xba\xaa\xa8\x3a\xd2\xaa\xed\xd7\x89\xef\x22\x93\x75\x6d\xf4\x61\xc3\x33\x24\x6e\x90\xa9\xb0\x5e\x68\x4c\x27\x48\xcf\x13\xea\x38\xb0\x47\x00\x96\x80\x94\xc1\xe0\x8c\x0e\xa0\xe8\xa4\x97\x4f\xe0\x76\x71\x5e\xf4\xb5\x55\x27\xe8\x56\x91\x3f\x21\xcc\x03\x56\x57\x23\x39\xef\xa2\x89\xda\xaf\xa0\x6b\xd7\x0a\x5b\x72\x67\x20\x27\x68\xdc\xa2\x28\x25\x38\xd8\x22\xa3\x10\x70\x10\x17\x00\xc2\x8a\xbf\x43\xf7\x36\xa8\x76\x41\x2f\x4f\xe4\xe4\x41\xa6\x53\xd9\x4f\x5b\x0e\x23\x75\x08\xa4\x46\x60\xca\xdb\xe2\x3a\xe2\x4f\x22\xdc\x4c\x3b\x11\x19\xcf\x3a\x49\xa2\x40\xd8\x64\xff\x14\x78\xf7\x48\x9f\xac\xb6\x54\xb0\x88\x29\xca\x0c\x28\xec\x54\x90\xb6\x4d\x21\x27\x62\xf4\xdd\x1b\x9d\x8d\xda\x3b\xd2\x0f\x15\x7e\x73\xf0\x33\xcb\xf6\xab\xfe\xc9\xfc\x07\x9c\x1b\xd9\x0d\x8f\xf8\x28\x6e\x42\xd9\xac\xb1\x0a\xbd\xce\x7d\xbe\x5a\xe2\xc7\xce\x98\x5b\x9c\x25\x7e\xb5\xc9\xdc\xe0\x6e\xa3\x58\x7b\x6b\xc5\xdf\xcd\x2c\x15\xd0\xbd\x5d\x95\x7d\x3c\x4f\xf2\x20\x2a\x6f\xa2\xa8\x2c\x14\x88\x08\x86\xfa\xd3\x
80\x11\xb3\x0a\xff\x23\x87\x3d\xa3\xc2\x79\x38\x1f\x74\xd1\x9f\x12\x8a\xcc\xfd\xa4\xe3\x3d\x67\xae\xd5\xca\x28\x8c\x43\xce\x94\x8c\x2f\x9d\x5d\x59\x7c\x21\x6f\x1a\xcf\xe1\xe4\x51\x09\x38\x05\xcc\xa7\x6d\xbc\x79\x39\x3b\xc6\x46\x82\x56\xcb\xf5\x3d\x77\x0e\x94\x21\xb3\x8c\x2d\x7c\x06\xd7\xb3\xd1\x56\x9a\x74\xdd\x29\xef\x49\xcb\x34\x5b\x77\xf8\xe6\x1b\xaf\xb6\xc2\xb6\x0a\x7b\x95\x3f\xbf\xb2\x78\x26\x69\x89\x85\x34\xbc\x20\x52\xf7\xeb\x57\x79\xf7\x58\x57\x08\xa9\x6b\xc8\xf9\xa0\x1c\x82\x97\xc4\x82\xcf\xaf\x2c\xf2\x16\xbe\x4b\x19\x2c\x08\x33\x8f\xe8\x13\x84\x52\x7d\xc9\xd9\x2f\x7f\x2d\x2e\x58\x1d\xf3\xa7\x57\xb2\x9f\x19\x65\x74\xc7\xfd\xf1\x51\xef\xfd\xa3\xf8\xb7\x5a\x05\xe0\xf3\xd4\x0b\x94\xbe\x82\x87\xd2\x64\x9d\x5f\x08\xc5\xa6\x85\x86\x8e\xc8\x8c\x99\x19\x11\x8d\x4a\x22\x91\xc9\x12\x64\x85\x62\x26\xc6\x63\x0a\xcd\xca\x56\x82\xe8\xef\x1b\xca\xd7\x83\x08\xb3\x49\x47\xc4\xcd\x08\xc9\x07\xd6\xc1\x11\xde\x12\x59\x09\x56\xd6\xf8\x1c\x5a\x2b\x74\x8a\x16\x22\x5e\x42\x2b\x82\x8c\x7b\xf2\x8f\x69\x0f\x3d\xe7\xa8\x6a\x99\x15\xb1\x5f\x36\xaa\x60\xbc\x3d\xe7\x51\x80\x8f\xf1\x4d\x08\x8c\x06\x48\xed\xa4\xd3\x01\x21\x9b\x54\x95\x2e\x2b\x84\x36\x40\x6d\x6c\xc4\x34\x32\x85\xe2\x8b\x60\xbb\x61\x6e\x18\xdb\x75\xcf\x2a\xfa\x2f\x0b\xea\x17\x04\x3c\x5a\x96\xe5\xb5\xc5\x85\xc2\xc0\x5d\xfa\x36\x02\x0d\xc7\x2d\x91\x42\xea\xdc\x24\xa5\xdf\x60\x8b\x54\x42\x9a\x1c\x52\xea\xa2\x32\x9f\x48\x5a\x05\x43\xfe\xe9\x7d\xec\x4a\xc5\x26\x7f\xbb\xd4\x9f\x7e\x62\x9f\xf7\xf6\x7d\xe6\x95\xb2\x59\x46\x6a\x59\x36\x30\x65\x31\xad\xa8\x5f\x03\x3d\x97\x33\xc5\x7c\x05\xe6\x26\x98\xd0\x3a\xad\x30\xcc\xcd\xac\x5d\x88\xd6\x5f\x2f\x98\xed\xe1\x33\xca\xf4\x5f\xb4\x5a\x45\xb5\x78\x26\x57\xc6\xbd\x49\x2c\xbc\x49\xfb\x5a\x2a\xda\x61\x12\x7b\x13\x3b\xa9\x92\x1e\x34\x5d\x1f\x3d\x71\xec\xfa\x18\xf1\x6f\xca\x74\xa6\x42\x30\x60\x74\x10\xb6\x4d\xbe\x9d\x0d\xbc\xaa\xd8\x25\xe5\x27\xc3\x3c\xe3\xf2\x38\x88\x14\x10\x64\xf0\x2b\x4f\x49\xe0\x31\xd0\x85\x20\x9b\x92\x06\x44\x57\x64\x36\xd6\x34\x43\x3a\xb5\x41\x14\x77\xfa\x70\x61\x83\xc6\x78\xa1\x0b\x86\x58\x30\x6a\x02\xe2\x1a\xd2\xc9\x54\xd8\x86\x39\xc0\x08\x42\xad\x23\x7b\x40\xb9\xd1\xa1\x52\x04\xc4\x83\xe1\x1b\xc5\x65\xab\x27\x68\x5a\x52\x92\x06\x6e\xde\xe1\xba\x1a\x1e\x4d\xde\xa0\xf0\x53\xd5\xbb\x5b\xb9\x85\x7f\xda\x61\x57\x10\xaf\x84\x94\x5d\xee\x8f\x6c\x01\xa1\x66\x8a\x71\xf4\x30\x48\xb9\xad\x31\xfd\xbc\xc8\x28\xa8\x8c\xbf\x42\x2b\x5e\x9b\x33\xd1\x50\x53\x20\xf2\xd8\x00\xdc\xc0\xdb\xa3\x91\x75\xca\xab\xb1\x21\xf5\x02\xf6\x80\x43\x54\x03\x1f\x70\xbc\xb7\x39\xda\xf5\x57\x52\xfc\x8c\xdd\x81\x84\x83\x69\x8c\xa4\xc8\x4a\xfa\xc0\xe9\x73\x2b\xab\x16\xac\x45\x8c\xf0\xf6\x79\x2f\x35\x58\x41\x64\x89\xb7\x8a\x7c\x29\xea\xb5\xc3\xf8\x4c\xd0\x11\xe3\x13\x90\x56\x49\x6a\x94\x14\xe5\x38\xde\x66\x37\xff\xf9\x18\x9b\xde\xd6\x7f\x83\x2e\x92\x8e\x88\x95\x3d\xed\x13\x63\xde\x7c\xf5\xad\x41\x10\xb6\x80\x97\x9f\xd4\xba\xea\x25\x67\x0c\x25\xaf\x68\x59\xdb\xdf\x17\x0f\xb0\x3f\x77\xd8\x15\x2d\xf0\xc7\xa0\xf3\xe8\xf7\x1d\x76\x74\x07\xc3\x3f\xe0\x75\xf2\x3e\xe8\xac\x2a\xa4\xbc\xa1\x2e\xa0\x56\xc9\xf1\x03\x7e\x43\x29\x74\xcc\xf1\x85\x64\x50\x30\x64\x51\x1e\x12\x75\x3b\xb1\x3d\x49\x39\x25\x35\x4a\x92\xf9\x15\x45\xfa\xc2\xc7\x17\x28\x7d\xb8\x4e\x6d\x4f\x7d\x68\x2d\xfe\x7a\x0c\x5a\x4b\x9b\xdd\xe5\x6c\x6d\x45\x91\x5b\xb4\xf6\x27\x1c\xa6\xc7\xdd\xfd\x61\xc7\x7b\x87\xb3\x68\xd1\x84\xd2\x92\x0c\x81\x46\x49\x9e\x75\xd6\x7b\xc0\x82\x43\xaf\x3c\x56\xf5\x04\xdd\x98\xdc\x51\xec\x33\x35\xe6\x06\x7a\x06\x9f\x56\xe6\xf3\x8f\xd4\xbc\x37\xd5\xce\x75\xe1\x38\xac\xaa\x4a\x5f\x99\x2c\x51\xdd\x74\x35\xe4\x07\xc6\x5a\xc7\x7d\x4b\x99\x2c\xaf\xe6\x72\x55\x09\
x1f\x8c\x52\x95\xa5\x28\x07\xdb\x61\x9e\xc8\x83\x78\x26\x17\x54\x9c\xf3\x3b\x83\x30\x3f\x99\xa4\x85\x4b\x13\x49\x3c\x21\xb9\xfa\xb1\xea\xb6\x07\x4a\x92\xdc\x3c\x20\xfd\xcf\xe7\xb1\xd3\x97\xd5\x2b\xe9\xbe\xf7\x79\xde\xdd\x65\x7f\x23\x38\x82\xe4\xc5\x3a\x5e\x35\xac\xb6\x76\xae\xcc\x42\x1a\xac\xe7\xfc\x28\x1f\x27\x3e\xb0\x97\x65\x49\x5c\x47\x0f\x1c\x50\x82\x4d\x94\x38\x18\x6f\x66\x3f\x34\xca\xfe\x8d\x65\x31\xea\x04\x5d\xb4\x3e\xfd\xc7\x51\xef\xef\x47\x2a\x6f\x71\xf0\x02\x05\x98\x86\xa8\xf9\x84\x13\xbe\xde\x4b\xc1\xbc\xa9\x22\xbc\x41\x83\xd0\x5b\x23\x8e\x96\xee\x01\x7b\xc4\x7a\x03\x2c\x52\xc6\x36\x2f\xb5\x65\x48\x3e\x3a\x52\x26\xa7\x98\x65\x6c\x7a\x82\xaf\xb5\xd3\x20\xee\x45\x41\xba\x36\xcb\x94\x3d\x31\x83\x33\x33\xd1\x4d\xa0\xdb\x02\x7e\x8f\x9f\x17\xfd\x3a\xda\x25\xba\x41\x98\x66\x98\xe6\x0d\x51\x2b\x46\xce\x65\x18\x63\x1a\x86\x88\x73\x2c\x11\x54\x10\x78\x0a\x7d\x03\xe3\x10\x6a\x22\xd5\x03\xb8\x08\xcc\xe8\x71\x68\x40\xe0\x64\xa2\x1b\xa4\x40\x61\x06\x3c\xda\x13\x3a\x64\x86\x69\xc3\xb5\x0a\x25\xa4\x8c\xa6\x5e\x8a\xe0\xf9\x11\xd6\xd4\x67\x47\x26\xf8\x5a\x90\x27\x9d\xb0\xb9\x36\x5b\x28\xb7\x72\x3b\xa2\xf4\x2b\x88\xe4\x94\x4a\x79\xa4\xe6\xf9\x24\xa0\x09\xc8\xab\xcd\x20\x0a\x52\xb2\xb5\xce\x41\x21\xd8\x7e\x4d\xaa\x14\xe7\x61\x8a\x01\x8f\xdd\x28\x68\x5a\xae\x92\x92\xf6\x7d\x0d\xab\x85\x2d\xd7\xd2\x65\x3a\xca\x2d\xda\x62\x8d\xcb\x89\x33\x32\xe0\x89\x9f\x93\xaa\x14\x7b\x06\x1b\xeb\x04\x17\xc1\xff\xea\x3e\x91\x9c\x3f\xd6\x41\x9f\xb3\x83\x9d\xe0\xe2\x29\x11\xb7\xf3\x8d\xea\x27\x9e\xc9\xae\xea\x04\x17\x97\xd0\xda\x2b\xcf\xfd\x95\x4f\xf5\xd8\xbe\x24\x16\x67\xd7\xdd\xc8\x54\xe4\x5e\xca\xee\x61\x77\x3f\x8a\xcd\x64\x4f\x62\xfb\xf2\x30\x8f\x84\xdd\xc9\x4f\x66\x07\x9e\x89\x6b\xd7\xbe\x7e\x81\x7d\x47\xe1\x84\xc1\x4e\x69\xb2\xe0\x51\x1d\x86\x5b\x92\x24\x62\x4f\x95\x0a\x8c\x0e\x2d\xb7\xeb\xc4\xd9\x13\xc4\x45\x42\x07\x3a\x8d\x76\x5f\xf7\x4a\x6b\xbf\x69\xb1\x91\x38\xc9\xdd\x47\xb9\x27\x9f\xcc\x0e\x10\x8f\xbb\x5d\xbf\x27\xb2\xd1\x67\xa6\x62\xdd\xbe\xf8\x0a\xf6\xa4\xa2\x23\x8d\xa9\xf1\x98\xf4\x66\x8f\xed\x0b\xa2\xe8\x31\x9f\x6a\x4f\x67\x07\x3a\xc1\x45\x18\x9f\x27\xba\xfb\x5b\x49\xaf\x11\xa1\xc5\x8f\x7c\x50\xdf\xcd\xc6\xd4\x11\xdc\x7d\xea\x16\xa7\x99\x27\x12\xb2\xb0\x75\xf1\x4b\xfb\x87\xfa\x32\x3e\xb3\xdf\xfb\xc4\xfe\xea\x7b\xf6\x76\x02\x9f\xbb\x1c\xbb\x49\x12\x53\x74\xb2\xb5\x85\xcc\x0c\xd9\x42\x2e\xa7\xa0\xc5\xcf\xee\x50\xd2\x96\x1a\x82\x05\x51\x68\x96\x6a\x87\x54\xad\xa0\xa7\x92\x75\xac\xdb\x78\x96\xa7\xbd\x66\x3e\x49\x9f\x9f\xe4\xbe\xef\x4f\xe0\x8e\x91\x89\x5c\x6d\x83\x2b\x00\xbd\x9a\x0a\x1d\xa5\xad\xf4\x2d\x79\x74\x85\xde\xd0\x91\x91\x20\xcd\x4b\x8c\xe5\x14\xd8\x74\x22\x68\x6e\x60\x79\xb8\x6d\x16\x9c\x99\xea\xe3\x85\x12\x00\x05\x54\xeb\x0c\xaa\x87\x21\x78\x8a\x9a\x89\x63\x3d\xf8\x52\x31\x35\xd4\x5b\x3e\x9b\x99\xe0\x6b\x9d\xa0\x6b\x6f\xf1\x34\xc0\x84\xa2\x83\x3b\x5c\x58\x30\x61\x85\x69\x81\xf0\x8a\x44\xf1\x80\x5f\x21\x77\xf7\x8b\xf2\x98\x8f\x45\x29\xec\x0d\xca\x62\x03\xbd\xb1\xe3\x73\x20\x2c\x07\x3d\x57\x53\xb3\xf6\xa4\x5e\x0c\x0e\x34\xf4\x86\x74\x82\x2e\xcf\x83\x36\x8d\x59\xd5\x0c\xa4\x70\x2d\x68\xa2\x05\x36\x0b\xa3\x89\x36\x15\x66\x86\x8e\x61\x7b\x51\x13\x90\xbd\x63\x67\xcb\xfc\x56\x8d\x1d\x20\xb5\xc1\xfd\x95\x1a\x5b\xba\xdc\x92\xc2\xfb\x6b\xa7\x55\xf0\x1e\x06\x5a\x45\xc1\x91\x07\x76\xc2\x58\xf3\x8b\xe2\x90\xa3\xd6\xa4\xe3\xdf\x88\x3b\x46\x91\xbf\x12\x99\x2b\x98\xc6\xf0\x4c\x6f\xf1\x09\x18\x2f\x99\xbc\xaf\xfe\xe0\x63\xaa\x91\xd8\x95\x2a\xad\x43\x4f\x46\x95\x91\x5e\xd8\x28\x2f\x4c\x83\x1b\x2b\x16\x9b\x22\x9d\x18\x5a\x1e\xe4\x10\x80\xfa\x10\xc6\x5b\xa9\x0f\x1f\x1e\xdd\xd2\x9f\xfb\x86\x51\xef\x2f\x46\x86\xdf\xaf\x92
\x70\x7a\xad\x0d\x9b\xf9\x72\xb2\x83\x26\x89\x64\x65\xaa\x5d\x50\x1e\x4c\xaf\x40\x61\xe0\xc9\xd9\x4c\x46\x98\x4e\xd0\x55\xe8\x32\x79\xd0\x46\x3b\x4b\xb5\x54\x84\x35\x02\x8b\x42\xbe\xe7\x55\xd7\xc2\x33\x24\x2c\xe5\xc5\x78\x9d\xa0\xeb\xf9\x7c\x2e\xca\x12\x83\x44\xda\x44\xab\xd6\x98\x6f\x41\x8e\x31\x5e\x83\x32\x03\xe6\x7f\x8b\x8e\x6e\x2a\xfe\x60\x23\x8c\x5a\x1c\x05\x9b\x9c\x08\xe3\x71\xc2\x63\x91\xa9\x19\xa5\x8d\xb9\x13\x84\x75\xd5\xd5\x1b\xb6\xf1\x6d\xf8\x10\x45\x59\x34\x44\x11\x8f\x90\xa4\x4a\x00\x58\x53\x7a\xd2\x22\x54\x4a\x32\xab\x54\x34\x65\x62\xb6\xb7\xd2\xcc\x71\x67\x90\xc2\x72\x2b\x53\xdf\xab\x1c\x76\x0d\xa9\x21\x86\x5a\xd1\x79\xe0\x51\xdf\xde\xcd\xa3\xe9\x2b\x18\xeb\x3e\x6e\x1f\xff\xc2\x08\x7b\xba\x35\xa5\x44\xa7\x21\x5a\x2d\xd1\xaa\x2b\xbb\xa4\xfb\xf1\x11\xef\x03\x23\x5b\x3f\x63\x04\xa8\x92\xb7\x41\x7b\x94\xe5\xc9\x8b\x1e\x37\xcd\xd1\xc4\xdf\x47\xe8\x0a\x93\xb8\xca\x56\xfb\x5d\x71\x5a\x8a\x23\xa9\x09\x14\xb8\x0b\x28\xc3\x61\xb1\xa9\xf9\xa9\xac\xdc\x44\x62\x48\x31\x22\x86\x32\x92\x0a\x39\xc4\x04\x8d\xaf\x2b\xa0\x5e\x3b\x1f\xc6\xad\x49\x5e\x04\xdc\xa2\xea\x41\xa6\x15\x98\x4f\x2a\xcf\xb7\x65\xbb\x9f\x7d\x5b\x0a\xa8\x1d\xa7\xde\x43\x4e\x87\x3a\x1d\x4d\xed\x3a\x35\x04\x04\x1a\xa3\x93\x49\x9b\xa4\x71\x49\xc3\x5d\xca\x15\x20\x41\x1d\x66\x1c\xcd\x4e\xc5\x6a\x19\xef\x75\x01\x45\xaa\x54\xed\x49\x5d\xe7\x09\xdb\x66\x14\xb2\x51\x11\xf7\x3a\x6e\x60\xce\xfd\x55\xb6\x7c\xf9\x37\x22\x96\xb1\x2b\x95\x25\x66\x21\x69\x5e\x76\x9d\xfc\x44\x51\x76\x4f\xb3\x64\xb2\x07\xaf\x61\xfb\xd1\x80\xe5\x7e\xf8\x1a\xef\x7d\xd7\xe0\xdf\x34\xdd\xce\x76\x45\x3c\xb7\xb4\xc8\x2f\xcc\x70\xba\x8e\xab\xdd\x57\xcc\x1b\x74\x99\x2c\x08\xc8\x1b\x50\x0e\xb2\x33\x1f\xd1\x73\x61\x96\xb1\x3a\x6f\x64\x49\x8c\x23\x15\xb6\x66\xe5\xfe\x99\x25\x5a\xad\x5a\x5c\x50\x44\x08\xfc\xc8\x51\xde\xdc\x08\xd2\xa0\x09\x79\xa9\x1b\xe2\xa2\x0a\x10\xae\xf3\x5e\x1a\xce\xca\x8a\x9e\x5b\x5e\x84\xf4\x98\x20\xa5\x80\x97\x5b\x93\x28\x88\xdb\x3c\x16\xf9\xe1\x5e\x1a\xf9\x4b\xf2\x06\xc1\xc7\xc8\x67\xeb\x5c\x74\x82\x30\x9a\xc5\x35\x15\x84\x91\x95\x64\x53\x55\x8a\x7c\x08\x8b\xa1\xbc\x0c\x5e\x07\x94\xc4\x38\xe8\x88\x59\x64\xf0\x0e\x5b\xe5\xc0\x71\x10\xa1\x31\x5f\x84\xc8\x6f\x91\x23\x1e\x65\x8c\xe8\x58\x99\xc9\xb0\x0b\xac\x97\x53\x33\x47\x27\x79\x46\x08\xae\x33\xfe\x34\xbf\x7b\xf9\xe4\xbc\xbc\x7a\x8f\xcf\xeb\x3c\xec\x5e\x38\x3a\x8b\x28\x91\x17\x8e\x02\x54\x64\x75\x4d\xb1\x92\x8b\x4b\xf8\xca\x71\xf5\xca\xf1\x9d\xbd\xd2\x0c\x5b\xa9\x6c\x0e\xa4\xdf\x6c\xf9\x38\x3c\x51\xe7\x9d\xa0\x29\x9f\x3f\x3d\x37\xbf\x5d\x17\xe2\x6b\xf2\xc1\x3a\xef\xf5\x60\xc4\x63\x7e\xee\x9c\x72\xac\xc2\x2a\xcf\x78\xaf\xdb\x45\x92\x50\xb3\x7b\x30\x24\xb3\x2d\x2e\xf2\xf1\x9b\xc3\x89\x97\xdc\x3d\x55\xbf\x21\xa8\xaf\xdf\xf3\x8a\xeb\xef\xaf\xdf\xac\x7f\x1c\xdd\xd9\x8f\xe9\x23\xf7\x3f\x93\x6a\x30\xa3\xab\x30\x73\x19\xeb\x30\xa3\x7f\xcd\xec\xa4\x12\x47\x75\x25\x8e\x5e\xc6\x4a\x1c\xb5\x2b\x71\xfd\x0d\x41\xe3\x9e\xea\x7a\x19\x55\x39\xa6\xab\x72\xec\x32\x56\xe5\xd8\xc3\xad\x4a\x98\x35\x62\x9c\xb6\x2b\xb7\x9c\x99\x9e\x92\x8a\x0c\xfc\x35\xa3\x22\x63\x68\xf9\xc3\xf9\xc7\x9b\x9a\x39\x32\x7d\xdd\xb1\xe9\xa9\xa3\x33\x1e\xf0\x7d\xdf\x70\xdd\xf5\x75\x7d\x6d\xda\xa3\xf2\xa6\xa7\xcc\x12\xb7\x2b\x87\xde\x99\x29\xde\xa9\xfe\xf6\xe0\xb7\x9a\xa9\x68\x85\x79\x33\x48\x41\xa0\xe1\x2f\x2e\x7f\xaa\xf7\x2b\x7b\xf0\x25\xe3\x37\xcf\xc2\x88\x41\x1f\x8c\xdf\x3c\x8b\x7f\xcf\xdc\x3f\x71\xf3\x7d\xc7\xee\x9e\xae\x1f\xbb\x87\x6e\x1e\xbd\xff\xbe\xe3\xe3\x37\xcf\x4e\x4d\x4f\xdf\x07\xdd\x8a\xd7\x27\xf4\xab\xf7\xcd\xdc\x7d\xf4\x3a\xf5\xf0\xcc\xfd\xf7\xcd\xc8\x87\xef\x9e\xaa\x1f\xbb\xe7\xbe\xbb\x8f\x5f\x6f\x3f\x3d\x7d\xff\x7d\xe3\x37\xcf\x1
e\x99\x9e\x99\xbe\x6f\xfa\xfa\xa9\xa9\xfb\x66\x8e\xbd\xb8\x25\x3f\xf9\xe2\x96\xbc\x37\xf1\xcc\xc2\xc5\x11\x43\x04\x5a\x5b\xb6\xa4\x90\xc3\x9d\xf0\x22\x06\x3e\xd7\x79\x96\xc9\xf1\xe2\xe7\xfc\x15\x9f\x03\x18\x48\x84\x68\x91\x61\xde\x57\xcd\x2e\xb6\x03\xa3\xd5\xf0\xbd\xbb\xeb\xfc\x9e\x9b\x5f\xdc\x7a\xc5\x11\xfd\xd7\x51\x98\x05\x1b\xe2\x62\x33\x89\x92\x14\xc6\x60\x43\x5c\x0c\x5a\xa2\x19\x76\x82\x88\xc3\x55\xde\x4c\x5a\x74\x02\xf6\xae\x3d\x09\xff\xcd\x56\x7f\xe4\xda\x9b\xc7\x69\x82\xcd\xd5\x4f\xca\x3e\xbd\xcf\xfc\x79\x5c\x36\xb4\xce\xd3\x76\xa3\xf8\xd8\xf2\xad\xb7\x0c\x7c\x24\x6d\x37\xe8\x6b\x69\xbb\x31\x7e\xe4\xd8\xb1\x49\xfa\xdf\x0d\x72\xd4\x1b\xfd\x5c\xcc\xf2\x46\x90\x89\xe3\x47\x35\x60\x76\x23\x8c\x83\xb4\x8f\xa0\x62\x75\xae\x70\x58\x67\xa1\x47\x81\xeb\x3e\x59\x2f\x36\x32\xb9\x29\xca\x3e\x04\x40\x14\x6b\x8e\x1d\x99\x9a\x3a\x5e\x9f\x9a\xae\x4f\x1d\xf1\x4a\x9b\x86\x54\x6d\xea\xf0\x42\x18\xcb\x1d\x64\x66\x66\xe6\x06\x59\x14\xf1\x90\x40\x71\x8a\x93\xc4\x2e\xf2\x08\x8f\x33\xaf\x52\x4e\x83\x26\x09\x82\x7a\x41\xd3\x99\xa4\xe0\x4a\x0a\x72\xb0\x20\x21\xc6\x8b\x3c\xd1\x14\x85\x93\x6e\x80\xad\x90\x25\xe8\x96\x20\xe3\xbd\xdd\x9c\xe9\xa3\xf5\xe9\x23\xf5\xe9\x63\xab\xd3\x37\xcc\xce\x4c\xcd\x1e\x99\xf2\xa7\xa6\xa6\xee\x2a\x37\x4e\xbe\x5e\x47\x62\x7b\xdd\x38\xcb\x2e\xf0\x6f\xd9\x58\xdc\x8b\xc0\xa3\x5f\x32\xbb\xf6\xd8\xbe\x20\xee\x3f\xe6\x66\xc6\x7b\xd8\x01\x0a\x9a\x72\x1f\x0d\xed\xd0\x32\x37\x93\x39\xd3\x6e\xf7\x33\xd8\x58\x27\x8c\xb7\x70\x19\x3c\x9d\x1d\xa0\x00\xe5\x6a\x43\xe8\x6b\x47\x4a\x27\xfe\x30\xce\xeb\x49\x5a\xc7\x1e\x77\xff\xac\xe6\xfd\x5e\x6d\xf8\x7d\x8b\xbf\x5d\x05\x43\xe8\xc3\x8b\x4a\x03\xc0\x24\xdd\x36\x26\xd9\x06\x5a\x9f\x5c\xa4\xcc\x3b\xb0\x40\x62\x0c\x3c\x85\xa0\x28\xe7\x98\x15\xc9\x0f\x26\xd4\x8c\xce\xcf\x90\x9e\xd7\x3f\xbb\xce\x11\x5c\xa3\x13\xe6\xf2\xb0\x11\xae\x1b\xf2\x80\xc0\xc9\x73\x4b\x31\xa5\xe3\x2a\x99\x43\xa1\x08\xb0\xb4\x61\x98\xfa\x2c\xd7\x9e\x96\xe2\x12\xcd\xbd\x23\x13\x1c\x0c\xd9\xf4\x78\xf1\x6a\xe5\xcb\x03\xaf\xc3\x15\xdf\xf7\xf9\xbd\x22\x4d\x14\x5f\x95\x35\x94\xef\x1e\x61\xcf\xdc\xc9\xf1\xc8\xfd\x8b\x9a\xf7\x95\xda\x8e\x0e\x52\x59\x9e\x74\xb3\x72\x1c\x72\x4b\x34\x13\x00\x21\xca\x72\x41\xa1\xd9\xdd\xb4\x07\xf1\x9e\xf4\x1a\x06\x2a\x29\xe4\xe7\xe2\x1c\x45\x41\xba\x0a\xb9\x48\x8a\x18\xf4\xb9\xa2\x71\x37\x58\x5f\x07\xb8\x66\x2a\x25\x95\xdb\x41\x06\x10\x73\x04\xad\xb3\x19\x22\x6b\x94\x0a\x65\x8a\xa5\x0c\x89\xf4\xd7\x0b\x07\x61\xb8\x0e\x26\x91\x22\x6f\x41\x4e\xb0\x24\xe5\x55\x6e\x0c\x93\xb8\xbf\xa8\xa3\x55\x31\xf0\x5f\x6a\x63\x09\x64\xad\x9a\x26\x3e\x1f\x33\x40\xe0\xe4\x98\xa4\x8d\xb0\xd5\x12\xb1\x7d\x20\xbc\x8f\x5d\x61\xac\xec\xc7\xda\xc8\xf0\x7a\x87\x5d\xa9\xfc\xb4\xcd\x50\x64\xee\xbd\x0f\x38\x11\x7b\x19\xdb\x78\x54\xbd\x37\x98\x8c\x0d\x8e\x49\xb3\x32\xcf\x64\x57\x75\xc2\x78\x3b\x07\xe3\x77\x31\xa6\x6c\xee\x67\xd7\xab\x05\xcf\xd3\xd8\x15\xbd\x38\x7c\x79\x4f\xa0\xf0\xb2\xc5\xda\x4f\x5c\xc9\x66\x1f\x19\xb7\x28\xc0\x7e\xfe\xd5\x15\xde\x4b\xb7\xb8\x5f\x86\x00\xa5\xc4\x8d\xf5\x5e\xdc\xc4\xd9\x65\xe0\x3c\x0d\x29\xc6\xbf\xe4\x5c\x0d\x7f\xad\x06\x69\x5b\xe4\xcb\x62\xfd\x92\x73\x45\x27\xb8\x48\x8c\x2a\x76\x8e\xdf\xd7\x18\xfb\x31\x87\x99\xb7\xdd\xf7\x38\x2a\x00\xfc\xd5\x8e\x71\x5d\x45\x61\x81\xf6\xcd\xa3\xb0\x13\xe6\x3a\x98\xbe\x08\x11\x4f\xd5\xd3\x80\xe2\x24\xd7\x2a\x04\x7b\x14\x04\xa8\x72\xc2\xc3\x9f\xbc\xd7\x05\x2b\x4f\x01\xa1\x1c\x69\xc8\x8b\x4e\x18\xab\xcf\xda\xb1\xea\x0f\x8e\xb2\x03\x1d\x91\xa7\x61\x33\x73\x1f\x18\xf5\xbe\x3c\x42\x3f\x6c\xf8\x68\x2b\x74\x1a\xb3\xe8\xa8\x2e\x04\x18\x95\xf0\x66\x10\x35\x21\x54\xc0\xea\x66\xaa\x3d\xc6\xb8\x43\x84\x01\x27\x57\x5d\xe9\x16\xe1\x6a\x41\xa4\x
00\xd5\xc0\x24\x20\x99\xf0\x31\xa3\xaa\xba\x5c\x58\xf6\xf4\xf9\x96\x72\x00\x69\xeb\x32\x28\x31\xbc\x21\xf2\x4d\x41\x89\x1a\x39\x8c\x23\xed\x5d\x1a\xad\x59\x21\x5e\xc2\x55\x52\xdf\x07\xd3\x86\xba\x49\x2b\xf3\x39\x3f\x91\xb6\x93\x49\x5d\x55\x30\x43\x83\xc1\xad\x25\x9a\x29\xf0\x3a\x90\x25\xbb\x0b\x58\x58\x54\xc9\x30\xc6\x9b\x84\x2b\x77\x21\x6c\x0a\x48\xb6\x0d\x7c\xce\x57\x84\x50\xa6\xef\xf0\x42\xd8\x82\xe0\x0e\x28\x9d\xab\x44\xe8\x7e\x97\x12\x18\x3b\x2a\x21\xd9\x8e\x71\xdf\x48\x36\x31\x72\x43\xb9\xd5\xe8\x7d\xa8\x17\xb1\xf6\x59\x16\xde\x2d\x33\x2a\x2a\xd7\xe1\x69\x28\x51\x2e\x2b\xf6\x57\x35\x76\x85\x31\xab\xdc\xaf\xd6\xd4\x24\xff\xf5\x9a\x71\x5d\x07\x31\x27\x9b\x97\x67\x92\xb7\x92\xcd\xd8\xe7\x72\x9e\xb7\x0c\x17\xd3\x34\x62\xe6\xf2\xd2\x97\x6d\xcb\xe2\x94\xb2\x1e\x06\x51\x77\x23\xb0\xbc\x33\xfc\xb6\xa5\xb9\x15\x58\xe2\xc9\x5d\x72\xcf\x96\x0a\x0d\x84\xe3\x13\x71\x62\xce\x23\x11\x80\x2f\x4c\x90\xb5\x55\xee\x2a\xca\xe2\xa6\x7a\x3a\xcc\x34\x3c\x95\xdc\x69\x40\x8d\x56\xce\x23\x82\x0b\xcb\x00\x90\x05\xbc\xaf\x66\x99\x54\x40\x61\x0a\x36\x40\x99\x8c\xd5\xfa\x1f\x6b\xac\x24\x89\xdc\xbf\xde\x0a\x08\xbd\x6a\x0c\xe7\xe5\x42\x23\xab\x28\x36\xa5\xa0\xc0\xf9\x90\x63\x97\xae\x20\x18\x55\x4a\x16\xae\x9b\x22\x37\x3f\xc1\x41\xd1\x50\xf2\xca\xed\x48\x33\xdf\x94\x14\x6a\xa9\x14\xe9\x43\xcd\x24\x8a\x88\x11\x35\xc8\xf8\xa6\x88\x22\x8e\x33\x00\x63\x9b\x22\x0d\xfe\x95\x17\xc4\x5e\xb8\x96\x7c\xf6\x3f\xed\x88\xd8\xaa\x76\x6a\xa3\xb8\x9c\xb1\x68\x9e\xff\xe9\x31\xef\xfb\x9c\xc1\xeb\x3a\x03\xa9\x20\x24\x87\xa9\x06\xbe\x4e\x1a\x19\xda\x45\x10\xed\xa4\xd0\xc6\x94\x35\x73\xdc\x02\xe8\xdc\x08\xf3\xac\xde\x15\x69\x3d\x13\xcd\x24\x26\x77\x37\x5f\x8c\xdb\x60\x3e\xc3\x57\x26\xfc\x4b\xce\x7e\xec\xd0\x4b\x0e\xc3\xaf\x9c\x81\x5c\x93\x2b\xf0\xea\x1d\x72\x2e\x58\x5b\xcb\xdf\xef\x67\x7f\x50\x63\x63\x99\x90\xdd\x96\xa4\xee\x6f\xd6\xd8\xf3\x1e\x09\x2e\x40\xd0\x10\xd1\x0a\x15\xe2\xbd\xbe\xa6\xca\x2b\xa2\x82\xa5\x32\x50\x57\xc7\x5c\x60\x08\xc2\xc4\x78\x05\x23\x60\xb4\x1f\xf9\x78\x75\x11\x6a\x69\x63\xea\x08\xf5\x5d\xc1\xaf\x8e\x28\x95\x14\x17\x19\xa0\xe7\xb0\xc8\xbd\xd6\x94\x24\x6a\x0a\xe9\x39\x83\xca\xac\x96\x7d\x3a\xe9\x43\x3f\xd0\x4c\xba\x72\x68\xe0\x4b\xbd\x18\xbe\xf5\x32\x29\xf6\x8a\x52\x64\xdf\x0e\x70\x5a\xb5\x03\x50\x14\xa9\x18\x9f\x7d\x8f\xc3\x68\x4c\xdc\x0b\x97\x71\x59\x3d\x8b\x16\x4e\xa8\x75\x10\xd0\x48\x2c\x2f\x8f\x72\x9c\xbf\xc1\x61\xe6\x04\x70\xef\x7f\x18\xd8\x0f\xbe\x5a\x99\xfe\x0b\x7a\x01\xc4\x74\x78\xcf\x35\xca\xd2\x59\x8b\xe6\xf6\xa7\x9c\xac\x38\x54\xe3\x10\x16\xf2\x72\x7a\x7b\xc2\x67\x0f\x3a\xec\xca\xe0\x82\x48\x83\xb6\xc0\xfa\xfc\x9f\xce\xee\x2a\xb4\x6e\x96\xb6\x55\x8d\xe8\xb9\x52\x05\x0d\x3d\x21\x15\x91\xb8\x10\xc4\x94\x21\x5b\xaa\x39\x3b\xc5\x8c\x45\xe5\x3e\xd7\x9b\x36\xa6\x41\x45\xa4\xbf\x92\xe0\x31\x07\x1f\x42\x19\x0d\xe8\xcd\x07\xd9\x84\x31\x01\x2a\x74\x6a\x5a\xdd\x00\x73\xf3\x47\x63\xde\x21\xe3\x37\x06\x0f\x90\xb8\xa3\xdc\x33\xba\x5d\x8d\x72\xf3\xf3\x07\xd8\xe7\x4c\x38\xf6\xcf\x3c\x9c\x3e\xaf\x00\x00\xf9\x97\x01\xc6\xbe\x07\xdc\xb3\x4b\xe0\x9e\x48\x45\xa8\x36\xbd\x7f\x57\x09\xdc\xa3\x26\x95\xa9\xe4\x5d\xc7\x8e\xb1\x99\xa1\x92\x6c\xf8\x44\xde\x03\xef\xd9\x0d\x4d\xc6\xda\xf6\xc0\x3a\x37\xb9\x37\x6a\x60\x9d\x62\x18\x34\xbc\x8e\x21\x3f\x6c\x6c\x1d\x18\x25\xc6\xfe\x74\x94\xfd\x7b\x7b\x4b\x52\x2c\x4a\x52\x76\x5d\x98\xf6\xcf\x65\x22\x5d\x8c\xd7\x13\xf7\x93\xa3\xde\x5d\xea\x07\x25\xc8\xe6\x1b\x55\x07\x08\x38\x8c\x66\x22\x05\xd2\x37\x8a\x1f\x53\x20\xbb\xfa\xa6\x0f\xc5\xc0\x92\x59\x0f\xca\xfc\x06\x9f\x1f\x61\x31\xdb\x27\x2e\xe6\x69\xe0\x0a\xef\x86\xb9\xb8\x3f\x0c\x2b\xba\x0c\x63\x61\xd4\x3f\x49\xfd\x07\x9c\xef\x64\x5b\x44\x6a\
x9a\x36\x8a\x17\xb0\xfd\xd0\x89\x99\x7b\xab\xf7\xec\x55\x92\xea\x10\x8f\x86\x97\x71\x66\x41\xb3\x60\xfa\x76\x91\x48\x74\xab\x50\x97\x8f\x3a\x6c\xa4\x17\xb6\xdc\x0f\x39\xde\xbb\x9d\x39\x8e\x96\x0b\x9a\xf0\x98\x18\xa9\xb0\xe1\xcd\xd2\x69\x67\xd2\x79\xf4\xd6\x77\x21\x91\x5d\x1d\x26\x62\x8c\xb7\x87\x7b\x8a\x10\x48\x27\x8c\xc9\x4a\xb6\x5a\xc4\x1a\xd0\x47\x8d\x05\xe2\x1f\x8a\xac\xbe\x73\x8b\x0b\x76\x08\xdd\x8b\xd9\x98\x2c\x0c\xf2\xd9\x96\xbc\x79\xd5\x0b\x44\x2b\x08\xd5\x8f\xfa\x43\x6a\xdd\x81\xe3\x88\xd4\xbd\xf1\x84\x82\x54\x4b\x66\xe9\xdf\xcb\x2a\x33\xce\xcf\x24\x2d\xa1\xa8\xd7\xdd\xaf\x1c\xf4\x6e\x3c\x03\xa9\xbe\x8a\xc1\x1e\x3a\x1b\x86\x40\x8e\x45\x6c\xdd\x33\xf9\xf2\x7b\x91\x28\x25\x9c\xbf\xfd\x20\xfb\xd2\x3e\x76\x58\xb3\xc3\x97\xa9\xf2\x17\x31\x0c\x01\x2f\x9f\xb8\x28\x9a\x3d\x88\xdd\x7e\x60\x9f\xf7\xa5\xd1\xd5\x82\x8d\x5f\xc1\x4f\x76\x75\xea\xaf\xba\x41\xe0\x2e\x89\xca\x9a\x94\xbd\x94\x05\x79\x98\xad\xd3\x5c\xac\x60\xe1\x37\xac\x8e\x2a\x4f\x1f\x4c\x9f\x68\xed\x0c\x73\x02\x2b\x06\x4e\x05\x33\x33\x11\x49\x21\xe5\x84\x8c\x85\xb2\x04\x6b\x04\xf3\xa2\x78\x83\xb1\x41\x41\x94\xda\x14\xf9\x4a\xd4\xcb\x62\x74\x98\x1d\x70\x3d\x2a\x6a\xfd\x64\x9d\x23\xaf\x7f\x46\x81\x15\x70\x24\x09\x9a\x1b\x46\xb1\x1d\x01\xe1\xb3\x51\xa4\xaa\x60\x8e\x04\xc6\x95\x11\x61\xa0\x16\xe1\x9a\xfa\x4b\xc5\x9d\x95\x87\xa3\xb2\xbb\x10\xdf\x7f\x62\x52\x61\x1c\xc9\x7d\xa3\xd7\x91\x5d\x17\xe6\x90\x9f\x04\xdb\x45\x9a\xf4\xda\xd8\x12\x2b\x8e\xb4\xc8\x80\x0a\x08\x6c\x3e\x6e\x73\x0f\x1b\xe7\xe9\xfc\xf0\x5e\x47\x1d\xe7\xa1\x7d\x84\x83\x8a\x3b\x46\x92\x92\xbd\x03\x31\x21\xf3\xe6\xc6\x89\xa2\x6e\x37\xea\x97\xc6\x81\x10\x80\x3a\x73\x23\x6c\x6f\xa8\xbe\x54\x59\xf4\xf6\x18\x58\x32\xe3\x39\x6c\x96\x5d\xbf\x3d\x01\x90\x7a\xb7\xe8\xaf\x55\x91\x76\xd8\xc7\x47\x98\x3f\xac\x43\x87\xcc\xef\x6f\xd7\x58\x7d\x47\x18\xf2\xfa\x68\xf7\x6b\x8a\xaf\x5d\x8f\x90\x35\xc6\x43\x66\xb4\xf6\x05\x74\x44\xce\xe5\xca\x28\x46\x5a\xca\xb6\x49\x6d\xd6\xd2\x54\x0a\x8d\x62\xcd\xc9\xb3\xae\x91\x64\xaf\x81\x4e\x1e\x5e\x0d\x9a\x60\x41\x43\xa3\x8d\xaa\x45\xd2\x11\x84\x9d\x40\x19\x92\xb2\x0a\x42\xf5\x8e\xe2\x7f\xe8\x21\x2c\x80\x0a\x45\x27\x88\x74\x03\xa1\x40\x2e\xc1\xa0\x0f\xd5\x26\x26\x17\x21\xf7\x73\xb4\x37\x00\x4e\x8b\x6e\x1f\x68\x23\x52\xe3\x80\x96\xb0\xd7\x1e\x64\xcf\x34\x7a\x9c\xb0\x7f\xe1\x08\x68\x6b\xff\x5f\xda\xa5\xf6\xff\xb3\x07\xf6\x54\xe5\xdd\xaa\xca\x6d\xa5\x2a\xbf\xe4\x61\xa8\xca\x47\xd8\x14\xf3\x87\x2e\xb1\xca\x01\xdf\xd3\x92\x77\x01\x71\xf9\xaf\xef\x8c\xfb\x90\xd3\xd8\x5e\xf1\xbf\xd9\xbd\x49\x2b\xfe\xc6\xa4\x2a\xb1\xe3\x0d\xd5\xff\x19\xfb\x45\x9b\xa4\x5f\x43\x00\x29\x00\x9f\xc5\x4e\xd0\x16\xee\xa5\x9a\xf7\xd4\x05\x95\x67\x14\x14\xf0\x3e\x3c\x94\xb7\x2f\x39\xfb\x40\x51\xb6\x11\x6d\x1c\xf6\x01\x87\xe1\x0d\xf7\x5d\x8e\xf7\x8a\x33\xa0\x4b\x37\xfa\xda\x28\x0f\x74\xa1\x40\xdf\x92\x21\xb0\xa2\x8f\x60\x28\x77\x7b\xb2\x36\xed\x66\x2a\xbb\x73\xa3\xdf\x15\xa9\xec\x83\xd9\x0b\xd3\xfe\x94\x7f\x9d\x37\xc9\xbd\x56\xd2\x3c\x2f\xd2\x8d\x5e\x43\x3e\xd0\x4e\x92\x76\x24\x5e\x5a\x20\x0e\x0d\xbe\x72\xcf\x16\xfa\xf9\x1c\x3b\x98\x85\xf7\x8a\x5b\xfa\xb9\xc8\xdc\xa3\x0a\x38\xf3\x19\xa0\xfb\x85\xf7\x6a\xf5\x8a\x2a\x1a\x43\xf4\x4e\xc9\xed\xf5\x73\xfb\x2d\x44\x6c\xd5\x85\xb7\x86\xf9\xb2\xe8\x26\x98\xac\x4f\x46\xe3\xb7\xee\xf7\xfe\xae\xb6\x5c\xe0\x51\x14\xf8\x33\xa4\xa8\x95\x72\xc6\x73\x02\x83\x54\xea\x4c\xc0\xdb\x21\xac\xeb\x24\x0b\xf3\x24\xed\xfb\xfc\x56\xfa\x4d\x05\x65\x1a\xbd\x87\x90\x37\x92\x4d\xd9\x27\x1b\x61\x97\x77\x82\x38\x68\xc3\x66\x59\xf1\x96\x7a\x7c\xe5\xc4\xa9\x30\xee\x5d\xe4\xa9\x00\x63\xac\x9c\x4b\x8c\x2d\x9c\x58\x5a\x3e\x31\x3f\xb7\x7a\x62\x61\x96\x53\xab\xf0\x1c\xd2\x4d\x45
\x93\x92\xb4\x92\x02\xd5\xc4\x9a\x20\x44\x33\xa6\xaa\x3d\xc9\x3b\xe8\xa2\x8b\xf9\x89\x4e\x37\xef\x2f\x84\x29\xe6\xb4\x83\x4d\x3b\xcc\xf5\xbc\xc3\x1e\x69\x46\x89\xda\x9b\xa0\xba\x98\xbe\xd2\x0e\x73\xe2\xb3\xc7\xc2\xe4\x6d\xbb\xb4\x1c\x51\xe7\x0e\x65\x45\x4d\xfc\x4b\x0e\x2b\x3a\xce\x9a\xaa\xff\x58\x63\xdf\x5f\x63\x07\x5b\x61\x0a\xba\x4e\xdf\xfd\xb6\xe3\xfd\x85\x83\x8e\x0b\xae\xaf\xc2\x31\xc8\xe7\xa7\x55\x82\x98\x42\x1e\x45\xe2\xa0\x94\x92\x98\x0e\xf9\xfe\x21\x9f\x4b\x55\xe5\x90\x7f\x48\xe5\x65\x44\xa1\x62\x6e\xa3\xe1\x2e\x0a\x55\x56\x64\x34\x74\xdb\x63\x8b\x5c\xb7\x9b\x21\x70\x32\xad\x17\x1a\x8e\x55\x14\x14\xa0\xea\x32\x58\x8a\x8e\x6b\xe8\x35\xcc\x8f\x2a\xa5\x1f\x51\x99\x64\xc3\xcc\x45\x31\xc1\x8c\xae\x72\x9f\xe6\x5d\xbd\x5c\x94\x77\x6e\xf9\x94\xf9\xe8\xf3\xd8\x58\x2a\x70\xdc\xdd\xa3\xde\xa1\xf9\xa4\xd3\x09\x01\x12\x70\x43\xdb\xef\x0b\xcd\x4c\x3d\x69\x7d\xec\x17\xc6\x2c\x74\x5f\x45\x7b\xd2\x2f\x62\xd1\x69\x6b\x5a\x89\xc2\xa6\x00\xad\xe8\x0d\x63\xde\xf1\x81\xab\x26\xce\x4b\x81\x02\xae\xf7\xb5\x4c\x3e\x97\x55\x2a\x49\xbf\xb1\x07\x04\xbe\x6b\x25\xe9\xbc\x52\x92\x1a\xde\x53\x4e\x55\xf7\xfd\xce\x4f\x3c\xdb\xcc\x81\x3d\x45\xe9\x5f\x3a\x16\x78\xb4\xbd\xe6\xb2\xe8\xde\x5a\x27\xf5\x64\x60\x29\x57\x19\x29\xb9\x52\x73\x8a\xc9\x41\x5a\x0e\xfb\xf4\x41\x6b\x03\xb6\x95\xeb\x33\xf8\x6b\x09\x40\xc3\xdd\x77\x1d\xf4\x16\xad\x2b\x46\x48\xcf\xa6\xdc\x71\xe8\x65\x9e\xa7\xf2\x98\xd9\x34\x1d\xff\xeb\x18\x84\x88\x10\x67\x03\x78\xdc\x5f\x3c\xf0\xaf\x93\xf6\xf6\x95\x8a\xa4\xb8\xbf\xc5\x82\xdd\xa2\xcb\x81\xb3\xf8\xa6\x9d\x51\x16\xeb\xcc\x48\xab\x84\x3d\x9a\xf2\x3d\xc6\xa8\xc7\xd3\xff\xd2\xda\x5e\x98\xcd\xb9\x37\x2b\x61\x66\x4d\xdd\xd2\x69\x8b\x0f\x3f\xaa\xb1\xcf\x8c\x5a\x07\xb1\x06\x90\x21\x5d\x80\x58\x80\xf8\xf9\x49\x83\x70\xd8\xde\x37\xea\xdd\x60\x5d\x29\xa3\xdb\xa9\x20\x33\x83\x83\xa0\x99\x26\x31\x7f\x59\xd2\xb0\x2d\xe3\x5f\x18\x91\xe7\x33\x45\x1f\xfd\x4e\xc7\xab\xcf\x69\xa5\x09\xb9\x20\x91\xea\xb7\xc0\xa7\x55\xe0\xae\x2f\x4b\x1a\xb6\xad\xe3\x28\x3b\xc2\xa6\xb6\x35\x27\x96\x02\x19\x1e\x72\xae\x1d\x0a\x86\x71\xd0\x3d\x80\x79\xfe\x8c\xbd\xd3\x61\x4f\x88\x82\x2c\x27\x93\xa6\x58\x0d\x3b\xc2\x7d\xe5\x16\xd0\x80\xdb\x11\xe9\x9c\x5c\x34\x3c\x47\x80\x34\xb1\x49\xe1\x75\x00\xa2\x96\x2b\x9a\x9d\x97\x25\x0d\xb8\x63\x21\x92\x69\xd3\xa4\xcf\xde\xe6\x30\x17\x2a\xa6\xef\x43\xd5\xee\x7f\xe4\x35\xbb\xe5\x61\xd4\xcc\xaa\x15\x81\x77\xc9\x5a\x7d\x71\x8c\x3d\xad\x6a\x16\x3d\x3f\x69\x80\xe6\xfc\xe0\x98\xc7\xe9\xef\x2a\x5b\x22\x0c\x6d\xa5\x8e\xfc\xa3\x7b\x86\xc4\x5d\x4b\xe1\xbb\x95\x8e\xbc\xec\x3d\x35\xac\x32\x24\x3e\xbf\xbc\xb2\x26\xd8\x21\xf6\xef\x87\xae\x2c\x73\x74\xf7\x44\xfc\x2e\x74\xe2\x5f\x33\x15\xb7\x9f\xdf\xa5\xf1\xf0\xff\x18\xa2\x15\x3f\x76\x76\xc3\xe5\xed\x37\xac\xc3\x6e\x5d\xdb\x0d\x61\x1a\x69\x5b\x21\x49\x87\xb2\x9d\xf0\x75\x8c\x95\xa2\xd6\x36\x92\x34\xbc\x57\x47\x08\xac\xf4\x60\xda\xcc\x81\x58\x5a\x16\x17\x42\xb1\xe9\xfe\xd6\x41\x6f\xa9\xe2\x3a\xf2\x0d\x67\x65\x52\xe5\x00\x9d\xc8\x49\x4a\x9e\xde\x66\x10\xf3\xae\x48\x21\x64\x30\x88\xc1\xa5\x9c\xc4\xfe\x25\x07\xb4\x51\x4b\x36\xfd\xaf\x03\xec\x47\x1c\x39\x98\xb0\x45\xfe\x27\x87\xcd\x6f\x15\x6d\xb7\x6d\xbd\x71\x63\xf5\xce\x15\xf8\xa6\xeb\x21\x40\x79\x83\xf9\x0f\x4d\x1a\x44\xb6\x18\xb7\x8c\x78\x4f\xd5\x1e\xb4\x56\x21\xff\x80\x71\x7c\xc0\x66\xee\x49\xd1\x3d\x5d\xf6\xf1\x13\x74\xb7\x1b\x72\xee\xe6\x5d\x9e\x4f\xd9\x6b\xd4\xc9\xf0\x7e\x36\xb7\xbb\x05\x27\x8f\x88\xd7\x43\x82\xcd\x56\x98\xfb\x6a\x4d\x35\x04\x90\xf0\xcb\x81\x96\x3d\xfb\x90\xd3\xd9\x5e\xde\x3d\xdf\xbd\xad\x60\x1e\xb3\xaa\x54\x72\x95\x54\x54\xaf\x2c\x0a\xbf\xef\x8a\xe1\xf1\xe2\x43\xd3\x8b\xdc\x2f\x30\xaf\xae\xf9\xa6\xf5\xb
2\xe5\x1b\xfa\x71\x64\x06\x29\x12\x85\x2c\x11\xf7\xc0\xc1\x3d\xc1\xb1\x27\x38\xfe\xe5\xb8\x57\x77\x69\xda\x7a\x65\xd9\xb4\xf5\x38\x18\xb6\x7e\x4f\x89\xaf\xcf\x6d\xa3\x30\xec\x60\x75\x83\x04\x7b\x9d\x53\x24\xa2\xca\x95\x55\x2c\xe5\x47\xbb\x4d\xb2\x19\xf5\x20\x6e\xd5\x89\xb7\x8f\xdd\xa7\xb5\xa1\x94\x9d\xd8\x6d\xdb\x50\x1d\x9a\x50\x96\x85\x6a\xe1\x6c\xb4\xf6\x21\x67\x63\x7b\x81\x7c\xc2\x9d\xaf\x57\xdb\x46\x8c\x5a\x69\xa9\x3c\xa4\x6a\x8c\xbd\x7a\xd4\x64\xe9\x2f\xa8\x21\xd1\x6b\x70\x72\xe5\x56\x59\xe6\x0a\xd1\x7b\x9d\x45\xee\x1d\xf7\x37\x46\xbc\x17\x55\xdf\xb2\xa4\xa8\x62\x05\xa3\x0c\xf2\xb8\xc5\x13\x7a\x4a\xa5\x7c\x20\xaa\x9e\xf5\xb0\x2d\xb6\xff\xb8\xc6\xfe\xce\x61\xfb\xd3\x20\x6e\x8b\xcc\xfd\xa6\xe3\xfd\x98\x83\x7f\x17\x3c\xfa\xa4\x1d\xd2\xe5\x64\x9d\xaf\x13\x89\x78\x86\x6e\xc4\x7e\xd2\xe3\x9b\x20\x90\x01\x13\x21\x4f\xf8\x7a\x22\x25\x91\x06\xdc\x54\xcf\xa3\x47\x14\xdc\x8d\xfd\xe2\x2e\x94\x5b\x02\xae\x44\x77\x25\xe0\x3a\xc7\x2d\x9f\x2f\x2b\x10\xb8\xf5\x24\x05\xf7\xe6\x72\x2f\x9e\xb3\xcf\xa1\x33\x6c\x9a\x1d\x1e\x3a\x8f\x4a\xbd\xbe\xb8\xb0\x2c\x3f\xca\x36\xd8\x68\xda\x8b\x84\xbb\xe6\xad\xc8\x7f\x8d\x94\x20\xea\xd6\x8d\x80\x18\x37\x5a\x61\x13\xcc\x55\x60\x81\xa7\x91\xd1\x29\x60\xe4\xc0\x5c\x21\x90\x90\xf9\x24\xce\xc5\xc5\xdc\x72\x21\xfe\xd4\x41\x76\xc8\xa8\x4f\xda\x08\x9a\xba\x36\xcb\x49\x24\x6e\x09\x21\x7a\x0f\x2c\x20\xaf\x3d\xe8\xfd\xb0\x53\xba\x58\x65\x0a\x31\x1e\xc9\xf8\x82\x76\x75\xcb\xea\x5c\x98\xf6\xa7\xaf\x93\x7f\xac\x07\x17\x12\xcc\x39\x94\x5f\xac\xd2\x2b\x0e\x5f\x98\xe6\xa5\x8f\xe1\x06\x49\x01\x6f\x48\xa9\x9e\x22\x28\x3b\x60\x68\x62\xf9\x47\x8e\x54\x9b\x61\xbe\xb0\xe7\xaa\xbc\x9c\xa9\x0f\xcf\xd0\xf1\x5c\x85\xc7\xd8\x1c\x78\x73\x0d\x1c\x63\x33\x6c\x7a\xe8\x1a\x18\x36\xe7\xf6\xb4\x8e\x5d\x68\x1d\xeb\x86\xd2\x71\xd7\xee\xac\x32\x4f\xdb\xc2\x97\xf6\x90\x93\x6d\xbf\x61\x2d\xb9\x67\xf4\x09\x62\xe8\x72\xd7\x1b\x56\x69\xcd\x57\x66\x5d\xbc\x9b\xb1\x3b\x1f\x29\xf6\x82\x6f\x03\xb6\xce\x27\x51\xaf\x13\x17\xb0\xad\xee\xcf\x1d\xf4\x6e\xdf\xfa\x11\x03\x81\x05\x44\x5f\xaf\x83\x10\x74\x64\xda\xc8\xc2\x96\xe0\xdd\x34\x84\x29\xe9\x6b\xee\x32\x00\xb5\xbe\xe4\x8c\xbd\x2c\x4b\xe2\xa5\x20\xdf\xb0\x84\xd3\x2f\x8f\xb1\x37\xd6\xd8\x58\x37\x0d\x13\x29\xa9\xdd\xef\xd1\x69\xdc\xdf\x70\xd4\x45\x02\x0b\x54\xb8\x2e\x38\xa0\x1a\x84\x29\x0a\x90\x47\xbf\xd3\x4d\xd2\x3c\x88\x9b\x42\x4b\x2c\xaa\x21\x80\x0b\xa5\xb8\xfb\x42\xe6\x43\xe6\xf3\x53\x90\x0e\x8e\x09\xe0\xb8\xb1\x36\x93\x58\xd6\x5f\x3e\x07\x91\xd8\x29\x57\x9f\xf7\x39\xf6\x83\x82\x32\x40\x1c\xe9\x44\xc1\xc0\xc4\x98\x55\x2e\x45\x5c\x37\x68\x0a\x9e\x35\x45\x1c\xa4\x61\x62\xa6\x1a\x63\xcc\x4c\xa0\x8b\xa4\xc8\xf9\x14\x19\x5e\xa7\xec\xd8\xb0\x8f\x39\x84\x03\x0e\xf9\x1f\x1a\x9e\xa6\x40\x4b\x84\x4b\xc5\xb4\x2e\x5c\xa0\xd8\x60\x1f\x72\xf9\x8d\x25\xb7\xd1\x6b\x00\x33\xd6\xd9\xb9\xc5\xc3\x54\x46\xdd\x72\xae\x1e\x6e\x44\x49\xe3\x70\x27\xc8\x72\x91\x1e\xa6\x49\x97\x1d\x3e\xe2\x4f\xc9\x75\x26\xa7\x7b\xbd\x48\xfe\x6f\x89\x3c\x08\x23\x3b\x43\xe3\x4e\x1b\xc5\xfe\x36\xef\x46\xe3\x27\x8a\xae\x8d\x5e\x07\xb8\x6c\x83\x16\x50\x24\x99\xf7\xed\xe1\xb2\x0a\xfe\x99\x9a\x06\x8f\xfc\xf1\x9a\xf7\xfe\x9a\x05\x1e\x89\x9a\x55\x10\xed\xbc\x5f\x56\x37\x04\x3f\x24\x27\xe5\x21\x6e\x94\x84\xb1\x56\x3a\x6d\x3c\x0d\x3b\x41\x6a\x64\xab\xa4\x6a\x1e\xe5\x09\x0f\xb2\x0c\xb6\xfe\x98\x24\x63\xa6\x01\xae\x91\xc1\x10\x9e\x0b\x55\xf0\x19\x49\x62\x8c\x01\x7b\xac\xc7\xe4\x01\x87\xe9\x15\xe7\xbe\xcf\xf1\xde\xec\xa8\x5f\xb4\x97\x40\x6a\x15\x30\xa0\xf0\xae\xbc\x3a\x0e\x49\x1b\x18\x7b\x07\x68\xca\x00\xaf\x1c\x26\xf1\x04\x45\x5f\x86\x59\x61\x40\xe1\x41\x3b\x08\xe3\x2c\xc7\x04\x8f\x26\x48\x0d\x2b\x2f\xbf\x9b\x26\xad\x5e\x
13\xf5\xd5\x02\xe9\x7a\xd8\x38\xdf\x42\x64\x58\xb3\x5e\x5d\x27\x21\x95\xa7\x0c\xdc\x50\xf1\x61\x15\x65\xfc\xd9\x08\xbb\x69\x47\xf2\x1d\x54\x45\x52\x7d\x4e\x26\xe9\x82\x8a\x0d\x71\x3f\x34\xe2\x6d\x9a\x37\x6d\x0c\x12\x0f\x04\xb9\x1a\x0a\x0f\x54\x15\x4f\xff\xa2\xad\x19\xec\x33\x74\x51\x41\xde\x76\x82\x96\xc0\xcd\xbb\x87\x9c\x2e\xe7\x85\xe8\xaa\x94\x3b\x60\xa3\xec\xfb\x97\x9c\x2b\xdb\xc6\x97\x2f\x39\x07\xa8\x10\x4b\x58\xbe\xa6\xc6\x42\x66\x3d\xe8\xbe\xc8\x3b\x65\xfe\xb6\x30\xb2\x10\x89\x09\x15\x7d\xc0\xfb\xa0\x67\x14\x99\x78\x92\x76\xca\xad\x32\x3b\xf4\x5d\x0e\x53\xb5\x70\xbf\xdf\xf1\xfe\xc3\x85\xca\x4f\x54\x15\x9a\xac\x17\x3d\xa3\x49\x5a\x20\x03\x4a\x61\x63\xab\xe5\x03\xc9\xd7\x29\xe0\xf5\x00\x5c\x5d\x37\x0a\x73\xad\xd0\x98\x63\x61\x0d\xf5\xff\x35\x66\x65\x43\x0f\xe6\x1e\xae\x26\xe7\x45\x4c\x70\xac\xee\xbb\xc7\xbc\x59\xf3\x42\xa1\xc6\x04\x3c\x97\xd7\x29\x4a\x07\x85\xb4\xdc\xcb\x42\x79\x56\x6a\x22\xd4\x43\x95\xf3\xe0\x0f\xf6\xef\xa9\x68\xff\x52\x2c\xca\x27\xc8\x22\x73\xd3\x70\x5d\x6f\x9b\x09\x02\x70\x32\x8b\xda\xfa\xb1\x45\x8d\xb6\x2b\x08\x0a\xd8\x3b\x6c\xed\xf2\xb0\xf5\x90\xd3\xde\x5e\xb5\x5e\x70\x6f\xb1\x8c\xf3\xc6\xa8\x94\xd4\x6a\x73\x84\xca\x66\xf9\x77\x8e\xb0\x99\xaa\xac\xb9\x8d\x24\x4f\xe2\x25\xf9\x64\x96\x8b\x38\x5f\x08\xb3\xf3\x56\x4c\xfe\x97\x6a\xde\xf5\x56\x48\x3e\xbe\xc1\xe7\x0b\x6a\xeb\xae\x7e\x9b\xb7\xc2\xec\xbc\x5e\xf5\x52\xa0\x74\x5b\x25\x2a\xdf\x37\xd5\xd8\x22\x83\xcb\xee\x9c\x77\x54\x21\x16\x1b\x09\xb3\xdb\x96\x6f\xae\xaf\x5f\x74\xd8\xfe\xf5\x6c\x55\xaa\x90\x9f\x70\xbc\x1f\x71\x4e\x86\x91\x50\x34\xb6\x52\x45\xca\x13\x0c\x82\xa7\xc0\x74\x48\xcf\x58\x2f\x3d\x33\xc0\xf3\x0a\xd0\xd2\x44\xee\x16\xb7\x29\xa5\xcd\xe7\x27\x2e\xfa\xdc\x13\x17\xf3\xa3\xde\x24\xf7\x2e\xae\x67\xf2\x9f\x38\x5f\xcf\x3c\x9f\x2f\x76\xba\x51\xd8\x0c\xf3\x88\xa4\x58\xaa\x81\x90\xf0\x05\x1e\xae\xf3\x5e\xac\x23\xbd\x2d\x09\xff\x9f\x0f\x6e\x83\xc9\x76\x64\x98\xe9\x11\x0c\x36\x7f\x3c\xe6\xdd\xbe\xc5\x7d\xfb\xf0\x3e\xd4\x9d\x42\x0b\x6e\x48\x74\xcb\x1b\x0e\xb0\x77\x3b\xca\x2c\xf0\x26\xc7\xbb\xae\x32\x3c\x63\xfb\xc2\x4d\x7b\xc1\x6d\xec\x24\x5b\x78\x18\xb0\x2f\x43\x7b\x61\x6f\x7f\xda\xc5\xfe\x94\x1a\xfb\xd3\xfa\xee\x4c\x08\x87\x34\x53\x41\x41\x79\x5a\x15\xe8\xb1\xb7\x69\xec\x7a\xd3\x78\xf9\xf6\x9b\xc6\x19\xf7\x54\xfd\x11\xb8\x0c\x06\xac\x31\xb8\xf6\x18\xfb\xc1\xa7\xb3\xeb\xcd\xed\x43\xa4\x39\x9e\xdf\x04\xda\x5b\x8a\xdf\x2b\x61\x3b\x0e\xe3\xb6\xa1\x71\xb8\x7f\xf4\xef\xbc\x9b\xb7\x7c\xc2\x3e\x7e\x18\x85\x1b\xd4\xb8\x07\xe8\xcf\x4b\x0e\xcb\xc2\x76\x2c\xd2\x33\x41\xc7\x06\xed\x7a\xe8\x3b\xd9\x5f\x39\x4c\x3d\xe7\xfe\x89\xe3\x8e\x36\xfa\xb9\xf0\x3e\xe2\x28\xdf\xb8\xfe\x4c\x10\xf3\x8b\xc7\xa6\x6e\xb0\x3e\x95\x61\xb5\xb4\x23\x5d\xa1\x72\x85\x31\x0f\xb8\x37\x7f\x62\x79\x75\xf1\xe4\xe2\xfc\xdc\xea\x09\xbe\x7c\xe2\x05\xe7\x4e\xac\xac\x7a\x7c\xe9\xc4\x69\xde\x88\x92\xe6\x79\x5f\x21\x6f\xa5\x8a\x74\x3b\xc8\xf0\xc0\x99\xa4\xfc\x45\x73\xa7\x4f\x61\x52\x92\x5a\x1e\x05\x52\x47\xd4\x27\xac\x6b\x05\x02\xe6\x5b\x23\xbd\xa3\x20\xd0\xb7\x7c\x07\x33\xba\xc4\x7d\xe5\x77\x78\x7f\x7d\x75\xf1\xdb\x08\xc1\x31\xc2\x04\xe4\x22\x80\x47\x34\xa0\x1c\x20\x3a\x45\x98\x89\x04\xe7\x78\xc6\xe4\x6c\x38\xbc\x19\xe4\xcd\x8d\x42\x98\xc9\x03\xc4\xd0\xb1\x44\x4c\xd8\xf5\x30\xca\xa5\xd4\x8f\xcd\x44\x73\xcc\x4d\x0b\xb8\x27\x77\x41\xbf\xa8\xde\x4d\x67\xe6\x4e\x9f\xf0\xf0\x21\x95\x4e\xef\x33\x76\xa7\x88\xa2\x3a\xf2\x62\x18\xc8\x5d\xf8\x1a\x58\xb1\x66\x19\x9f\xf6\xb9\x57\xf4\x8d\x5c\x6f\xf2\x57\x61\x1f\xac\xa3\x40\xf6\x66\x15\xf3\x3d\x09\x68\x73\xf2\x52\x22\x1d\x12\x75\x2b\xc7\x95\xa1\x65\x81\x0a\x61\x17\xeb\x33\xce\x97\xcd\xee\x80\x56\x62\xd5\x30\xa3\x5f\x40\xf4\x53\
x2f\x4f\xea\x41\xb7\x9b\x26\x17\x50\xb5\x80\x52\x9a\x5a\xad\xa9\x63\xc6\x61\x3a\xa9\xbe\x0e\x95\xd4\x4a\x88\xd7\xcc\x52\x9a\x91\x1e\x2f\xde\x92\xb3\x71\x48\x41\x3e\xe3\x47\x76\xd6\x23\x30\xa3\x22\xb1\x93\x9e\xa1\x27\x33\x85\x3d\xba\x8b\xae\xa1\x66\x0e\xf4\x8b\x6a\x2d\x5e\xdb\x79\x7b\x71\xe6\x5e\xbe\xce\x9b\xa9\xec\xbc\x48\xe4\x75\x38\x28\xcb\xa2\xa8\xb7\xe8\xf7\x0e\xba\x0b\xba\x85\xaf\x9e\x5a\xd1\x9a\x42\x36\x49\x36\x26\xbb\xe7\xa0\x1d\xcd\x24\x8e\x89\x0c\x17\x40\xff\x45\xd4\xbf\xec\x93\xed\xb2\xf6\x19\x83\xed\x8f\x6c\x72\x25\x6a\xf8\x20\xd7\x3b\x22\xed\x86\xad\xa4\x99\x1d\x4e\x55\x84\xfb\xe1\x00\x82\x88\xea\x72\x46\xc5\xf0\xff\xf7\x1e\x36\x3a\xb4\x4e\x75\xa9\x2b\xc9\x73\xf8\x5a\x43\x0e\x92\x20\x60\x0c\xad\xf8\xbc\x90\x27\x28\x82\x82\x28\x03\xf5\xbb\x50\xb9\x39\xe6\x2c\x43\xbf\x91\x8a\x81\x22\x64\x35\x05\x30\xd9\x30\xc3\xfd\x19\xb6\x3d\xc0\xa6\x84\xeb\xe3\xf3\x73\xbc\xd1\x8b\x5b\x91\xc8\x26\xa0\x81\xfa\x41\xd1\xc2\x05\xb7\xa4\x61\xd2\x33\x0c\x8e\xca\x66\xa1\x93\x75\xda\x0e\xc4\xaa\x07\xf2\x45\xe5\xcd\xa6\x07\x79\x98\x15\xe2\x18\x27\xa0\x72\x36\x4f\x16\xe8\xeb\x93\x1c\x53\x7f\x10\x40\x1b\xb7\xad\xc2\xd9\xa1\x8c\x51\x6a\xd3\x1a\x0f\xe3\x66\xd4\x03\x30\x16\x15\x70\x49\x5f\x9b\x8b\x72\xec\x1f\x18\x26\xac\x89\x9a\x8b\xa0\x1d\x4e\x6a\x7a\x2a\x28\x38\x89\xb5\xf7\x1d\x95\x9d\x89\xed\x9a\x55\x90\xbd\x95\x1a\x76\x74\x27\x0d\x3b\x2f\xfa\xbc\x97\x05\x6d\x91\xf1\xc3\x58\x94\xdc\x7c\x8b\xab\x3e\xe3\xc7\xe4\x21\xac\x1b\x62\x80\x98\x39\x59\x78\x14\xae\x13\x5b\x82\x6a\x35\xe2\x5f\xae\x03\xb3\x86\x0a\x4b\xa5\x0d\x4f\xc7\x99\xc9\x69\xaa\x40\xb1\x5a\x9d\x30\xf6\x19\x3f\x0e\x3b\xb9\x19\x78\x6b\xef\x7c\x73\xf6\xa2\x37\xfa\xd2\x3a\xd4\xbd\x87\x70\xad\xde\xea\x78\xaf\x76\x7a\x61\xcb\x56\x70\xe4\x05\x4a\xf2\x82\xa0\x5e\xdc\x83\x88\x83\x50\x5e\x1d\xba\xc1\xfa\x7c\x29\x31\x68\xa2\x73\x1b\x82\x3e\x89\xb1\x14\xc5\x1d\x16\x76\x3a\xbd\x9c\xa0\x6d\x0d\x90\xa9\xfd\x6c\x3f\x76\xa9\xfb\x0f\xfb\xbc\x07\xf7\x51\xa7\x9b\x0e\x2f\xca\xe2\x33\x86\xa4\x50\x1b\x68\xc2\x91\xf0\x30\x7a\xc3\x67\xcc\x92\x54\x52\xea\x55\xee\x2b\xfd\x2e\xd2\x96\xa9\x42\x67\xb9\x07\x94\x28\x41\x04\x43\x04\x50\xc1\xf2\x70\x2d\x3f\x2f\xe2\x66\xd8\xdd\x10\x69\x47\x6e\xe4\x93\xdc\xa3\x02\xa5\xb8\xf0\xaa\x3e\x58\x2d\x9a\x2b\xbe\x58\x55\x78\x65\x2d\x54\x30\x33\x7d\xf1\x0e\x20\xa7\x22\xd6\x42\xd4\x43\x3c\x25\x36\x87\x15\x41\x18\x05\xbc\x09\xe9\xdf\xf8\x35\x56\x5d\x05\x79\x2d\x68\xa7\x42\xe8\x3a\x49\x95\xd1\x7e\x8a\x71\x4f\x36\x0f\xbe\x01\xc5\xa7\x91\xfe\x5b\x3d\x09\xb4\x8d\xf0\xbe\x28\x5d\x08\xe2\x3e\x14\x61\xb6\xab\xd4\xb1\xf0\x85\xa4\xa5\x55\x62\x28\x18\xa8\xbd\xba\x69\x92\x63\x10\x08\x74\xcd\xe1\x4e\xd8\x11\xf0\x78\xd8\xcd\x44\x53\x6e\x72\x64\x2e\x91\xb7\xf1\x5a\xde\x8b\x63\x11\x15\xbf\xe5\x8c\x87\x57\xe4\x7a\xcd\xf2\xa0\xd3\xa5\x2f\x24\xcd\xac\x6b\x7e\xb1\x13\x36\xd3\x24\x4b\xd6\x73\x9e\xb5\x9b\x60\x6c\x11\x79\xd6\x0c\xba\x02\x7e\x0f\x07\xad\xd8\xa1\xd6\xfc\xc3\x8e\x01\xe5\xf6\x2e\xc7\xfb\x3e\x47\xfd\xb2\x17\xab\x09\x5d\xfa\x38\xac\xd6\x5f\x72\x14\xca\xdf\x7f\x75\xbc\x37\x39\xf0\x67\x51\x41\xfc\xa9\x99\x32\xb3\xc7\xb8\xa2\x0f\x03\x3c\xf0\x0f\x1d\x8d\x1e\xf8\x79\xc7\x7b\xb3\x43\x90\x81\xba\x25\xe8\xbb\xe9\x08\x70\x53\x6f\x84\xdd\xc7\xbc\xcb\x77\x39\x9d\xbe\x6a\x07\x59\x69\xfb\xaa\xb6\x5d\xa2\x55\x15\xd3\xcc\x0e\x7a\x37\x54\xdd\xb0\x8d\x75\xe5\x27\x88\x38\xb4\xd2\x42\xf7\x9a\xb1\x3d\xb3\xca\x6e\x03\x9f\x7e\x4a\x9b\x38\x7f\xcc\xf1\x84\x42\x69\x30\x8c\xcf\x84\x06\x53\x59\x3b\xfb\xe0\x00\x9a\x6e\x33\x89\x9b\xa2\x9b\x67\x87\xb3\x3c\x49\x83\xb6\x38\x5c\x14\x55\xa7\xa2\x76\x1e\x40\x35\x6c\x3e\xed\x59\x3f\x77\x61\xfd\xfc\x4d\x33\x6c\xfb\xd3\xbb\x4c\x6c\x7b\x95\xf3\x38
\x65\xb6\x19\x8d\x7b\xc8\x79\xf1\xf6\xa6\xc1\x1b\xdc\xeb\xb4\x69\xd0\xf3\xb4\x45\xb0\x4a\x20\x95\x9d\x48\x1f\xdc\xc7\x6e\xa7\x5e\xc1\xe3\x6b\xbb\x9d\x8a\x36\xe0\xc8\x9a\xb1\x58\xa9\x68\x87\x10\xc7\x8a\x9e\x43\x8c\xfa\x9b\x5b\x5a\x5c\x41\x87\xf3\x7c\x12\xa3\xf9\xcb\xfd\xfc\xa8\x77\xa6\xe2\x7a\x89\x48\xa6\x48\xd6\x06\xc2\xed\x15\xe5\xb6\xce\x09\x63\x36\x6c\xf6\xa2\x20\xc5\xb4\x6c\x1d\x83\x45\x3e\x4e\x9b\x30\x66\x84\xbd\x90\x1d\xe8\x88\x4c\xea\xb4\xee\x69\xef\x79\xb7\xf5\x3a\x41\x5c\xd7\x71\x17\x74\x47\x59\xcc\xe4\x62\xd1\x67\x5b\x08\xe5\xc6\x4c\xe3\x34\x88\xb3\x70\x00\xb7\xfc\x25\x6c\x7f\x2a\x82\x2c\x89\xdd\x55\xef\xd6\x73\x80\x04\x3b\xc9\x93\x58\xd4\x37\x93\xb4\x35\x59\x4c\x76\x8e\x4f\x19\xf1\x1d\xd4\xea\x43\xd9\x96\xe5\xdf\xa5\xdd\xb6\x4b\xde\x7c\x91\x7c\xa7\x3a\xa8\xa7\xf7\x7e\x5d\x20\xac\x3b\xb9\xe8\x56\x81\x06\x0c\xd8\x98\x26\x15\xdd\xac\x55\xf6\x75\x14\x86\x75\xd8\xf3\x56\x29\x08\x2b\x57\xe4\xc6\x03\xa5\x9a\x2f\xbe\x95\xd2\xbc\x57\x75\x9d\x21\xcd\xfb\x15\x8f\x3c\xcd\xfb\x79\xa7\xac\x6c\x6e\xfd\x59\xa3\x5b\x04\x01\x46\x26\xb1\x6e\x3a\x40\x64\x41\xc4\x9b\xcf\x3e\x5e\x2b\x27\x20\xd9\x29\x56\x67\x92\x58\x45\xfd\xcd\x69\xc5\xc9\x7d\xc8\xf1\x5e\x51\x79\x87\xe3\x89\x9a\xe6\xa2\x55\x9a\xa9\x78\x15\x86\x8f\x75\x38\x38\xc6\xf5\x32\xae\xab\xde\x17\xe7\xa8\x0c\x30\xae\x10\xb8\xb3\x35\x4f\x13\x76\x3d\x1b\xed\x06\xf9\x86\x3b\xe5\x5d\xab\xe2\x98\xe4\x9b\xe7\x96\x4f\x61\x04\x13\x0d\x0a\x95\x6c\x8e\xc8\x0c\x1b\xbd\x20\xd2\x86\xfb\x6c\xef\xe9\x77\x88\xb4\x31\xe0\x8c\xb9\x6d\x75\x75\x49\x2e\xea\x86\xf9\xd2\x6f\x8c\xb2\xeb\xb6\xcc\x4a\x13\xd1\xfa\x90\xcc\x34\xf7\x07\x46\xbd\x0f\x39\x5b\x3c\x80\xdb\xd1\x40\x14\x1c\x84\x9d\x88\x4c\x6f\x14\x3e\xe7\x27\x2e\x06\xcd\x3c\xea\x2b\x86\x39\x3d\x14\x66\x6d\x8c\x71\x91\x7a\x84\x39\x62\x43\x1e\x53\xe4\xdc\x99\xc8\xad\x4e\xfe\x7f\x6a\xec\x01\x87\xfd\x9b\xb8\x72\x36\xbc\xc5\x61\x37\xef\x3c\x69\xaf\x72\xde\x78\xa7\xab\xa7\x53\x21\xdb\xcc\x54\x11\x8c\xcb\xb1\xa6\x8d\xdd\x3f\xb2\xb2\x6e\x3a\x58\xd3\xb7\x39\xec\x39\x3b\xaf\x69\x45\x35\x57\xb6\xeb\xc0\xad\x2a\x3c\xac\xb2\xff\x3c\x66\xc1\x75\x90\x06\x04\x9e\xa2\x95\xc5\x33\x49\x0b\xf5\xe0\xdf\x1b\xf3\x0e\x1b\xbf\xab\xf2\x0c\xe8\xf6\xd6\xfe\xe9\x8f\xed\xa1\x2f\xec\x5a\xfb\x6d\x2a\xe5\xf7\x2e\xef\x3b\x2b\xdd\xfb\x34\x12\xa6\xca\x7a\x98\xd5\xd9\xb3\x87\x4e\xbe\xc1\x41\xdf\x53\x56\x77\xa1\xac\xfe\xaa\xa9\xac\x7e\x72\x97\xca\xea\x7f\xa8\xd6\x55\x1f\x3b\x10\x86\x97\x6e\xaf\xa7\x3e\xc7\x9d\xd5\x7a\xaa\x9a\x4b\xa5\x80\x27\x43\x78\x94\x55\xd5\x4f\x5d\xc1\xee\xd8\x1d\x75\x63\x39\x41\x20\x56\x11\xa2\x7f\xc2\xbc\xd3\xc3\x6e\x1a\xd2\x92\x08\xae\x94\xa4\x28\x48\x05\x54\x88\x33\x86\xd0\xce\x2f\xfb\x97\x9c\x31\x95\xf6\x65\xc9\xb5\x6f\x1f\x64\xff\x69\x94\xfd\xdb\xa6\x2e\x1d\x77\x55\x12\x6a\x99\xfb\xcf\x23\xde\x57\x47\x86\xdd\x55\xa1\xe3\x29\x46\xfa\x6b\xf0\x22\x0d\x73\xbf\x36\x5f\x7a\x73\xad\xa8\x99\x5c\x23\x77\x8a\xc6\x46\x92\x9c\xe7\xe2\x62\x17\x44\x2f\x78\x50\x0c\x63\x0a\xe4\x68\x81\x9f\x6b\x43\xf0\xf5\x30\xcd\xf2\x72\x84\x2c\x7c\x93\x22\xaa\x35\xe6\x6a\x06\x48\xe5\xb1\x41\x23\xab\xbf\x5a\xa2\x1b\x0d\x33\x2c\x01\x98\x48\xcd\x20\xad\xa2\x12\x93\xbc\x68\x3e\x56\x68\x3d\x08\xa3\x42\xb7\xb6\xa3\xb5\xe1\xcb\x81\x3e\xd2\xb7\x74\x13\xed\xcc\x73\xc3\x12\xae\x1c\x20\xaa\x86\x52\x7a\xb7\x12\x91\x81\x67\x80\x14\x43\x60\xa1\xd6\x4f\xa0\xcf\x5a\x69\x7a\x4b\x8b\x24\x97\x26\x79\x33\x88\x22\xbd\x39\x6c\xd2\x87\x75\x95\x7d\xbe\x60\x70\xec\xad\xdd\xed\xd1\x2c\xf4\xee\x59\xdb\x8a\x3f\xe3\xff\x1e\x61\x7a\xea\xb8\x9f\x1d\xf1\x1e\x1c\xd1\xf9\x83\x45\x2b\xe4\x44\x2c\xf5\x84\x4e\x04\x91\x53\x13\x30\xe6\x90\xb2\x51\x35\xc3\xe7\x73\x96\xef\x07\x2c\xdd\xbc\xce\xd7\xc
e\x24\xb1\x58\x9b\x85\xa9\xa0\xde\x46\xd3\xb2\xc9\x20\x57\x6c\xc4\x98\xcc\x07\x3b\x26\xe0\xc0\x27\xbd\x26\xf2\xa0\x23\x2f\x07\x86\x06\xd0\x6c\x19\x18\xac\x3a\x5f\xa3\x01\x5a\x9b\x35\xba\x92\x30\x5d\x83\x28\x22\x00\x7a\xa1\xb8\x01\x55\xaf\xe6\x09\x6f\x25\xea\xf8\xa0\x43\xd4\xe7\x2a\xe9\x51\x18\x97\x0b\x85\x68\x58\xb4\x83\x15\x13\x2e\x28\xa0\x9b\x80\xf4\x71\x7e\xfa\x8a\x14\x98\xce\x51\x27\x91\x95\x17\xa3\xfd\xd6\xf1\x88\x25\x1b\x0d\xcf\x1a\x9f\xa7\xba\xcd\xc3\xc6\x33\x0f\xf3\x8d\x5e\xca\x84\x9d\xae\xf9\xda\x1a\x7b\x62\xc5\xd3\xee\x5f\x38\xec\xa5\x97\x93\x9a\xf6\xce\xc1\x6f\x78\xdf\xef\x54\xd5\x33\x54\x34\x36\x18\xd8\xaf\x89\x49\x95\x84\x83\xa1\x30\x66\x75\xb8\x5e\xa4\xb1\x86\x59\x31\x8a\x46\x32\x2d\x78\x0f\xd7\xd4\x53\x6b\x00\x05\x2c\xc0\xf3\x5d\x3c\xcd\xfe\xf2\x2a\xe6\x55\xd9\xc0\x92\x96\x62\x43\x59\x15\x69\xc7\xfd\x6f\x57\x79\x5f\xae\x2d\x90\x86\xa7\x9d\x57\x48\x55\x16\xcb\x6d\xbb\xcf\xf3\x8d\x24\x23\xe2\x0a\xa5\x55\x44\x26\x37\x5f\x91\x5c\x45\x0b\xb4\x80\xfe\x85\x54\xa7\xf1\x6c\x62\xc2\xa0\xe2\xee\x26\x2d\x8b\x5d\xb1\x1e\x25\x98\xfe\x3a\xae\xd8\x17\x26\x94\xfb\xd0\xba\x19\xe7\x61\xbd\x78\x62\x33\xcc\x37\x26\x65\x4f\xa4\x56\x19\x61\x41\xde\x1e\x64\x1a\x56\x0e\x80\xa3\x81\x7e\x63\x13\xda\x62\x11\xb8\x21\x35\x20\x24\xb4\x9c\x17\x7d\xfe\x9c\x3c\xe9\x26\x51\xd2\xee\xdf\x2e\xfa\xcf\x35\xe8\x3a\x82\x9c\x08\xbe\xb1\x20\xc0\x51\x03\x4a\x6a\x68\x90\xe2\x29\x31\xba\x2f\xd4\xdf\xbf\xe4\x5c\x61\x14\x6a\xed\x52\xbf\xca\xd8\x9b\x1c\x76\x95\xd5\x9f\xee\xfd\x97\x81\x2f\xf1\x86\x39\x6a\xd8\xcb\x7b\x22\xed\xf3\x04\xdc\x55\xaa\x7e\x5a\x92\x4d\xea\xcd\xa2\x19\x64\x48\x38\xe3\xb3\xbf\x1d\x61\xd7\xe8\xc1\xd3\x95\xfa\xf2\xc8\x65\xa8\xd5\xa5\x91\x8a\x6a\x19\x1d\xa7\x3f\xab\xb9\xdb\x05\xcf\x45\xda\xa1\xec\x2b\xd4\x31\x57\xd5\xc5\xc1\xac\xac\x5e\x6c\x9c\x91\x8d\xc2\x90\xf8\x71\x90\x42\x84\x38\x75\x01\x09\x3c\x0a\x4d\x17\xad\xf1\x32\x3c\xeb\xf3\xb8\x17\x19\x0c\x92\xf2\x55\xb8\x92\xa4\x44\x10\x6f\xbc\x41\x7a\x61\x10\x67\xdc\x53\x73\xfe\x50\x56\x3c\xe1\xf9\x7c\x4e\xf1\xca\xeb\x12\xc7\x5f\x71\xff\x84\x9e\x6f\x52\x2a\x14\x05\x92\x2c\x25\x89\x9f\x21\x31\x6c\x3d\x92\xaa\xa3\x8a\x40\x83\x6d\x64\x23\x01\x4a\x16\x14\x10\xc6\x4a\x3f\x53\x1e\x4d\x4d\x2a\x5b\x10\xc8\xfa\xec\x97\x6a\x8c\x15\xdf\x74\x1f\xac\x79\xef\xa9\x99\x7d\x68\xfa\xb8\xe5\x31\xaf\xa9\xf5\x22\xfd\x14\xb1\x5a\x5d\xd6\xb1\x33\xc7\xa5\x7a\xe4\xcc\xd1\x1d\x98\xb8\xfe\x36\xc3\xa4\x07\x72\xe0\xcd\x2d\x47\x70\x0b\xbd\xe2\x5b\x35\x66\xae\x77\xf7\x4f\x6a\xde\xef\xd6\x56\x2f\xaf\xe8\xc3\x1c\x44\x29\x66\xb6\x90\xcb\x0a\x9f\x5d\x6b\x86\x45\xdb\x1f\x15\xd1\x69\x34\x7a\x27\x82\x33\xee\x17\x62\x93\x06\xb0\x24\x38\x7d\xc4\xde\xb7\x0a\x0e\x51\x7f\xac\x0a\x1c\xf9\xc8\x41\xcb\x78\xb9\x05\x72\x30\xb1\x56\x2c\xf7\x22\xe1\x7e\x6b\xcc\xfb\x2f\xce\xb0\xbb\xc6\x91\xc4\xb2\x97\x93\xbc\x52\x50\xce\x8a\x63\xc1\x20\x72\xb6\x07\x08\x27\x67\xc0\x07\xf0\x8b\x0f\xc1\xac\x28\x26\x2b\x2c\x10\x2a\x15\xac\x6f\xf0\x3a\x6f\x24\xf9\x06\x87\x13\x00\x4c\x58\x79\x30\xb7\xe1\x40\x3e\x7e\x80\xfd\xde\x08\x1b\x95\x37\xdc\xcf\x8d\x78\xdf\x3b\xa2\x7c\x70\x4a\x67\xc5\x4e\x2f\x66\x1f\x98\x5b\x81\xc5\xb8\x89\xf4\xeb\x54\x5d\x3d\x16\x5a\x9f\x4b\x7b\x91\xd4\xfe\xd1\xae\x51\x3e\x5b\x34\x93\x4e\x03\xe6\x8c\x8a\x51\x8d\x92\x76\xd8\x0c\x22\x7e\x76\x59\xa5\x81\x40\x8a\xa3\xc5\x97\x25\xa5\x0e\x0c\xac\x5c\x64\x61\x26\xdf\x9c\x2c\xbe\x65\x09\x41\x55\xfd\x71\xd5\x29\x18\x56\x84\xb1\x56\xd8\xab\xf8\xc8\x44\xc5\x47\xc8\xc0\x81\x71\x7b\x3a\x7c\xd9\x24\xb4\x0e\x73\xd1\x31\x3f\x0d\x03\x98\xe9\x11\x00\xa9\x4a\xf4\x5d\x7a\x54\x54\xed\xca\xe5\xe8\xdc\xc6\x34\xe9\x40\x07\x3d\x0c\x22\xae\x2d\xe6\xea\x92\x10\xa9\x3c\xab\xec\x83\xe1\x
\x25\x15\x9e\x6f\xd2\xdf\xaa\xb8\xb1\x2e\x18\xb8\x4a\x75\xf7\x31\xd3\xcb\x58\xa4\x20\x23\xb6\xfa\x96\x0a\x36\xc9\x36\x52\x91\x17\x9d\xb8\x47\x4e\x1b\x94\x61\xd3\x88\x2f\x9d\xc2\x88\xba\x5c\x80\xe4\x19\xa7\x6b\xcd\x8b\xde\x55\x65\x98\xaf\x09\xd8\x67\x61\x72\xd1\x1b\x8b\xef\x4b\x2f\x7a\x63\x49\x3f\x75\x28\xee\x4b\x57\xb2\xbb\xd8\xe3\x5b\x9d\xb0\x37\xd3\x2f\x3b\x27\xd5\xce\xf1\x67\x82\xa3\x1b\x1d\x21\xb9\x8e\x89\x75\x81\x4e\xe9\x47\xf8\xec\x9d\x33\x0b\x3c\xec\x97\x1d\xc9\xd2\x51\xf1\x72\xcc\x69\xef\xaf\x69\xab\xfc\x83\xb5\xe0\xa7\x6a\x55\xb3\x7c\xc5\x98\x26\xb9\xdc\x20\xeb\xf3\x8d\x10\xb6\x89\x32\xd9\x2f\xc7\x3d\xb9\x71\x4d\xe8\x0e\xe6\xab\xb8\x4d\xc5\xc5\x25\x18\xf0\x29\x4d\xf8\xe1\xca\xba\xb8\xdc\x60\x02\x15\x41\x10\x17\xad\x22\x76\xc0\xf1\x3d\x76\x35\xb0\xd1\xb0\xcc\x72\xd0\xed\x3e\xe2\x05\xef\xf3\x10\xdc\x84\x56\x61\x4e\xdd\x47\xe9\x94\xcf\xb5\xb9\xf3\x8a\x1b\xbc\x8f\xd0\xec\xf2\x3b\x10\x13\xd3\x86\x14\x81\x58\x6a\x34\x61\x2a\xb2\x7e\x91\x0c\x10\x9b\x16\xdb\x8e\xf5\x33\x37\x23\x09\x01\xc3\x0f\x93\x5b\x4f\xdc\x6c\x39\x7b\x6e\xad\x86\xb5\xdb\x46\x16\xaa\xf9\xe6\xec\xbb\x43\x4c\x92\xa0\x3f\x15\x3c\x71\x19\x5b\xc5\xaf\x19\x71\x7b\x48\xce\x4e\xd9\x35\x6e\xa7\xfd\xe7\x04\x67\xd5\xf8\x55\x1f\x31\x0b\x40\x12\x06\xa4\x8d\x87\x29\xb5\x0b\xd6\x08\xd9\x71\xd7\x06\x7c\x90\xb8\xc6\x41\x3e\x59\xb6\x7a\xae\x3a\x7d\x8c\xc9\xed\xe1\x37\x14\xe2\xe2\xb7\x63\x4b\xd4\xdd\xd3\xfd\x94\xe0\x9a\x5d\x80\xc5\x5f\xf4\xd8\x7e\x9c\xa0\xc2\x7f\xc0\x0b\x5e\xe9\x39\x6f\xe1\xf6\xe3\xa7\x63\x54\xf4\x85\xba\x20\xcf\xfa\x18\xb6\x17\xe8\x26\x00\xc1\xdf\xbb\x57\x6a\x41\x27\x70\xb3\x29\x24\x6e\x3c\xd3\xc1\x72\x82\x86\x49\xc5\xf6\x26\xcb\x41\x2f\x6e\x41\x34\xfd\xf2\xec\x02\x89\x10\x37\x1c\x3f\x04\x1c\xe1\xe8\x91\xe3\x87\xa6\x9c\x23\xda\xb5\x4f\xb3\xf3\x96\x95\xfd\xde\xe0\x99\xbb\xcb\xff\x19\xb6\xb2\xbb\x86\xf5\xe7\xdb\x19\x41\xdd\xcb\xb0\x77\xdf\x08\xbc\x86\xd2\xdf\x24\x9d\xd1\x1a\xe3\x1c\x43\x30\xa2\xde\x10\x2e\x47\x62\x3f\xe3\x31\x87\x13\xfa\xaf\xf7\x82\x1f\x1e\xb5\x46\x8f\xc5\xf2\xd8\x91\x6e\x8f\x53\x4c\x78\x49\x14\xa0\x57\x3e\x2d\x38\x3c\x9a\x05\xd3\x03\xdb\x32\xe0\xaf\x4e\x8c\x2c\x91\xa5\x13\x58\x20\xba\xed\x57\x27\x82\xef\x75\xae\xb8\x1a\x98\xc9\x76\x19\x1d\xc0\xf6\x9f\xf6\x02\xd8\x76\xad\x0e\xff\xae\x0e\x60\xfb\xa4\x17\xbc\xda\x9b\x93\x7f\x6a\x63\x6b\x75\x1d\x94\xc5\x49\x65\xc2\x24\xc0\x59\x1e\xb9\xd2\x3a\x97\x5a\xce\x4a\x77\x74\x4f\x07\xde\x0b\x5e\xb3\x83\xd7\x76\x5a\xa0\x0b\x55\x35\x87\x25\x6d\x15\xb0\xf6\xce\x09\x1b\xde\xa7\x52\xf6\x76\x1e\x7f\x2d\x00\xea\xf3\x29\x53\xa6\xfd\xbf\x1d\x08\x3e\xeb\x6d\x72\x93\x30\x51\x56\x21\xbd\xae\x17\xe6\x65\xdc\xea\x27\x61\xae\x6c\xb4\xaa\xea\xb4\x8a\x9f\x22\x34\x24\x9e\xf5\x4b\x5d\x6b\x43\x95\x37\x86\xec\x36\xe7\x33\x52\x85\x39\x08\x65\x71\x14\x80\x19\x01\x99\x50\xab\x60\x26\x80\xd7\x11\x85\x9a\x8a\x7e\x03\xfa\x34\x99\x56\x95\xc0\x2b\xd5\xd2\x06\x78\xe5\x24\x2b\x38\xdc\xbc\xc1\xe1\xcb\xaf\xd9\xcf\xde\xab\xad\x1b\xef\x1c\x0b\xbe\x54\x53\x78\x9b\x91\x28\x4a\xa8\x65\x9d\x29\xbb\x03\xa4\xd1\xf6\xcb\xb5\x0c\xf6\x20\xf6\x84\xac\x01\x92\x2d\x69\xa3\x46\x42\xa7\x43\x2b\xeb\xae\x82\x85\x01\x93\x7a\x42\x9e\x64\x6b\xf2\xa8\xe3\x67\x17\x0d\x54\x8c\xb6\x00\xa3\x43\x36\xcb\x79\x37\x2e\x0a\x48\xd9\x37\x65\x93\x55\x21\x65\x6d\x85\xe0\x93\x6a\x26\xa8\x08\x7a\x99\xc7\xca\xaa\x2e\x1f\x98\x1a\xf1\x01\xe2\x0a\x2a\x83\x8b\xf0\xca\x54\xf1\x99\x2c\x15\x30\x0a\x5d\xc6\x59\x7d\x5b\x25\xa9\xd2\xf7\x74\xdc\x9d\xbd\x1a\xaa\x7b\x76\x63\x78\x10\x5b\xcc\xd7\xe6\x91\x37\xb3\x13\xec\x86\x1d\xd6\x64\x76\x08\x43\xca\x1d\xec\x4b\x63\xac\x56\x66\xfe\xe7\xc7\x82\x97\x8d\x8d\x58\xae\xd1\x2b\xa5\xa9\x4e\x7b\x20\x4c\x01\x93\x7
e\x22\x75\x03\x3c\x4f\xec\x45\x04\xfb\xfa\xe6\xab\xa8\xd4\xa6\x2c\xdd\xc5\x7a\x3a\x1d\xdf\x62\x59\xad\xe7\x76\xb7\xba\xbb\x5d\x58\x43\xeb\x10\x35\xf1\x30\xae\xad\x10\x39\x7b\x8d\xc7\x8e\xdb\xc1\xa2\xfd\xb2\x93\xe5\x0a\x7a\x0b\x44\xde\xa4\xbd\xd4\xc7\x44\xf8\x7e\x22\x8a\x45\x21\x0f\x68\xf0\x7d\x2e\x3b\x3b\xfb\x76\x76\xd2\xae\xb9\xf7\xd4\xe0\x3a\x23\x1b\x94\x19\x17\xf2\x5c\x0d\x4b\x41\x35\xb9\xdb\x92\xc3\x8c\x74\x1b\x3f\x74\x80\xfd\xbb\x4d\xb2\x40\x40\x30\xfc\xfd\x03\xc1\x59\xf5\x43\x49\x23\x1b\x9d\x2c\x31\x32\x89\x5c\xe5\x79\xc8\x89\x41\x47\x17\x98\x37\x57\x05\x00\xc1\xac\xc5\x05\xa0\x99\x29\x94\x51\x48\x3c\x1e\x29\x41\xbe\x6d\xaf\xca\xea\x9e\x4c\x3c\x24\x13\xcf\x2b\x91\xf8\x54\x70\xb5\x62\x85\x90\x1e\x65\xef\xc8\x29\x76\x90\x7d\xcf\xf6\x12\x69\x16\xed\x09\xa3\xbb\xc2\x6a\x5f\xd8\x5e\x74\x6b\xf8\x4f\x19\x85\xaa\xac\x18\x48\xd5\xd8\xfe\xc6\x71\xf6\xe4\x11\x4b\x75\x3a\x6b\x85\x89\x63\x7a\xfd\xeb\xb1\xe0\x34\x5c\xb5\x6d\xac\x51\x9c\x8b\x56\x99\x0c\x1a\x3a\x9c\x41\x45\xb5\x03\xb7\xc1\x74\xb2\x36\x10\xc5\x60\x28\x6e\x7d\xbc\x57\x45\xfc\xfd\x91\x31\x76\xbf\xb1\x55\xfe\x83\x17\x7c\x63\x0b\xc4\x97\xb9\x92\x87\xbd\x5e\x12\x0b\xca\x7e\xdc\xd0\xc0\x18\x80\x62\x09\x48\x45\x49\xd6\x3a\xcf\x23\x81\x91\x05\x8f\x28\x44\xcc\x72\xa7\x6a\xad\x27\x38\xc0\x7e\x99\x35\x50\x20\xa0\x0f\xe3\x77\xb7\x40\x8c\x79\xb1\xc7\x60\x6a\xfc\xe7\x07\xdd\x65\x15\x98\x00\xd0\x9c\x43\xa1\xd9\x2a\xe9\x0f\x66\x83\x50\x83\x94\xb9\x84\x16\x27\xcb\x41\x46\xb0\xa7\x82\x4f\x46\x71\x71\xbe\x8e\x32\x75\x09\xa5\x3d\x9a\xcd\xa6\x6b\x06\xf9\xc3\xab\xd9\xd4\x8e\x8a\xea\x80\x18\xff\xde\xab\x83\xbf\xf6\xac\x0b\x43\x19\xb7\x70\x04\x76\xc3\x5e\x4f\x63\x3a\x00\xc0\x29\x02\x90\x86\x96\x85\x14\x66\x9e\xc6\x99\x8b\x04\xec\x99\xab\x61\xeb\xbc\x48\x23\xe5\xc2\x29\xe4\x66\x6c\x65\x5d\x00\x0d\x50\xbb\x56\x0a\x52\x08\x07\xa9\xce\x5d\x95\xb7\x0b\x2d\x82\x9c\x41\x92\x67\x9e\xf5\x09\x3d\x49\x7e\x44\x35\x5e\x4d\x01\x96\xf7\x34\x26\xb7\x35\xb4\xbb\xe4\xe2\xba\xe1\x77\xff\xf9\x4a\xf6\x55\xc6\xc6\xe5\x87\xfc\x2f\xb1\xe0\x53\xec\xce\xcc\x9c\xd4\x50\x20\x90\xdf\xa7\x8d\xab\x11\xc6\x32\xa8\x18\xa5\x50\x3b\x34\xe5\xeb\x75\xac\x79\xaa\x21\x39\x16\x6f\x9f\xe5\x47\x6f\xbc\xe1\x78\x93\xcf\x67\x14\xe6\x69\x22\x35\xe4\x62\x2a\x71\x54\x71\xb8\x40\xb6\x12\xe8\xea\x9c\xf2\xd2\xb9\xc5\x39\xbb\xd5\x38\xd5\xad\x9e\xe0\x87\x9b\x7c\x6e\xa1\xd0\xa0\x2b\x1a\x50\x76\x56\x87\x7c\x98\xe2\x6c\x7a\xf0\xc6\x79\x22\xf7\xdf\x80\x97\x19\xe3\x1c\x3e\x35\xb7\xa0\x01\xb2\x7a\xa2\xa5\x8b\x18\x86\x80\xf0\xa9\xaa\xe0\xb3\x23\x54\x4c\xee\xc4\x0a\x8f\x44\x12\x77\xe3\x12\xa3\x35\x49\x22\xed\x51\xf4\x8e\x40\xfc\x4b\x52\xbc\x2a\x3d\x64\x13\xdc\xea\xa4\xb6\x00\xe2\xd1\xac\x2a\xbb\x41\xd4\x8b\xf6\x21\x9c\xb8\xe1\x10\x50\x84\xe4\xd7\xf2\xb4\x95\x4d\x9c\x38\x76\xec\xa8\xbe\x58\x34\xd9\x6d\x54\x70\xa8\x40\xc7\x6b\xab\x03\x35\x8d\x68\x4c\xed\x3e\xa4\x4b\x6f\x42\x7c\x4a\xd5\x0c\xd7\xa4\x60\x5c\x1a\x36\x42\x80\x26\xa5\xa9\x29\x65\xc8\x48\x05\xc1\x76\x88\x60\x2c\x96\x50\xb7\xdf\x40\x9a\x45\x71\x5e\x89\xce\xab\x61\x21\x22\xc5\x04\xcc\x06\x1a\xfa\x06\x43\x72\x24\xee\x10\xf4\x72\xd1\x8a\x0b\x11\x90\xa8\x08\x7c\xd2\xa6\x49\x1b\x73\x18\x01\x68\x28\x42\x34\x2b\x87\xe9\x95\x4f\x8a\xe6\x5a\x93\x07\xed\x2c\x6b\xae\x86\x79\xb3\x95\x75\x03\xa8\x62\x1b\x6c\xc4\x49\xd4\x0a\xf3\x28\xa8\x6f\xf6\xa1\x5e\x2e\xda\xf1\x05\xe3\x1e\xa7\x8a\x7e\xea\x45\xc2\xb6\xa6\x0f\x5c\xd7\x94\x9f\x80\xe6\x91\x7a\xf4\x63\x1a\xd6\x8e\x1f\xbc\xee\x20\xaa\xed\x84\x6e\x8b\x00\xbb\x22\x69\x73\x72\xa9\x22\x7f\x38\x39\xbf\x44\x6d\xa3\xc8\x85\x0a\x08\xd2\xb3\xea\x04\xdc\x6f\xf2\x67\x67\x7d\x85\xf1\x46\x11\x03\x95\xde\x99\x4f\x60\x3f\x61\xa6\x6f\xb9\x85\x
07\xd7\x05\x53\x28\xed\x03\x7d\x28\x07\xcd\x28\x1a\x81\x57\x48\xc9\x4a\x2b\x3b\x7c\x23\x1c\xe0\x1e\xc5\xdc\x6a\xd2\xc2\xe4\xf2\xd5\x89\x41\xa2\xd3\x5b\x0d\xc2\xa8\x5e\xa4\x66\x01\xad\xc3\x42\x75\x00\x94\x16\xd4\x46\xc9\x8b\x24\xf7\x93\x4d\x36\xf9\x11\xa7\x79\x33\x42\xad\xa6\x5f\xe6\x67\xa4\x06\xa4\xbe\x04\x14\xda\x6f\xb7\xe3\x0b\x7c\x32\x17\xdd\x4c\xe3\x00\xe1\x8a\xc0\x5c\x4e\x29\x76\xa1\xa7\x18\x94\x66\xfb\x4c\x7a\x3a\x1b\x97\x5f\xf2\x6f\x63\xdf\x77\x29\xd5\xdf\xee\x5c\x5e\x5e\xa8\xee\x0a\xf6\x62\xd7\x24\x9f\xaf\x86\x2d\xfd\xc2\x62\x96\xa0\xe6\xf5\x87\x07\x82\xd7\x7a\xea\x17\x4e\x4f\x2b\x4b\x12\xf4\xa9\xc9\xfe\xca\x7b\x05\x3f\x29\x0f\xbb\x56\x48\x00\x4f\xeb\x87\x9b\x87\x9f\x2a\xff\x68\x87\xeb\x19\x56\x14\x95\x8d\xbb\x7a\x26\x09\x8a\xeb\x87\xb9\x6a\x1e\x35\x00\xa0\x94\x34\xe3\x49\x96\xae\x89\x1c\x31\xe8\xa1\x3e\x29\x36\x7c\xe4\xc8\x68\xd5\xed\x63\x57\xb0\xb6\xa5\xb9\xfd\xfb\xdd\x29\x6e\x5b\x86\xea\xec\x29\x54\xbb\x54\xa8\x9e\xab\x14\xaa\x73\xc1\xb7\x6b\x0f\x83\xf1\xf3\x00\x49\xed\xdc\xda\x3f\x44\xb8\x7b\x0a\xd6\x6e\x14\xac\x64\x7b\x05\x6b\xce\xbf\x43\x2b\x58\x9b\xee\x6c\x03\xff\x41\xdb\xdb\xd5\xbb\xb0\x56\x23\x7b\xcd\x04\xfb\xde\x2d\x22\x9d\x66\x97\xe6\xb4\x11\xe8\x4f\x0f\x04\xd3\xd6\xef\x51\xcc\x88\x6e\x2b\xbf\xd4\x68\x4e\xf1\xbe\xbd\xe2\x59\xbb\xde\xc1\x1d\xb5\x83\xef\x0d\x9e\x18\x8f\xf2\x11\xd2\x4a\xd8\xbb\xf8\x18\x3b\xb2\x85\xbb\x7d\x93\x95\xdf\xdb\xcb\xbb\xf0\xdc\x7d\xd2\xb6\x65\xfe\xea\x2e\x6d\x99\x3f\x38\xda\x94\xf9\x68\x81\x9d\x3f\xe4\x45\xdb\x33\xa6\x19\xff\x69\xca\x69\x67\x71\x8a\x51\x8c\x47\xfb\xed\x14\xdd\x11\xd3\x62\x9f\x3e\xc0\x8e\x6e\x69\x1b\x47\xbb\xf8\x0c\xd4\x8a\xb1\x0c\xe3\x6f\x3c\x10\xbc\xc3\xdb\xe4\x26\x49\xfe\xc3\xf5\x0d\xb0\xe2\x8c\xc9\xbe\xe1\xa7\x2e\x84\x2d\xa8\xc9\x8e\x70\x42\xaa\x7c\xd4\x8c\xdd\x0b\x2b\x07\x14\x73\x69\xd3\xed\x1e\xeb\x9a\x62\x3e\x6e\x88\xe7\x15\xec\x41\x8f\x7d\x6b\x6a\xb5\x60\xb2\x4d\x5f\xe7\x6d\x91\x7c\x33\x34\x2d\xf3\xa3\xda\x08\xce\x8c\xbc\x6c\x79\x35\xab\x10\x4e\x21\xc0\x63\xeb\x8d\xeb\xce\x8f\xec\xac\x9f\x0f\xf7\xf4\xf5\x5b\xd5\x42\x1e\xea\xe9\x88\x6e\x2e\x6d\x37\x81\x5b\x75\x78\xb3\xce\xde\xca\xc6\xfa\x71\xe4\x3f\x35\xb8\xee\xdc\xdc\xc9\x11\x65\x5a\x2c\xb5\x02\x90\x38\x8a\x4a\x6c\xe8\xcf\x79\x6c\x5c\x5e\xf4\xdf\xe1\x05\x6f\xf0\xce\x15\x26\x9f\x13\x30\x4a\x07\x59\xff\xa0\xd4\xa9\xe9\xf5\x36\x95\xc6\x18\x64\x7d\x0d\x81\x18\xc8\x97\x02\xbe\xda\x2f\xc1\x70\x10\xdc\x21\x29\xbe\x08\x48\xad\x89\x0b\x0c\xbf\x2e\x45\xde\xcb\x05\x45\x7f\x06\xcf\x02\x57\x72\x9b\xc3\xf7\x36\x84\xb2\x39\xf0\xae\xe8\xae\x8a\x1c\x4f\xb9\x01\xee\x1d\x87\xd5\x7c\xdc\x63\xfb\xc4\x85\x32\x0f\xfd\x0f\x7b\xc1\x6b\xbd\x53\xf2\xcf\x6a\xc9\x1f\xd5\xf7\xe6\x5c\xda\xce\x9a\x77\x88\x12\x9e\x9a\x9c\x92\xbc\xa4\x93\x45\x86\xef\x5a\x71\x44\x50\x75\x64\x29\x4e\x5b\x06\x25\x04\xcb\x0f\xe8\x48\x5b\x5c\x31\x39\x3b\x25\x4f\x85\x88\x28\xe5\x58\x49\x05\x80\xab\xfe\xa0\xf7\x44\xf6\x1d\x9b\x47\xa0\xd9\x19\xd2\xf3\xec\x0a\x1c\x9d\x7f\x32\x98\xc4\x19\x73\xaa\xb3\x16\xa3\x66\x7e\x8b\xe0\xb6\x17\x8d\x8d\x04\x38\x83\x12\x7f\x4b\x22\x8f\x45\xe1\xff\x51\x2d\xb8\xcf\xfa\xad\x5c\x8b\x0e\xcd\x00\xb2\x0c\xdc\xd5\x65\x2a\xeb\x3c\x6e\x8a\xa6\x3c\xb2\xe0\xd0\x0b\x4b\xbe\x11\x16\xd3\x71\xc1\x3b\x52\xfb\x4f\x09\x75\xae\x8c\xd3\x3e\x84\x55\x02\xb9\x16\x59\x57\x20\x88\xba\xc3\x08\xbe\xec\xb1\x15\xb6\x0f\xa2\xc2\xfd\x67\xa9\x90\xc3\xdb\xe6\xfb\x6a\xcd\xb3\x16\x66\x40\xb5\xec\xc4\x2b\xec\x4e\xbf\x67\x44\x18\xd4\x7e\xf3\x72\x55\x84\x98\x95\xe9\x04\x27\x5e\x60\xd7\xca\x47\xce\xae\xa2\x3e\x07\x89\xa1\xd1\xe6\x3b\x77\x8b\x33\xe9\x4c\xdc\xca\x33\x48\x0e\x7d\xb2\xfc\x57\x71\x53\xe8\x80\xe9\x2a\xcf\xe8\x4b\xec\xf3\xae\x25\
x77\x44\x79\x40\x52\x8e\x81\x93\xff\xcc\xd5\xc1\x4d\xd6\x6f\x6b\xf3\xdb\xe6\x28\xbd\x11\x37\xe2\x02\x2c\x02\x19\x56\xab\x70\x27\xf6\x8b\x57\xb1\x5f\xae\xb1\xfd\x64\x61\xf5\xdf\x53\x63\x27\x36\xe5\x54\x9b\x77\xeb\x36\x7c\x3d\xf8\x5b\x6f\xc6\x00\x49\x93\xd1\xb6\x15\xf6\x10\x6d\xb5\x4d\xc6\x61\xc7\x2c\x47\xa8\xd0\xe9\x41\x15\xc9\x21\x77\x2f\xba\xe2\x67\x6c\x7f\x73\xd6\xe6\x07\xa9\x45\xac\x88\x00\xe6\xea\x83\xe6\xe4\x30\x21\xc6\xa3\xf3\xde\xc1\xd0\x9f\x24\xd9\x06\xae\x44\x16\x46\xab\x61\x12\xa6\x2d\x91\x73\x83\x37\x20\x5b\xa6\xfe\xcb\x5e\x82\x93\x1f\x6a\x29\x12\xc3\x0a\xf9\x5a\x92\xad\x86\x3a\x9c\xbe\xc9\xde\xb7\x8f\x5d\x1b\x5b\xc5\xda\x21\x04\xf9\x6d\xfb\x82\x1f\xdb\x37\x57\xb9\x3a\x2a\xd9\xdd\x7e\x46\x23\x3a\x6b\xa4\x7d\x30\xa2\x59\x36\x6f\xe7\x69\xa5\x02\x18\xe0\x69\x1a\x02\x98\x27\x4c\xda\x66\x69\x81\x69\xd2\xdc\xe4\xa2\x97\x10\x24\xb7\xe0\x91\x31\x8c\xac\xb8\x51\x70\x34\xac\x26\xc0\xf3\xae\x70\x90\x20\x29\xce\xe1\xf6\x2c\x87\xf5\xdd\x08\xf3\x08\x42\x5c\x7a\x61\x19\xaf\xc6\x49\x5c\x0e\xea\xca\xc9\x03\x05\x11\x74\x25\x2c\xd8\x8f\x25\x24\xd4\xa8\x25\xa3\x7a\x66\xb9\x68\x89\x08\xb7\xc3\xba\x16\x70\xe5\xea\xe1\xf8\xad\x91\x49\x29\x58\x74\x63\xc9\xed\x37\xc2\x1c\xf8\x07\x19\xb7\x0c\x4a\x82\xf5\x49\x30\x07\x46\x71\x1b\xe2\x62\x4b\xad\x8d\xcc\x39\xe0\xc4\x06\x85\x78\x66\x61\x4e\x69\x43\xf1\x5a\x2a\x65\x46\x9a\x6f\x61\x8a\xbc\x29\x08\x71\x8b\xda\x66\x52\x77\x5d\xf4\x69\xab\x52\x61\xc3\xfc\x3c\x1e\x5d\x44\x33\xca\xd8\x4a\xc6\x5e\x05\xf6\x55\x88\x12\x44\x30\xdb\x3d\xa5\x63\x53\x68\x3e\x34\x94\xa9\xc5\x78\xeb\x2e\x24\x90\x4b\x23\x59\xab\xaf\x47\xda\xac\xc2\x00\xc2\x0e\xf2\x3f\xe3\x05\x2f\xb1\x60\x00\xc1\x42\x88\xae\x20\x0b\x86\x0c\x50\x10\x1c\xfb\x38\x9c\xe5\x8e\x29\x3c\xcb\x79\x9a\x39\x91\x2d\x75\xc7\x16\x0e\x14\x90\xea\x73\xb1\xc2\x26\x9c\x13\xea\x26\x76\xe3\x16\xd0\x61\x9b\xb3\xa1\xc5\x7e\x22\xd8\xcb\xc6\xd8\x58\x99\x14\xfe\xff\xac\x05\xef\xab\x2d\x9f\x5e\xaa\x82\x44\xbb\x2e\x09\xc5\x2d\xc1\xb6\x4c\x3e\xc6\xc2\x18\x99\xe5\xfb\xf2\x52\x9d\x1f\x3b\x76\x14\xc6\xac\x53\x9b\x51\xe0\x30\xf4\x03\x33\xa8\x18\x85\x21\x3b\x39\xa1\x05\x48\x34\x03\x63\x63\xa6\x36\x2e\x58\xce\x01\x30\xb7\x67\x79\x09\xc9\x4f\x79\x44\xb0\xff\xca\xf1\x80\xe5\x00\xac\x02\x03\x79\xd6\x5f\x43\x07\xd8\xd2\xfc\x1c\x74\x53\x4f\x4b\x5d\xed\x0a\xda\xc0\xf6\x16\x6a\xf7\x93\x76\x8c\xc8\x6a\xf6\x13\x7a\xe0\x4b\xf3\x73\xcd\x9d\xe3\x31\x6e\xbe\x12\xcb\xa7\x97\xd8\x03\x13\x0e\x3a\xde\x16\x31\x3f\x60\x3d\xf9\x7f\x0f\x04\xd7\x0f\x5d\xad\xc4\x57\xdb\xb7\xb7\xb6\xa1\xfc\xfa\x9e\x0d\x65\xd7\x36\x94\x5c\xd9\x50\xe2\x20\x18\x61\x05\x55\x53\x45\xcb\x60\x93\xcd\x0d\xec\x38\x3b\x76\x39\xd1\x5f\x7b\xd6\x94\x5d\x58\x53\x3e\x65\x5b\x53\x7e\x6d\x97\xd6\x94\x1f\x7a\x8c\x02\xc3\x8c\x39\xa5\xb3\xbd\x39\xe5\x94\x3f\x6b\x80\x9d\x0d\x55\x55\xec\xbb\x43\x5c\xa5\x1a\x60\xf3\x79\x8f\xdd\x72\xb9\xd5\xbb\x9b\x4f\x5f\x3a\x3b\xef\xbf\xce\x0b\x7e\xd4\x83\x32\xa1\x4e\x76\xe3\x00\x53\x4b\xb1\x80\xe8\x3a\xba\x93\x97\xc1\x75\x6d\xa0\x1b\x2c\xc0\x86\xd5\x2c\x4b\xea\x1c\x52\x57\xeb\xbc\x9d\x64\x21\xfc\xa1\x8a\x77\xdd\x7d\x8f\x4e\x36\xfb\x81\x17\xd6\x79\x37\xec\xdd\x8d\xb7\xec\xeb\xc0\x4c\xd2\x38\x69\xb2\xf7\x5f\x61\x17\x85\xda\x6a\xc9\xed\x12\xd1\xfe\xff\xdc\x17\x7c\x4a\xca\x03\x95\xaa\xc5\xb0\xf1\xe1\xda\x7d\x7d\x91\x0f\x50\x56\x0b\x55\x9c\xb7\x66\x1f\x28\xb3\xe5\xa2\x90\x47\x7b\xd6\x46\x49\x00\x3e\x50\x18\x57\xee\xa9\x0b\x72\x8a\x60\x02\x61\x06\x66\xe6\x4f\x92\x24\x85\xe1\xb2\x95\x6f\xdb\x71\xb2\x8a\xd9\xf0\x19\x9e\xf6\x93\x64\xb3\x47\xd3\xcc\x62\x4b\xd6\xa9\xf0\xcf\x63\xec\xff\xf2\xd8\xb5\xd5\x5e\xf8\xef\xf3\x82\xbb\x86\xba\xe6\x30\xbb\xca\x87\xec\x92\x08\x6a\xcc\xe6\x8a\x35
\x2a\x9b\x2b\x9e\x65\x67\xd8\x33\x2e\x67\x4f\xda\x0b\xb4\x68\xbe\xc3\x7e\xbe\xc6\xae\xb4\x66\xd8\x7f\x4b\x2d\xf8\x5f\x9e\x3d\xe5\x30\x84\x6e\xd8\x93\x23\xf8\x81\xf3\x62\x50\x07\x1a\x7c\x21\xef\x85\x71\x0e\x93\x48\x62\x8e\x7d\x8f\x3c\xdc\x76\x33\xb2\x05\xf4\x4a\xab\xf2\x17\x80\x50\xc4\x05\xe9\x18\x6a\xa1\xad\xd9\x93\x52\x6e\x56\x08\x28\x79\xa7\x75\xb1\xe0\xbc\x18\xa0\x75\x87\x02\xb7\x90\xb2\x82\xb9\x34\xc0\x53\xb0\xec\x58\x95\x3f\xf2\x70\x60\x82\x98\x41\x3e\x0b\xe0\x5e\xb0\xe5\x84\x6f\x01\x2e\xf7\x90\x17\xb0\x6f\x75\x58\x4a\x37\xec\x0d\xa1\x51\xbc\x72\xdc\x71\xf8\x28\x7b\x88\xb2\xc0\x3d\xb3\x9f\x95\x21\x96\x2e\xf6\xff\x78\x2c\xb8\x7d\xc4\x75\x47\xb4\x10\x29\xe4\x06\x46\xbc\x83\xbc\x14\x4e\x13\x39\x56\xa5\xfc\x4b\x39\xbb\x59\x0d\xb1\xfb\x13\x8f\x8d\xcb\x17\xfc\x2f\x78\xc1\xcb\xbd\x3b\xe5\xab\x0a\xbb\x49\xa1\xdf\x8c\x68\x56\xea\x0b\x00\xb1\x80\xd5\x8b\x8d\xda\x77\x89\x19\x4f\x3d\xe0\x94\xd3\xba\x80\xf8\x7d\x72\x6c\xc5\xf4\x83\xde\xd3\xd8\x2d\x97\x70\xaa\x34\x75\x07\x9e\xd9\x0f\xd3\x32\x2e\x07\xb6\x1d\xeb\x1d\x68\x3e\x8c\xfc\x37\x7b\xc1\xd3\xa1\xa0\x25\x8d\x50\x95\x1b\xd6\x13\x84\x85\x6b\xfb\x80\x87\x56\x2d\xe3\x4c\xd4\x6a\x4a\x80\x3d\xbc\x9d\xfc\x7d\x17\xe1\x59\x11\xc3\xb3\x04\x54\xdb\x8a\x16\xb2\x68\x86\x42\x27\x97\x45\xde\xf5\x7f\x6e\x2c\x58\x87\xc8\x17\xb8\x5f\xa8\xb8\x6f\xea\xb4\x0a\x2d\xd9\xe4\x6d\x55\xbe\x19\x8a\xd4\x44\x91\x88\x78\x4f\xe4\x0d\xc4\xa8\xca\x78\x3b\xa6\xfd\xd1\x95\x1a\x5a\x0f\x94\xbe\x5c\x44\x10\x5a\x38\x59\x4c\x5d\xf4\xae\xc0\x8f\x5e\xf4\x1e\xd7\x73\xdb\x75\x68\xeb\x15\x35\xd6\x67\xf4\xa8\x7f\x5e\x59\xd2\xee\xc2\x0b\x43\x01\x76\x3a\xb8\x0e\x53\x95\x95\xb1\x54\x5e\xa9\x7c\xa4\xae\x56\x22\x87\x70\xac\xc3\x8d\xc3\x87\x0e\xb9\xa9\xbf\xaf\xf5\x58\xb5\x67\xfe\x0b\x76\x90\xde\x5a\x99\xa5\xe0\x4e\x1d\xa0\xcf\x67\xa0\xf4\x87\x0e\x5e\x2d\xa1\x1f\xa3\x82\x04\xdd\xae\xe3\x60\x9b\xec\x43\xd7\xb2\xc3\xdb\xc0\x06\xaa\xdd\x8d\xb2\x03\xc4\x51\xbe\xe4\xda\xe0\xa7\xc7\x86\xaf\x23\xaf\xed\xe5\x02\xd1\x42\xd0\x00\xa2\xce\x23\xb0\x6a\x6a\x9a\x55\xb2\x64\x5d\x9b\x68\x2d\x53\xfb\xc1\x42\xca\x26\xab\x9a\x1d\x52\xda\xae\xd9\xcb\x33\x7c\xc4\xc7\xf5\x19\x39\xf4\x15\xa9\x0c\x02\xbb\xc1\xcc\x8e\x13\x7c\x32\x9c\x72\x53\x39\x8c\xe5\x5c\x7e\xb8\xb0\x42\x88\x74\x23\x75\x3e\xb9\xba\xe9\x5b\x61\x2f\x26\x33\xf4\xe8\x37\x5b\x9b\xbe\xa9\xc5\x86\xd1\x6f\xca\x6e\x4f\x46\x53\x23\x5f\xb5\xaa\xce\x8d\x78\x57\xea\x84\x30\x98\x8b\xde\x84\xee\xde\x45\x6f\xc2\x55\x73\x74\x6c\xe8\x55\xec\x7d\x35\x76\x15\xd9\xdf\x96\x5a\x59\x4f\xf8\xff\xb1\x16\xbc\xa1\xb6\x62\x5f\x5a\xb1\x90\x60\x54\xaa\x71\x99\x91\xd5\xb2\x6a\xcf\xb4\x8a\x27\x0e\xec\x0a\x85\x7c\x52\x65\x7f\xf4\x7a\x22\xd5\x79\xd3\x2a\x90\xd2\x65\x6d\x18\x6e\xa9\xdf\x8d\x38\x61\x06\xa8\x95\x45\xd2\x40\xc1\xc8\xcc\xc7\xa8\xbc\xa0\xac\x1b\x97\x25\x36\x80\xb8\xae\x3a\x6e\x6c\xc5\xbc\xb8\x62\xd7\xfe\x56\x26\x7e\xf4\x77\x91\x5c\x86\xd6\x63\x2b\x69\xfa\x1f\xc7\x18\x33\x0d\xf8\x7f\x3e\x16\xbc\x6b\xcc\x69\xd1\x91\xa1\x88\x94\xd3\x4a\x4d\x40\x95\xe1\xa4\x57\xb2\xc9\xf9\x8c\x19\x24\x00\xe0\x69\x98\xa1\x70\xa8\x15\x37\x3c\x30\x6e\xab\x39\x95\x74\x6e\x0c\x33\x56\x65\xde\xb0\x1c\x6e\x23\xcb\x81\xc2\x47\x3c\x1f\x5c\x17\x34\xb9\x0a\xed\x0d\x4b\xf9\xdb\xec\xb6\x74\x60\x19\x65\x4c\x6b\xab\x7d\x49\x03\x02\xd7\x8f\xcc\xda\xee\x80\xee\xd5\xb7\x89\x48\xee\x75\xa9\xa4\x10\x48\x0c\x15\x0a\x34\x25\xad\x65\x2b\x53\x64\xcf\x45\x05\xcd\x42\x74\xad\x43\x0f\xd4\x7c\x54\xa9\xb8\x80\x5a\xb6\x5b\xb8\x84\x1e\xf2\xf8\xa6\x48\x43\x57\xf8\xe3\x85\x28\x19\xfb\x74\x8d\x99\xdd\xe4\xff\xba\x14\x3c\x57\xf4\xef\xca\xba\xeb\x53\xc4\xec\xf7\xc9\xb8\x29\x9a\x75\x53\x88\x0c\xf6\x7a\x2f\xe9\xe7\x61\x32\x05\x6c\x1b\x8
c\x58\x91\x28\x24\xa3\xaf\x4b\x2d\xdc\x30\x40\xb7\xd8\xda\xdd\x3c\x50\x31\xe7\x10\xea\x9f\x45\xa2\x98\x46\x1c\xcd\x80\xdf\xa3\x8a\x6c\xea\x29\x22\xbd\x1d\xa6\xa9\xe9\x2e\x66\x92\x58\x1d\x0c\xd3\x08\xba\x40\xca\x5c\xdd\x01\xf0\x83\x99\x15\x69\x99\x0f\x9c\x74\xb1\xdd\xcc\xe7\x6f\x79\x0c\x39\x96\xff\x11\x2f\x78\xb1\xb7\x02\x7f\x6f\x36\x8f\xc8\xa7\x95\xd9\x68\xeb\x21\xe9\x67\x1f\xd5\xe1\xfc\x91\xc7\x0c\xe3\xf5\x3f\xeb\x05\xaf\xf3\x56\xf4\xef\xcd\x86\x35\xb3\x30\xa7\x5c\x98\x3b\x1b\x9b\xfb\xc2\xa3\x3a\xc0\x7f\x9c\x60\x0d\x4b\x76\x40\x89\xd9\xa0\x0d\x67\xd1\xc9\xb8\xc8\xfb\xe0\x7b\xba\xad\x1f\xad\x89\xd2\xff\xd8\x44\xd0\x19\x71\x9d\xd0\x9e\xc9\x5c\x55\x2a\xbc\x32\x12\x18\x2f\xf0\x48\x3f\x8f\x9c\x83\xdc\x06\x70\x5e\x80\x79\xbe\x1a\x62\xd6\xcb\x22\xf7\x78\xfb\xda\x9e\x29\x74\xd7\xa6\xd0\x3d\xb3\xe4\xe5\x9b\x25\x9f\x61\x59\x25\x9f\xb6\x4b\x88\x42\xf6\xa3\x1e\x1b\x97\x47\xa6\xff\xd2\xad\xa2\x7f\xb6\xdf\x8e\x00\x8f\x36\xb3\x54\x05\x42\x46\xcf\x14\x9c\x3a\xaa\x12\x72\xae\xae\x8f\x68\xa5\xc9\x7e\xc4\xd3\xe8\xd7\x3f\xc8\x66\x76\xd3\x21\x3c\xb2\x6e\x3c\x03\x0e\x38\xd1\x42\x17\x95\xd6\x81\x5d\x1c\xed\x51\x5d\xd9\x79\xfc\x19\x19\x4c\xb1\x4f\xda\x4a\x3a\xa2\xcd\x91\x01\xb1\x3f\x5c\x67\x27\xec\xf0\xa5\x08\x52\xdb\xb3\x14\xd3\x98\x73\x15\xc6\x44\x98\xc7\x61\x12\x47\x90\x9c\xf2\x2c\xb1\xda\xc9\xb2\xf3\xfe\xc7\x9f\x12\xac\x0c\x5d\xb5\x61\x24\x52\xae\x9b\xe4\x1b\x74\x5b\x29\x43\xce\xf9\x6c\x12\xef\x21\x50\x48\x25\x1a\x96\x59\xf3\xa2\x37\x2e\x65\xa9\x8b\xde\x55\xb8\x5d\x10\xc7\xdd\xf5\x0f\x5d\xc7\xfe\x62\x1f\xbb\x06\x7f\x6a\xe3\xe7\xff\xbd\x6f\xf3\xb4\x85\x9d\x1a\xe8\x82\x1f\xdb\x77\xd6\x69\x95\x47\xa2\x15\x47\xae\xd6\xa0\x4a\xb6\xaa\x01\xea\x44\x21\x72\xdd\x11\x9b\xe8\x84\x85\x39\x1e\x11\x28\xb9\xc9\xdd\x3e\x83\x61\x4e\xa7\xd3\xa9\xac\x95\x55\x4a\x92\xe2\x59\x12\x61\x6f\xd0\x30\x2c\x36\xe8\x17\x86\xe3\x28\x58\x69\xdb\x31\x4b\x5d\xd2\x85\x5e\x5b\x59\x5a\xc4\x91\x20\x64\x37\x94\x66\x8d\x94\x4d\x1d\xb5\x75\xb0\x42\x83\x75\x90\x91\x96\x9e\x99\x34\x7d\x51\x35\xb6\xa5\xe4\x97\xb5\x09\x14\x0d\x5d\xc8\xba\x87\x95\x67\x22\xa8\x46\x05\x89\x4b\xd6\x71\x49\x27\xa2\xce\xfe\xc1\x39\xe2\x93\x49\x7c\x5e\xf0\x90\x9f\x14\xbd\x24\x1b\x74\x45\x5a\x2e\x66\x49\xb2\x1a\xb6\xce\xc3\xfb\x92\xd6\x17\xf2\xec\xc2\x00\xeb\x65\x28\xae\x3c\xa5\x54\xae\x11\x43\x6e\xf2\x73\xa4\x9f\xd1\xa7\xb5\x29\xd8\x86\x4c\x50\xcb\x89\x71\x27\x8d\x38\xad\x6b\xd5\x4e\xa4\x60\xe9\x23\x3e\x5f\x9c\x8f\x7b\x18\x1e\x36\x44\xeb\xab\x06\x3a\x0c\x0f\x4a\x5c\xf5\x93\x2e\x80\x23\x6a\x64\x0e\xe1\x69\x08\x6d\x5a\x0b\xa8\x4f\x0c\xc1\x57\x4d\xf6\x37\xe3\xca\xdf\xff\x67\xe3\xc1\xcb\xc7\x01\x34\xc1\xda\x74\x1b\x72\x1e\xad\xed\x24\xbb\x43\xaa\x19\xee\xb7\x69\x4b\x02\x2f\x9c\xa1\xb6\xc2\x5c\x50\x4d\x02\xca\xfa\x1a\xbe\x01\x2b\xa6\x5a\x97\x73\x15\x1b\x8a\xb9\x37\x4c\x07\xf7\xf2\x45\x88\xf8\xb9\x33\xdb\x90\x9d\x06\x5b\x52\x96\x47\xb8\x55\x7a\x39\x44\x93\x71\xc3\x34\x66\xd4\x9c\x11\xf7\x40\x6e\x70\xa6\x5f\x6e\x72\x17\x4e\xd2\x5e\xdf\xcc\xa9\x8a\xb4\x01\xfd\x56\x72\x56\x61\x62\x32\xe8\x04\x95\x72\xc7\x3a\x50\x00\xbc\xad\x02\x40\x54\x35\x8f\x64\x20\x05\x33\xaa\xc3\x01\xa9\x8d\x00\x3b\x5f\xdf\x45\x2f\x21\x7d\x52\x0e\x5f\x55\x8d\xce\x6c\x4e\xa8\x0f\x7a\xa9\xfc\x0d\xf1\x4f\xa7\x50\x85\xf3\x9d\x91\x0f\x8c\x72\xd9\x9e\x61\xcf\x60\x73\x9b\x07\xa9\x6e\xc9\xe5\xe5\xea\x3d\x2b\x2e\x3b\x67\x35\x05\xb1\x9f\xaa\xb1\x6b\xca\xb8\x2b\xb2\x7e\xa9\x60\x59\x5f\x51\x53\xe6\xc6\xbf\xf3\x96\x9d\x5b\x55\xd8\x75\xbc\x69\xc2\x5f\x88\xa4\x9a\x7c\x06\x60\xd1\xed\x67\xa0\xba\x08\xc6\x57\x58\x94\x97\x24\x3a\xd6\x02\x63\x79\xb4\xed\x44\xaa\x0c\xe6\x7e\x3b\x8c\x13\x37\x49\x53\x5e\xe9\xe7\x82\x0e\x47\xc2\x1b\xa2\x
6f\xa1\x94\xa7\x14\x8b\x55\x51\x6e\x08\x91\xf2\xc3\x08\x52\x77\x88\x17\x38\x18\x67\xab\x5a\x97\x1d\x63\xe8\x2f\x8c\xb3\x6f\xd3\x73\x8a\x71\xd8\x24\x8c\x17\xfe\x1b\xc6\x83\x7f\x1c\x9b\x19\x7d\x53\x69\x0b\x39\x72\x27\x5d\xbc\x53\x9b\x83\x57\x2a\x2f\xae\xa8\x13\x1c\xa7\x56\x9d\xb7\xe2\x42\x8f\xfc\x68\x0b\x73\x54\x88\x18\x67\xa4\xcc\x07\xba\xf6\x0a\x24\xfe\xd1\xeb\x36\x88\x8f\xca\x16\xd5\x95\x7d\x31\x1a\x28\xa5\x38\x3d\x4b\x8b\xb0\x71\x35\x1d\x4c\x1d\x27\x9b\xdf\xf4\xa1\x8e\xfe\x52\xab\x6c\x3c\xac\x90\x26\x04\xa5\x6e\xcc\xb5\x79\xa8\x2b\x96\x46\x66\xe1\x1d\x2a\xb7\x8c\x46\x04\x3b\xa5\x7b\x25\x57\x4c\x5b\x60\x34\x74\x73\x3a\x30\x4f\x40\x21\x12\xc5\x6c\x65\x07\x97\xa8\x83\x50\x07\xaa\x72\x56\x5a\x5d\x95\x2d\x43\xb5\x62\xad\xcf\x8d\xa2\x2a\x8b\x42\x56\xee\x3e\x48\xdb\xe8\xe0\x3d\x2b\x5b\xc5\xe9\xbe\xd5\x63\x8e\x38\xe3\xff\xa8\xc7\x9e\x7e\x99\x1b\x56\x71\x05\xab\xb9\xe0\x69\xf6\x2f\xad\x12\x76\xb2\x0d\xaa\x62\x24\x75\x03\xb0\x6c\x6b\xab\x3a\xee\x49\xa5\x50\xb3\x37\xed\x27\x6f\x24\xda\xa5\xfd\x97\xef\x0f\xfe\xbf\x2b\xac\x0b\x6e\x9b\x1d\xc1\x03\x38\x91\x02\x0d\x95\xa5\x34\x5a\x92\x31\xaa\x29\xdc\x50\x60\x01\xd7\x51\xb9\x08\x05\x0f\x20\x21\x01\x4b\x56\x9e\xd2\xfe\xc9\xa0\xc9\x58\x03\x73\x15\x4e\x0c\x19\xe0\xd4\x81\x1d\x97\x5c\x50\x36\x83\x31\x9f\x1b\x7a\xc5\x20\x54\xc7\xd6\x04\x26\x29\x25\x51\x14\x4a\x15\xef\x66\x11\xbe\xb1\x1e\x87\x52\x0a\x2d\xa6\xd7\x0f\xd7\xd5\x1f\x30\xdf\x28\x4b\x99\xb0\x01\x73\x7d\xb5\x5f\xea\x69\xc0\x7e\x21\x31\x46\xdc\x98\x4b\x4e\xdc\x1d\xc8\xc6\x82\x7b\x64\xa3\x4a\x87\x97\x57\xd7\x0f\xcb\x6b\xfa\x58\x3e\xc1\xef\x0e\xac\xee\x05\xf7\xac\xd4\x6d\xb3\x63\xe6\x74\x89\xc3\xb8\xaa\x1d\x22\x89\x90\xce\xc0\x11\x52\x21\xce\xab\x9e\xe7\xe1\xc9\x8d\xdb\x6a\x3e\x1c\x67\x44\x82\x1b\x35\x4e\x31\xb6\xb0\x0e\x11\xe2\x38\x61\x58\xe8\xc6\xd8\x72\x64\xcf\x68\x1b\x3e\xe2\xd3\x2f\xaf\x7f\xf3\x4d\xff\xaa\x50\x06\x14\xb2\xf0\xe0\x8b\xd0\xdb\xd1\x6b\x62\x43\xca\xd2\x8e\x70\xa2\x3d\xc7\xd8\x95\x52\xa0\x3d\xd5\x6e\x4b\xae\xef\x7f\x62\x2c\xf8\xe5\xb1\x25\x73\x01\x65\x20\x4b\x41\xb1\x0e\x5d\xd0\x41\xe4\xcb\x5c\xe0\xc3\x4d\x3e\xd3\x6a\x89\x1e\xd6\x29\x31\x3b\xf1\x84\xaa\xde\x54\xe7\xf3\x59\x2a\xea\x7c\x29\xeb\x0a\xfc\xfb\x6c\x7a\x32\x1f\x2c\xf6\x53\xae\x65\x1d\x60\x21\x76\xa3\x58\xf0\xc7\x2e\x36\x23\x05\xb0\xb4\x15\x27\x31\x31\x73\x80\x64\xa9\xf3\x02\x32\x2d\x42\x2b\xdf\x7d\x80\xe2\xda\xf3\x74\x41\x99\x90\x50\x20\x78\x51\x8a\x9e\x3a\xb7\x8c\x24\xd5\xea\x80\x5f\x83\x34\x4a\xa7\x13\x50\x37\x00\x90\x20\x52\xa1\x81\xac\xfb\x69\x94\xa5\xc2\x41\x0a\x20\xfe\x17\xe1\xa8\x42\x95\x86\xa3\x65\x0e\x00\x94\xd1\x5d\x42\xdd\x60\xa0\xb7\x8a\x39\x34\x68\x12\xd4\x2a\xdc\x72\x8b\x9a\x43\x49\x1e\x72\xfe\x5c\xb0\x60\x55\x1e\xab\x52\xcf\xea\x6a\x3a\x5d\x88\xed\xfe\x94\x17\xbc\xc6\xbb\xdd\xbe\xe4\x30\x5e\xc7\xa4\x26\xf2\x3c\xcb\x2d\x53\x93\x99\x25\x6d\x74\x92\x4c\x16\x0b\x31\x45\xbc\x61\x4e\x52\xc3\x81\xe7\x30\x58\x3a\xcb\xb9\xfc\xa8\xdb\x63\xbc\xe7\x22\xca\x7b\x0c\x94\x73\xff\x8f\xbc\xe0\xb3\xde\x72\x25\x20\x7e\x48\x1d\x6a\x22\x66\xb5\xa9\x80\x54\xc1\x6a\xa9\x73\xd1\x5c\x6b\xd6\x79\xdc\x0d\xd7\x04\x1d\xaf\x4e\x68\x03\xc4\xa3\xcb\x83\xc2\x7a\x22\x18\x15\x8b\xef\xe8\xbd\x4e\x13\xa3\x9e\xce\xf2\xb5\x30\x55\x49\x5b\xa3\x31\xe3\xbe\x3c\xc1\x1e\xaf\x5d\x3a\xda\xc4\xf0\xf1\x89\x87\xc1\xc4\xf0\xa2\x89\xf9\x6a\xc3\x3b\xb1\x32\x80\x6e\xa0\x14\x58\x2d\xf6\x9a\x5d\x6f\xc5\x51\x68\x27\xd3\xd6\x1a\xfe\x9c\x63\xae\x20\x9c\x0d\xb0\x8c\xea\x96\xea\x26\xf6\x01\x22\xf6\x0b\x29\xb8\xb5\xb3\xbc\x8b\x5f\x27\x91\x4e\xc7\x17\x2a\x7d\xb7\xd2\x72\xa1\x0f\x0a\xa5\xbc\x15\xad\xac\x67\x05\xb9\xd4\x31\xbd\x4a\x0a\xb2\x52\xb9\x2e\x2a\x1c\xd2\x39\x48\x36\x9b\
x1a\x1d\x53\x4c\xc1\x4b\x66\x36\xc8\x30\x50\x8d\x2d\x08\xf2\x7e\x0a\xa8\xa2\x01\x14\x7e\x38\x84\x72\xc8\xe1\xe0\x26\x8e\xe8\xfc\x31\x95\xab\xb2\x27\x8d\x87\x05\xc1\x87\x14\x27\x78\x30\x44\x20\xc1\x09\xfe\x03\x8c\xf3\xa0\x1a\x50\x15\x9c\xe0\x77\x33\xce\x39\xdc\x95\xff\x41\x30\xd5\x09\xab\x0b\x75\x75\x43\xc5\x56\xc9\xbb\xf3\x59\x39\x97\x9a\x5b\xb8\x6f\x75\x5b\x70\xed\x90\xbe\xcd\x65\xdf\xe9\xef\x7b\xe0\xff\x2f\x64\xf2\xaf\x17\x32\x06\x98\xf9\x45\x29\xc2\xc8\xa9\x3b\x00\x87\xe6\xa5\xcf\xe5\xa8\x18\x8d\x40\xa4\xeb\x71\x9e\xa5\x92\xff\xe3\x74\xf6\xf2\x2c\xc2\x19\x2d\xca\x70\x2d\x4e\xd7\x82\x9b\x1e\xbd\x69\xb5\x7b\x33\x72\x66\xb7\x9b\x56\xe8\xbd\x35\xb3\x6a\x0c\xa3\xe7\x77\x49\x88\x87\x01\x8c\x9a\xcc\x5f\x6d\x95\x27\x42\x14\x5f\x0c\x47\x30\x16\x46\x6a\xb8\x4c\x93\xd2\x0f\x4f\x54\x50\xe9\x20\x6b\x41\x67\xb1\x9e\x16\x61\x81\xb0\x08\x5f\x3e\x10\x7c\x8f\xfe\xe5\xba\xfe\xe0\xf2\xd6\x81\xfc\x1f\xda\xf3\x5e\xed\xda\x7b\x95\xa8\x40\xfe\xd6\xa5\x06\xf2\x6f\x5d\xf4\x6b\xe4\x9a\xef\xf9\xca\xf6\x42\xf8\x55\x08\xff\xea\xf6\x1e\xa9\xa7\xf9\xb7\x34\x46\x03\x96\x3b\xd4\x55\x09\xe9\xd7\xfc\x84\xb1\x9f\x9b\x60\xdf\xb9\x69\x55\x0c\x60\x40\xaf\x98\x08\xbe\xdb\xfc\x74\x69\x1f\xaf\x8f\x66\x3d\xff\xbc\xc7\x7a\x76\xcd\x7a\x1e\xd4\xe5\x1a\x7e\xda\x0b\xda\xa3\x98\x8f\x5d\x5b\x7a\x74\x27\xb7\x3a\x0f\x1d\x93\xdf\x34\x36\x66\x33\xb0\xa7\xb0\x29\x76\x70\x87\x25\x54\xf6\x38\xd7\x5e\x11\x06\xbb\x08\xc3\xe2\xf6\xdc\x6b\xda\x6f\x8c\x42\xf2\x35\xdc\xa6\x9a\x6a\xf4\xba\x09\xf6\x24\x3b\x15\x2e\x8b\x30\xbb\xa0\x9f\x96\x71\x57\x40\xda\x2e\xf0\xac\x3f\x3f\x10\x1c\xad\x5e\xac\x80\x90\x59\x77\xb7\x16\xa1\x3e\xb8\xc7\xc7\x76\xcd\xc7\xce\x2b\x36\xb6\x7a\xa9\x22\xd4\x61\x36\xcd\x1a\x9b\xe7\x42\x8e\x20\x80\x3d\x3e\xb4\x27\x41\x29\x09\x2a\xdc\x9e\x07\xdd\xea\xdf\x6c\x92\x20\x25\x39\x55\xe1\xed\x2a\x6c\xa4\xca\x92\xde\x76\x45\x15\x50\x2a\x2b\x5a\x21\x14\x46\x5c\x3f\x82\xfe\xa2\x53\x17\x70\x97\x9e\x11\x65\x1e\xb7\x28\xfb\xe9\x1b\xfb\x82\x74\xd4\x8d\x4a\xa5\x77\x95\xce\xa3\xeb\x63\x6a\x10\x8d\x2e\xbc\x34\xd2\xb0\x12\xa6\x03\xfe\x0c\x3d\x6a\xc5\x65\x2e\x7a\x0c\xdf\x99\xa7\xe8\x1e\x6c\x1a\xd0\x42\x1d\x8e\xf7\x5b\xe3\xec\x35\x1e\x73\xee\xfb\x3f\xb8\xab\xa4\xa0\xe0\x69\x76\x63\xd5\x54\x25\x3d\x36\x30\x75\xe1\xb0\x26\x43\xb9\x65\xef\xa3\xf7\xa7\x58\xc4\xac\xce\xfb\x77\x05\x73\xe6\x57\xd5\xc0\x18\xaa\x36\xc0\x23\xd6\xc6\x3a\x6d\x6a\x51\x78\x9c\xaa\xdb\x84\x79\x6e\x93\xff\x03\x1e\xbb\x06\xef\x6a\x8b\xe3\x1b\xbc\x87\xc1\xe2\xb8\xe0\x36\x6a\xbb\xeb\xe2\x48\x52\x2e\xe4\x39\x90\xf7\xac\x05\x2e\x73\x05\xc0\x23\x17\x14\x22\x2d\x10\xf0\x03\x1b\x6a\xb2\x77\x79\xec\x09\x34\x7f\x33\xeb\x22\x0f\xd7\x10\xf5\xd5\x7f\xdd\xa5\xec\xd9\x11\x0b\xf5\xcc\x11\x8d\x6e\xba\x5e\x34\x91\x21\x3e\x1b\x51\x5a\x2b\x4d\x36\x14\x0d\x8e\x8a\x26\xfb\xeb\x6b\xd9\x53\xb6\xdc\x21\x47\x9a\xce\xce\xf8\xf0\xb5\xc1\xad\xce\x8e\x70\x11\x7a\x92\xb0\x28\x1b\xb9\x08\x23\x8a\x3d\xc1\xda\xb7\x98\x89\x49\xb3\x73\xd1\x1b\x97\xfc\xcd\x21\xea\xaf\x3f\x8e\x7d\x6a\x8c\x3d\x9e\x32\x13\x44\xae\xd2\x80\xfc\xf7\x8f\x6d\xe5\x08\x1e\xd1\xd9\xd9\x6a\x13\x76\x67\x83\xaf\xd7\xf4\x27\xec\x6c\xa2\xb6\x3c\x35\x20\xde\xd8\x40\x9c\x10\xa5\x17\xfd\x56\x07\x01\x9c\xb3\x42\xb8\x2e\x7f\x83\xbf\x9d\xaa\x34\xc5\x29\xe3\x5c\xb7\xb6\x38\x4d\x11\x56\xd7\xa1\xc9\x30\xfd\x88\x53\x4c\x6c\xec\x65\x1a\x82\x59\xad\x25\xac\x94\xca\xe8\x40\x88\xe7\xd9\x85\x73\x50\x73\x47\x74\xb3\x7c\x30\xd5\xe4\x4b\xb2\x7f\xd8\x59\xf4\x59\xac\xf6\xe3\x04\x23\xcc\xec\x3e\xa0\xf4\x01\x71\x64\x30\x86\x30\xe1\x6a\xcf\x65\x3d\x1d\x18\x55\x66\x3d\xdc\xe9\x72\xac\xe1\x7a\x18\x27\xe0\x09\x2b\x33\x9e\x66\x79\x37\x4c\x20\x6b\x4f\xf6\x53\x7d\x10\x4b\x06\x81\x89
\x53\x92\x53\xc0\x89\x62\xd9\xdf\xd7\xd8\x01\x41\x0c\xd4\xff\x46\x6d\x8b\xf0\xd1\x51\x8b\x38\x8a\xf5\x06\x6f\xaf\xa9\x06\x9d\x15\x73\x19\xae\xc2\x29\xdb\x39\xe3\x85\x42\x0e\x58\x29\xc8\xe6\x44\x26\x6a\xd1\x02\x02\x23\xaf\x3d\x08\x14\xad\xac\xdb\xcb\x52\xf0\xd6\xe6\xfd\x14\x10\x7b\xb2\x7e\x09\x3e\xb7\xac\xad\x2d\xf9\x93\x6d\x63\x9c\xe7\x89\x48\xd7\xca\x8e\xbc\x7d\x5f\x5f\xf4\x21\xc1\xb3\x95\x64\x7d\x39\x9d\x45\x01\x16\x54\x55\x63\x00\xe2\x05\x9f\xb9\xb0\x84\x9f\x72\x10\x9d\x36\xff\xd8\x54\x93\x7d\xc4\x53\x7b\xca\xff\xc0\x36\x10\x82\x43\x93\xae\x23\x91\xcd\x94\x97\x5a\xc8\x32\xf3\x4d\x13\x3d\x8a\xa6\xcf\x57\x27\xd7\x19\x7e\x9d\x77\xe2\xb2\x68\x48\x12\xc2\xb8\x21\xf2\xd9\x68\x98\x1a\x0c\x54\x6c\xb2\x37\xd6\xd8\xb8\xa4\x26\xff\xd5\xb5\xcd\x43\xaa\x47\x0e\x61\x21\x8b\x0a\x67\x00\x9f\xf4\xa0\x22\xd6\xd6\xfd\xdf\xd9\xee\x73\x46\x02\x45\x65\x43\xc8\x4b\x28\x1a\xbd\x3c\x6b\x89\xa2\x10\x91\x35\xb6\x29\xc8\xce\xd1\x4e\x64\xed\x3d\x55\xcc\xb8\xcc\xd6\x04\xa5\xc7\x81\x53\x76\x55\x20\x64\x5c\xb7\x17\xe6\xa6\x38\x04\x7d\x1b\xd1\x15\xd8\x9b\xc7\xd8\x01\xc5\x9f\xfc\x57\x8f\x5d\xe2\x9e\x1a\xc9\x0f\x3f\x5e\x7b\x4c\xb8\xe0\xbf\x3e\x7e\xf7\xce\x1a\x83\x53\xcd\xff\xa9\x5a\xf0\x9a\x9a\x2a\xcd\x07\x6b\x48\x25\xae\x95\x5c\xa3\x32\xbd\x54\xf5\xf2\x55\xa1\x40\xe0\x82\xa1\xb3\x2b\xa8\xf3\x40\xf1\x42\xf9\x37\x6e\x51\xf9\xd7\x02\x7c\x3d\xcb\x79\x60\x3d\x0b\xd3\x5a\xc1\x7d\x0c\x8d\x7b\xd2\x41\xf9\x57\xdc\x6f\x3e\x2b\xc5\x89\x51\x9f\xd6\xf5\x05\xcd\xbc\xa0\x43\x95\x82\x09\xb1\x8e\x4e\x63\x4d\x9e\xf2\x77\x2e\xcc\xe8\x06\xce\xd0\x54\xc5\x05\x17\xa9\x7c\x2d\xb2\x65\xb8\xbf\xd9\xef\xd8\x08\x94\x91\xea\xf6\x44\x5c\x70\xca\xfd\x7c\x7c\x7f\xd0\x31\x17\x1d\xe4\x11\xbe\x26\x52\x21\xa7\x72\x5d\xdd\xd3\x6a\x61\x48\x35\x0a\xb2\xf5\x18\xf5\xfc\x69\x5d\x18\x88\x8a\xdb\xa5\x5c\x5c\x10\xaa\x78\x05\x06\xb6\x36\x2f\x7a\x57\x44\x79\xbc\x2e\x72\x47\x24\xf9\xe8\x15\xec\xa3\x9e\x2e\x06\xf4\x21\x2f\xf8\xb9\x2d\x8a\x01\x3d\x6a\xb5\x7d\xac\xe6\x23\xd1\x13\x72\x91\xb3\x94\x5b\x33\x85\x30\xb7\x8e\xdc\x7c\x96\xed\x27\x9a\xf7\x4f\x06\x07\xcf\x12\x68\xe0\x09\xae\x70\x42\xbb\x5d\xcc\x38\xa0\x74\x03\x40\x19\xdd\x0a\x6b\x82\xbd\xd8\xb3\x8a\x46\xaf\x07\x1d\xd3\xe4\x50\xfd\x67\x3e\x69\x6a\xda\x43\x49\x8a\xdd\x94\x97\x76\xf2\x73\x7f\xa7\x66\x17\x93\xfe\x58\x8d\x5d\xbf\xad\x29\x14\xea\x47\xe1\x1e\x32\x25\xa5\x5f\x55\x33\xdd\xd7\xe5\xa6\x79\x4c\x87\x06\x80\xf7\xa9\x12\x12\xb6\x39\x57\x09\x70\x78\x68\xa7\x45\x5c\xc6\xeb\x0e\x78\x1d\xc4\x77\x87\x85\x36\x0a\x21\xb1\xd1\xfa\x14\x94\xde\x6a\x67\xb6\xca\x99\x4f\xb3\xca\x57\x62\x07\x90\x8f\x02\x07\x46\x76\xa4\x40\xb7\x28\xd4\x9e\x96\x0c\x05\x1f\x42\x9c\x3a\xfc\x1b\xf9\x25\x44\x15\x47\x9b\xf4\x8a\x9d\x65\xb4\x17\xfc\x53\xc1\x0d\x27\xe1\xaf\x51\x21\x22\xf8\x8c\x8e\xa6\x55\x31\xac\xb8\x23\x1d\xe2\x7b\xe7\x18\x7b\xda\x36\x40\x04\x0b\x79\x9c\xe5\x71\x39\x38\x2d\xd6\x45\xe2\x44\x73\x93\xce\xf1\x95\x5a\x70\xd7\x76\x0f\x55\x4b\x3e\xe9\x23\xc5\xa8\x21\x01\x1d\x55\x8d\x1e\xb5\x15\xb8\xd8\x24\x0f\x79\xec\x1f\x3d\xc6\xe4\xf1\x8d\x34\xe4\xff\xb9\x17\xdc\xb0\x62\x7e\xaf\x54\x75\x2d\xdd\xf8\xc8\xa6\x8d\x9d\x6c\x99\x2d\xb2\x85\x4d\xc9\xf3\xd2\xe6\x64\x56\x75\xe7\x21\xef\x20\xfb\x8e\xe1\x54\xce\x6e\xd8\x6b\x9c\x17\x83\xc2\x9f\xf0\xf7\x37\x80\x07\xb1\x6d\x92\x3e\xbb\x61\x8f\xb1\x57\x5f\xc9\xbe\x67\xb8\xae\x43\x73\x16\x45\xcb\xc5\x2c\x11\xb7\xc5\x00\x31\xe1\x7f\x95\x05\x6f\xf3\x86\xaf\x9b\xfd\x22\x79\xb4\x75\xbf\xae\x41\x90\x35\xb6\x6e\x89\xe7\x5f\x2b\x4c\xad\x5d\xe6\xbc\xa4\x8e\x29\x92\xee\xad\x88\x1b\x40\xc2\x8c\x22\x88\xbd\x70\xf6\xdb\x7a\x1c\x72\xc2\x00\x6f\x5e\xf4\xf6\xe7\x59\x22\x16\x45\xdb\x59\xe1\x5f\xdc\x2b\x52\xbe\x97\x2e\xf
a\x18\x1a\x70\x3b\x96\xfd\xf6\x39\xbb\x4c\x17\xdd\xba\x4c\xce\x47\x3c\xa6\x76\x80\xff\x4b\x1e\x9b\xdc\xae\x80\x0b\xd4\x6e\x59\x14\xed\xe0\x15\x1e\xfd\x65\xca\xac\x5d\xe2\x16\xd5\xc7\x94\xd5\x90\xce\x3c\x2a\xb2\x64\x5d\x15\x17\x9b\x31\xf0\xe1\x90\x78\x92\x8b\xb2\x9f\x83\x4a\x08\xb1\x9b\x4d\xf6\x52\x8f\x1d\xa0\x6c\x83\xc2\xbf\x10\xdc\x42\xbb\xbb\xe0\x9d\x2c\x89\x0a\x9b\xe1\xd0\x56\x52\x61\x59\x90\x4f\x29\xbb\x68\x25\x4e\xda\xfc\xb8\xce\xae\xdb\xc1\x94\xd0\xe7\x1e\xf2\xee\xdb\xde\x50\x3d\xef\x9f\xbe\x84\xaa\x2c\xc3\xdc\xb3\x6a\xb8\xfe\x17\xe6\x78\xfe\x55\x97\x2c\xac\x9e\x3f\x60\xc1\xdb\x3d\x0b\x26\x07\xe7\xc4\x11\x40\x00\xaf\xc5\x2e\x62\x8e\x21\xb3\x79\x5f\x31\x65\x9d\x27\x32\x0a\xf5\x1c\xd3\xdb\x24\x97\x2d\xa9\x84\xa2\x35\x9d\x52\xf5\xc0\xf8\x29\x13\xfc\x36\xe2\x31\x0d\x56\xe3\xb0\xe1\x9f\x9e\x60\x7f\xe7\x80\x28\x7c\xcd\x0b\x3e\xe1\xcd\x2c\xcc\xb9\x90\xef\xb6\xc0\xa1\x6e\x52\x5a\xa4\x41\x3c\x11\x36\x40\x9f\x03\x62\x6b\x23\x28\xe4\x96\xd2\x5a\x07\x43\x10\x2a\xef\x8a\x6b\x58\xe9\xa5\x56\xa6\x8f\x48\xfb\x5d\x29\xa7\x5b\xa1\x98\x00\xc1\x2e\xdf\x37\x31\xfd\x5a\xb7\xa7\xda\x80\x5b\xe4\xb9\x7c\x78\x8c\x3d\xce\x2a\xf8\x70\x6e\xf1\x74\xe1\xbf\x6b\x2c\x78\xd5\xd8\xbc\x7b\x91\x38\x2a\xc2\xa1\x41\x99\xcc\x30\xe1\xfd\x3c\x21\x88\x97\x10\xc1\xcf\xe9\x84\xc1\xd2\x70\x58\xff\x40\xf2\x3e\x7e\x1d\xe1\x6c\x61\x7f\x2c\xc0\x94\xd0\x94\x84\xac\xf3\x76\x2c\x0f\x1b\x3b\x8e\x1c\x6a\x7c\x22\xdc\xbf\x53\x08\x02\x7a\xa4\x2a\x20\x1a\xc8\x9e\x7a\x15\x88\x47\x95\x64\x8c\x5b\xa0\x2d\x4a\xa9\xd0\x22\x74\x6b\xbf\x52\xe2\x61\x38\x62\x1f\x34\x39\xe6\x6f\x4a\xe6\xa3\x2a\x89\x52\x95\x47\x98\x72\x0b\x66\x45\xd9\x24\x48\x13\x87\x58\x4a\x94\x73\xb1\x06\x60\x75\x0c\x54\xf3\xd3\xbc\x27\x19\x77\x30\x55\xe7\x5a\x3e\x59\xcd\xca\xce\x56\xcb\xf7\x2e\x8f\x5d\xad\x5a\x84\x20\x65\xff\x27\xbc\xe0\xf9\x8b\xf6\x15\x95\xc4\xa6\x90\xd9\x37\xe4\x31\xa1\x5d\xa9\x30\x79\x04\xd3\x33\x62\xb3\x70\x83\x13\x29\xd7\xbe\x2b\x42\x05\xea\x63\x82\x13\xe1\x03\xdb\x53\xda\xcb\x3c\x1b\xc3\x66\x10\xdc\xb3\x68\x08\xd8\x76\xee\xda\x89\xaf\xba\x6e\xba\xdd\x23\x5d\x9d\x23\x71\x2a\xef\x3a\x88\x32\x5b\x75\xe4\x7d\x1a\xfc\xe5\x41\x2f\xf8\x51\xef\x2e\x80\x6c\x71\xba\x80\x97\x90\xb0\xf5\x52\x9f\x3e\x4d\x2a\x21\x7e\x42\x8a\x19\x68\x61\xd2\x85\x41\x16\x09\x5a\x09\x94\x56\x65\x4d\x37\xa9\x78\x98\xe2\x04\x8d\x8f\xe8\x3a\x9c\xcc\x5b\x75\xfb\x4f\xae\x61\x4f\xdd\xa2\x90\x09\x0a\xe9\xa3\x4b\xb5\xfb\x3f\x73\x4d\xf0\x23\xfb\x46\xdf\x33\x2c\xc5\x41\x3c\x24\x4b\xa2\xa3\x5b\xc0\x16\x97\xdb\x80\x42\xca\x8d\xaf\xc9\x3a\x69\x11\x2c\x52\x1e\x9b\x56\xc6\xa4\x6a\x8d\x94\x4b\x48\x36\x83\xcc\x5e\x32\x19\x80\x11\x8c\xa0\xb9\x41\xe0\x3a\x58\x70\x67\x80\xbc\x9b\x45\x4a\xd2\x4e\x07\x2a\x93\x42\x5c\xe8\x89\x3c\xc6\xfd\x1b\xf5\x73\x25\xd9\x51\x82\x3e\xe4\x0a\x6c\x32\x68\x93\xe1\x00\x7e\xb5\xd5\x01\x3f\x37\x07\x87\x48\xd1\xc9\x36\xa6\x3b\x71\x24\x54\x9f\xc1\xee\x5d\x66\xfc\xbe\x7e\xdc\x3a\x9f\x0c\x78\x22\x20\xc1\x5a\x25\x98\xf3\x5c\x84\x85\x5d\xc4\x25\xce\xe5\xf4\x50\x5e\x45\x01\xb6\x7c\xfa\xd8\xfc\xd9\x65\x7e\x9b\xf9\xa0\x76\x1d\xa0\xf1\x04\xbe\x0e\x4a\x6c\x65\xe4\x91\x68\xc5\x94\x47\xa9\xb6\x44\x18\x17\x58\x17\xa4\x0d\x8d\x45\xa2\xd7\x2f\x07\x75\xde\x0a\x5b\x90\x33\xda\x16\x65\xdc\x15\xd3\xb9\x58\xcf\x5a\x84\x5a\x2f\x27\x0e\xcc\x70\xad\x32\x25\x90\xf2\x96\xc8\x65\xff\x46\xd4\x28\xc2\xc7\xc1\x34\x61\x5f\x06\x86\x5b\x2a\x7c\xbf\x56\xc9\x37\xc2\x01\x41\xe8\xa0\x1b\x6f\x64\xa7\xad\xa4\x4e\x02\xf7\xbd\xe8\x01\xa6\x88\x73\xfe\xbe\x6a\x4f\x0d\xda\x53\x83\xfe\xb5\xa0\xe6\xbc\x56\xa1\xe6\xbc\xcc\xdb\x2a\xc9\xff\x12\x18\x38\xe0\xe7\xdc\x08\xe5\x67\x86\x85\xea\xa1\xfa\x51\xe4\xba\xd1\x38\x25\x4d\xf6\x6e\x83\x
9b\xf3\x76\x8f\xdd\x71\xa9\x7d\x1a\xee\x0f\xfa\x6a\xee\x52\x21\x28\x52\xe6\x02\x14\x85\x38\x55\x46\x65\x4a\x73\x07\x58\x13\x27\x42\xa5\x72\xc0\x84\x43\xc7\x4a\xf3\x21\xaf\xbf\xbd\x7a\xb3\xe8\x2f\x68\xf5\x66\x4b\xcd\x66\xf4\x8c\x8e\x04\xdb\xf9\xc8\x95\xec\xbb\x37\xf3\x59\x1d\x6e\x2e\xb5\xc2\x44\xf8\x6f\xbd\x32\x38\x02\x7f\xb9\x1e\x00\xe5\xca\x51\x4b\xe0\x16\x03\x73\xad\x7a\x2f\x66\x7b\x7b\x75\x17\x7b\xf5\x73\x76\xcc\xd9\x47\xbd\xdd\x1a\x2d\xac\xe8\x57\x95\x7c\x47\x8d\xdf\xf4\x68\x05\x9e\x35\xe5\x98\x90\x65\x7c\xc2\x63\x87\x77\xe6\x44\x25\x82\x04\xd6\xf0\xe3\x9e\x7d\x3c\x56\xa1\xb4\xc0\x87\xf9\x88\xaf\x96\xec\x7e\x23\x4c\xa3\x06\x32\x9a\x26\xfb\x7d\xc3\x74\x7e\xdb\x63\x47\x2f\x6d\x54\xc8\x60\xde\xe4\xd9\x76\x6d\x83\xc8\xf5\xd8\x8c\x08\xdc\x41\x0d\xa9\x54\xee\x15\x7d\xde\xad\xb4\xf2\x90\x77\xd7\xf6\x3c\xfe\xa8\x7f\xd8\xe6\xf1\x8a\x4a\x0c\x6b\x97\x64\x50\xb5\x53\xfd\xf9\x38\x9b\xbb\xec\xf2\x12\x4b\x18\xec\xa2\x7d\x71\xfe\xc5\xf1\xe0\xa6\xea\x45\x3a\x83\x43\xd7\x1f\x47\x4f\x35\x13\xb1\x16\xb6\x06\x34\x79\x17\xbd\x09\xad\x1b\x11\x4e\x9a\x73\x14\xfc\xca\x18\xbb\x95\x32\xb4\x8f\x07\x53\xe9\x26\x85\xcb\x28\x04\xc7\x64\x3e\xdb\xec\xf0\x0c\x33\x9f\xf0\xbf\x2f\x38\xea\xa4\x7e\xba\x59\xc6\x3b\x69\xee\xd9\x6c\xbc\x17\x96\x1d\xff\x99\xc1\x49\xb0\xbe\x54\xac\x07\xca\x6a\xc1\x43\x05\x4c\x33\x84\xcc\xb2\x4a\xa1\x64\xad\xb2\x92\xa2\xfd\x19\x8f\x8d\xf7\xb2\xbc\xf4\x3f\xe6\x29\x6c\xa2\xf7\x7a\x50\x85\xa9\xf2\x11\xea\x20\x55\x68\xda\xd9\x87\xf8\x8a\x7c\x7a\xc5\xd2\xec\x42\x2a\x07\x02\xad\xa4\x58\xbc\x70\xf2\x70\xe3\xf8\xf5\xd7\x1f\xbd\xbe\x8e\xd8\x13\x45\xbc\x2e\xa6\xdc\x24\xfa\x63\xc7\x8e\xc2\xe1\xad\x0a\xae\xb9\xf5\xd6\x5c\x5c\xa1\xcf\x8f\xb1\x59\x22\x35\x49\xc8\x8d\x70\x6d\x2d\x17\x6b\x50\xa5\xd2\x26\xb1\x0a\x28\xcc\x30\x91\xbd\x72\x6c\x57\x44\xe6\x50\xd4\xaf\xd5\xd8\x31\xa2\xa8\x7a\xf0\xdd\x9b\x95\xc2\xa3\x19\xb6\x57\x67\xd6\xa6\xa3\xe3\xc1\xd4\xfc\x4e\xe9\xc8\x6e\xe4\x37\xd4\x12\x7f\x40\x2f\xf1\xdb\xbd\xb9\xb6\x6d\xea\x04\xbb\x9e\x5c\x12\x4b\xe1\x8f\x55\xac\x44\x27\x43\x88\x76\x8d\x47\x60\x65\xcd\x6e\xb3\x32\xbb\x20\x00\x67\x51\x7f\xcd\xc5\x9a\xb5\x8a\x8f\xd2\xf2\x2d\x67\xe7\x45\x4a\xf8\x14\x50\x21\xf2\xd5\x13\xc1\x9d\xd5\x8b\xc6\x20\x4c\x12\x16\x84\x80\x40\x99\x81\x30\x0f\xbb\xa2\xa4\x62\x66\x21\x2f\xe5\x9b\x16\x94\xfa\x44\xd8\x8f\xc0\xa6\xe1\x5a\xa8\x1f\x38\xc0\xde\x38\xc6\xcc\x4d\xff\xe5\x63\xc1\x5f\xd6\x66\xd4\x4f\xad\x9c\xcb\x61\xa4\x91\x48\x23\xae\x1f\x55\xeb\x05\x5f\x6a\x02\xda\x77\x2b\xee\xc5\x54\x4b\x44\xf5\x00\x1c\x1e\x18\x5d\x5c\xb6\x41\x86\xef\x02\xbe\x00\x05\x2a\xaa\xc0\xe3\x18\xe3\x43\x4b\xab\xb8\xf7\xe8\x0f\xe1\xd1\x03\x28\x02\x1b\x71\xa1\x41\x25\xe8\x1c\x72\xfa\x83\x1d\x88\x8b\xa2\x4f\x01\xd8\xda\x62\x6e\x9a\x26\xd3\x91\xc6\xe1\x35\xcb\x22\xb4\xa1\x1c\xf2\xe0\x09\xda\x42\xbf\x48\x60\x38\xab\xfd\x12\x10\x4f\x10\x2a\xa7\x13\xaf\x75\x78\x24\xd6\x72\x81\xd4\x9c\x63\xb4\x0c\x62\x8c\x59\x71\x6e\xba\x99\xad\x2c\x73\xaf\x19\x63\xd7\xac\x66\xfd\x34\xd2\x81\x1c\xfe\x3f\xd5\xb6\xa9\x96\xec\x92\xd4\x6d\xce\xdb\x14\x06\xf2\x81\x9a\x7b\x19\x35\x04\x87\x1d\x54\xb0\x19\xf5\xa4\x6a\xfe\x08\xdd\x02\x61\x7e\xd9\xbd\x07\xa6\xf1\x55\x41\x3b\xa4\x8d\x19\xfd\x49\x96\xae\x29\x93\x3c\xbe\x49\xad\x43\x29\xd3\xa2\xc9\xe7\xcf\x2e\x9f\x3a\x01\x4d\x19\x7b\xce\xc1\x82\x13\xf1\x93\x39\x91\xb4\x0b\xf8\x0c\x01\x8c\x21\x69\xba\xc3\x21\x2f\x00\x1a\xd4\x9d\x65\x4e\xb3\xb2\xc9\x9f\x21\x44\x8f\x9f\xba\xd0\x8b\x29\xa4\x42\x61\xd7\x75\xc3\x24\xe1\x71\xdb\xe0\x22\xf4\xf2\xac\xdb\x93\x82\x91\xb2\x76\x35\xd9\x3f\x79\xec\xf1\xa2\xfa\xaa\xff\x35\x64\x49\xc7\x8f\x05\xbf\xed\x0d\x37\x1c\x3b\x25\x0d\xd0\xa0\xa8\xe5\x32\x18\x46\x5c\x0e\x4c\
x41\x14\xaa\xd0\x6d\x66\x15\xa8\x37\x27\x51\x0b\xbd\x86\x7a\xbe\xe5\x06\x72\x6b\x63\x62\x6b\xfa\x13\x05\x00\x4b\x23\xa3\xc0\xa2\xc5\x65\xc6\x5b\x1d\xd1\x3a\x0f\x9f\x3b\x68\x06\x73\xd0\xc4\xcc\x81\x7a\xd9\xcb\x52\x28\x68\x63\x31\xb0\xaf\xef\x67\x37\x6c\x5a\xff\xcd\xae\x58\xb9\xa0\x39\x91\x39\x8a\x7e\x76\x7f\xf0\xc2\xff\x9f\xbd\x3f\x81\x8e\x24\xab\xee\x84\xf1\x13\x52\x2d\xaa\xd7\x0b\xdd\x01\x63\x7b\xc6\x78\x78\x13\x6d\xbb\x4a\x45\x66\xaa\x54\xd5\x1b\xd5\x4d\xd3\x6a\xa9\xaa\x5b\x50\x55\x2d\x24\x55\x35\x0d\x63\x50\x28\xe3\xa5\x32\x50\x64\x44\x12\x11\x29\x55\x82\x99\xe9\x61\x31\x86\xb1\x59\x3d\x78\x28\x0c\xc6\x36\x06\x1b\x63\x33\x98\xcd\x18\x83\x17\xdc\x36\x98\x01\xfc\xe7\xcf\x62\x1b\x9b\x61\x3c\x8c\x57\xbc\x8c\x61\xfc\xd9\xfd\x79\xf9\xce\xbb\xf7\xbe\x2d\x32\x53\x52\x95\xba\x1b\x8e\x8f\x38\x87\x2e\x65\xc4\x8b\xb7\xbf\xfb\xee\xfa\xbb\xdb\x94\x31\x94\x00\x6e\x6d\xb9\x13\x14\xd3\x0a\x1e\x44\xd5\x54\x9b\x3a\x2f\x83\xf2\x40\xce\x72\x73\x97\xd4\x2b\xc0\x22\x8d\x4b\x1e\x08\xc6\xc3\x98\xa5\x1f\x3c\xc0\x7e\xcd\xb3\x6f\xa9\xf7\x7a\xc1\xdb\xbc\xed\xef\x29\x2d\x31\xa3\x5e\xc4\x18\x7d\xaa\xd9\x6b\x29\xc9\x51\x84\x3e\x85\xd0\x33\x4a\xa5\x0a\x50\x4f\xba\xa5\x00\xb9\x6a\xf2\xb0\xeb\xa5\xb2\xc0\xf0\x4f\xc8\x9c\x14\x38\x7c\xd0\x97\x3c\xb6\x1f\x4a\xfa\x9f\xf5\x82\xdf\xf0\x20\x53\x80\xad\x4d\x88\xb5\x7c\x60\xfc\x5f\x87\xcd\x5d\x15\x94\xc5\xf5\xe0\xaa\x76\x81\x1f\x21\x8f\x3d\x30\x45\x59\x83\x69\xf0\xd3\x30\x01\xb4\x6e\xe4\x22\x89\xdb\xb6\x9a\x56\x17\xed\x6c\xb0\x29\x0a\xe5\x70\xc9\xd7\xc2\xd2\xf5\xb0\xfa\x3d\x8f\x4d\x28\x53\xaa\xff\x49\x2f\xf8\x65\x6d\x49\x75\x72\x67\x93\x97\xd6\x96\x0b\x34\xdf\xe2\xf6\xb7\x26\x87\x83\xe6\x25\x8c\xeb\x2f\xa8\x51\xd4\xaa\x28\x3f\x5e\x29\xda\x68\xcb\x28\x42\x9c\xc1\x35\xa1\xa0\xb7\xf2\xa8\xde\x0d\xf3\xb2\x8f\x39\xe2\x6a\x4e\x6b\xf9\x30\xa0\xa1\x59\xd2\xdc\xdc\x16\x34\x94\xde\xc6\x76\xaa\x1d\x3d\x96\x4a\x25\xc0\xa2\xdd\x16\x34\x86\xb1\x68\x3b\xab\xe4\x3f\xf9\xec\xdb\x86\xa5\xf0\xc9\xb3\x55\xe1\x7f\xe5\xfa\xe0\x01\x0f\xfe\x74\x0c\xee\x6d\x11\x26\x65\x5b\xd1\x16\x80\xdc\x32\x00\x41\xfa\x1e\xb5\x62\x41\x40\xc1\x5f\x8a\xbc\x13\xa7\x42\x83\x17\xc5\xc8\xb2\x27\xf1\x06\x40\x51\xe5\x22\x8c\xc0\x14\x90\x8b\xa6\x90\xcf\x28\x0b\xad\xab\xf6\xfa\xcc\x75\xec\x35\x1e\xdb\x27\x2e\x8a\xa6\xff\xfd\x1e\x7b\xe2\xb6\xce\x91\xa7\x2e\x8a\xe6\x0c\xa8\x09\x83\xfb\xef\x4d\x85\x49\xac\x63\x19\xc4\x11\x84\x06\x5c\x1e\x35\xdf\x67\xb9\x27\xca\x2a\x2a\x78\xa7\x64\x66\x97\x52\x73\xb8\x2e\x1a\xec\xed\x1e\xbb\x8e\x30\xbd\x96\xdb\xb9\x28\x24\xdf\xed\xbf\x56\xb3\xaf\x2f\x3c\x1b\xa7\x71\xa7\xd7\x01\x98\x62\xd1\xec\x81\x53\x25\x95\x2f\xf4\x0e\xee\xc2\x44\xe3\x7c\x5a\x78\xc6\xb2\x9c\x9c\x57\x00\x52\x6d\x87\x1b\xd0\xcf\x5e\xb3\x29\x44\x24\x7b\x67\xcb\x1d\x27\x1a\x5c\xb5\xb4\xa1\x82\xa8\xa6\x5d\xc2\xbe\xce\x0e\x4a\xf1\xfc\x6e\x51\xfa\x2b\x5b\xe2\x84\xe0\xf4\xdd\xb3\xbc\xbc\x70\xb7\x28\x69\x06\xd5\xcf\xca\x6c\xc8\x0a\x6d\xd8\x3c\xa5\x83\x65\x2f\x64\xd7\x74\x45\x1e\x67\x91\xba\x3e\x13\x35\x23\xf7\xdd\x93\x6d\xf2\xac\x55\x8a\x94\x1f\x89\x53\x85\xb8\x3a\x69\x7d\x6d\xa6\xc4\x61\xe0\xa7\x2d\xd4\xd6\xed\xc6\xfa\x87\xde\x00\xa2\xed\xa7\xf5\x9a\x7c\xd0\x33\xb9\xe8\xa9\x46\x9a\x63\x23\x29\xd2\x8a\xc4\x1d\xc9\x96\xf6\x4a\x77\xb2\xa7\xe9\xb3\xa1\xfd\xb8\x5c\x50\x04\x79\xbb\x26\x59\x18\x15\x53\xdd\x0c\xff\x53\x4f\xe2\x96\x68\xf6\x9b\x89\xb8\x41\x1f\xa4\x3a\x74\xa8\xa8\x0e\xf2\xb1\xb0\x82\x61\x32\x27\x92\xb0\xaf\x46\xfa\x5b\x7a\xa4\xef\x1d\x39\xd2\xb2\x6d\x47\x6c\x01\x6c\x60\x19\xa2\x57\x37\xc6\x6f\xc8\xc3\x09\xb6\x3e\x6c\x18\x04\x04\x6c\x0c\xc4\xe5\x47\x6d\x90\x9f\xf2\xd8\x75\xb0\xe7\x8b\xc2\x9c\xaf\x0f\xea\x11\xfe\x84\x37\xec\x80\xd1\x07
\xdb\x9f\x30\x2a\xd8\xea\x25\xee\x29\xc3\x93\x57\x59\x75\xe3\x15\x3f\x0d\xf5\xea\x29\x02\xac\x47\x39\x7d\xf2\x8a\xd8\x6e\x6b\xbe\xc6\x63\x87\xca\x66\x77\x29\x6b\xae\x8b\xd2\x7f\xb1\xb7\x83\xac\x6a\xcb\xb3\x0b\x58\x9c\x0e\xe3\xbd\xfa\x81\x8d\x9b\x9b\x2a\xda\x14\xa7\x1b\x59\xb2\x81\x81\x4c\xcb\xb3\x0b\x20\xc0\x36\xe0\x2f\x04\x74\x94\xb7\x60\x5f\x58\xb8\xbe\xec\x6f\xf7\xb3\xef\x40\x22\x0d\x5c\xe2\xdd\x79\xd8\x14\x0b\xce\xf1\xfd\xec\x7e\xc5\xfd\x7e\x64\xbf\x72\x2f\x37\x0c\xa8\x39\xc7\x24\x9f\x47\x86\x0f\x55\xf5\xca\x5b\x3b\x6c\x0a\x84\x04\xec\x75\xb3\x94\xd6\x84\x88\x21\x32\xc3\x50\x84\x23\xe5\x50\xb7\xda\xb0\x56\xcc\x26\x56\xb1\x4a\x26\x68\x4d\x39\xff\x64\x11\xba\x49\x21\x44\xa5\x35\x3c\x5e\xc4\x6b\xb2\xfb\x3a\x6b\x5b\xdc\x11\x26\x0e\xc4\x54\x28\xbf\x6e\x65\x79\x33\x5e\x4d\xfa\xbc\x1d\x26\x26\xee\x8e\xaf\x03\xaa\x19\x54\xd3\xe0\x4b\x80\x6e\x16\x17\xb4\xe4\x52\x0c\x82\xce\x85\x58\x21\xc2\x37\x8b\x88\x37\x13\x11\xa6\xbd\x2e\xb6\x27\x37\x50\x3f\xeb\xe5\xaa\x3d\x93\x08\x4c\x6f\x9c\x34\x4e\x94\xbe\x23\x3a\x5c\xf0\x2d\x17\x48\x8b\x6c\x92\x6f\x6e\xf0\x7b\x95\xdc\x5c\xb3\xeb\xcc\x36\x44\x9e\x03\xee\x60\xa9\x22\xb9\x8c\x62\x81\x0c\x6f\x72\xda\xe4\xa6\x6a\xf0\x0b\x0e\x82\x76\x9a\xa5\xf5\x54\xac\x85\x14\x15\x00\x7b\xb9\x61\x22\xc2\xf8\xf3\x45\x9e\x59\xa6\xba\xa2\xcc\xba\x3c\xee\x74\x44\x24\x09\x46\xd2\x07\x67\x66\x59\xbf\x35\x73\xfc\x48\x9a\xf1\x0c\xf6\x60\x0f\x73\x02\x66\xbc\x68\x83\x17\xdf\x66\xaa\xf2\x65\xa1\x6e\x2f\x4c\xba\xed\x90\x38\x6d\xb9\x6c\xc4\x5a\x51\x44\x8e\x5c\x75\xe0\x55\x96\x87\x4e\x51\x95\xd3\xb4\x0e\xe3\xef\x4f\xb0\x9a\x75\xe0\x8a\x32\xcb\xc3\x35\x38\x73\x18\xa7\x31\x03\xd1\x36\x1d\x91\x52\xca\x0f\xff\x67\x26\x82\xd9\xe1\xaf\x74\xce\x4f\x6d\x6a\x08\x79\xb5\xa4\xa5\xa7\x99\x50\x81\x3c\x6e\x2c\xfc\x41\xf6\x47\x1e\xbb\x0a\xdf\x9d\xca\xf3\x2c\xf7\x3f\xef\xb1\xa9\x91\x14\x62\xa0\xc3\xf0\x4d\xf0\xe3\x80\xd5\x99\x84\x45\x89\xbe\x2e\x5c\xa4\xcd\xac\x97\x96\x40\xf3\xc8\xd5\x05\xdb\x30\x50\xff\x35\x15\x28\x63\x4b\x35\xb0\xfa\x4a\xd8\x97\x22\x01\xed\x12\x50\xf3\xf4\x15\xc2\xbe\x32\x30\x0e\xa9\xb2\x21\x1a\x74\x08\xd0\xae\x50\xa7\x61\xe7\x0d\xf6\x6e\xc9\xe6\xd3\x24\xf8\x6f\xf5\x82\xff\xe2\xcd\x3b\x96\x5e\x8a\x86\x92\xe2\x90\xa6\xd2\x49\x9f\xab\x4f\x1e\xad\x7e\x3a\x31\x3a\x1f\x1d\x63\x7e\xa8\x97\xf3\xac\x32\x23\xbe\x73\x2c\x78\xe5\xd8\x79\x49\xd3\xec\x0b\x05\xca\x55\x9d\x0a\xbb\x59\xb7\x97\xb8\x21\xbc\xb6\x35\x5e\xfb\x3e\xd1\x00\xaa\x5d\xa5\x54\x95\x74\x28\x29\xf4\x25\x4e\xe5\xc9\xe9\xad\x16\x72\x77\xa5\x25\xbf\x2f\x8c\xcb\xd3\x59\x3e\x43\xdf\xe6\x1c\xc2\x8d\x10\x0d\xfd\xd1\x9a\xb6\xad\xc2\xad\xe4\x16\x8f\xc4\x23\xbf\xc5\xb1\x8d\x87\x75\x8b\x0f\xa9\x72\x8b\x2d\xfe\xf2\x7d\xec\xf0\x30\xd6\x3a\x2b\xca\x85\xb0\x6c\x3b\x01\x82\x7f\x30\x1e\x6c\x2c\xda\x3e\x01\x10\x57\x07\xc6\x92\x4e\xd8\xed\xaa\x65\x0e\x25\x85\x6e\xf0\x7b\xf4\x4b\x3c\x26\x85\xce\x63\x89\x97\x3a\xcf\x36\x53\x91\x17\xed\xb8\xcb\x3b\x61\x1a\xae\x51\xee\xe7\x9c\x2f\x9d\x3a\x13\xa7\xbd\x8b\x3c\x17\x80\x31\x19\xa7\x6b\x8d\x4b\x1e\xd8\x6c\x1c\x42\xf4\xe9\x31\xf6\xa0\x47\xb6\x9c\x5f\xf2\x82\x9f\xf3\x64\x7f\x75\xb0\x54\x9c\x03\xb0\x44\x5f\x29\xe1\x65\x57\xb5\x97\xbc\xb6\xfb\xf0\xa2\xdf\x49\xe2\x74\x1d\xd0\x5e\x29\x34\x4e\x8a\x5b\xa4\xfd\x4d\xd7\x95\xed\x31\x17\x61\x02\x9f\x5d\x36\x5f\x49\x9b\x63\x8a\x26\xe1\x06\xd9\x11\x3d\x16\x92\x73\x5f\xe2\x51\x24\xeb\xf7\x06\xd9\xb2\x14\xb3\xe5\xd5\xab\x96\x80\x48\xb4\xc3\xe8\x05\xc1\x23\xd1\x8d\xdf\xb8\xa6\x02\xc1\x89\x7b\x61\x7e\x69\x76\x69\xde\xd9\x08\x6f\xba\x26\x78\xad\x67\xef\x84\x94\x43\x21\x1e\xc5\xc5\x7a\x83\xfe\x56\xab\xae\x63\x1a\x00\x5c\x5d\xee\xff\x88\x87\x05\x37\x31\x8a\x5c\xf6\xb1\xfa\xd5\x96\x9
b\x44\xde\xaf\x43\x77\xc9\xd5\xa8\xd5\x5e\xc8\xf2\x32\x4c\x2e\x79\xe3\xf1\xf3\xd2\x4b\xde\x78\xd2\x4b\x9d\x9d\xf3\xa5\xab\x58\xca\xae\x8d\x8b\x66\x11\xcf\xab\x54\xf7\xfe\xbf\x0f\xee\x8d\xa1\x07\xfa\x11\x42\x45\x03\x35\xeb\x15\xc8\xbe\x62\x09\x88\x44\x47\x96\xd5\x5e\x94\xc3\xa4\x7e\x3a\xcc\x8f\x94\xcd\xee\xa4\xa3\xc9\xb8\x91\xc9\x5e\xf8\x75\x25\x14\x7c\x1b\xd6\xb4\x8c\x4a\xf8\x33\xbd\x94\xcc\x37\xee\xdd\xff\x1f\xed\x70\xcb\xfc\x4a\xa3\x2d\x9f\x34\x7b\xcf\xcc\x02\xc5\x59\xc2\xce\xa2\x51\x90\x01\x00\xfc\x97\x40\x6a\x42\x60\x18\x4b\x73\xcf\x8e\x31\x39\x85\xfe\x64\xf0\x78\xea\x29\x7e\xfa\x74\x05\x97\x0d\x53\xe4\x0c\xf4\x3d\x1e\x3b\xd8\x85\xe9\x2f\xfc\xb7\x7b\xc1\x2b\x3c\x67\xa0\xb8\x30\xfc\x4c\xac\x54\xcb\x58\x14\x02\x97\xc9\xf3\x3c\xe5\xf3\x0b\x92\x0a\xc4\xdd\xe7\x84\x51\x94\x9f\x44\xf3\x69\xcb\x58\xd5\xe2\x42\xeb\xb6\xc2\x54\x07\xe9\x1e\x29\xfb\xdd\x58\x5e\x1f\x7d\x2d\x50\x14\xfc\xd6\x9b\x8f\x61\x36\x95\xe3\x37\x1f\x9b\xdc\xca\xc0\xb1\x6e\x05\xd7\x3e\x27\x78\xfa\xee\x42\x66\x07\x23\x72\xdd\x1b\xfa\xad\x1e\x73\xf6\xa9\xff\x3a\x2f\x78\xe9\xb0\x79\xc2\x29\x5a\x78\x54\xa7\xc8\xcc\xc9\x05\x76\x7d\xb3\x1d\x76\x67\x7a\x65\x7b\x2e\x2e\x20\x7d\x51\xdf\x9f\x09\x4e\x28\x15\x99\x3a\xa1\xd8\x71\x5d\x84\xc3\x6e\x73\xf7\x91\x33\xfc\x45\xf6\x18\x55\xef\x12\xa2\x26\xfb\x4f\x09\xa6\x87\xd7\x4a\x05\xb6\xad\xf3\x7d\x63\x3a\x6a\xfc\x1d\x63\xc1\x8f\x8c\x55\xa3\xc6\x55\x1a\x19\xa4\xa4\x70\xa4\x6d\xd8\x69\x8a\x28\x5f\x8e\xbb\x27\xf9\xa9\xb4\xe8\xe5\xc2\x98\x9b\xaa\xc1\xe5\x71\x71\x19\xf1\xe5\x08\x97\x7c\x72\x54\x94\xf9\x7c\xa7\x9b\xc4\xcd\xb8\x4c\xc8\x8f\x2d\xd7\x59\x02\xf0\x03\xb9\xa8\xbd\xd4\xd2\xf6\xed\x92\xe8\x03\xc9\xb3\xd7\xf8\xb3\x1e\xbb\x46\x1f\x7e\x00\x92\xfa\x35\x2f\x78\xaf\x37\xdb\x2b\xca\xac\xc3\x15\x3d\x54\xc4\x01\x0e\x3b\x07\xe8\x6e\xeb\x13\x27\x5e\x1a\x59\x46\x97\xb4\xf2\x22\xee\xf4\x92\x32\x4c\x45\xd6\x2b\x92\x7e\x8d\xa7\x62\x93\xea\x8e\x75\x99\xdb\x89\x1c\x21\x41\xb8\xe3\xe4\xed\xb4\x56\x69\xd8\x11\x77\x18\x6f\x08\xc8\xec\x16\x69\x9d\x49\x33\x4b\x53\x4c\x55\xea\x6c\xdd\xd7\x0f\x67\x6a\xe4\x49\xda\xcc\x72\x17\xf5\xe0\xcf\xc6\x83\xdb\x86\xbd\x70\xbd\x1f\x55\x89\x2a\x00\x82\x14\x93\xf0\xd1\xfc\x9c\x73\xc7\xbc\x6e\x9c\x7d\xd2\x40\x19\xc0\xac\x9e\x5e\x82\xbb\xbd\x12\x34\x5d\xdd\x5f\x6a\x3b\x3e\x12\xf8\x06\x97\xbf\xe3\xec\x59\xbd\x68\x11\xc9\x24\x78\xf6\xa3\x88\x3b\x70\x37\xd3\x93\xec\xdf\x16\x10\x93\x3d\x3f\xc7\x7b\x69\xfc\xbc\x9e\x14\xdd\x6d\x83\x5d\x75\xad\xec\x21\xbc\xf3\x2a\x76\xe7\x0e\xb2\x1a\x35\xb6\x4a\x77\xe6\xff\x31\x0b\x5e\xe4\x6d\x99\x10\xcd\xc5\xe7\x72\xd3\x48\x65\x2d\x0a\xa7\xae\x26\xea\xc3\x68\x1c\xc8\x85\x82\x26\x07\x9d\x66\xb1\x13\xf6\x79\xb3\x1d\xa6\x6b\x76\xc6\xc0\x8a\xe9\x61\x62\xcf\xe3\x76\xcf\xe3\xd6\xf1\xb8\xfd\x27\x8f\x4d\xd0\xde\x2a\xfc\xff\xe3\x05\xf3\x3a\x5d\x8e\x13\x0a\xb6\x69\xa7\x13\x04\x19\x1a\xd2\xc7\x38\xf1\x97\x6e\x76\x54\x87\x91\x3a\xc5\x66\xb7\x02\x45\xda\xe1\x01\x7b\xc8\x3b\xcc\xbe\xdd\x71\x96\xec\x86\x65\xb3\x5d\x87\x4f\xc4\x5a\xdf\x9f\xf0\x0f\x74\x44\xbe\x26\x98\x2c\xf9\xf8\x21\x25\xe1\x6d\x7d\x5d\xf4\xfd\x83\xfe\x7e\x79\x69\xb0\x3d\x0f\xd6\x5d\x7b\xb0\xfe\x87\xed\x3d\x58\x9f\xe5\xdf\x6f\x3c\x58\x87\xae\x76\x25\x5a\x61\x2b\xc2\x59\xf5\x74\xfd\xa5\x03\x6c\x72\x74\x7e\x00\x8c\xdd\x00\xe8\x76\xf0\x52\x7b\xe5\x81\xe0\x88\xfe\x45\xb4\xaf\x9a\x90\x39\x44\xa8\x77\x97\x78\x7e\x6e\x3f\x13\xec\xda\x76\x96\x44\x22\x9f\x47\xff\xb0\xbe\xbf\x14\x9c\x76\x9f\xb8\x11\xd0\xb1\x7a\x4a\xac\x25\x96\xc5\x26\x94\x4b\x77\x62\x9a\x22\x3a\xf3\x31\x8f\x3d\x0e\x9e\xce\xf5\x5c\xd7\x9d\x77\x69\x73\xd1\x1b\xbc\x61\x05\x70\x38\xda\xce\xa0\x12\xd5\x46\xe0\x7b\x54\x50\x30\x06\x7c\xa8\x73\x4d\x6d\x86\x
[hex-escaped binary data omitted]
\x95\xd5\x12\x4b\x93\xea\x95\x14\x6e\x80\x67\x65\xc0\x1a\x8c\xea\x79\x53\x24\x09\xe1\x63\xad\x8b\x7e\x71\x92\x1f\xe5\xc3\x35\xd8\x27\x2d\x41\xc9\xf5\x54\x77\xc9\x07\x5e\x47\x8c\x5b\xae\xa9\x9a\x1c\xc5\xb8\x65\x4b\x64\x04\x62\xad\xd1\x31\x72\x90\x23\x1e\x30\x8e\x57\x1b\xcc\x49\x83\x1d\xe5\x5b\xa8\xed\x47\x77\x4f\xbe\x35\x9d\x61\xfc\x8a\xbb\xb3\xb3\xce\xe4\x62\x0d\xb8\xc0\x51\xdd\xc1\xf7\x0f\x7f\x87\x5c\x4d\x5d\x24\x4f\x0f\xd4\x04\x34\xcd\x78\x33\xa1\x9a\x2f\x4e\x79\xab\x07\x07\x3e\xec\xc6\x3a\xcf\xde\x96\xbe\x9b\x7f\x39\xc6\xcc\x55\xe7\x7f\x79\x2c\x78\xcb\x98\xfe\x39\xf4\xe2\xa8\xba\xf4\xe8\xee\x85\xc0\xeb\x97\x22\xef\xe6\xe0\x1e\x16\x36\x9b\x59\x1e\x59\xa9\x96\xdc\x41\x3a\x04\x4b\x25\xde\x82\xb4\x40\xe8\x1f\x88\x68\x23\x70\x13\xc1\x11\x45\xf0\x20\x1e\xc5\x2d\xf4\x63\x00\x06\x03\xfa\x60\xfa\xab\x71\xb8\xd0\x29\x1b\x37\x6e\x9c\x73\x79\x0c\xa4\x54\x0c\xa8\xa7\xb1\x56\x26\xab\xa3\x0f\xa6\x16\x95\x0d\x19\x98\x6d\xaa\x12\x8e\x5a\x9a\x59\x78\x30\xd3\xc7\x8e\x39\xde\x19\xee\xc4\x3e\xe4\xf1\x91\xee\x9f\x07\xfc\x7d\x85\x28\x19\x7b\x83\x07\x4e\x66\x18\x6e\x03\x3c\xc4\xe8\x00\x87\xd1\x3c\x84\x4e\xd6\x54\x04\xf3\xa6\xb6\x11\xdc\x84\xce\x39\xa4\xe3\x48\x9c\x7c\x40\x86\x04\xff\xe8\x21\x76\x64\xd0\x79\x1b\x5b\xb6\x7d\x64\x21\x42\xf8\x6f\x26\x82\x13\xd5\x87\x15\xe4\x02\xdb\x71\x77\x4b\x60\xd2\x0f\x1f\x64\x5d\xe5\xe3\xb4\x16\x04\x43\x50\x10\x94\x62\x59\xe7\xd9\x37\x6b\x70\x33\xbb\x91\x1d\x1f\x1d\xf1\x3a\x6a\x10\x7b\xaa\xbc\x5d\xa8\xf2\x3e\x61\x07\xbc\x7e\x64\x97\x01\xaf\xff\xf1\x1b\x04\x5d\xb0\x97\x31\xe5\x61\xc3\x2d\x68\x6d\xaf\x80\x99\xf5\x67\xb4\x02\x06\x0e\x65\x45\xfb\x52\xa5\x25\x43\x75\x2f\x1f\x3a\xc4\x6a\xc3\xc0\xef\x1b\x0b\x59\x34\x17\x17\x79\x0f\x04\xee\xbb\x7a\xd1\x9a\x40\x84\xa8\x57\x1c\x0a\xee\x1c\xf1\x6e\x18\xa6\xc1\x90\xa2\x23\x48\xd6\x27\x0e\xee\x11\x90\xbd\x88\xf9\x3d\x2a\xf2\x30\x1a\x4c\xfb\x8a\x05\xe8\x06\x87\x87\xb0\x00\xc3\xce\xa6\xcd\x07\x9c\x64\xb7\xb2\x9b\x47\xf2\x01\x5b\x92\x8a\x87\xbc\xe6\xf6\x04\xec\x4e\xff\x0e\x4d\xc0\xb0\x36\x4d\xbb\x46\x50\x98\x2a\x04\xcb\xff\x1c\x77\x20\x3d\xad\x24\x23\xcb\xd9\xba\x48\xc9\x04\xe2\xff\xe2\x78\x70\xbb\xfd\xc0\xb0\x75\x96\x55\x06\xfd\x2b\x29\xb5\x80\xe4\xb5\x7b\x92\x18\xc8\xaf\x1a\x97\xbc\x89\xb0\x17\xc5\x52\x16\x77\x48\xd6\x1f\x8e\xb1\xd7\x79\x4c\xbf\xf3\xbf\xcf\x0b\xfa\x33\xf4\x43\x99\xcc\xe4\x5e\x81\x30\x2d\x55\x4a\x3b\xee\xcb\xaa\xc1\x15\xca\xee\x1a\x64\x45\x31\x00\xad\x91\x49\xe5\x07\x5e\x67\x54\x47\xa1\x12\x37\x99\x34\xb6\x8e\x36\xe2\xe3\x1e\xbb\x5e\x5c\xec\xc6\xe8\x80\xbb\x04\xf1\xca\x85\xff\x1e\xcc\xab\x70\xf3\x8d\xc1\x9b\xbc\x53\xd5\xb7\x3a\x88\x5b\x01\x29\x66\x2d\x54\xe4\x80\x5f\xc4\xce\xba\xec\x64\x89\x52\x5d\xd7\x0e\x88\xc1\x40\x9b\xc1\x88\xba\x1c\x6f\xc9\xbf\x99\x18\x0a\xdb\xba\x90\x45\xcb\xa2\xd3\x95\x27\x12\xee\xa5\x4f\x4c\x04\x47\x2b\xcf\x06\x76\xbb\x7a\x39\xe2\x06\xfa\xb9\x3d\x88\x9d\x5d\x93\x9c\xef\x51\x24\x67\x39\xf8\x57\x67\x68\xe6\x21\x14\x52\x4d\xbd\x4d\x60\x8e\xb1\x06\xab\x6d\xab\x27\xb3\x16\x6e\x8f\x43\xd8\x83\x47\x33\x83\x7b\xc8\xbb\x6f\xfb\x4b\xe6\x46\xff\xf8\x30\x74\xaf\x0a\xa9\xa8\x5e\x2c\x3f\xb1\xdf\x89\xba\x36\x5b\x31\x2f\x09\x9b\xe2\x3f\xed\xbf\xe4\x41\xba\x9a\x4b\xde\x44\x37\xcf\xca\xac\x99\x25\x0e\x35\xf9\xf2\x3e\x16\x53\x42\x9b\x50\x39\xbd\x9f\x59\xa0\x74\x36\xa5\x4a\xe8\x4a\x89\x67\xdc\xd4\x3b\x94\xeb\xb5\xa5\xb2\xf8\x12\xee\x45\x01\x64\x22\x97\x97\x49\x5b\xe4\xae\x4f\xf9\xeb\x3d\xa6\xbb\xe1\xbf\xdc\x0b\xbe\x77\x81\x7e\xe8\xd6\xd4\xef\x2b\x69\x0a\xd4\x54\x55\x14\x5a\x30\xc2\xf1\x60\x79\x76\x21\xa8\xf1\xe0\xfc\x1c\xfc\xb3\x34\xbb\xbc\x10\xd8\xbb\xee\x77\xc7\xd8\x7e\x08\xf7\xf1\x3f\x3d\x16\xfc\xea\xd8\x29\x88\xfc\x4
1\x83\x3a\x36\xa1\x7a\xb7\x9a\x08\x4b\x5f\xec\x74\x0f\xbd\x80\xf3\x4e\xa8\x71\x0f\x30\x80\xa8\x68\x87\x49\x82\x00\xc8\x7d\xf3\xa9\x51\xda\xe6\xbd\x44\x14\x27\x79\x1d\x13\x06\xd4\xe3\x94\xbe\xa3\xfe\xe3\xe7\xab\x15\x24\x02\xd2\xc0\x29\xd7\x25\x70\xd4\xc0\x92\xbd\x42\x30\x6e\xce\x31\x5a\x63\x58\x9d\x37\x93\xac\x17\x29\x54\xeb\xdc\xc4\x68\x38\x8d\x91\xce\x6d\x83\x3e\x23\xa7\x2b\xb7\xeb\x8c\xab\x61\xb6\xb2\xac\x41\x9e\xdd\x00\x45\x60\x88\x87\x3d\xb7\x7f\xf9\x44\x47\xbd\x34\xc4\x55\x1e\xdd\x69\x01\x1d\xe1\x57\x9f\x18\x1c\x77\x9e\x8c\x4c\xbb\x16\x5a\x8e\xb8\xce\x96\xfe\xa7\xa3\xec\xa7\xaf\x62\xd7\x76\xb3\x68\x3e\x6d\x65\xf7\xa6\x67\x25\x8f\xe4\xbf\xfe\xaa\xe0\x65\x57\x11\x1a\x84\xbc\xb4\x20\xbe\xd3\x2d\xe3\xe8\x76\xe3\x02\x5c\x1e\xc8\x23\x85\x3c\x1f\xc8\xa9\xa6\x70\x80\x73\x00\xb0\xc5\xe8\xdd\x8e\x80\xff\x58\x37\x8b\xce\x81\xaf\x45\x37\x8b\xce\xcf\xcf\xd5\xb8\x28\x9b\x8d\x49\x15\xc7\x0e\x20\xe4\x3c\xeb\xaa\x50\x79\x8d\x53\xa1\x22\x63\x6b\x03\xd5\x3a\x41\x6f\x88\xe1\x93\xa5\x0a\xcd\x5c\xd9\x85\xe5\x9e\xa0\xc0\x5a\xd7\xe3\xc9\x44\x41\x55\x86\x6c\xa5\xd8\xa4\xa2\x91\xe8\x26\x59\xbf\xa3\x82\xea\x70\xa2\x9e\x66\xe7\xf4\x04\x9b\x5a\xb5\x7f\x61\x41\x69\x45\x14\x36\x86\x8a\xb2\x1b\x86\xf5\x7d\x64\xd2\xa0\x7a\xbb\x9e\x59\xf2\x48\x83\x2b\x56\xac\xdc\xe2\xbb\x61\x0e\xd1\x9e\x72\x9f\x2b\x9b\x1d\xdd\x98\x76\xfb\x34\x27\xf1\x60\x4f\x1a\xdc\xf5\xcf\xd7\x2f\x65\x2f\x75\x24\x22\x7d\xde\x1a\xd8\x13\x85\xbd\x65\x48\x85\x8c\x97\x49\xbc\xd6\x2e\x25\xd1\xde\x34\x7e\xf1\xdd\x5c\xb4\xe2\x8b\x0e\xd2\x51\x83\x07\xcd\xc2\x6c\x7b\xba\x61\xba\x59\x04\x89\x0b\x03\x29\x54\x44\x0d\x30\xdb\x6d\x55\x0e\x9c\x6b\xac\xc2\x18\x53\x3f\xea\x8b\x5e\x1c\x05\x27\x89\xdd\x38\x22\x1f\x9c\x9f\x9f\x9b\x1c\x5a\x5a\x74\xdb\xa2\x23\xf2\x30\x09\x4e\xf2\x40\x8e\x30\x50\x6e\xf5\xb4\xf3\x31\xab\x99\x2e\x46\x80\x22\xf4\x16\xac\xf9\x5b\xfd\x4f\x79\x39\xac\xf6\xf1\xbc\xda\x39\x0c\xec\xd8\xed\x00\xb6\x6d\xc0\xd8\xd6\x7d\x44\x36\x2a\x15\x9b\xda\xc4\x19\xa7\xb6\xc9\x71\xba\x31\x7d\x33\xf0\xf8\x92\x77\x4a\x93\xbe\x9b\xb9\x4f\xf9\x52\x39\x59\xe7\x11\x17\x46\x0e\x38\x30\xc1\x2b\x01\xec\xb6\xe0\x94\x69\x18\x3b\x7e\x26\x6e\x89\x66\xbf\x99\x88\xb3\x60\xa0\xbd\x17\x6c\xdf\xaa\xda\x66\x98\xf2\x44\x48\xda\xa9\xce\x06\x8f\xe2\x02\x0d\xb6\x61\x1a\x4d\xc9\x1b\x65\x2d\x45\x9b\x82\x32\x9f\x40\xca\x5d\x77\x00\x37\xe9\x14\xb1\x0e\xee\x08\x45\xc7\xdb\x8d\xc1\x08\x55\xa1\x2c\x15\x10\x46\x8c\xc1\x3a\x78\x88\x91\x46\x60\x28\x87\x72\x1d\xd3\xe8\x17\xe6\x9c\x1b\x6b\xb4\x9a\x1b\xa8\x08\xbd\x67\x8b\x1a\xb7\x10\xa7\xf8\x46\x1c\x82\xf6\xac\xd3\xc1\xc4\x3a\xa9\xe5\x33\xa8\xae\x3d\x02\x8e\x67\x6c\xb4\xbf\xb9\x15\xdd\xf9\xcf\xe3\xec\x7a\x45\x57\x17\x05\x41\xff\xfb\x7f\x3a\x1e\xfc\xde\xf8\x62\xf5\x71\xc5\xfa\x66\x27\x5b\x08\xd3\xb2\xe0\x2b\x03\x94\x66\x85\xaf\x0a\xf0\x2e\x12\x79\x9c\x45\x94\x86\xb8\x89\x79\x93\xe1\x72\x6f\x25\x92\xc3\x0e\xd3\x3e\xef\x66\x14\x82\xd0\x6c\x87\xe9\x9a\x76\x64\x04\x12\x6b\xe2\xdb\x6d\xab\x7c\x34\x00\x68\xc0\xd8\xb9\xac\x14\x27\xf9\x4c\x0b\xa6\x5a\xce\x7d\x53\x14\x45\xab\x27\x0f\x4e\x0c\x28\x3b\x83\x89\x0f\x64\x77\x6a\x92\x49\x2f\x24\x7f\x9e\x96\x48\x1b\x65\xa5\x83\x65\x49\x22\x83\xa5\x47\xbe\xdd\xd8\xbe\xac\x78\x7b\xd5\xd9\x73\x62\x93\x2e\x1b\xe5\x80\x61\x5d\x23\x85\x10\x29\x9e\x4c\x85\xbe\xa2\x3d\x8a\x2c\x97\xd5\xd0\x8d\xa6\x09\x53\x6a\xdd\x38\x4f\x6f\x2a\xc8\x88\xd9\xa5\x79\x4a\x20\x39\x83\x9a\x09\x10\x9a\xcd\x59\xd5\x31\x38\xee\x0e\x78\xe0\x00\x7b\x4c\x51\x89\x96\xfb\xea\xfe\xe0\x0f\xf6\x57\xaf\xeb\x4a\x21\x67\x37\x50\x54\xc7\xe0\x9d\x8d\x3b\x43\x1e\x49\x2b\x90\x07\x32\x52\xa0\x93\x89\xe3\x64\xae\x63\x41\x74\x85\x03\xb7\x22\x4e\x21\x7a\x17\x83\xeb\x4c\x
2e\xf0\x46\x1a\x19\xa9\x41\xce\x94\x56\x08\x9e\xbe\xb6\x14\x5c\x42\x5b\x34\xd7\xab\x4e\x1e\x71\xa7\x23\xa2\x38\x2c\x45\xd2\xb7\x0f\x35\xba\x2c\xa9\xe4\x0c\xf3\xa9\x72\x8c\x07\xa6\x41\xf9\xc3\x83\x3d\x59\x6c\x2a\xb7\x78\xec\x00\x64\xde\x5e\x8d\xd1\x10\x4b\x37\x79\xaf\x30\xc0\x1d\xc3\x06\xdb\x96\x0c\x02\xee\x40\x29\xf5\x67\x1d\x13\xa9\x35\x7a\xc0\x0d\xc6\x66\x12\xd0\x04\x20\x40\x4f\xcd\xae\x9c\x46\xa9\x29\x94\xe1\x88\xe1\x4c\x61\x00\x5a\x96\x53\x1a\x0c\xb9\xdd\x62\xed\xf7\xdf\x4a\xe2\x6e\x57\x44\x30\x92\x1c\x27\x65\x60\xe5\x6c\xa6\x40\xf6\x7e\x55\xee\x72\x3d\x84\x2d\xa8\xd2\xe0\x96\x47\x03\xf7\x36\x1b\xbe\x32\x03\x43\x36\x3b\xf0\x1f\x51\x95\x45\x73\x8e\xc0\x9f\x1d\x60\xd7\x94\x96\x86\xa9\xf0\x7f\xe7\x40\xf0\xa7\xfb\x6d\xa5\x53\x15\x52\xd0\x76\xc8\x05\xac\x94\x6e\x16\x15\x87\x87\xeb\x06\x35\x7c\x81\x7c\x04\x2a\x08\x3c\x22\x10\x32\x9c\x41\xa6\x78\x01\xa0\x29\x79\x9f\x87\xbd\xb2\x8d\x40\x16\xe8\x52\x39\xc8\xfa\x69\x25\x1b\x98\xde\x77\xce\xf5\x8d\xe0\xf9\x88\xa4\x49\x3e\x4f\xd8\x5c\x9e\xa8\xc8\x49\x4e\x43\x27\x87\x72\x33\x85\x43\x7d\x1a\xd8\xc9\xe0\x24\x7f\x01\xe3\x3c\xb8\x5d\xe9\x25\xef\xa0\x27\x9c\x07\x50\x22\x38\xc9\x6f\x87\x3f\xee\xa8\xe1\x53\xa3\x94\x5c\x8e\x3b\xa2\x28\xc3\x4e\x57\x96\x31\x8f\x79\xa9\x9e\xcb\x01\x2f\x9e\x9e\x3d\x71\xe2\xc4\x93\xe0\xeb\x17\xca\xff\x34\x1a\x0d\xf6\x42\x7d\x15\x68\x55\x2b\xe5\x97\x73\x74\xbc\xc6\xe9\xcd\xb8\x38\x40\x68\x62\xc9\x3b\x19\xb9\x25\x90\x42\xb3\xa0\xf4\x36\xc8\xd6\x81\x3f\x79\x2e\x9a\x02\xe0\x26\xe0\xb0\x63\xb9\x10\xae\x1e\xe8\x6b\xbf\xc6\x07\xaf\xd0\x8a\x27\x5a\x99\xc7\x6b\x6b\x22\x1f\xb2\x5a\xf6\x95\xf9\xc8\xdf\x07\x46\xdf\xf6\x24\x76\xcb\x16\xe9\x8d\xaa\xe2\xa3\x3d\x9b\x3b\xc4\xe1\x7a\xcf\x01\xf6\xb8\x8d\x41\x7e\xae\xf0\x2f\x1d\x08\xfe\x71\xff\x10\x4e\xaf\xb0\x22\x2a\xc3\x52\x7b\x03\x2b\xe2\x3a\x42\x58\x24\xce\xac\xa8\x9c\x7f\xbc\xa0\x13\xd2\xfc\x52\xc6\xa2\xc2\xe1\x3e\x6b\x06\xb7\x04\x3c\xf0\x0b\x49\xe0\x2c\x56\x5a\x9d\x30\x57\x26\x06\x52\xa9\x1c\x5b\x51\x14\xb2\x38\x4b\xc9\xb8\x61\x65\xbd\x30\xe1\x0b\x17\xa6\x16\x2e\xcc\xf2\x8e\x90\xbc\x4e\x5c\x74\xb0\x8f\xe8\xcb\x09\xcc\x9f\xec\x91\x61\x7f\xe9\x9e\x01\x0a\x12\x89\x9a\x1e\x79\x98\xdb\x8e\xcc\xc0\x0d\xc6\x70\xab\x92\xea\x28\xc2\xf0\x16\x15\x36\x6b\x73\xff\x8a\x0f\x8d\x73\x9e\xa8\xb9\x86\x01\xc7\xc6\xb1\xcd\xbc\xc8\x28\x54\x52\xca\x32\x7c\x46\xcd\x70\x1b\x15\xd0\xab\x82\x63\xa4\xae\xd2\x6d\xaf\x0a\x44\xb8\x8b\x8d\x20\xb0\x96\xd1\xb5\xbf\x26\x4a\x72\xb0\x1c\xa4\x4d\x94\xc6\x08\xd8\x65\xc5\x41\x9d\x56\xb8\xb7\x83\xbe\x35\x8e\x17\xb1\x35\x3b\x85\x10\x43\x7c\x07\xeb\x92\x5e\x11\x6e\xa3\x72\x23\xd4\x92\x4d\x1d\x9c\x3f\xeb\x2a\x8a\xad\x5d\x76\x12\x33\x4c\x79\x62\x6d\x36\x5f\xf5\x48\x47\x59\x77\x60\x8f\x82\xb2\x5d\x3e\xc7\x9f\xe4\xe1\x1b\x46\x91\xc9\x99\x86\x6e\x62\x5b\x72\xe6\xa3\x9c\x9c\xd8\xc7\xf7\xb3\x6b\x43\x48\x19\xaa\x32\x6e\xfb\xef\xdf\x1f\xbc\x7d\xbf\xfb\xec\x72\x74\x28\x29\xc7\x6f\x8d\x22\x84\x1f\xb1\x97\x4e\x4d\xaf\xb9\xef\x66\x35\xd2\x56\x55\x9d\xd0\x11\x65\x3b\x8b\x26\x6b\xb4\xad\x88\x79\xb3\xf6\x3f\x35\x15\x09\xf8\xc7\x40\x76\x69\x1b\x87\x5c\x7f\xb0\x5e\x61\x41\x25\xfe\xa6\xa5\xc8\x5b\x52\xd6\xa6\x08\x69\xc9\xa9\x15\x16\x8f\x1d\x9a\x24\xcc\xa4\x96\x04\x17\xbd\x30\x2e\x0b\x8b\xb1\xb2\x84\x69\x28\x2f\xcf\xb1\x68\xc9\xc5\x02\xb8\x32\xa1\x1c\xe6\xd4\x05\x6d\x2e\x49\x95\xa3\xaf\x4e\x1f\xe6\xbc\x99\x81\x83\x1d\x86\x27\xd0\xc9\xaa\xcc\x31\x60\x26\x81\x74\xb0\x75\x57\x35\xa5\x1e\x58\x88\xb8\xd0\x29\xdb\x74\xda\x34\xad\x6f\x5b\x84\x64\x7c\x79\x7f\x64\x88\xbd\x92\x31\x35\x30\x8b\xd1\x5d\x1a\x0d\xd7\xd0\x76\x95\xe6\xa4\x58\x07\x4e\x8f\x64\x6c\xd0\x11\x6c\x59\x1e\x45\xba\x9d\xca\x9c\x5f\x1c\x63\xd7\xb4\
x8a\xbb\xf3\xac\xd7\x5d\x00\x93\xae\xff\xf1\xb1\xe0\x43\x63\x73\x44\xe5\x89\x46\xf7\xd2\x48\xe4\x49\xdf\xe2\x97\x14\x49\x47\x11\x11\xb0\x13\x37\x53\x91\x17\xed\xb8\x8b\xde\xe6\x03\x28\x75\xf4\x21\x2d\x36\xca\xa2\x24\x51\x36\x38\x78\x55\x73\x17\xf8\xaa\xc9\x4f\x2f\x59\x1d\x53\x8a\x59\x88\x4b\x34\x4a\x47\xb9\x8d\xe3\xc1\xac\x78\x61\xd2\x6d\x87\xf5\x44\x6c\x08\x4a\x8c\xa7\xc8\x5f\x3b\x4b\xb3\x1c\x2f\x8f\x82\x0c\x48\x70\x48\x70\xc1\xd4\xea\xe2\x69\x72\xdb\x77\x7c\xc9\xb7\x9a\x5e\x0b\xa0\xf0\x71\xec\xf0\x16\x37\xb6\x1d\x26\xef\xff\xf8\xe3\x82\xff\xe5\x39\x81\xf3\x2e\x82\x89\x65\xfa\x26\x8c\x0c\x74\x2b\x6c\x0d\x41\x2f\x18\x00\x1f\xd1\xb2\x46\x3f\x0d\x3b\x24\xfb\x6b\x11\x09\x76\x8b\xdd\x32\xdd\x67\x00\xea\xa4\xc3\xd9\x6e\x1b\x70\xf5\x77\x11\x36\x1c\x77\x57\x51\x36\x71\x5a\x52\x03\xc0\x1e\x82\xba\xae\x71\xc9\xbb\xca\xb4\xec\x26\xd7\xfc\xc0\xf5\x2c\x65\x8f\x83\xb8\x41\xec\xf7\xa9\x8b\xdd\x30\x05\xcb\xee\x85\xe0\x9e\x99\x21\xcf\x25\xcd\xda\x74\xd1\x6c\x2a\xbd\x92\x1f\xa9\xbd\x27\xe4\x67\x2e\xa2\xd6\x2b\xc6\xd9\xf5\x14\xa8\x48\xbe\xe9\xb1\x28\xfc\xaf\x8f\x05\x3f\x33\xb6\x28\xe4\x2a\x36\x4b\x3b\x6c\x4b\x95\x20\x3f\xe7\x8d\x9d\xcd\x2e\x3f\x65\xd1\xd1\x6e\xd2\x5b\x8b\x8d\x69\x5a\x52\xc7\x6c\x33\xb5\xac\x35\x06\x87\xc6\x66\x6a\x0a\xc0\x29\x45\x26\x49\x79\xd1\x2b\xd4\x8b\x65\x91\x77\x94\x59\x2f\x44\x94\x71\x0d\x3b\xaa\x6b\xcb\x69\x38\x20\xca\x54\x52\x48\xee\xf0\x60\x10\xd7\x62\x94\x07\x74\x24\x2e\x83\x69\x35\x90\xb1\x83\x43\xd8\xb3\xe6\xef\xd6\x9a\xbf\x67\x6e\xbf\x72\x73\xfb\x57\x3d\x76\x35\xdc\x47\x0a\xc9\xfe\x8b\x5e\xf0\x11\x6f\x6e\xf8\x91\x1e\x42\x61\xd5\x06\xac\xd0\x9f\x5c\x50\xa0\xb9\xd1\xef\x14\xa4\x4a\xa5\x86\x6a\x1c\x02\xba\x9e\x15\xe4\x59\x50\xe3\x41\x91\xb5\xca\xe0\x7b\x00\x56\x53\xcb\xff\x11\xaf\x2b\x4b\x19\x92\xde\x85\x0b\xa4\xc0\x2c\x10\x53\xa9\x15\xc6\x89\xbc\xac\x25\x53\x0c\x64\x17\xbe\xdc\x8a\x89\xfd\x8c\xed\x5d\xf0\xe0\x16\x4e\xf8\x3b\x0b\x29\xff\xe6\xf0\x40\x7c\x8d\xc7\x98\xb9\x27\xfd\x17\x79\x81\x58\x30\xd7\x66\x3b\x4b\xa2\xa1\x77\x29\x59\xb4\xd5\x9d\x44\x58\xe7\x44\x23\x10\x25\x60\x63\xab\x55\xde\x32\xc0\xe4\x1c\xb3\xef\x3b\xff\x29\xc1\xf1\x05\xab\x29\x57\x93\x05\x21\xb7\xb4\xc4\xd6\x47\x0e\x4f\xf1\xc3\x1e\xbb\x26\x17\x00\x0c\x40\x1c\xdb\x4b\xbd\xe0\xe2\x23\xb0\x4f\xd1\xa9\xc0\xb4\xe3\xc2\x16\x03\x64\xb2\xcb\xed\xfc\x9d\xc7\xae\xc7\x79\xba\x0b\x55\xab\x67\xb3\x48\xf8\x7f\xe8\x05\x9f\xa5\x7c\xbc\xd6\x63\x6b\xe0\xed\x6c\x10\x2d\x6d\x56\x36\x5b\x58\x3a\x21\x7b\x4c\x92\xfa\xae\x66\xbd\x34\x6a\x70\x27\xd3\x8a\xd3\xc8\xbc\xd2\x19\x03\x96\x57\xa1\x43\x8f\x1e\xce\x4b\xcf\xf2\xff\x16\xdb\x7b\xb6\xdc\xe5\xdf\xb9\x5d\x00\xbe\xcd\x84\x0d\x75\xff\x7e\xa7\xc7\x9e\x69\x8e\xa6\x94\x88\x80\x0d\x2a\xea\xc6\xaf\x50\x9f\x52\xa7\x80\xe6\x39\x9f\xba\x74\xef\xb9\x25\xb8\xe4\x16\xf2\xac\x5b\xdc\x9b\x2f\xc1\x08\x66\x24\x91\xf0\xcf\x05\x67\xb6\x7a\xef\xc6\x14\x57\x4a\x22\x96\x02\xdd\x34\x40\x73\x1a\xec\xab\xd7\xb0\x5b\x6c\x10\x4e\x05\x5a\x8f\x39\xcc\x09\x6e\x74\x63\xba\x71\x1f\x66\x75\x98\x85\x5b\x03\x31\x3c\xfd\xff\x76\x4d\x70\x61\xc8\x73\x37\x97\x4a\x05\xec\xbd\x13\xae\x0b\x1e\xf2\xe5\x33\x4b\xb2\x58\x4a\x6e\xed\x5a\xaf\x4e\xc9\x23\x1c\x9e\xf3\xe3\x57\xb3\x07\x3d\x36\xd1\x0c\xef\xea\xa5\x51\x22\xfc\x5f\xf0\xfc\x7d\xab\xfd\x52\x04\x6f\xf5\x56\xd4\xc3\x15\xbc\x45\x17\x4e\x9d\xe5\x2a\x51\xd1\xec\x0c\x5f\x85\x77\x76\x72\x73\x4b\x99\xe8\x68\x6e\xa9\xdd\xc3\x3a\x85\x90\x95\xb4\x15\x64\x4a\x07\xce\x58\x25\x42\xcf\x7b\x45\xc9\xf3\x2c\x2b\x35\xe6\x85\x5e\x64\x38\xae\xb0\xa9\xed\xf3\xf7\xb2\x31\x76\x90\xf4\xbe\xfe\xff\xeb\x6d\x91\x14\x78\xe4\x3a\x90\x8e\xd2\xc4\xb8\xbe\xd7\x5b\xa1\x1a\x57\x46\xa6\x46\x47\x2d\x3b\xd2\xd1
\xb8\x50\x83\x6d\xf0\x53\x31\xf0\xe4\xa6\x82\x2c\xe7\x2b\xbd\x3c\x59\xd1\x00\x09\x26\x89\x09\x63\x24\x59\xab\xfc\x1e\xb1\x9b\x08\x21\x1e\x04\xfd\x48\x01\xd6\x9f\x28\x44\xaf\x10\xa6\xa1\x06\xfb\xfa\x04\x1b\xef\xe5\x89\xff\xd5\x89\xe0\x4b\x13\xd8\xe6\x5a\xbc\x41\x44\x56\x61\x24\x28\x42\x4b\x4d\x02\xe0\x92\xce\x3a\x75\x7e\xf1\x0c\x26\xa3\x3a\xb2\x02\x2c\xa1\x38\x39\x05\x21\xa3\x27\x25\x93\x3e\xd5\x0d\xcb\xf6\xca\xa4\x0b\x94\x9b\xb5\x68\x74\x99\x3d\xe6\x61\x43\x5d\x6e\x0b\xbe\x22\x2b\x5b\x51\xdd\x97\xfc\x50\xae\x44\x60\xe3\xd5\xac\x66\xc0\x1d\xfd\x6d\x3a\x6a\xdc\x34\x43\x54\x2d\x2d\x4a\x11\x92\x99\x05\x70\x16\xd0\x33\x63\x15\xb1\x51\x93\x0d\xc9\x9e\xc6\xa1\xd6\xa4\x40\x38\xb8\x1c\x75\xd6\xb1\x36\x57\x81\x11\xe6\x35\xbe\x02\x28\x83\xfa\xf9\x8a\x82\xc0\xa7\xba\x78\x9c\xd6\x95\x29\x5d\x56\x14\x12\xed\xdc\xd4\x08\x0a\x3c\x09\xfb\x08\x93\xb9\x11\x67\x09\x4c\xfa\x64\x43\x0d\xbd\x63\xa5\xad\x09\x53\x3e\xbf\xa0\x82\x0e\x1b\x8c\x2d\x24\x02\x1c\xb6\x34\xb2\x38\xc2\x9c\xaf\x68\xb8\x04\x9c\xe5\xe9\xe3\xb7\x34\x8e\x35\x8e\x35\xa6\x57\x10\x29\x96\x6a\x96\x7b\x27\x2e\xd6\xfb\x0a\xb0\x5a\xee\x93\x52\x52\x86\x35\x79\xc1\xf1\x66\x88\x88\x9d\x98\xa5\xc7\xec\x58\x48\x6a\x97\x24\x30\x71\xca\xfc\x2f\xcb\x84\xa9\x75\xf0\xc8\x2b\x00\xa6\x35\x15\x78\xd6\x81\xea\x68\x4b\xb5\x7b\x08\x96\x7a\xcd\x36\x62\x83\xca\xd7\xb2\xe5\x24\x5e\x17\x49\x9f\xb4\xb5\x52\xc4\x96\x3b\x4a\xde\x36\x35\x1e\x37\x44\xa3\x06\x9b\x41\x84\x05\x14\x29\x7b\x79\xca\x7b\x5d\xc4\xc6\x48\xc5\xa6\x46\x10\x24\xac\x31\xd8\x9a\x7a\x93\x05\xc0\x5c\x05\x28\xa9\xcb\x1d\x4c\x2f\xa4\xbc\x09\x44\x30\x50\xdc\x57\xd0\x60\x6c\x06\xe1\xb7\xe5\x55\x48\x40\xd5\xa4\x29\x69\x69\x7c\x70\xa5\x39\x4d\x95\xad\x45\xeb\x75\x08\x16\x34\x94\xcd\x34\xf8\xfd\x59\x0f\xca\xea\x64\x2a\x21\x40\x44\xa1\x91\x4c\xce\x9f\x0e\x5b\xa7\x6a\x88\x78\xe8\xa3\xe7\xa4\xf2\x32\x1e\x1a\x26\x6b\x84\xec\x6f\x59\x4a\xf1\x97\x3e\x97\x4d\x51\x82\x84\x2c\xe7\xab\x61\x11\x37\xc1\x60\x87\xcc\x74\x20\x5f\x9c\x94\xcd\x6f\x66\x79\x74\x67\xa0\x01\x1a\x51\xd4\x6f\xf0\xd3\x79\xb8\x86\x1a\xd5\x23\xc1\x0d\x8d\x46\x23\x98\x84\xb1\x3f\xaf\x27\xf2\xbe\xcd\x1e\x1e\x09\x9e\x42\x6f\x41\x21\x52\x1a\x54\x23\x01\xe4\xcd\x21\xc0\x2f\xd9\x57\xc5\x9c\x6e\x67\x39\x41\xe9\x22\x4e\x77\xaa\xa1\x9f\x7a\x89\xf0\x3f\x3b\x1e\x9c\xa9\x3c\x23\x26\xd5\xb9\xd7\xe4\x01\x30\xba\xa0\x10\xdc\x24\x35\xdf\x2a\x77\x90\x92\xdd\x2e\x79\xfb\x37\x44\xbe\xea\x7a\xc9\xff\xd5\x18\xfb\x84\xc7\x1e\x93\x9a\x76\xce\x2f\x9e\x29\xfc\x0f\x78\xc1\x25\xef\x9c\xfb\x90\x84\x45\x4c\x1b\x03\xb0\x4f\x61\xc2\x7b\xb9\xca\x0d\x44\xd3\x4d\x34\xcb\x85\xd1\x6c\x70\x7e\x94\xf2\xdc\xa8\x09\x5a\xed\x11\x00\x8d\x4a\x41\xd0\x4b\x92\x1a\x26\xd8\xe0\x45\x29\xba\x8a\xac\xc9\xcd\xd2\xe0\x3c\x38\x1a\x90\xfa\x22\x4c\x92\xad\x24\x97\x37\x7b\x0c\x87\xe9\xff\x90\x17\x5c\xbc\x20\xf2\x55\x37\x70\xc0\x4a\x18\x68\x4f\x0e\xa7\xac\x22\xab\x45\x8d\x40\x76\xd7\x24\xd3\xd8\xcd\x0a\xf9\xdf\x5e\x59\xa3\xbc\x1e\x35\xd9\xa1\x66\xbb\xc6\xdb\x22\x8c\x6a\x0a\xc1\xe6\x72\x3a\xf8\x63\xe3\x8c\x0f\x51\x79\x9c\x4a\x37\x4e\xe7\x59\x87\x20\xda\xff\x61\x2c\x38\xe9\x3c\xa9\x82\x6a\x18\x88\x76\xbd\x24\x1a\xdd\xdc\x5d\xe1\x77\x8f\xb1\x26\xbb\x5a\x03\xad\x2f\x8a\x96\xbf\xb4\x45\x0a\xdc\x01\xac\xf5\x53\xe9\x06\x81\xac\x3f\x7e\xd9\x81\xa5\x2f\x33\xca\xc4\x04\x52\x3e\x4b\xd9\x01\xf4\xea\xf3\xa3\xe0\xbe\x99\x54\x93\x0d\xeb\x98\x22\x14\x21\x64\xc6\x01\x35\x20\x65\x66\x18\x48\x34\x60\x23\x1f\xcd\x3e\x67\x7e\xee\xd4\xb9\xe5\xf9\xd3\xf3\xa7\x16\x9d\xe3\xf4\x4c\x76\xa8\x10\xcd\x1c\x21\x3a\xce\xee\x00\xa1\x63\x09\x4a\x9b\xe1\xfc\x6b\x39\x1c\x7c\x58\x1d\xcb\x1b\xf7\x0d\x75\x18\x3f\x75\x51\x34\x67\x30\xdd\xc7\xdf\x8f\x07\xd
3\xe6\xa7\x73\x00\x03\x79\x2d\x00\x3a\x31\x39\x09\x05\x1c\x33\x0f\xba\xee\xb7\x1f\x18\x67\xef\x1d\x67\x07\xc9\x45\xcc\xff\xa9\xf1\xe0\x3f\x8f\xcf\x92\xbf\x98\xca\xee\x69\xbb\x8f\xc9\x09\xbb\x28\x9a\xbd\xd2\xb1\x1d\xea\x46\xd0\x4e\x40\xe8\x1a\x3c\x8a\x73\x50\x9c\xf5\x35\x25\x50\x75\xc1\xfd\x97\x65\x25\x3f\x72\x78\xea\xf0\xa4\x8d\x09\xa0\xd0\xb6\x20\x75\x08\x70\x99\x0a\xd0\x40\x77\x8a\xf4\x09\xb2\x1f\x87\x23\x95\x29\x13\x2e\x7d\x18\x32\xf4\x2a\xe4\x45\x5b\x80\xa3\x56\xc6\xcb\x3c\xd4\xaa\x78\x78\x0a\x97\x5d\xde\xc3\x44\x8c\xfc\xc8\xe1\xef\x3d\x0c\x1e\xbf\x93\x7c\x33\x4b\x0f\x97\x04\x7b\xb5\xac\xe8\x37\x55\x24\x6f\x68\x75\x9b\x8a\x8b\xdd\x24\x6e\xc6\x25\x79\xa7\x71\x88\xdd\xcf\x94\x4c\x2e\x92\x44\xb2\x5d\xb1\x1d\xc3\x7f\x0c\x66\x93\x84\xd7\xb0\xe0\x49\xbc\x21\xa6\xda\x22\x4c\xca\x36\xe2\x13\x4a\x32\x00\x30\x96\x00\xeb\x4c\x6f\xb6\x3a\xc1\x7f\x7f\x68\xe8\xee\x38\x13\x77\xe2\x72\x31\x4c\xd7\x84\xff\x85\x43\xc1\xbd\xe6\xa7\x3c\xa0\x85\x51\xa1\xa1\x99\x1a\xb0\x49\x50\xc1\x80\xe7\x80\x8c\xe5\xba\x18\xdc\x9f\xda\x85\xd5\xdd\x3b\x3f\x3e\xb1\xa7\x0d\xdd\xd3\x86\x7e\xe3\xb4\xa1\xff\x22\xd5\x83\x9f\xf4\xd8\x3e\x29\x88\xf9\xbf\xee\xb1\xa9\x6d\x6f\x13\x73\xbc\x21\xca\xf1\xd5\x1e\x04\x61\xd8\xc7\x8e\x4e\xb8\x90\xcc\x5a\x13\x52\x71\x3e\xb2\x03\x91\x7d\xaf\x87\x69\x54\x47\xda\xf7\x90\xb7\xb8\xbd\xd6\x69\xca\xaf\x0f\x8b\xa7\x32\x83\xab\x86\x52\x7d\x7c\x82\x35\x76\x94\x30\xdf\x20\xa1\xbc\x7e\x22\x38\x51\x7d\x38\x0c\x5c\xc0\x2e\x33\x2a\xa8\xf3\x8b\x07\xf6\x08\xdf\x6e\x09\xdf\x45\x15\xd4\x99\x05\xdf\xa9\xe3\xc8\xb5\xaf\xd5\xe0\x42\xd8\x37\xe1\xed\xec\xe4\x16\xa0\x7e\xdb\xec\x86\x3d\x92\xbb\x0b\x92\xbb\x6a\x51\xdc\x0b\xbb\x8b\xf6\xfc\xd6\x11\xc1\x9e\x0f\x79\xc9\xf6\x14\x63\xde\xbf\xdb\xe0\x94\x98\xf5\xae\xa8\xaa\xab\xe7\x7d\xa8\xba\xfa\xfb\xaf\xb3\x73\xc3\x6c\xd1\xf3\x99\x85\x79\x70\xb5\xf0\x3f\xff\x98\xe0\x7e\xf5\xc3\x55\xf5\x22\x60\x6f\xe9\x06\x36\x12\xc2\x57\x4d\xfb\xd9\x74\x41\x8d\x96\x9b\x77\x28\x40\xc1\x70\x1a\x3a\xbd\xe6\x84\xfa\xd0\x21\x3d\x2f\xbb\x76\x8f\xf4\xec\xf1\x5c\xdf\x38\x02\x70\x82\x12\xd7\x3d\x31\xf8\xb7\xe9\x88\x9c\xe5\xb8\x8f\xed\x8f\x7e\xd3\x63\xd7\xe9\x4d\xaf\xf6\xed\xcf\x7b\xec\x69\x57\x40\x3f\xe0\xd4\x51\x1d\xa7\xb3\x7c\x4e\xa1\xaf\x05\xcd\x6a\x0b\x1a\x60\x9e\x7e\x9a\x63\x47\x4e\xb9\x33\x0b\xf3\x64\xec\x50\x2e\xbc\xdd\x3c\x5b\x0d\x57\x13\x9d\xbe\x41\xd9\x1f\xa9\x8a\x06\xfb\x93\xfd\xec\xdf\xe0\x27\x33\xa8\x08\xbe\xab\x4f\x86\x9f\xf9\xb9\xc5\xc2\xff\xf8\xfe\xe0\x4f\xf7\x85\xbc\x13\x76\xe5\x64\xd0\xf2\xca\x57\x28\xda\xa3\x59\x84\x60\xeb\x28\x60\x0b\x15\xe9\xca\x25\x15\xe7\x8e\x2b\xef\xed\x32\xe3\x6d\x91\x74\xa9\x26\xb9\x43\xa4\x90\xa6\x54\xe0\x3a\xe4\xa9\x28\x15\x11\xac\x8b\x56\x2b\x6e\x42\xb3\x9b\xa1\x09\x92\x6a\xf0\x59\xaa\xa1\x19\xa6\x5a\x03\x0a\x59\x7d\xba\x39\x18\x20\x2b\x9d\x0b\x0b\xde\x55\x19\x00\xa1\xfb\xe4\xa3\xd9\x47\x3c\x44\xd8\x8b\x80\xda\x9a\xb5\x0c\xca\x2b\x66\xc8\x2e\x6a\xba\xbb\x96\xe1\x03\x8d\x1a\x29\x40\xe5\x43\x39\x88\xc1\x99\x9f\x5b\x44\xe9\x9e\x9a\xc7\x8c\xaf\x64\xf7\xc4\x30\x69\x98\x58\x9a\x2c\x08\xae\x4b\xd7\xad\x00\x22\x9a\x62\x39\x2a\xea\x98\x95\xbf\x0a\x81\x20\x3b\x21\xa8\x69\x29\x0e\x08\x94\xd5\x61\x6a\x68\xd5\xfc\x02\x8e\x50\x36\x59\x53\xbe\x84\x54\x2d\x4c\xb7\x4a\x7a\x8e\x3d\xec\x51\x74\xab\x5d\x81\x3a\xf5\x3c\xc9\xb2\xf5\x82\x87\x25\x7f\x46\xfd\x74\x96\x6f\x86\x79\x24\x22\xf9\x17\xa8\xe9\x50\x09\xfc\x8c\xfa\xa2\x08\x93\xfa\x7c\xd7\x7a\x46\xe7\xbe\xb1\x28\x3a\x59\x29\xe4\xc6\xe2\x47\x62\xd2\xa9\x66\x79\x24\xf2\x49\xe5\x0a\x6d\xf5\x6d\x7e\xc1\x61\x89\x16\xd8\x39\x76\xe6\x0a\x8e\xd3\xd2\xa8\xcd\xcc\xde\xe4\x31\x7d\x05\xf9\xaf\xf6\x82\xe3\xea\x07\x68\x50\xad\x83\x55\x
58\x97\x9d\x8a\x75\x57\x34\xc0\x74\xf0\x5e\x76\xf6\x61\x3d\xef\x0f\x79\x0b\xdb\xb3\x09\x75\xff\x89\xc3\x04\x0b\x75\x79\x57\xc5\x8a\xdf\x19\x67\xdf\x39\xaa\x53\x8a\x04\xcc\xa7\xad\xcc\x7f\xd7\x78\xb0\x2c\xff\x30\xb7\x3f\xbd\x46\x4b\x98\x09\x18\x03\xc7\x81\x4d\x71\x38\x49\x20\xb2\x4d\x41\x34\x03\x3d\x16\x03\x19\x4f\xa4\xb4\xd1\x09\x9f\x9b\xe5\xf2\xdf\x38\x95\xff\xb2\xb5\xb8\xd4\xf9\xb7\x0f\xad\xc5\xe5\x6c\xd6\xe9\xc4\xe5\x25\xef\xea\xb5\xb8\x5c\xce\x85\x58\x2a\xc3\x52\x5c\xf2\x0e\xad\xf6\xe2\x24\x9a\xc3\xbf\xd7\x32\xfd\xc9\x44\x33\xeb\x74\xe3\x44\xe4\x97\xbc\x89\x6e\x12\x96\xb2\x2d\x87\xa1\xf8\x8a\xc7\xbe\x8d\x99\xaf\x7d\x47\xc7\xf4\x6d\xcc\x34\xe9\xbe\xf9\x76\xe6\x74\xc0\x7d\xf9\xaf\x99\xd5\x6d\xf7\xd5\xe3\x18\x8e\xd0\x7d\xfa\xad\x4c\xf7\x73\xb0\x03\xd9\xc8\x8a\xe4\x14\x0d\x54\xa4\x46\xe9\xbe\xf8\xd8\x7e\x27\x87\x72\xd8\xed\x16\x72\x77\xcd\xe9\xd8\x39\x0d\xd6\xe9\xbf\x69\x7f\xf0\xb4\x21\xcf\x2b\xae\xae\x52\xbc\x25\xcd\xb7\x15\x80\x07\x16\x08\x95\x18\x0e\x01\x3b\x09\xf9\xff\x92\x77\x00\x25\x62\x57\x2d\xbe\x8f\xdd\xcf\x0e\x76\x44\x51\x84\x6b\xc2\x3f\x17\xcc\xcc\xf0\x76\xaf\x13\xa6\x92\xf4\x44\xe0\xde\x41\xef\x94\x2f\x0a\xa8\x55\xd1\x9d\xd8\xc2\x0c\x2d\xf3\x30\x2d\xe2\x01\x74\xec\xd3\xec\x40\x2e\xc2\x22\x4b\xfd\xdb\x83\xa9\x65\x00\xc0\x95\xbf\x2c\x6d\x2c\x8d\xed\x70\xc1\x93\xb0\x28\x47\xd5\x73\x86\x51\xdf\xfd\xbb\x82\x9b\x96\x1c\x5c\x52\x5d\x45\x4d\x19\x98\x97\x21\xf8\xf3\x34\x7a\x8d\x9f\x4f\x01\xdf\xd8\xa9\xed\x38\x01\xf8\x1f\x0d\xbe\x63\x99\x7c\x89\xac\x19\xd4\x15\x3a\xdf\xbc\xc6\x63\xbe\xec\xe1\xb2\xee\xe0\x72\xdc\x11\xfe\x0b\xd8\x2d\x57\x40\x52\xe4\xa7\xc1\x9d\x67\x60\xc0\x71\x47\xb8\xe3\xb0\xe6\x40\xa5\x4f\x87\x14\xb3\x38\xea\x32\xe3\x61\x0a\x81\x37\x0d\xb6\xc1\xae\x95\x5d\x3a\x0f\xec\x19\x74\x27\xba\xf2\xee\x4c\x2e\x03\x1f\x6d\xba\x04\x2e\xfd\xaa\x4f\x9b\x61\xa1\xd9\x40\xf6\xb9\xef\x66\xdf\x6e\x6d\xe5\x55\xb8\xf7\x36\xa6\x1b\x4f\xcd\x56\x01\x94\xe3\x7d\xdf\x1d\xdc\x42\x7f\x5b\x7b\x56\x12\x22\x39\xd0\xe7\x66\xab\xa4\xc1\xd7\x0e\xf9\xf2\xda\x02\x73\x53\xe3\x92\x37\xa1\x40\x95\x5c\x50\xb2\xef\x62\x5f\xf4\x98\x7e\xe7\x7f\x6a\x27\x58\xe4\x16\x1c\x0e\x28\xa7\xde\xe0\xcd\xb9\xce\xe2\x19\xc5\x7e\xe8\xa8\x00\xe5\xdb\xd5\x16\xa9\xea\x23\x84\xcf\x3e\x37\x5b\xbd\xec\x8c\xc8\x92\x1b\x4a\xb2\x30\x2a\xa6\x4c\xf4\x48\x31\xf5\xdc\x6c\xb5\xa8\xe7\xbd\xb4\x5e\x66\x75\x8a\x98\x88\xb3\x74\x8a\x7d\x6d\x1f\xfb\xd6\xb2\x4c\x08\x43\x0c\xe2\xc1\x4f\xc7\x29\xc4\xa0\xfa\x5f\xd8\xa7\xa0\x76\x7e\x75\xdf\x88\x32\x4a\xcb\xa6\xc2\xa0\x60\x0d\x81\x36\x3c\x35\x5b\xc5\x41\xb6\x21\x1d\x3a\x15\x37\x0b\x70\x04\x4d\xb4\x7c\x96\xc2\x37\x24\x3f\x70\x3a\x8c\x13\x11\xa9\x94\x73\xb6\x3f\x18\xe6\xc9\x1a\xd1\x07\x0c\x25\x94\x5d\x90\x8d\x52\x5b\x85\xb2\x8f\x88\x24\x46\x68\x7e\x0a\xc1\xea\x95\x99\xbc\x74\xd0\x1b\x0f\xad\x8a\x51\x03\x5d\xd5\x54\x15\x10\x9b\x85\x04\x07\x5e\xd7\xc0\x11\xdc\x84\x79\xad\xf5\xc2\x3c\x4c\x4b\x21\xc8\x1b\x03\xed\xa6\xf1\xf3\x45\x5e\x98\x24\xfe\xe4\xbd\x36\x64\x30\xe4\x11\xa7\x5a\x43\xdb\xcb\xe8\xae\x0d\x9b\x0c\x39\x98\xe7\x8b\x3c\x33\xb5\xac\x8a\x66\xd6\x11\x03\xc3\xa5\x5a\x9c\x28\x6d\x9c\xaf\xb8\xd4\x53\xb5\x45\x7c\xc6\xe5\x86\x67\x2c\x2f\x9f\x71\x17\xc7\x38\xe4\x59\x00\x4b\xbf\x7c\x90\x5d\x6b\x76\x21\x78\x22\xbe\xf3\x60\xf0\xe6\x83\xb3\xce\x33\x0b\x0e\x06\xdc\x10\xb3\x88\x9b\x8f\x88\x1b\xcb\xc3\xe6\x3a\x4c\x92\x0e\xbe\x5e\x39\x97\xa5\xf3\x69\x24\x2e\x8a\x68\x85\x1f\xa1\x78\xc6\x49\x70\x49\x51\x8f\x1b\x8c\x39\xa5\x94\x63\x3e\xf1\xd6\xb4\x05\xac\xf4\x0e\x2a\xc6\x48\x47\x22\xe5\x02\x4d\xec\x10\xb5\xdd\x90\xfd\x6c\xd8\x5d\x33\x78\x0a\x49\x5f\x7f\x1c\xc9\x11\x14\x14\x6f\xe0\x0e\x46\x36\xd7\xce\x3a\x59\x92\xad\x65\
x48\x6c\x4d\xd2\x0a\xd9\xd9\x51\x3d\x95\x15\x9a\xe3\x06\x31\x83\x4e\x9e\x01\xbb\x05\x59\x05\x92\xf5\x63\xb2\x81\x23\x83\xbd\xae\xf3\xe9\xc9\x9a\x9b\x59\x16\xa4\x25\x29\x8f\xa3\xba\x04\x49\xae\x4b\x7d\x9e\x9b\xad\x5a\x14\xa5\x0e\xed\xa0\x6c\xb3\xb3\x79\x84\xcd\x25\xb6\x98\x32\x63\xa4\xa3\xba\xe1\xac\xea\x98\x2d\x3d\x39\xb5\x21\x0b\x31\xe0\xe1\x05\xfb\x79\x05\x0b\x76\xc3\x3c\x4c\x12\x91\xc4\x45\xc7\xf8\x82\x25\x24\xa2\xa6\x72\xc7\x88\xe7\xf5\x30\xd7\xc5\xf4\xb1\x67\xdf\x34\x10\x56\xb4\x8b\x63\x42\x7d\x06\x7a\x65\x47\x2e\x21\xb1\xb7\x96\x0d\x83\x24\x69\x6b\x0f\x0b\x92\x54\x51\x6f\xb2\x2a\x2b\x5c\x30\x5b\x85\xa6\x0b\x1e\xda\x28\x29\xa5\x06\x6d\xd1\x1a\xa9\x9a\x36\x40\xab\x40\xc3\xf5\xb8\x5b\xe8\xb0\x3c\xc5\x31\x3d\x35\x5b\x75\xf8\x92\xdf\x1e\x67\x57\x59\x13\xed\xff\xca\xb8\xba\x2b\xde\x31\xbe\xa4\x8f\x2d\xe0\x29\x88\x02\x22\x2e\x0d\x38\x9b\xb3\xd4\xfa\x6a\x80\x24\xd9\xea\xa2\xb6\x92\x84\xf6\xd0\x3d\xaa\xc1\xff\x3f\xf6\xde\x05\x3a\x92\xec\xac\x0f\x3f\xd5\xd2\xbc\xee\xec\xc3\x2e\x13\xfe\x09\x10\xb8\x29\xaf\x19\x69\xd3\xdd\x92\xe6\xb9\xa3\x5d\xef\xa2\x91\x66\x76\x64\xcf\x43\x96\xb4\x6b\xbc\x0f\xef\x5c\x75\xdf\x96\xca\x53\x5d\xd5\xae\xaa\x96\xa6\xd7\xde\x84\x78\xb1\x79\xd8\x3c\x8c\x6d\x1e\x63\xcc\xcb\x76\x00\x1b\x42\x1c\xec\x04\x63\x07\x30\x8e\x71\x0e\x26\x86\x40\x0e\xe1\x61\x13\x63\xc0\xf8\x4f\x62\x20\x81\x84\xb0\xbc\xfe\xe7\x7e\xdf\x77\x6f\xdd\x5b\x5d\x2d\x69\x56\xbb\x6b\xfe\xe7\x8c\x0f\x9c\x1d\x55\x57\xdd\xf7\xfd\xee\x77\xbf\xc7\xef\xc7\x57\x90\x77\x4a\xcd\x47\x1c\x46\xe5\xbd\x40\xa5\xa2\x49\x6f\x80\x59\xbd\xe1\x7a\x2c\xa2\x6c\xe8\xe7\x28\x22\x3a\x73\x48\x61\x8f\xa2\x64\x0b\xc1\x4b\x69\x31\x80\xf5\x01\x02\x68\x10\x6e\x25\x04\xa6\x27\x58\x70\x6e\x2b\x66\xec\x36\xd8\x05\x00\xf6\x92\x21\xbf\x98\x31\x06\x50\xab\x11\x3a\x4f\x78\x54\x23\x69\x24\x9e\xdd\xe3\xdf\x11\xca\x1f\xdc\xc7\x6e\xeb\x8a\xb8\x2f\x0c\x61\xaf\xff\x2f\xf7\x05\xdf\xb5\xcf\x7d\xa6\x97\x4a\xa6\xe9\xda\xc8\x8c\xaa\xba\x42\x3c\x22\x90\x62\xa8\x7a\x46\x9f\x64\x4d\x7e\x01\x00\x87\xae\xb8\x45\x5d\x21\x14\x0d\x2b\x12\x10\x22\xdd\xe9\x16\x01\x49\xec\xfa\x61\x3b\x81\x74\x53\xd8\xfa\x08\xb9\x91\xa4\xf6\x41\x4a\x71\xb9\xbd\xb0\x75\x55\x37\x83\x18\xe1\x74\xd4\x9f\x5a\x56\x30\xdf\xbd\x1e\x30\xc7\xa1\x95\x85\xde\x25\x7b\xae\x0d\xae\xa9\xe3\xd7\x11\xd3\x05\x73\xd2\x47\x40\x7f\x85\x2d\x88\xfa\xa0\x0a\xad\x51\x40\xb1\x33\xd0\x96\x54\x3d\x20\x4d\x0e\xaa\x4e\x1f\x43\x1d\xa1\xcd\xc2\xf9\x1a\xac\xa8\x98\x61\x0c\x6d\x07\xf4\x02\x50\x97\xd4\x44\xc2\x06\x48\x72\xde\xe9\xc7\xe8\xbd\x03\xee\x8a\x56\x1e\x0d\x9a\x9c\x9f\x4f\xb6\x24\x18\xf6\x74\xd8\x5f\x26\x87\x06\xfe\xc5\xaa\x4f\x57\x94\x20\xc1\xe2\x40\x17\x95\x15\xc9\x30\x3c\x89\xda\xfc\x4a\x11\xa8\x3e\x45\xce\x83\x2b\x7c\x6e\x69\xf1\xd9\x5d\x9a\x2f\x2c\xc6\xae\x31\x48\xfa\x69\x23\xd9\x8a\x1b\xbd\xa4\xdd\xd0\x83\xe8\x64\x0a\xfe\xcb\x31\x76\xd8\xda\x80\xfe\x77\x1a\x71\xf4\x75\x25\x71\xa4\xd9\x6c\x86\xc5\x52\x95\x00\x82\x10\x53\xc4\x5d\x42\x2e\x7b\xa5\xe0\xe2\xe9\x26\x5a\xb9\x3a\x1c\x4a\xdf\x5b\x51\xc1\x10\xee\x3b\xa0\xfb\xb2\xd6\x0a\x8b\xe3\x05\x26\x96\xbe\x86\xe3\x70\xa2\xf2\x48\x6e\xe2\x05\xac\x59\x08\xcd\x49\x7e\x0f\x1f\x3a\xc0\x26\x31\x40\xb5\x48\x95\x56\x83\xcd\x23\xd9\xc9\x09\x39\x45\x89\x24\x53\x77\x57\x5c\xb3\xe5\xd5\x73\x28\x64\xde\x51\x63\x07\xf5\x0c\xfa\xdf\xfe\x4c\x90\x8a\x7f\xc2\x9b\xa3\x4d\x83\xf1\xa1\xc9\xa6\x4c\xf5\x5c\x16\x99\x42\x16\xeb\x29\x68\x5e\x80\x47\x78\x29\x49\xbb\x4a\xd7\x76\x44\x08\x44\x0b\x59\x0a\xb7\xda\xe1\x83\xa4\x7f\xc3\x63\xb4\x1b\xca\xa5\xa9\x17\xc2\x7f\xcd\x9a\xce\xd8\x5b\xf7\xb1\x03\x59\x3f\x53\x22\xca\xff\xc6\x7d\xc1\x5f\x8e\xaf\xe0\x1f\x96\x2e\x6c\xe7\xd3\x96\x4e\x7d\x37\x2d
\x0a\xb5\xc3\x54\xc9\x0a\x50\x13\x84\xd1\xc8\xec\x6d\x9e\xe9\x0a\x6c\x08\xab\x38\xc1\xaf\xed\xcc\x23\x72\x1e\xb8\x55\xba\x05\x53\x59\xe6\x4e\x86\xa0\x53\xea\xb6\x07\x8b\x13\xd4\x96\x48\xac\xf3\xf5\x44\x92\x6b\x08\x65\x39\xd5\x3b\x59\xaf\xea\x14\x61\x77\x23\x81\x78\x14\x41\x34\xdf\x26\xf5\x6e\x98\x53\x2b\xcc\x40\x57\xe1\x0f\x64\x86\x9c\x47\x6d\xf5\x75\xd8\x15\x61\xca\xf5\xd2\x05\xcb\x72\x2a\x5a\x12\xd5\x10\x22\xf0\x51\xdf\x37\x39\x8d\x39\xde\xc1\xe1\x8a\x86\x36\xf4\x8c\x2c\xd1\x2b\xb9\x48\xf3\x55\x75\xd5\xc5\x25\x92\x18\x15\xac\xce\x65\xa7\x23\x5b\x88\x27\x85\x5f\x18\x3f\xda\x1c\x3c\x5e\x90\xa2\x1d\x85\xb1\xd4\x48\xe1\x4a\xa0\xa4\x3c\x4f\x92\xc2\xf9\xa1\x74\x78\xa5\x5a\x5a\xc8\x4e\x06\x8d\x02\xaa\xc7\xe6\x95\x95\x47\xba\xf1\x11\xb8\xc1\xdd\x16\x6c\x61\xee\x12\xea\x19\x5c\x35\x0b\x2f\x72\x61\x08\x23\xce\x91\xaf\x9f\xab\xb1\x7f\x20\xaa\xda\xef\xff\x52\x4d\xe3\xa0\xff\xdb\x5a\x49\xf1\xd3\xe0\xe7\x4a\x16\x52\x6f\xcb\xbc\x99\x99\x19\x49\xa3\xc7\x29\xe9\x4b\xa1\xed\x6a\x11\x84\x71\x3f\xe9\x67\xea\xd6\x8a\xd3\x4e\x10\x05\xd6\x86\xcd\x53\x64\xc3\xe7\x88\x0e\x08\x59\x65\xf9\xdd\x74\x41\x30\x74\x9f\x5a\x8d\x23\x41\x34\x6a\xdd\x4e\x88\xbc\x58\xb5\xa0\xfb\xa6\x49\x7f\x7d\x43\xcd\x09\xaa\xc4\xb0\x46\x43\x3d\x6f\xb0\x30\xec\x29\x5f\x03\xdf\x19\xa0\x7f\xe1\xbc\xa9\x51\xde\x2a\x19\x17\x52\x09\xb4\x88\x5c\xac\x8b\x30\x76\x6f\xc6\x29\xbb\x65\x4d\xb4\xae\x26\x9d\x0e\x04\x01\xf9\x6b\xfa\x1c\xbb\xe8\x0e\x6e\x71\xec\xa4\x12\x07\x80\x06\xa6\x2b\x30\x04\xd5\xe8\x3c\x1d\x30\xaa\xb8\x33\x7c\xd2\xa9\xf3\x3b\x9e\xc7\xbe\xf6\xe9\xe6\xad\x35\xe7\xfb\x59\x9e\x74\x75\xa0\xfa\x82\x91\xe5\xda\x76\xfd\x81\xdb\x83\x7b\x76\x78\xc7\x09\xdf\xd5\x4e\x4b\x25\x76\xe7\x97\x17\x8a\xb0\x80\xfd\xd0\x94\xf6\x75\xef\x00\x39\x27\x1d\x43\xdd\x27\x6f\x63\xef\x1b\x63\xff\xb0\xc0\x9e\x58\x4a\xc1\x51\x35\x9f\x44\xfd\x6e\x9c\xf9\x6f\x1d\x0b\xde\x5e\x1b\xf5\xab\x25\x5c\x2d\xf0\x8a\x16\xfd\x88\xbe\x33\xbc\x78\xad\xc2\x0d\x2e\xe9\xe7\xbd\x3e\xc4\x0b\x54\x21\xe8\x98\xa3\xc0\xa4\x63\x4d\x81\xf3\x4c\x0d\xa7\x76\x38\xe3\x11\xf1\x42\x04\xc6\x52\xbf\x19\x7f\x7d\x43\x64\x0d\xc8\x38\xc1\xbb\x97\x41\xd0\x00\xc2\x78\xd3\x26\x25\x97\xad\xac\x34\xc3\x21\x8a\xbf\xf3\x76\x98\xf5\x22\x61\xf4\x4e\xb1\x6e\x7c\xd4\x2d\x98\x0a\x2b\xcc\x35\xd3\xa9\x6a\x85\xc7\x2a\x64\xeb\x4c\x8e\x3a\x97\xf7\xb4\x3c\x70\xbc\x8b\x05\xc0\x3e\xe5\x31\x56\xd0\xc6\xf9\xff\xc9\x0b\x7e\xc6\xb3\x68\xe4\x4a\x28\x3d\x56\xd4\x48\x55\x5f\x88\xc0\xbb\xf8\x9e\xee\x0b\xce\xc9\xa6\x5e\xb2\xe2\x0d\xdc\x82\x0b\xa0\xb2\x2d\x91\x82\x1e\x47\xce\x4a\xba\x73\x1b\x6f\x2d\xd2\xb0\xef\x28\x36\xff\x75\x8d\xf9\xba\x39\x61\x12\xbf\x1c\x0b\xf5\xbf\xa7\x16\xbc\xb1\x36\xfc\x1c\x54\x97\x34\x6c\x9b\x8b\x33\x42\x71\xe9\xa6\x98\x75\x98\x27\xd0\x0b\x72\x36\x37\xf9\x45\x31\x70\xc8\x84\x41\xdc\x5c\x29\x86\xe1\x0a\x86\x60\xf7\xa5\x0b\xf1\xa5\xcb\x1d\x31\xc8\xc3\x84\x7e\x86\xc8\x1b\xd6\x8c\x61\x5e\x94\x5b\x80\x10\x07\xbb\xd3\x9e\x22\x34\x9e\x24\x29\x66\x76\x29\xed\x24\x47\x8e\x3b\x74\x37\x27\xb1\xe4\xf2\x5a\x98\x01\x5f\x5a\x61\x58\xf8\xb0\x47\x81\x16\x3f\xe9\x05\xef\xf6\xec\x50\x0b\x5d\x34\xc6\x1a\x81\xdd\xf5\xb3\x5f\xf7\xbd\x9b\x33\x9f\xfd\xba\x77\xd4\xe1\x5f\x47\xe1\x6e\x02\x7f\xca\xbc\x45\xd1\xf3\xee\x1a\xa1\x9d\x83\x6d\x05\xe0\x1c\xb7\xd3\x22\xe7\x57\xd4\x1e\xcd\xa6\xee\x01\xef\xe9\xbd\x53\xf7\xd0\x4f\xf7\x4e\x35\x9b\xcd\x2b\xaa\xe1\x57\xf0\xf3\x62\x54\xed\xe6\xff\x8e\xc7\xf6\x63\x90\x90\xff\x6b\x1e\x7b\xec\x59\xd9\x45\x0f\x1a\x4a\xd8\xa0\x47\xe1\x4d\x25\x17\x1d\x3e\x84\x74\xd7\x0e\xa2\xa6\xd3\x07\x75\xde\x4b\xfb\x6a\xd6\xd1\xf6\x41\x4b\x01\xd6\x5e\x67\x37\x7b\xac\xc9\x36\x18\x09\x62\xff\x95\xc1\x12\x0d\x24\x04\x11\x81\x6
2\x07\xca\x47\x18\xaf\x4f\x21\xa4\xb0\x39\x88\x8c\x54\x57\x3a\x1f\xda\xd6\xf5\x82\x09\x05\x46\x18\xcd\x2d\x2d\x66\xce\xe6\x79\x8f\xc7\xb4\xa8\xf7\xbf\xcf\x0b\xbe\xcd\xd3\x41\x29\x23\x16\xac\xcb\x22\x8d\xbb\xa0\x87\xa9\xeb\x00\x1a\x5b\x5e\x09\x79\x62\x32\xbd\xd5\x52\x49\x0b\x65\x41\x5a\x99\xa3\xba\x74\xd4\x95\xf1\xfd\x17\xd3\xb4\x5b\x8d\xfd\xa4\xc7\x6e\xc9\xfa\x6b\xa6\x74\xff\xdf\x7b\x4c\x3c\x2b\xb3\xbf\x62\xd5\x12\x7c\xad\x5d\xa7\x36\x37\xa0\xe5\xc4\xf9\xa5\x6a\x6a\x35\x22\x5e\x59\x8c\x6e\x88\x4d\xd9\x64\x1f\x3c\xc8\xce\x51\x8b\x31\xd3\x74\x7d\x3d\x95\xeb\x22\x4f\xdc\x96\x96\x92\x94\x31\xea\x74\x6e\x49\x03\x2a\x42\x04\xf2\x1f\x1f\x08\xa6\xdc\x47\x6e\x4e\x58\xf1\xdb\xf6\x3c\x8c\xef\xdf\xcf\x5e\xa5\x83\x67\x85\x7d\x60\xad\xb2\x65\xb6\x54\x39\xd8\x7b\x68\xfa\xcd\x68\xb9\x3d\x44\xcb\x2d\x5a\xe1\xb2\x2f\xde\x53\xb8\xec\xcd\xa8\xcf\x3d\xd3\x1f\x26\x3b\xc7\x0b\x5d\xf0\x5f\xd2\x28\x62\x84\xac\x9d\x5a\x15\x39\xcc\x75\x64\x51\x79\x13\x51\x14\x32\xfb\xf6\x03\x6c\xca\x4d\xb5\x4d\xb2\x96\x88\x20\x30\x1d\x0f\xe9\xa6\xba\xbd\x5f\x54\x37\x97\x16\x51\xc0\x7c\x6a\x7f\x70\xdd\x2b\x3f\x2d\x21\xb0\x68\x86\x58\xc3\x82\x25\x78\x17\xde\xd6\xe7\x9f\xda\x84\xe0\x29\x42\x68\x7b\xe7\x23\xd5\x02\xc9\x91\x16\x9c\x4f\x38\x39\xcd\x10\xf9\x80\x49\x82\x59\x03\x90\x0e\xb3\x4c\xb6\x1b\x3d\x99\x36\xf0\xe2\x3a\xd9\xbc\xee\x31\xac\xea\x12\x5c\x46\x5e\x40\xa5\xce\x6d\x4a\x75\x20\x3c\xa8\x1a\xe4\xc8\xaa\x6f\xd9\xc7\x3e\xe6\xb1\xaa\xd7\xfc\x1f\xbf\x11\xb6\xa0\xa6\x39\x79\x5f\xd6\x17\x71\x1e\xe6\x83\x20\xae\x28\xd4\xa4\x2c\x96\x47\x08\xd4\x7f\x7c\x51\xff\x49\x43\x26\x5a\x69\x82\x18\x6c\xea\x56\x2e\x37\x05\xa0\xaf\xb7\x33\x3e\x01\xe9\xfa\xaf\xa6\xea\x26\xd9\x4b\x99\xd5\x75\xff\xc5\xc1\x74\xf1\x57\x55\x14\x2c\x15\x1f\xc6\x1c\xc4\x54\x98\xc4\xb6\x60\xf8\xdf\xb6\xfd\xef\xf3\xcf\x84\xfd\xef\x9d\x35\x5d\x5e\x11\xbc\xaa\xaa\x6a\x68\x34\x10\x40\x8a\xc0\x4c\x5d\x1d\x78\x6f\x65\x21\xa3\xe9\xd0\x14\xa1\xfd\x5d\x68\xed\xa5\xbe\x98\x5b\x04\x2d\x29\x6b\x99\x12\x45\x04\x86\x1a\x10\x3b\x87\x40\x13\x8e\xc5\xbd\x62\xa8\x07\x48\x82\x60\xb9\x06\x71\xa4\xa3\xa1\x52\x0d\xb8\xa2\x79\xa1\x95\xf4\x0a\xc7\x07\xf9\x3b\x5e\xa5\xd4\x94\xa2\x18\x98\x88\x32\xc4\xc9\xba\x40\x90\x5c\x2c\xa7\xc9\xfe\xb4\xe6\x6c\xc9\x4e\x94\x6c\x91\x89\xcd\x1c\x7d\xe7\xa2\x64\x0b\x21\x63\x68\x4b\x7e\xb8\x16\xdc\x5b\x7e\x58\x4e\x84\xb6\x49\x9b\x69\x4b\x16\x9f\xb8\x59\x93\x5f\xf0\xd8\x1f\xb9\xf4\xd2\xbf\xed\x05\x77\x5f\x29\xfe\xbe\xe2\xaa\x05\x43\xc5\x83\x5f\xcc\x29\xbd\x50\x01\x16\xd8\x99\xd1\x6b\x69\x87\xee\x9a\x48\xb7\xa7\xbc\x23\xec\xcb\x86\x81\x99\xbb\xa2\xd7\xb8\x2a\x07\x99\x7f\xc8\x3f\xd0\x00\x18\x28\xb6\x03\x97\x76\x57\xf4\x18\xfb\xe0\x38\x7b\xd1\x28\x29\x38\xd3\x5c\x51\xe2\x88\x06\xfa\xdb\xc6\x83\x39\xeb\xef\xed\xc6\xb8\xaf\xb9\x15\x41\x9a\x59\x3a\x5e\xf3\xba\x77\x30\x95\xbd\x28\x6c\x09\x57\x6b\xfa\x37\x63\xec\x21\x66\x7e\xf2\x2f\x69\x83\xd2\xe9\x21\x07\x06\xf9\x8c\x11\xaa\x44\x68\x66\x44\xbc\x50\x88\x48\xea\xec\x40\xd7\x60\xf5\x6b\xf6\x86\xfe\x85\x5a\xf0\x53\xb5\x1b\x31\xc6\xeb\x66\x69\x8b\xbc\x36\x81\x02\xe9\x21\x41\x13\x94\x76\xe8\x5a\xdf\xec\x43\xd2\xc4\x88\xe6\x29\x4f\xb8\xd8\x4c\x42\xd5\xfc\x3c\x4d\xd4\x56\xc2\x98\x85\x41\x71\x53\x5e\x2d\x3e\xd2\x1b\x46\x17\x25\xba\x86\x16\x8b\xea\x85\x1e\x34\x60\xef\xf2\x6c\x10\xe7\xe2\x9a\x75\x7e\x53\x0c\xa2\xdb\xb6\x0c\x4f\xf5\x6a\x6b\x50\x3f\x93\x69\x63\xbd\x1f\xb6\x25\x19\xff\xcb\xb6\x7f\x5b\x4c\x7e\x74\x9c\x4d\x56\x45\x96\x95\x90\xb8\x68\xfd\xbc\x6d\x3c\x58\xa8\xfe\xa9\x7c\x2e\xd8\x4b\xa8\x67\xbe\xd0\x28\xd2\xce\xba\xf9\xb9\x31\xf6\xaa\x22\x42\xf3\xb1\x60\x99\x22\x34\x1b\xbb\x8f\xd0\xdc\xda\x40\x97\x41\x01\x2b\xac\xc3\xa4\x61\x3f\x
3b\x17\xe7\x0f\x79\x6c\x5f\x6f\x43\x64\xd2\x7f\x9f\x17\xbc\xd3\x5b\x52\xff\xb4\x8e\xff\xb0\x63\xc0\xae\x41\x4a\xe8\x20\x95\x3a\x82\x8d\x21\x34\x0f\xe0\xa1\xd5\x31\xb2\x1c\x40\x6a\x88\xd1\x07\x9e\xdf\xb0\x03\x87\x2e\x7b\x53\xc5\x38\x69\xe0\xeb\x17\x42\x43\xed\xd6\xbf\xc5\x33\x11\xa7\xdf\xe8\x05\xaf\x59\xc6\x70\x53\x84\xa1\x4f\x43\xd9\xb1\x88\xce\x34\xc8\x4a\x09\x38\x24\x46\x9c\x40\x8d\x55\x1f\x66\x10\x5b\x90\xe3\xd9\x80\x67\xa1\xc3\x31\x95\x87\xed\x81\xb6\xf3\x19\x04\x87\x0b\x8b\xce\x98\xfe\xec\x98\x83\x70\x9b\xae\x89\x56\x71\xd1\xa1\x4b\x51\x98\xc4\x00\x77\xf2\x96\xb1\xe0\xc1\xd2\xb3\x72\x10\x65\x82\xe8\x4c\x92\xcf\x23\x02\xcc\x72\x12\xe1\x35\x5a\x5f\xb0\xc0\xba\x8e\x27\x9c\xf5\x8a\xb3\xaa\x7e\xac\xc6\x7e\xb4\xc6\xbe\xa4\x55\xfc\xac\x0f\xf2\xcc\xff\xf6\x5a\xf0\x61\x6f\xbe\xe2\x17\xc2\x5d\xb1\xf8\xf6\xcd\x2f\xd5\x00\x5f\x1d\x75\x59\x73\x5a\xa9\x86\x8c\xdc\x62\x20\x77\xfa\x91\x44\xa3\xaa\x1a\x78\xc3\xe2\xa7\x4b\xed\x22\xc8\x48\x6e\x18\x02\x8a\xa2\x8e\x64\x16\x7c\x72\x66\x6a\x86\xd0\x9c\x5d\x1f\x48\xbb\x55\x6e\xd8\xfb\x0e\x8d\x9e\x41\xab\x51\x70\xcd\x7e\xfd\xa1\xe0\x87\xbc\xd2\xc3\xaa\x44\x5f\x7b\x5c\x9a\x7c\xc1\xb6\xb6\xf2\xcd\x99\xe6\xcc\x29\xf5\x8f\x8e\xd8\x4c\xd0\xbd\xa0\xaa\x74\xc1\x72\xe8\xb6\xb2\x39\xe3\x14\x85\x17\x20\x22\xee\xc1\x94\x97\x14\x2d\x92\x74\xa6\xa8\xc2\x8f\x1e\xad\xbe\xda\xff\xda\xcd\xc4\xe2\x3d\x67\xf7\x45\xda\x36\xd2\x0a\xbe\xaa\x82\xa0\xda\x9e\x2b\x7b\xa1\x9e\x60\xc7\xd8\xcc\x48\xcd\x69\xd4\x8a\xbb\x69\x1d\xd9\x83\x75\xa4\x63\x59\x47\x1e\xda\x5b\x32\xf1\x97\x6f\x03\xdd\xf0\x94\x97\xed\x7c\xf3\x5f\xf2\x2f\x99\x4c\xa1\x91\x7b\xdd\x24\x10\x95\xc4\x4b\x65\x5e\xf1\x77\xdf\xc2\xbe\xa2\x2a\xe3\x44\xc8\x6e\x12\xaf\xc8\xdc\xff\x8b\xc3\xc1\x29\xf3\xd7\x90\xb2\x0b\x28\x44\x16\x6f\xb6\xe0\x6d\x78\x57\x5d\xc0\x5c\x0d\xe5\x37\xd9\xcd\x55\x78\x13\x45\xc4\x45\x11\xf9\x35\x8d\x22\xf2\x8b\x1e\x6b\x8e\xc6\xd8\x2c\xaf\x49\xc8\xd3\xf8\x2e\x6f\xd5\x8a\x57\x5d\x93\x1b\x62\x33\x4c\x52\x73\xb8\x58\xcb\xf0\x39\xc6\x12\x61\xff\xaa\x66\x12\x91\xde\x59\xdb\x26\x03\x65\xb8\x5f\xf0\x51\xf0\xc7\xde\x6a\xe5\x25\x60\xa8\x5f\xab\xf8\x20\x17\x3a\x04\x43\xa9\xf1\x49\x87\x03\x5a\xea\xda\x00\xc1\x28\xb7\xc2\xb8\x9d\x6c\xc1\xf7\x10\x0d\xb7\x94\xf4\xfa\x91\x1d\x22\xa4\xe1\xab\x96\xa5\x68\x37\x92\x38\x1a\x3c\xe7\x03\x76\x53\x99\xd8\xab\xd1\x78\x65\xe7\xa3\x63\xda\x6f\x36\x0a\x53\x70\x2f\x33\xa7\x84\x59\x7e\xe5\x3c\xd3\xb7\xb9\xd0\x5d\xc3\xc9\x88\xfe\x9f\x1d\x0c\xbe\xa6\xf8\x93\x82\x99\x32\xde\x96\xad\x48\xa4\x18\x3a\x64\x87\xa6\x63\x10\x58\xdc\xe6\xcb\x68\x48\x58\x91\xda\x9d\xac\x0f\x89\x4f\x1c\x60\x89\xd9\x3c\x72\x1b\x4d\x67\xb8\x31\xb4\x79\xa6\x2f\x26\x19\xcc\xac\x8c\xc1\x29\xa8\xad\x24\x99\x93\x0f\x58\x7c\xd6\xbc\xb9\xfa\x6e\x02\x55\xfc\x7d\x51\x2e\xf7\x78\xa8\xff\xa3\xd2\x99\x5e\x9c\xe8\x2c\xa3\x83\xf6\xea\x36\x68\x5d\x15\x7b\x4a\x1d\xb4\x77\xad\x94\x59\xd2\xf3\x91\x07\xaf\xb3\xb3\x9e\xf2\x56\x77\x16\x4b\x33\xfe\xd4\x08\xb1\x64\x0a\x2a\xcb\xa5\x2f\x3c\xcc\xbe\xac\x3a\x91\x13\x12\x4b\x3f\xf9\x70\x10\xd0\xbf\x71\x1d\xa2\x61\xa4\x57\xa8\xa8\xbd\xa4\xdd\xbc\xee\x31\x83\xe3\xe8\xde\x6e\xbf\xf5\x61\xf6\x77\xe3\xec\x05\x86\x46\x6d\xde\xbc\xe6\xff\xe1\x78\xf0\x4b\x63\x17\xe8\x8a\x56\xd0\x57\x17\x05\x11\xb6\x23\x2e\x52\xe0\x95\x3b\x5b\xf5\x16\x1d\xd7\xf4\xb2\x88\x31\xb4\x06\x40\x81\x13\x30\x89\xf4\x64\x0a\x8e\x10\x30\x43\x22\xc9\x2f\xc4\xf5\xb4\x4c\xc6\xda\x06\x17\x4a\xd0\xae\xf5\xd7\xd7\x89\xda\x4b\x53\x88\xb7\xcc\x4a\x2f\xf2\xa8\x20\xbc\xc2\x90\xca\xc2\x08\xd4\x2d\x1a\x54\x7a\xbf\x9b\xb4\xf1\xf5\x35\xe2\x01\xd6\x7b\x55\x93\xee\xc1\x9e\x01\xac\x07\x30\x24\xb5\xdb\x2e\x8d\xb7\xe9\x21\x26\x20\x3b\xbd\xaa\x17\x88\xc6\
x49\xfb\x48\x56\x7c\x65\x0d\x8b\x6d\x20\x7f\xe6\x92\x1c\xcf\x0e\x4f\x64\x25\xdb\xce\x29\x76\x62\x17\x50\xaf\xc3\xa5\x3d\xe5\x1d\x61\x5f\xe1\x2c\x73\x40\xbd\x6d\x74\x65\xba\x2e\x1b\x57\xe5\xc0\x3f\xe0\xef\x8b\x45\x57\x32\xf5\xe6\x97\x57\xbc\x09\x2e\x59\xb9\x3e\xf0\x0f\xfa\xfb\xe1\x2b\xc6\xde\xe0\xb1\x83\x1b\x49\x96\x43\x70\xd5\xe3\x41\xd7\x8d\x27\xd5\xbf\x18\x0e\x93\xa4\x8d\x21\x87\xb9\x1d\x67\x58\x0c\xb6\x79\xdf\x30\xa1\x61\x90\x9d\x20\xe5\xaf\xa1\xa3\x4b\x30\x6d\xca\x16\x50\xdf\x74\x90\xdd\xa6\xc6\xc3\xda\x05\xff\xe7\x40\xf0\xfb\xfb\xf5\x2e\x20\x06\x6a\xba\x81\xda\x8b\x7c\x4d\x46\x09\xf2\x99\x15\xf9\x3a\x6a\x01\x85\xb9\xfd\x96\x48\xa5\x86\x6a\x05\xb3\x13\xae\xae\x5e\x1a\x22\x8b\xac\x53\x1e\x04\x22\xe5\x22\xd5\x89\xb6\x22\x1e\x40\xfd\xd6\xc2\xeb\x88\x30\xca\x4c\xd7\x4b\x09\x8d\x3a\x53\x0c\x83\x6c\xf5\x3a\xc2\x60\xf2\xb6\xcb\xba\x85\xe7\x07\x54\xa6\x29\x3b\x56\xb5\xeb\x14\x38\xc3\xe2\x72\xcd\x10\xba\x9f\x76\x9d\x6d\xa0\x43\x94\x28\x5b\x48\x74\x13\xb5\xfb\x22\x5b\x16\x0c\x8f\x88\x0e\xfa\x86\xa6\x1a\xca\x50\xbd\xf7\xeb\xa0\xa7\x2b\x8d\x24\x03\x1c\x20\x99\xd5\xf9\x85\x70\x53\x3a\x0f\x92\x14\xa3\xde\xfb\x3d\x7a\x84\xad\xd7\x9b\x8b\x78\x15\xbb\x9a\xdc\x7b\xb8\x33\x90\xad\x2b\x80\x21\x36\x56\xeb\x84\x78\x7f\xdb\x7d\x38\xd1\x2d\xce\xeb\x35\xc8\x09\x6c\x6b\x41\xb1\x11\xae\x6f\x48\xd0\xbf\xe0\xe8\x9d\x82\xbc\xba\x22\x25\xb4\x38\xee\x07\x3d\x69\x90\xc6\x62\x82\xab\xa1\x84\x1f\x70\x6e\x51\xca\x1d\x79\xc6\x89\x8e\xac\xdf\x35\x61\x8c\xa5\x91\xce\x9a\x08\xc3\x48\xb8\xd9\xbd\x5e\x44\xb4\x80\x61\x69\x70\x01\x28\x36\x0b\xbb\x61\x24\xd4\x62\xc9\x36\x00\xfe\xa4\x3c\x05\x24\x12\xe9\x0a\x16\x59\x9c\x97\xe0\xad\xe8\x26\x9b\x6a\x05\x56\xa8\x14\x4f\x3b\x33\xa7\x97\xb4\xb3\x29\xd5\xd8\x46\xd1\x8c\x29\x5b\x3a\x35\x59\x9d\xdd\xb9\x1b\x20\xea\x67\x51\x28\xfd\xd5\x18\x7b\x7e\x26\xf3\xf3\x24\x4d\xe6\xb2\x73\x2f\x5b\xb8\xe4\xff\xc1\x58\xf0\xeb\x63\x8b\x1d\x88\xae\xdc\x4e\xe6\x68\x5b\x11\xfa\xdc\x8b\x17\x55\x21\x75\x9e\x0a\x4a\x93\x11\x68\xc4\x8f\xa4\xe8\xe0\x76\x9b\xb0\xc2\x6b\x27\xe1\x04\xba\x10\xc6\xfd\x6b\xd6\x7c\x51\x90\x3f\xa6\x77\xda\x49\x1c\xaa\x68\xed\x6e\x31\xed\x71\xb2\x40\xae\xaa\x4e\x47\x58\x47\x9c\xb4\xa5\xfb\x06\xc2\x2f\xf3\x7e\x9e\xa9\xe7\x58\xf9\xcb\xe1\x3e\x9d\xed\xa6\xfa\x54\x53\x5a\x9a\x00\x8f\xa2\x11\xb4\xa6\xcd\x2b\x57\xe5\x80\x9f\x7f\xe9\xd9\x57\x3c\x76\xe1\xf2\xfc\xdc\x85\xc7\x2e\xce\xcd\x9f\x5f\xbc\x74\xf6\x91\x95\x57\xac\xac\x9e\xbd\xf8\xc8\x3c\xae\x44\xa2\x2a\x5d\x91\xf9\x23\x14\xf5\x93\x3d\xb2\xda\xea\x85\xbd\x47\x2c\x6e\xa6\x3c\x81\x7e\x53\xce\x84\x92\x82\xed\x44\x66\x85\x48\xc1\xf1\x86\x26\x6f\x08\x20\xb6\xc3\x94\x08\x13\x1f\x3d\x22\x3c\xfa\xff\xd4\xd8\xa1\x76\x9c\x11\x53\xd2\xe7\x6b\xc1\x6f\xd5\x56\x64\x0e\x7c\x15\x3d\xa2\x75\xd4\x04\x50\x49\x29\x83\x21\x20\x4b\xe4\xb9\x30\xcd\xf2\xa0\xc9\x21\x26\x56\xb3\x50\xaa\x1d\x7b\xc4\x7e\xe1\xe5\x61\xbe\xa1\xd6\xd8\x25\x99\x1f\xa9\xbb\x3f\xa9\xbf\xa9\xe0\x23\x6a\x2f\x1e\xb9\x94\xc4\xf2\x48\x13\x1b\x51\x0c\x01\xc6\x83\x84\xb1\x7a\x4e\x5c\x37\x7a\x15\xc2\x52\xb6\x72\x26\xa9\xe9\xe8\x4d\xd2\x3f\x2c\x5c\x5a\x31\x52\x9f\xce\x0c\x55\x03\x01\xd8\xc3\xe9\x29\xd4\xe1\x86\x6f\x6f\x60\x5b\xd5\x66\x46\xfc\x6d\xf8\x20\x4f\x4c\x84\xa7\x35\x44\x16\x24\x77\x9e\x8c\xee\xb6\x73\x04\xff\xb6\xc7\x7c\xd4\x67\x4c\xa8\x57\x7c\x35\xf3\xff\x83\x17\x7c\xc0\x3b\x3b\xf4\xdc\x72\xc3\xea\xdc\xb3\x61\x96\x61\x62\x3b\xb1\x69\xa1\xc2\xf8\x55\x38\x02\x20\xf1\x49\x49\x8b\x37\xc3\x34\x89\xbb\x18\xa4\x94\x86\x70\xbd\xaf\x17\xc8\x68\x68\x3e\x8a\x73\x14\xda\x0b\x49\xeb\xaa\x4c\x79\xa4\x5a\xd1\xe4\x97\x09\xd2\x7e\xd6\x59\x09\xc3\xb1\xb8\x6f\xf1\xd8\x6d\x2e\xc5\xba\xff\x75\x5e\x90\x15\x9e\x2e
\x97\xed\x5b\xab\xf4\xea\x57\x52\x8a\xa3\x50\xa0\x7d\xc1\x7d\x13\x28\x32\x2d\x8f\xd9\x2c\x7f\x20\x33\x24\x3b\xd6\x3b\x86\xeb\xc5\x1e\xf3\xbf\x19\x67\xff\x58\x67\x28\x85\x49\x7c\x7f\x2a\x5a\x72\x09\xb8\xcb\x75\x2a\xd5\x6f\x8f\xeb\x54\xaa\x8f\x8d\xeb\xbe\x56\x66\x51\x69\x5d\x04\x09\xf5\x9d\xcc\xa7\x22\xa5\x0d\xf3\x02\x00\x02\x44\xa9\xe9\x19\x2a\x43\x94\x47\xa7\x91\xd7\xf8\x83\x4e\x96\x14\x90\x8a\x82\x07\xd8\xce\x94\x5a\x35\x14\xb9\x88\xd3\x6e\x56\x43\x96\x27\x3d\x07\x59\x44\xb3\x77\x5f\x45\xf6\xbb\x75\xd5\x81\x89\x38\xe1\x09\xc0\xa4\xc1\x6d\x17\x56\xf1\x46\x3f\xe7\xed\x64\x2b\xb6\xa0\x5e\x0c\x9e\x43\x1c\x46\x75\x27\xff\x01\xba\x44\x34\xef\xae\xa7\xd7\xa1\xd4\x71\x5e\x0b\x47\xa7\xa0\x15\x68\x31\x3a\xd2\x2f\x2b\x73\xf9\xa8\xb1\xc5\x3c\x81\x38\xe7\x82\x5b\xb3\xa6\x3b\xa5\x13\xfa\x01\xef\xc6\xa4\x77\x15\x05\xaa\xaf\x3b\x49\xda\x0a\xd7\x20\xb7\x30\x32\x72\x40\xd8\x63\xd3\xe4\x2b\x90\x51\x68\xba\x4f\x5e\x53\x73\x64\xc9\x6b\x3d\xdc\x41\x2d\xb5\xb0\xfb\x3d\xac\x8f\x32\x52\x53\x5d\x9f\x2b\x19\x8f\x4d\xeb\xae\xba\x81\x3a\x6f\x1d\x63\xcf\x07\x25\x38\xcc\x07\x00\x0c\x0b\xe1\x7c\x7f\x5b\x0b\xfe\xb4\xb6\xd8\xb1\x95\x7c\x37\xe6\x12\xb7\xad\xfe\xb0\xc9\x03\xd2\xef\xd5\xd1\xd6\x68\xa5\x21\xc0\xd5\x04\x30\x20\xfa\x27\x72\xee\xdb\xbf\x2a\xe5\x6f\x8b\xc4\x97\x88\xd4\xd1\xb4\x95\xa4\x6d\xed\xbe\xd7\x35\x3a\x0a\x1f\x55\x19\x6a\xfa\x68\x48\x1e\x4d\xd2\x2e\x78\x95\xcb\xda\x61\xd1\xbe\xb9\x78\x40\xa9\xf2\x70\x2a\xea\x95\x6d\xd1\xd2\x5b\x97\xd6\x25\x7b\x3c\xb4\x89\x83\x6a\x13\x39\x94\xd0\x1c\x7d\x0d\x32\xb5\x9a\x55\xa9\x97\x6c\x92\xd2\x56\x01\x95\xc0\xd0\xbd\xd2\xcf\x8e\x58\xf8\x9f\x63\xec\x79\x69\x3f\x56\x13\x5b\xcc\xca\xa7\xc6\x82\x4f\x8e\x2d\x97\x9e\x22\x09\x15\x06\x5c\x70\xfb\x47\xdd\x70\x5a\xbd\x6a\x66\xc8\xc8\x84\x66\x10\x0d\xf2\x59\x4a\x90\xb0\x59\x96\xe0\x7c\xa5\x1c\x33\xa7\x68\xa3\x5f\x13\xc6\xa5\x09\xf7\x6c\x23\x37\x61\x31\x18\xe4\xf9\xcf\xc9\x0c\xa1\xa9\xd3\x24\x8c\x06\x50\xe3\xe2\xbb\x41\x24\xd7\x45\x6b\x10\xb8\xf5\xd8\xfb\x5a\xb7\x17\x53\x61\xc3\x2e\x9e\x6f\xc4\x85\x88\xbb\x48\x93\xed\x16\xca\xab\xa6\xa2\x2a\x25\x4f\xd1\xd0\xd2\x95\x2c\xdd\xc9\x28\x27\xe3\x0d\x11\xb7\xf0\x1e\x33\x75\x55\xf6\x32\xb0\xc4\xa9\x11\x9d\xa2\x92\x1a\x48\x2a\xd9\x6d\x17\xe9\xba\x7c\x4d\xe6\xc2\xa4\xe2\x0a\xb8\x00\x59\x94\xee\x9b\x33\xcd\x99\xe3\xce\x9c\xaf\xb3\x83\xa2\x03\x0d\x1f\xf8\x0f\xb3\xc9\x1d\x75\xf0\x39\x7a\x39\x68\xba\x3b\xb5\xd8\x9f\xd6\xfd\x49\xdd\x4e\xf3\x54\x84\x71\x9e\xb1\x9f\xf3\xd8\x61\x4b\x97\xf0\xff\xb5\x17\xfc\xa0\x77\xde\x82\x4d\xc5\xbc\x32\x38\x0b\x28\x57\xa8\x58\x0e\x0f\x90\x75\x45\x15\x70\x24\xd3\x5f\x70\x43\x3a\x5d\xc8\x6e\xd4\x62\x0a\x9c\x2e\x68\x56\x9a\x67\x2e\xca\x19\x2c\xba\x61\xca\xb5\x1d\x74\xc4\x65\x76\x40\x35\x60\x69\x71\xc1\xbf\x3f\x38\x5d\x6a\x53\x2f\x6c\xdb\xed\x19\x52\x10\x46\x94\xf9\xcb\x35\x76\x50\xef\x5d\xff\xe7\x6b\x3a\x22\xf2\xc7\x6a\xab\x20\xc4\x69\x53\x13\xe0\xcb\x83\x22\x0d\x93\x7e\xa6\xb3\x8d\x5b\x49\xb7\x97\xc4\x70\xcf\xed\xbb\xa9\xd5\x3a\xee\x28\xb7\x0b\xd1\x7c\xa1\x89\x4e\x46\xd4\x22\x87\xcf\x69\x9e\x3f\x3e\x5f\xe4\xba\x17\xe4\xf4\x10\x53\xdc\x4b\xe5\xa6\xae\x2a\x25\x1b\x75\x71\x1d\xd0\x35\x13\x1e\x85\x29\xcf\xca\x9d\xef\x91\x6b\xce\x45\x34\x50\xc5\x2c\x95\x8f\x02\x22\xa6\x53\x02\x15\x8f\x47\xf2\x11\xe4\xee\x53\x23\x69\x9d\xc3\xe5\xd5\xec\x70\x9e\x44\x04\x07\x93\xf9\x6b\xc1\x1d\xa3\x56\xaa\xf5\x9a\x63\x2b\x9b\x62\x0d\xf6\x4f\x77\xc1\x4c\xad\xbf\x66\x1f\xaa\xe1\xda\x9e\x53\xca\x9a\xcc\xfc\xf7\xd6\x82\xb7\xc3\xda\xa6\x07\x24\x3e\x0c\x0f\x92\x8e\x83\x41\x06\x39\x75\x5e\x2d\x2e\x95\x56\xa8\xab\xb0\xba\x17\x4f\x24\xe6\x51\x12\xdd\x5a\xba\x5a\x04\x80\xe1\x10\xb2\xec\x60\x0f\x29\x35\xca\xd
a\x76\x10\x00\xdb\xbc\xf1\xab\xb7\xe9\xcc\x53\xde\x8b\x76\xb8\x7a\xef\xf3\xc7\xc2\xde\x8d\x5c\xbc\x69\x5b\x2d\x2e\xcd\x57\x6c\xab\xb0\xd7\x7a\x3a\xdb\xea\xbf\x8e\xb3\xe7\x85\x5d\xb1\x2e\x97\xfa\x51\x84\xcc\x4e\x99\xff\xf3\xe3\xc1\xbb\xc7\x16\x4b\x4f\x47\xcd\x8d\xc9\x98\xc6\xec\x3c\xfd\xb2\x15\xa2\x6b\x9a\xa5\x69\xe6\x00\x80\xa7\x1f\x45\x18\x17\x69\x36\x1c\xb4\x03\x73\x9b\xd1\x2b\x1d\x66\x9c\x4c\xf8\x20\xb7\xdc\xb5\x09\x5a\x3c\x56\xa6\xd7\x02\xc5\xf2\xe7\xa8\xef\x6e\x86\xed\xbe\x88\xa0\x22\xb5\x4b\xbb\xbd\x08\xce\x08\x5c\xc7\xfa\x96\xda\xa5\x36\x39\x58\xcc\x75\x93\x87\x42\xa8\xd1\x6d\xb8\xd3\xd4\x71\xcd\xe0\x05\x87\x2e\x94\x40\x30\xac\xdb\xa1\x54\x26\x03\x21\x78\x83\x46\x20\xcb\xe0\x83\xe3\x60\x83\xe9\xc0\x13\xd5\x11\xaa\xa9\x91\xc4\x0d\xd1\xe8\x25\xed\x1b\xe7\x89\xbf\x90\xb4\x44\x84\xde\x22\xc3\x4a\xba\xfb\x35\xb8\x7b\x83\x12\xfb\xde\x1a\xbb\x45\x1d\xc2\x06\x98\xea\x8d\xb5\xe0\x0b\xde\x25\xeb\x89\x66\xc7\xa3\xbf\x88\x09\x92\x0e\x1b\xb0\x23\x59\xa6\x04\x14\xd5\x39\x70\x4a\xa2\xba\xc4\x57\x2a\xbe\xc4\x30\x75\x7c\xe3\x48\xa6\x71\x9d\x4a\xe5\xac\x49\x7d\xf8\xca\x36\xd7\x6a\x08\x96\x79\xe3\xb3\x56\x44\x3e\x4d\x89\x4c\xdd\x11\x00\xf1\x08\xd4\x8f\xed\x58\xac\xbf\xbe\x06\x30\xf3\x40\xc0\x98\xc4\x64\x4e\xf9\x13\x2f\xf8\x5d\x6f\xa9\xf4\x54\x5f\x8c\x96\x0a\xf3\x8a\xfe\x10\x9d\x2a\xa4\x60\x45\xc9\x96\xb6\x99\x83\x56\x7d\x19\x21\x6e\x2f\x21\xb4\x14\x95\x7a\x41\xbd\xb4\x64\xde\xb1\x2f\x21\x55\x6f\x28\xd9\x09\x0a\x61\xd9\x13\xa3\xd4\x27\x74\xc4\xd4\x01\xc8\xc4\xc4\x91\x5c\x4a\xe2\x25\xd3\x3a\x53\x0c\x29\x5a\x8d\xf5\x72\x08\xf9\x9f\xd7\xd8\x3f\xc8\x36\x44\x2a\x97\xf0\x5e\x64\xd8\xbb\xfc\xdf\xaa\x05\x9f\xa8\xad\x6c\x80\x25\x57\x43\x25\xd0\xe5\xc9\x92\x28\x6b\x32\xdf\x92\x12\x79\x46\x0b\x94\x5f\xc7\xce\x5b\x9c\xe3\xb9\xce\x54\x90\x8e\x9d\xd7\xc4\x03\x13\x04\xe7\x66\x28\xb7\x10\x0a\x0c\x2f\x8e\xc5\x15\x11\x11\x76\xe1\xa6\xe2\xd6\x62\x64\x9d\x71\xa9\xc1\xd5\x27\x4c\xe1\x9e\x83\x8d\x0e\x63\x34\x7f\x17\xf6\x75\x5b\xfd\xc6\xb5\x23\xdb\x7c\x69\x71\x81\xcf\x34\xf9\x79\x54\xa0\xa0\xac\x95\xaa\x01\x32\x1e\xbb\x24\xdf\x20\x67\xce\xee\xc5\xfe\xdf\x8e\xb1\x7f\x94\x27\xbd\x24\x4a\xd6\x07\x2b\xbd\x54\x8a\xf6\x7c\xa1\x82\xfa\x9f\x19\x0b\x7e\xc5\x5b\x1d\xf5\x73\x29\xc4\x9c\xb8\x40\x0c\x9a\x56\xd2\x5f\xdf\x40\x76\x40\xf8\x50\xe7\xac\xe9\xda\x78\x3b\xe9\x8a\x30\xce\x9a\x9c\x18\xc0\xf5\x38\xe8\x1d\x89\xa5\xc0\xcc\x6d\x89\x01\xed\x6d\xb1\x06\x10\x0b\xb4\xc6\x2c\x75\xb9\xc9\xe7\xa2\x88\x8f\xec\x0a\x88\xe4\xb9\x4b\x0b\x25\xb4\x8c\x17\xb3\xbb\xd9\xe9\x5d\x28\x2e\xd5\xa5\x3e\xe5\x9d\xd9\x36\xe5\xe8\x0e\x3f\x68\x98\x26\xbd\x54\x0e\x58\x03\xcc\x0d\x0f\xc4\x99\xc8\xc3\xac\x03\xf6\xb3\xdd\x64\x23\x3d\xe5\x35\x77\x10\xb6\xb7\xf9\xb7\xd8\xf5\xdc\x80\x2e\xf1\x36\x8f\x7d\x05\x60\xd7\x26\xfd\x38\x77\x2d\x66\xab\xc9\x55\x19\xfb\xfd\x60\x7d\x6e\x9b\xdf\x2b\xec\x8b\x05\xcd\xb2\x76\x14\xe5\xf0\x66\x71\x85\x75\xc1\x72\xa1\x68\x98\x18\x6b\x61\xfe\x82\x07\xe6\x65\xa2\x2c\xff\x29\x8f\x35\x76\x03\xdd\x6c\x0c\xbc\xc1\x1b\x3c\xd7\x47\x5a\x32\x09\x1b\xc7\x3f\xb7\x4c\xe5\x85\x83\x1c\xee\xfd\x25\x1b\x31\x69\x95\x84\x9c\x28\xdb\x50\xa4\x1b\xf1\xba\x06\x96\xba\x24\xb6\x2c\xc6\xec\x93\x1e\x3b\x94\xf5\xd7\x70\xb5\xfb\x3f\xe7\x05\xff\xc6\x1b\x56\xaf\x11\xc7\xea\xd5\x7d\x11\x61\xfd\x4b\x49\x7b\xd8\x63\x12\xdc\xa3\x1f\xdd\xdb\xbc\xc7\x14\x79\x6f\xf3\x1e\x30\x27\x6a\x71\x70\x6f\x33\xdb\x6c\x35\xef\xd1\x44\xbf\xf4\x52\xb0\x9d\x29\xc4\x88\x1f\x04\xce\xa4\x6f\xa0\x6e\x91\x13\x27\x6b\x21\xa9\x7f\xd7\x63\xb7\x3a\xbe\x50\xff\x97\xbd\xe0\x23\xde\x32\x3e\xb2\x8d\xff\xae\x6b\xd3\xa6\x1b\x87\x91\xa7\x73\x69\x2e\xda\x12\x83\xac\xce\x2f\xc7\x84\xab\x58\xc7\xa3\xca\xb9\x5c\x
\x6b\xab\x17\x56\x5c\x08\xdd\x26\xa7\xbc\xfc\x68\xe0\x5e\xda\x81\xce\x19\xd5\x72\x2b\x58\x49\x7d\xaf\x1e\xd5\xf9\xf1\xe3\xc7\xa0\xcf\x26\x55\xa4\x2b\xbb\x6b\x44\x02\x01\x73\x04\x23\x48\x61\xf1\xd6\xb2\x53\x03\x9a\x41\x4c\xfa\xc0\x84\x34\xe9\x32\xae\x61\xe8\xbb\xb9\x40\xc1\x9d\x60\x88\xa6\xa6\x20\x7d\x30\x36\x3f\x9b\x0f\x64\xe5\xd2\x22\x34\xd3\xb8\xd3\xea\x7a\x57\xd0\x06\xb6\xb7\x50\xa7\x1f\x75\xc2\x28\xd2\x0a\x8b\x7e\xc3\x74\x7c\xe5\xd2\xa2\x33\x13\xb3\xec\x2e\x76\xf2\x69\xcc\xc4\xea\x85\x15\xf6\xd3\x35\x76\x80\x66\xd7\xff\x57\xdb\x19\x6b\x46\x17\xa3\xed\x0e\x7f\xe2\xcd\x95\x17\x0c\xf2\xe7\x44\x20\x4a\x51\xfd\xd3\x81\xa5\xd2\x24\x12\xb4\x93\xf8\x88\x66\x53\x12\xf1\x00\x16\x67\x93\xcf\xd9\xc4\x3a\x49\x87\x1f\xa1\x12\x8f\xa8\x25\x7c\x04\xd6\xfe\x91\x82\xea\xba\x64\x7e\x31\x5c\x31\x09\x11\x6e\x83\xb5\x28\x8a\x92\x2d\x3c\x97\x13\xd1\x5e\x13\x91\x88\x5b\x32\xb5\xc7\x3d\x49\x75\xfb\xc1\x17\x9b\xac\x87\x2d\xd8\xf4\xb4\x5c\x84\xa6\x96\xa5\x97\x9a\xec\xbb\x0f\x39\xa0\x22\x2e\x7c\xa0\xc5\x80\x36\xa7\xb3\x19\x32\xff\x0f\x0e\x06\xf9\xf0\x63\x8d\x90\x97\x69\x47\x52\x51\x4e\x91\x0a\x91\x59\x66\x57\x25\x74\x8c\x10\xb3\xf4\x4d\x0c\x65\x2f\x88\x6c\x41\xf7\xe8\x88\x96\x8b\x49\xf0\x99\x03\x70\xb1\xdb\x94\xe9\x9a\xba\xd8\x3d\x28\xd3\x35\xd4\x9c\xaf\x56\x58\xd2\x88\xf5\x6d\x0d\x83\x01\x67\xf9\xba\x3a\x21\xd4\x3e\xaa\xf3\x2d\x64\xe0\xc2\x2b\x4e\x9d\x14\xdc\x3a\x6f\xcb\x48\xaa\xff\xf6\xd2\xe4\xda\xa0\xc9\x79\x70\x67\x40\x6e\x4d\x11\x45\x25\xef\xc2\x01\x52\x26\xfc\xf3\xc1\xdd\x5a\x55\x0b\x8b\x88\xfc\x12\x11\xde\xb2\x39\x23\xb7\x29\xf4\xa2\x46\x2d\x58\x08\x4e\x21\x6a\x81\x55\xe0\xfd\xda\x10\xb5\xeb\xe2\x5e\xe7\x91\xf1\xf7\xf1\xa0\xbb\xa3\xf1\x57\xc3\x7f\x68\x8f\x2c\x5a\x4e\x83\x75\x99\x07\xb8\xbc\x22\x69\x3d\xc6\x3f\x83\x26\x0f\x02\x3e\x01\xa9\x98\x93\x23\x1a\xf1\xe7\x35\x76\xc8\xd8\x32\xfd\xcf\xd5\x82\xdf\xa8\x19\x72\x0c\xbb\x41\x8e\xad\x53\xc3\x32\xb9\x6d\x6a\xf2\x42\xba\x82\xc8\x4b\x09\x12\x8a\xb8\x3a\xf4\x47\xf9\x96\x94\xb1\x7a\x5c\x94\x0b\x67\x77\x64\x71\x2b\x67\x76\xcb\x43\x73\x7a\x52\x0f\xc1\xa4\x4b\x84\xc5\x08\x6a\xb3\x2c\x37\x43\xb9\x55\xfe\x0a\x93\x50\x01\xed\x08\xb5\xab\x06\x10\xb1\xb5\x2d\x88\x98\xa1\x01\x0a\x44\x14\x05\xf0\x4d\xd1\xbc\xa1\xaf\xe0\xb6\x66\x28\xe0\xed\x26\x80\xfd\x46\x46\x9d\x8a\x9f\xdc\xfc\xf9\xc2\x94\x7d\x21\xb8\xcf\x36\x46\x5b\xe9\xae\xa0\x74\x12\xec\x0b\xdd\x2a\xd5\xbd\x64\xdb\x55\xf5\x4a\x76\x38\xeb\xaf\x99\xc2\x2f\x07\x67\x56\x8a\x3f\x77\x5f\xbe\x2e\x3e\x4e\x62\x17\xba\xe3\xad\x87\x1c\x7d\xbb\x95\xc0\x71\xa5\x25\x13\xca\xef\x0b\x52\x64\x12\x2c\x0c\x9f\x3b\x18\xbc\xc8\xfc\xe5\x1a\x16\xe0\xb1\xa1\xa7\xab\x34\x28\xfc\xcc\x81\x9b\x97\xaf\xbd\x42\x19\xbf\x5a\x1b\x64\x36\x82\xa0\x02\xca\x58\x0f\x55\x05\x4b\xe0\xf6\x30\xc6\xa3\x27\xfe\xa6\xf9\x64\x0f\xe6\x93\x4f\xd8\xe6\x93\x9f\xd9\xa3\xf9\xe4\x9f\x7f\x91\xac\x27\xba\xa2\xa7\xbc\xf5\x9d\x6f\xfe\x0b\xfe\x19\x03\x96\xec\x2c\xaa\x12\x4c\xb2\x11\x23\x95\xc8\xeb\x3f\xe2\x55\x1a\x3d\x97\x92\xf6\xc2\xa5\x95\x79\xb8\x03\x5c\x46\x1c\x93\x7f\x1e\x9c\x1e\x7e\x6a\x04\xc9\xc2\xa5\x15\x0d\x27\x99\x16\x5e\x6a\x6d\xd3\x77\xa4\xd3\x49\xf6\x95\x74\x80\x7f\x69\x70\xa8\x92\x45\xea\x4b\x34\xa6\xa6\x93\x8f\xf0\xeb\x87\x2b\x81\x11\x1d\xfe\x5a\xff\x7d\x87\x83\x73\xce\x13\x75\x45\xcc\xb8\x58\x5f\x4f\xe5\xba\xc8\x35\xd3\xa7\x4e\x0a\x82\x66\x12\x99\x6d\x9b\xf7\xa4\x75\x7a\xb9\x1c\xf7\xec\xa6\x44\xdd\xab\x44\xbd\x29\xde\x6e\x32\xac\x38\x0c\x2b\x1f\xd5\x0c\x2b\x3f\xbd\x1d\xc3\xca\x48\xf6\x6a\xd5\x8b\x51\x74\xd5\x44\xf6\xfb\x5c\xd0\x8a\xfc\x61\xc1\x94\xf2\xa9\xed\x98\x52\xaa\xfb\x81\x71\x30\xef\xf4\x28\xd2\xc9\xee\x0b\xa5\xb5\x19\xe9\x84\x92\x4b\xed\x6e\xb5\x6a\x75\x9
0\x4c\x3f\x13\xeb\xf2\x39\xe9\xe9\x53\xde\x03\x3b\x9f\x4a\x47\xfd\xe9\x46\x85\x05\xda\xe9\x74\x19\xa2\xff\xdd\xb7\x8c\x70\xbc\xb5\x52\x99\x3b\xee\xe5\xbf\x3a\x1c\x7c\xc1\x9b\x6b\x8b\x5e\x8e\x11\x14\xea\x05\x25\xff\x12\x2e\x34\x44\x25\x02\x0a\xb8\xe4\xc6\x26\x2e\x0b\xbf\x38\x92\xf1\x05\x91\x0b\xb2\x4a\x68\xf3\x52\x11\x1b\x0c\x48\xd2\x1a\x51\x2f\xe3\x9d\x10\xed\x7a\x0e\x6a\x03\x79\xf0\xad\x72\x08\xc2\x41\xbd\x8d\x87\x48\x53\x37\x50\xbb\xb1\xb7\xf5\x5f\xab\x79\xad\x70\x60\x3b\xa7\xd0\x5f\x1f\x62\x7f\x34\x06\x20\x1e\xea\x52\x77\x31\x69\x4b\xff\xb7\xc6\x74\x58\xef\x7f\x18\xbb\x4c\xc6\x95\x59\x80\x22\xe5\x6b\x6a\x99\xd8\x86\x52\xcb\xb1\xca\x93\xd8\xe0\x0e\x62\xff\xd6\x06\xc6\x8c\x52\xe0\x5e\xc7\x3c\x69\xe5\xfa\x64\x31\x57\xd0\xe9\xe9\xe9\x69\x68\xef\xf4\xa9\x53\xa7\x10\xd2\x48\xe7\x39\x94\x5e\x84\xb7\x4e\xcc\xcc\x34\xf9\x2b\xe6\x2e\x5e\x00\x78\x42\x35\x75\xc0\x10\x8e\x25\x0b\x88\x72\xb2\x3e\xce\xea\x00\x17\x57\x24\x3b\xbb\xbf\xc2\x0d\xd3\x74\xcf\xf5\x0f\x4f\x9f\x3c\x7e\xbc\xc9\x17\xc2\x14\x40\xc6\x43\xb2\x40\xeb\x78\x2d\x91\x17\x00\x01\x98\xf7\x6f\xc5\xe8\x52\x64\x8d\x66\x94\x0e\xd7\x37\x34\x86\x70\x2b\x89\x3b\x51\xa8\xd1\x69\x30\x26\x43\xeb\x37\x68\xae\x87\xb2\x8a\xb9\xef\x42\x3c\x07\x24\x0e\x74\x32\xb0\x6e\xe0\x91\x4c\x66\x89\x7e\x94\x6b\x23\x36\x16\x56\xcc\x55\x26\x73\x37\x42\xf8\x5d\xe3\xfa\x12\x72\x7d\x3c\xf8\xd4\x58\xd9\x76\x0c\xa1\x88\x6e\xbe\x6b\xc5\xba\x34\x16\x11\x8a\x3d\x6a\xeb\x65\x59\x2c\xfc\xe4\x55\x38\x1a\xb0\x93\x2c\x00\x5b\x08\x4d\x82\x4e\x6d\x41\x94\x61\x6c\x59\x5b\xae\xca\x01\x74\x8b\x76\x9a\x7e\x4c\xb0\x3f\x8b\x9d\x32\xc3\xba\xd2\xa4\x65\x1b\x37\xcf\x36\x35\x17\xd6\x5a\x35\x61\x14\x0a\xd9\x8f\x87\xbe\xa6\x43\x9a\x36\x2d\x41\x9c\xab\x36\xd9\x31\x63\xe4\x33\x20\x7c\x6d\x0b\x28\x1e\x2c\xc0\x30\x0a\x75\xbb\xbf\x99\xcc\xfb\x3d\x2c\x5f\xa6\x29\xe4\xf8\x03\xd0\x38\x72\x4b\x93\x4f\x42\x9b\x30\x9b\x7c\x49\xb5\xd0\xd8\x3c\xd5\xa6\xcd\xc3\x4d\x69\xf4\x2f\x55\xa7\xa6\x25\x57\x95\x1c\x69\x36\x8f\xe0\x32\x4c\x52\x9e\xe5\x22\xa5\x35\xa5\x9e\x3b\x17\xc8\x26\xab\xb3\x3b\x77\x3c\x47\x5e\x2a\x07\xab\x89\x6a\x02\x5b\x62\x07\x75\xa3\xfc\x85\xe0\xe4\x0a\x99\x47\x6d\x6c\x74\x9a\xf2\x24\x85\xb3\x03\x86\x51\xb7\x9b\xf2\x57\x9c\x50\x88\xb7\x78\x8c\x65\xf0\x09\x78\xe6\xbe\xde\x0b\xb6\x6c\xb4\xfc\x4c\x0b\x5e\x1d\x25\x74\x24\xb3\xac\x3e\x08\x96\xfd\xb4\x91\x8e\x35\xbc\x31\x56\xe2\x40\xb1\xdc\xca\xbe\x6c\x74\x10\x82\xff\xb9\x5b\x82\x3f\xf4\x74\x54\x1d\x68\x96\xaa\x4d\x6d\x2e\xd6\xb2\x3c\x15\x86\x0f\x2a\x4b\x3a\xf9\x16\x62\x7d\xe2\xab\x2e\xc3\x44\x77\x90\xbd\x3a\x9a\x44\x82\x68\x34\xf2\x24\x1d\x22\x4e\x06\xe1\x6d\xbf\xcd\x8f\x1d\x9b\x3e\x39\x59\x40\x1d\x80\x7d\x15\x57\x3a\x88\xd8\x62\xe3\x6b\xe6\x03\x13\x37\x84\xdc\xcc\xda\x11\x09\xc1\xa4\xb0\xee\x44\x9c\x6d\x81\x37\x93\x74\x52\x74\xfe\x58\x8e\x0b\xb4\xe1\x3a\xc7\xc2\xdb\x19\xfb\xdb\x42\x09\xf9\x33\x8f\x35\x77\x1b\xb8\x41\xea\xc7\xcf\x79\x17\x8b\x5c\x86\x68\x50\xa4\x30\xb8\x11\xe5\x3a\xda\x92\x2f\x25\x56\x26\x3c\xfc\x32\xc8\x72\xd9\x6d\xf2\x65\x1d\xc4\x73\x93\xdc\xed\xe6\xed\xec\xe6\xed\xec\xff\xd7\xb7\xb3\x8f\xe9\xdb\xd9\x87\x77\x13\xe7\xab\x05\x8a\xba\x97\xbd\x71\xf8\x5e\xe6\xb2\x5d\x1a\x41\xf2\x9c\xdc\x5a\x2e\xef\x7c\x6b\xa9\xfb\x77\x56\xdd\x5a\xa8\x53\xe5\xfb\xca\xa7\xdd\xd0\x99\x0a\xca\x30\xcb\xb3\xee\xff\xc4\xad\xc1\xff\xf0\xac\x07\x65\x68\x76\x0c\x1b\xe8\x8a\x5e\xcf\xb0\xc1\x80\x5e\x81\x48\x41\xc2\xd2\x66\x20\xca\x80\xa4\x0b\x68\x1b\x4a\x00\x93\x5f\x57\xe3\x5f\xab\x0d\xd1\x4a\xba\x8e\x57\x57\x1d\x76\x9d\x30\xcd\x72\x2e\xd5\x36\x16\x85\xc3\x0b\x4a\xec\xa2\xd3\x10\xa0\xf1\xd3\xa4\x9f\xe3\x85\x01\x66\x8d\x0a\xb7\xa2\x69\x0c\xd4\x1d\x7c\xa5\xaa\xb1\xba\x
06\xa9\x1d\x25\x9c\xe6\xc3\xec\x25\x6c\x5c\xcd\xb3\x7f\x86\x7d\xcd\xc8\x65\x54\x31\x86\xe7\x57\x57\x97\xca\x65\xb3\xcf\x32\x62\xd2\xf8\x4d\x16\x7c\x82\x01\x97\x06\x29\x9e\x08\x94\xf3\xea\xbe\x88\x70\xb0\xda\x49\x57\x29\x5e\xda\x29\x28\x38\xf9\xcb\xa1\xcf\x55\x50\x16\xc7\x4e\xdf\x75\xb2\xc9\x2f\x25\x39\x46\x6c\x14\x79\xd9\x6d\xb9\x19\x52\xd4\x8b\x91\x58\xc1\x06\x70\x6f\xf4\x94\x12\x47\xa7\x23\xc1\x79\xe8\x52\x31\x03\x18\x4a\x9d\xe5\x33\x4d\xbe\xb8\x94\x15\xd7\x0f\x44\x04\xb2\xa3\x2a\x0a\x16\x2b\xd3\x59\xb8\x26\x40\x84\x85\xe8\xf5\xa2\x01\xcf\x13\x46\xc8\x32\x8b\x4b\x46\x89\x55\x1b\x4d\x13\xa4\x08\x30\x08\xe8\x50\x13\x76\x14\x43\x81\xae\xcc\x5e\xe1\x6d\x09\xf9\x4d\x98\xda\xa2\x5a\x90\x4a\xb5\xac\x60\x01\xc9\x96\xe8\x67\x18\x45\x31\xdc\x42\x76\x88\x97\x42\x3f\xf0\x06\xdb\xb1\x69\xb7\xc2\x0c\x22\xa7\xc2\x56\xa8\x5e\x9a\xbd\x6b\x1a\x56\x97\x9a\x73\x75\xda\xa9\x22\x66\x8f\x1f\x3f\x66\x1e\x66\x4d\x76\x26\xc1\x55\x94\x61\xc0\x0f\xa6\x5c\xea\x3e\x75\xfa\x6a\x6b\x8f\x5a\xc8\x18\x25\xd3\xe6\x62\x5d\x84\x71\x96\x9b\x58\x0f\xbe\x26\x3b\x89\x1b\x6d\x53\x2c\x49\x84\xda\xa5\x17\xc3\xcc\xbd\x46\xd9\x01\x2d\xb0\xfe\x33\x27\x02\xc7\x90\xe8\xb8\xd7\x93\xa1\x3a\x18\x2e\x47\xba\xdb\x05\xbd\x54\xb6\xc2\x4c\x06\xc5\x1d\x44\x38\x6b\x52\x07\x47\x81\x75\xa2\xe0\xd9\xe3\xed\x24\x1f\x5e\xaf\x94\x4d\x10\x74\x92\xa4\xb9\x26\xd2\x66\x2b\xe9\x06\x93\x80\x1c\xb1\x15\x46\xed\x96\x48\xdb\x41\x7d\x54\x45\xbd\x54\x76\x00\xf6\x80\x88\x98\x28\x46\x47\x7f\xc8\xc1\xd4\xa0\x2b\xb8\xb3\xa9\xaa\x80\xe2\x71\xf5\x98\xd7\x0c\x8a\x02\x3f\x72\x27\x05\x7a\x88\x5e\x4f\x8a\x54\xed\x9f\x30\xcf\x64\xd4\x29\x4c\x20\x4a\xd6\x2c\x5c\x5a\xa1\xb2\x51\xe5\x41\x20\x3d\x5c\xcf\xba\x11\xf0\x3b\x52\xdc\x10\x2b\x15\x21\x3b\x96\x5a\x57\x54\x81\xed\x84\x91\x7e\xf1\x8b\x79\x70\xa7\x6a\xe7\xb2\x5e\x1f\x26\x52\xa8\x62\x8d\xc0\x27\x14\x87\x12\x97\x76\xf8\x96\x18\xe0\x1e\xed\x70\x2d\x52\x68\xfa\x34\x33\x16\x54\x60\x3a\x81\xda\xa2\x92\xe5\x14\x33\x04\x6b\x1d\x26\x0a\xb9\x71\xc0\x89\xae\x64\x11\xb0\x01\x01\x3f\xd0\x51\xa7\xf8\xa2\x87\x24\x78\x9f\x76\x35\x79\x52\xd4\x04\x2b\xb4\xdf\xe9\x84\xd7\xf8\x44\x2a\xbb\xc9\xa6\x3e\x51\x70\x46\x60\x2c\x27\xb5\xb8\x30\x43\x0c\xf1\x3d\x4e\x8e\xc1\x21\xf6\x4f\x2a\x99\xaf\x50\x4b\x5b\xe9\xaf\x65\x32\xf7\x3f\x7d\x30\xf8\xc9\x31\xf7\x59\x39\xb5\x08\xb1\x75\xc8\x0e\x03\xc4\xbc\xdd\x2e\xa1\x8f\x42\xc2\x75\xaa\x94\xc3\xd5\x0d\x8b\x12\x8d\x7e\x2a\x32\xdd\x48\xba\xcf\x8b\x34\x97\x59\x28\x62\x75\xff\x69\xf7\x5b\xf0\xd6\x9c\x29\xff\x1a\x5f\xc2\xc2\xce\xd9\x57\x39\x08\x95\x9c\x65\x9c\xbf\x06\x70\xb2\xcc\xeb\xb3\xfc\xe1\xd7\x04\x61\x2f\x98\xe5\xc1\xcc\x74\x53\xfd\x5f\x73\x26\x78\xa2\xce\xdd\x87\x47\x9b\x47\x83\x27\x1e\xad\xc3\xa7\x50\xfa\x2c\x80\x6d\x3d\xfc\x9a\x40\x6d\x2b\xf5\x9e\x08\xea\x3c\x50\xbd\x08\x66\xf9\x5d\x27\x4f\x9d\x80\x22\xf4\x8f\x6b\xd6\x8f\xc7\xa6\x4f\x3f\xf1\x28\xe3\xfc\x09\xb0\x4d\xa2\x11\x08\xe0\x2c\xcb\xbd\x25\xe1\xb1\x19\x4a\x40\xfd\xce\x10\xef\x4c\xcc\xf2\x87\xb9\x69\xe9\xac\xaa\xaa\xce\x4d\x23\xe1\x6f\x4e\x0d\x5d\x73\x5f\x3d\x36\x7d\xda\x7e\xf3\xd8\xf4\x69\xfe\xa8\x73\x3c\x7f\xf7\x3e\xf6\xdf\x3d\x76\xc8\xcc\x94\xff\x69\x2f\xf8\x1e\x6f\x71\xc9\x9e\x3b\x10\x2b\x49\xa7\x43\xa6\x04\xad\x79\xe0\x81\x81\x26\xb0\xd4\x8e\xd8\x04\x56\x93\x26\x81\xb4\x15\x7d\xa3\x8b\xd3\x9a\xc4\xbb\x75\x5b\xa6\x6a\xbe\x45\x07\x63\xa7\xa2\x44\x28\x65\x06\x63\xc0\x30\x25\x17\x2f\x02\xb0\xc2\x11\xd4\x41\x3a\x56\x92\xe3\xec\xe8\x2e\x92\xb8\xf4\x02\xa5\xc9\x67\x3f\x5a\x63\xcf\x8f\x93\x5c\x5d\x54\x07\x66\x41\xf8\x6f\xa9\x05\xbf\x7c\x23\xbd\x5e\xeb\xe7\xe6\xa8\x6c\x99\xe3\xb1\x34\x04\xe6\x68\x85\x28\x46\x10\x6c\xea\xfd\x81\x54\x72\x28\x06\xd2\x55\x34\
x03\x85\xf1\x7a\x1d\x7f\x37\x37\xf0\x8e\x08\x23\x55\x14\x94\xa4\xb4\xe8\x8c\xb7\x36\x64\xeb\x6a\xdd\xa0\xdf\x0e\xbf\x1a\xa9\xe5\x6e\xde\x7c\x06\xc6\xea\xb5\x6c\x1f\x74\xd7\xcf\x82\x63\x90\x61\x89\x60\x09\x76\xd8\x5b\x12\x3b\xa3\x63\x0f\xa1\xd3\x80\x19\x36\xb5\x2b\x6e\x3d\x6c\x80\xaa\x8c\xfd\x8f\xb1\x4a\xfe\xa1\x73\x51\xd2\xba\x2a\x53\xc7\x2d\xf0\x91\xb1\xe0\xa3\x6e\xd6\x19\xbd\xa5\x0d\x7b\x3a\xd3\x88\xcc\x15\xfa\x57\xb1\x0e\xb6\xc3\xcb\x31\x1a\xed\xe0\x6c\xa2\x30\x1f\x75\x05\xca\xd0\xfc\x85\x36\x6a\xfc\x1b\xa0\xbf\x8a\x95\x9c\xc9\xbc\x59\xaa\x6b\x2f\xd9\x6a\x2e\x25\xa9\xc7\xbe\xd1\x63\x87\xad\x86\xf8\x83\x20\xb2\xcd\x70\xf4\x13\xcf\xf2\x24\xc5\x75\x67\x80\x2b\x1b\xf7\x92\xe6\x1b\x3b\x6f\xaa\x9d\xa6\x9b\x5b\xb9\x1f\x41\x81\xd5\xa1\xed\x2e\x72\xcd\x61\x6b\x08\xfc\xa5\x60\x1e\x86\xc2\x6d\x49\x91\xbe\x8a\x20\x34\x05\x33\x28\x5d\xfc\x74\xdd\xf4\xbe\x5d\xc1\x9b\x6e\x71\xa6\x9b\xac\x81\x6a\xc6\x71\xaa\xe7\xf2\x5c\xb4\x36\xd4\xe0\xf9\xbf\x7b\x38\xf8\x2e\xaf\xfc\x94\xb7\x44\x4f\xe9\x8d\x1a\x48\x3b\xa7\xa8\x69\x01\x6f\x60\xac\x1f\xfc\xcb\x55\xe3\x4c\x42\xda\x94\xd1\xed\x8b\x1f\xe3\xa4\xad\xf4\xba\xa1\xaa\x28\xea\x87\x44\x40\xdc\x30\xb6\x4f\x60\x6b\x56\xdf\xbb\x24\xcd\x87\xd8\xaf\xd8\x16\x82\x8f\xee\xd9\x42\xf0\x2f\xca\x16\x82\x2f\x82\x7d\xe0\x5d\xda\x3e\xf0\x76\x8f\xdd\x35\x72\x6b\x6f\x33\x8f\x60\x2b\x78\xcc\x65\x01\xd2\xeb\x89\xdc\xb8\x38\x79\x53\x34\x73\x9a\x18\x8b\x6c\x09\x15\x76\x48\x2b\xf1\x9c\x4c\x92\x6a\xe4\xb5\x65\xf4\x63\xde\x36\xe1\xd4\xdb\x35\x14\xad\xa4\x6f\xd0\x4e\x5a\x6a\xe4\xd0\xb2\x30\xac\x89\x43\xed\x42\xf4\x4e\x48\xf9\x88\xa4\x31\x9e\x0d\x2d\xcd\xa4\x47\x20\x32\x04\x23\x89\xb1\x86\x68\x3f\x6c\xe0\xdb\x32\x6d\xde\xb4\x75\xde\xb4\x75\x7e\xd1\x6c\x9d\x4f\x79\xed\x9d\xcd\x69\x73\xfe\x7d\xc6\x9c\xa6\xf7\x55\x29\x2a\xad\xbc\x79\xca\x36\xb6\x6f\x1f\x73\xee\x20\x43\xbb\xf3\x6c\x9a\x26\xa9\xff\x47\xb5\x60\xc1\xfa\xbb\x38\x03\x44\x4c\x1e\x3c\x19\x43\xce\x1b\x1c\x6e\xed\x3e\x72\xc1\x69\x41\x62\xb6\x9b\x7b\xf0\xbe\xad\xc6\xfe\x95\x57\x00\x04\xfd\x90\x17\xbc\xd5\x43\x50\x7e\x4a\xf8\xd6\x0b\x62\x64\x0d\x73\x66\x63\x2f\x94\x36\x36\x9d\x90\xb4\xd0\x28\xd9\x29\x4a\xd6\xd7\x65\xbb\xce\xb3\x84\x87\xb9\xde\x74\xb6\xf7\x30\x93\x80\x04\xb4\xe9\xa4\x30\x95\xb0\x4c\xc6\xf3\xb0\x2b\xfd\x87\x9e\x3e\xdc\xd2\x57\xad\x6a\x3c\x25\xec\xd7\x96\xc8\xec\xbe\x35\xd9\x17\x0e\xd8\xf4\xf0\xdb\x14\x37\xb7\xb4\x08\x7e\x6f\x88\x64\xfe\x77\x07\x82\x65\xfb\x81\x1b\x46\xab\x7f\xa9\x17\x09\x21\x96\xda\xdf\x0e\xb3\x16\xa5\xb7\x61\xb2\x80\xc8\xf9\x14\x54\x75\xdd\xdb\x0f\xeb\xcb\x8d\x7b\xfe\xf3\x7d\x37\x65\xe3\x5e\x65\x63\x9f\xd1\xc8\xfa\x57\x83\x7f\x8c\xff\xaa\x9e\x32\x47\xbf\xbf\x31\x5a\xd8\xe1\xa5\x72\x53\x24\xef\x45\x24\xaf\xee\x2c\x92\x67\xfc\xa9\x2a\x0f\x87\xbd\x33\xcb\x22\xf8\x1b\x6e\x67\x5f\xb1\xcd\x25\x2d\xf3\x3f\x77\x5b\xf0\x37\xb5\xb3\xb6\xa9\x46\xf0\x56\x12\x45\xd2\xf8\xda\x8b\xdb\x3f\xd8\x09\xdc\xfc\x5a\x8a\x6f\x33\xce\xa0\xb3\x68\xb7\x99\x65\xfc\xff\x63\xef\x4d\xc0\x24\xb9\xaa\x33\xd1\x2f\xb2\xaa\xba\xbb\x6e\x4b\x02\x62\xc6\x33\xf3\x8c\xe7\xf9\x4e\xc8\xb8\xbb\x44\x56\x56\x6f\x6a\x49\x8d\x90\x28\xf5\x82\x0a\xf5\x52\xaa\xaa\x16\x66\xaf\xa8\xcc\x9b\x95\x41\x45\x46\xa4\x22\x22\xab\x3a\xc5\x60\x23\x04\x03\x06\xc6\xec\x3c\x28\x16\x83\x6d\x04\x02\x06\x8f\xcd\x66\x56\x03\x7e\x5e\xb0\xc1\x30\x5e\xb0\xdf\x78\x81\xf1\xb0\x3c\x83\x17\xc6\x60\x6c\x7a\x6c\xbf\xf7\xdd\x73\xce\xdd\x22\x33\x6b\xe9\x6e\x49\xd8\x5f\xd9\x1f\xea\xca\x58\x6e\xdc\xf5\xdc\x73\xcf\xf2\xff\x40\xd1\x7d\x8c\x07\xed\x5e\xbe\x5a\x0f\xaa\x8c\x73\xb4\x2f\xe5\xc7\xf8\xd3\xc0\xcc\x81\x06\x9d\xcb\x32\xe9\x68\xa3\xce\xe5\x18\x74\x38\x7f\x6e\x75\x8b\x15\x3a\x5c\x3b\xbc
\xe5\x6f\xdf\x74\x78\xe8\x97\x6f\x38\x6a\x7d\xd8\xb5\xe5\xdc\xb7\x77\x47\xfa\xed\x68\x86\x3b\x5e\xf0\x2b\x7a\xca\x7d\xef\x28\xdb\x9d\xa3\xf0\xf1\x7f\x66\x34\xf8\xd2\xc8\x42\x4b\x28\xbb\xad\x8b\xc9\x46\x96\xea\x6e\xa2\x56\x4d\x1c\x73\x7a\xb3\x66\x99\xaa\x81\x89\x0a\x19\x19\x20\xe8\x8d\x1e\xe9\x4f\x60\x9e\x99\xcd\xd1\x72\x98\xb7\xc2\x4c\xd4\xf8\xb4\x72\x9a\x28\xc8\x7a\x30\xaa\xeb\xc4\x6a\xb0\xd4\x55\x09\x39\xaa\x69\x11\x22\xa1\x2d\x12\xd0\xd2\xfa\xee\xc9\x69\xb4\xdf\x31\x53\xd6\xe5\x33\x30\x25\x4d\x32\xb6\xc6\x8e\xc9\x27\xd0\xc5\x42\x61\x8c\xe4\x47\x51\xf5\xc1\xb4\xc6\x46\x94\x77\xe2\xb0\x87\x1e\x50\x53\x86\x6a\xa6\x82\x96\x32\x77\xc8\x0d\x70\x36\xb5\x1a\x16\xc7\xca\xb9\x14\x25\x18\x2d\x6a\x75\x60\xd2\xe0\x67\xcb\x16\x5c\x07\x51\x0b\xbf\x55\x43\x06\x43\xc7\x0f\xa1\x30\xf2\x68\x3f\x92\xe7\xf1\x2c\xca\x85\x15\x92\xb0\x7d\x0c\x1c\xd7\x05\x72\xd1\xbb\x73\xf3\xcd\xb8\xe6\x57\x07\x6d\xc6\x7a\x2b\x2d\xef\xc4\x3f\x33\x3a\x30\xf3\xc5\x09\xe2\xf2\xff\x71\x24\xb8\xc9\xb9\x32\x8c\x03\xde\x86\x6f\x36\xcd\xb6\x36\x92\xf7\x8d\xb0\xd7\xb8\xd4\xfc\x2f\xac\x04\x3f\xa0\x98\xf9\x55\xd8\x1c\xe0\xc8\xda\xdd\xf5\x04\x76\xcb\x70\xa0\xbc\x0d\xd6\xbc\x43\x8d\xbf\x11\x33\xdf\xb8\xbf\x1b\x49\x45\xd9\x45\x8f\x0f\xe5\x0a\xdd\xe5\x8f\xb6\xc3\x0e\x7b\x50\x78\xf6\xdf\xe0\xb1\xab\xe2\x34\x6c\xdc\x46\x8e\x0a\xff\xc5\xde\x86\xa9\x7e\x38\x4e\xa7\xad\x37\xc8\x96\x74\xa7\x7d\x6d\x30\xfa\xa5\x1b\x7b\x27\xbf\x3a\xa9\xfc\x23\x80\x55\x90\x26\x42\xf3\x4b\x26\x45\x8d\xbd\xb9\x32\xd0\x6b\x77\x3a\x6a\x47\xc5\x5c\x98\x2c\x43\x6c\x8e\xff\x77\x5e\x70\xd6\xbd\xa4\xb5\x82\x90\xb7\xa3\x64\xaa\x1d\x5e\xc0\x9c\x02\x84\x44\x75\x12\xdb\x15\x0b\x28\x40\x03\xa4\x09\x4c\x5b\x79\x14\x43\xf0\x54\x67\x0e\xbd\xdc\x63\x2f\xf3\x18\xdd\xf1\xef\xf5\x82\x5b\xe1\xab\x5a\x4c\xea\x6c\x56\x5d\x97\x99\x42\xb4\xb5\x65\x55\xbb\x95\x54\xf6\xc3\x25\x2c\x4c\xb7\x68\xf6\xf1\xab\xd8\xe4\xc6\xf0\x09\x56\xc4\xc9\x6c\x58\xb4\xfc\x97\x5e\x15\xa4\xa5\x6b\x26\x14\x06\xf0\xc3\xe5\x15\xf2\x6d\x2a\x44\x0e\x13\xb6\xd0\xcd\xe2\xdc\x84\xc9\x38\x31\xe8\xcd\x34\x5b\x0b\xb3\x46\x5f\xa4\x4d\x6d\xdd\x53\xe0\x0f\x4e\x6f\xde\xbb\x97\xfd\x9c\x67\x70\x21\x5e\xbf\x91\x21\x73\x73\x5c\x88\xa7\x1a\x78\x51\xa3\x0f\x5a\xf1\xe1\x6a\x85\x1b\x85\x24\xa5\x3d\xa3\x40\xa0\x63\x88\x8b\x50\xce\x76\xbb\x2d\x35\xf6\x96\x0a\xe1\xd3\xbd\xb6\x12\xbc\xa8\xa2\x10\xea\x06\x39\xe4\x31\x12\xba\x89\xec\xc8\x6e\xa4\x87\x1d\x18\x13\x61\x48\x85\x32\x85\x58\x64\x0e\x8d\x28\x57\xd4\x5a\x5a\x87\x32\xdb\x78\x18\xf3\x40\x7e\xc2\x04\xe9\x84\xfc\xfc\xdc\xe9\xa1\x81\x3f\x4e\x30\xf7\x72\x94\xa8\x71\xdd\x37\xb5\xaf\xc6\x9f\x0c\xdc\x49\x76\xd0\x08\xa0\xca\xc3\x2b\xf0\xed\x72\x0b\x9c\x58\x15\xc7\x52\xf3\xc1\xdd\x6c\x8f\x7c\x71\x41\xca\xaa\x07\x76\x07\x6f\xdc\x3d\x4b\xbf\xec\xb0\x60\xe5\xc3\xc8\x3a\x99\x28\x1c\xd3\x38\x74\xa9\x9a\x55\x58\x6d\x78\x59\xe5\x14\x0c\x22\x7d\x26\x32\x2d\x7e\x9d\x3c\x63\xd5\x8b\x63\xfc\x8c\x8e\x30\x10\xd0\x29\x30\x16\x42\xde\x8b\x7b\x35\x7e\x1d\x9f\x85\x98\x11\xf3\x9c\x8e\x81\x09\xcd\xe3\x18\x57\xc2\xf3\x4e\x1c\x15\xb2\x2b\xa1\xa3\xce\xa8\xe9\x0e\x44\x58\x0d\xa8\x4d\xa2\x56\x0a\x51\xd8\xca\x87\xf5\x9f\x61\x1e\x49\xf5\xc8\x7d\x00\xe6\xa2\x12\x15\x8c\x6b\x61\x01\x8e\x32\xbd\xd7\xc3\x2b\xfa\xf3\x10\x61\x3f\xb5\x4f\x11\x73\xa5\x99\x2c\x55\x45\x54\x48\x1d\x9e\x71\x12\x5c\x52\xa2\x61\x0b\xa4\x18\x15\xab\x22\xeb\xf1\x8e\x22\xa8\xc3\x1a\x4c\xae\x45\xb9\x8a\x9c\x81\x50\x05\xea\x53\xc6\x75\x91\x88\xaf\x48\xe1\x62\xf2\x6c\x4b\x72\x3a\xcc\x0b\xdd\x0e\x1d\x98\x55\x50\x5c\x8e\xd4\x4c\x14\x1d\x76\x3a\xe0\x05\x83\x4a\x0d\x2f\x55\x29\xf3\x00\x22\xb2\xa8\xf6\x18\x00\x33\xd5\x4c\xd3\xa9\xa5\x30\x53\x8d\x12\xb9\xbe\x34\xb5\x1
4\xde\x53\x22\x98\xc3\x37\xd5\x03\x4b\xe1\x3d\x13\x35\x76\x5d\x09\xd2\x49\xf9\x61\x8e\x21\xf3\xd3\xc6\xf3\x0e\x5c\x7c\x1d\x8c\x48\x2b\x4a\xf8\x49\xfd\x58\x51\x72\x6a\x02\xa7\x19\xe1\xe1\x43\x4f\x28\xfe\x34\x35\x81\x65\x8f\x64\xf4\x98\x6c\x35\x78\x0e\x81\x69\x4f\x8a\x9f\x59\x1a\x8a\x0c\x67\x30\x76\x29\x82\x2e\xb0\xf2\xe7\x50\xe9\x25\xb2\x58\xd0\xb0\xd1\x11\xab\x96\xac\x02\x6b\xb0\x53\x87\x06\xf7\x85\xb3\x70\xff\x71\x9c\x3d\xd6\x12\xb3\x9d\x34\x8e\xea\x3d\x2d\x62\x67\xd3\xc6\x3c\xd1\xc7\xcc\xc2\x1d\xff\x73\xe3\xc1\xeb\xbd\xbe\xcb\x7c\x39\x5d\x15\x19\x6d\xf5\x21\x82\x7f\xc9\x0a\xb4\xc3\x15\x51\xc2\xe1\xb1\x12\x8b\x54\x19\xfc\x78\x9a\x14\xe2\x02\xa1\xa8\x2a\x09\x1c\x76\x3a\x71\x84\x6d\x0d\x09\x9a\xb6\x61\x94\x76\xd9\x50\x0d\x56\x16\x25\xfc\x60\xed\xd0\xc1\x5a\x19\x2e\xe2\xd9\xe4\xcb\x5b\x1a\x7e\x6a\xdb\xb4\xc5\xe0\xcf\xfb\x0f\x79\x39\xf4\x17\x5f\x33\x1b\xf9\x8e\x91\x62\xc7\x48\xb1\x63\xa4\xb8\xb2\x70\x11\x4b\x9b\x9f\x39\x6f\xf5\x1f\xaf\xcf\x9c\xb8\x24\xf5\xb9\xb3\x6f\x25\x0f\x44\x8a\xf8\x1f\x8f\x74\x03\x34\x34\x68\x34\xe0\x45\x64\x51\x2a\xdf\x87\x1d\xc0\xff\xe0\x23\x83\xe7\x38\x57\xf4\xa2\x56\x61\xee\x04\x04\xd4\xa1\x87\x08\x20\x10\x91\x95\x53\xca\x73\xa2\x5b\x94\x0c\xa9\x32\x0b\x17\x34\xa5\x2c\x69\x3c\x61\xd2\x23\x22\x43\x7a\xb2\xb6\xee\x21\x7c\x84\x23\xe4\x3e\x7a\xcd\xce\xba\xd9\x59\x37\xae\x71\xef\x05\x15\xf6\xc8\x4e\x26\x44\x1b\xb2\x27\x69\xdf\xfe\x6b\x2f\xf8\x1f\xde\x6c\xe9\xaa\x3a\xb6\xd2\x2f\x50\x21\xe9\x91\x64\x59\xa5\xef\x15\x2d\x2e\x8f\x23\x99\x9e\xbb\x18\x5a\x96\x36\xf9\x59\xa9\x65\x56\x39\x95\x7a\x5a\x3e\x34\xab\x9f\xb1\xf5\x90\x41\x4f\x48\xd5\xb2\x9b\x98\xf0\x2a\x0d\xa6\x27\x17\xe6\x64\x2c\x9b\x5d\x05\xb2\x68\x1d\x6e\x72\x36\x4d\x66\x75\xed\x74\x31\xc4\x2b\x3d\x09\xbc\xd2\xf6\xd8\xbe\xd3\x53\x78\x2b\x6f\xd1\xe4\x5a\x2f\xf6\xcc\x42\x53\x9b\xae\xbb\x5a\x4d\xb0\x97\xe5\xcc\xd1\x8f\x80\x82\x02\xfd\x92\x89\xba\x88\x56\x85\x02\x38\x55\xa1\x90\x85\x83\xd8\x16\xe5\x24\x02\x50\xb7\x8f\x00\xf7\x1e\x79\xae\x9d\x8c\xe8\x1d\xdd\xe1\x32\x75\x87\x97\x7a\x6c\x2f\x82\xf7\x22\xaa\xcf\x7f\x0c\x52\xeb\x27\x9d\x82\xc2\x6c\x29\x2a\xb2\x30\xeb\x29\xd1\x08\xa3\xd9\xcd\x91\x8f\xa2\x93\xa5\xab\x51\x43\xe4\x7c\xb9\x1b\x35\x44\x0c\x1d\x9e\x26\x6a\x7c\xfb\xe6\x89\x15\x68\xd8\xcd\x4b\x87\xe1\xfb\x47\xd8\xd5\x88\xe1\x48\x8b\xc0\x7f\xcd\x48\xf0\xc2\x11\xe7\x92\x8e\xc7\xcb\xad\x8c\xe6\x28\xe7\xee\x06\x33\x2c\x9a\xb1\xb0\x40\x53\x75\xbd\x60\xf9\xca\xb9\x49\x78\x97\x86\x85\x5b\x6e\x26\xe5\x69\x7e\x4e\x85\x85\xba\x5f\xa4\xed\xc7\x04\xfd\x2e\x3a\xd5\x5e\xac\xf1\xdb\xd3\x35\x5c\xf7\x51\x13\x01\x68\x8b\x16\x64\xf4\x94\x4a\x12\x39\x02\xca\xe5\x3a\xb9\x2a\xca\xca\x85\xd1\xa2\xcf\x05\x26\x80\x01\x9f\x13\xd8\xbb\xdb\x61\x1c\xcb\xf9\xa7\x57\x6a\xde\xad\xb7\x4a\xb8\x98\x7d\x9f\x53\x07\x07\x00\x87\x18\xd2\x49\x0e\x8d\xf8\xf6\xa3\x7e\x8c\x76\x50\x0a\xfc\x71\xea\x52\x36\x74\x7f\x77\x2f\x3b\xb2\x11\x7e\x27\xa1\x03\xce\x75\x63\x41\xe0\x80\x64\xfc\xfe\xf8\xde\xe0\xeb\x95\x61\x77\x5d\xfb\x2a\x39\x2f\x90\x37\x0a\x72\xef\x30\x5e\x1a\xc5\x19\xfc\xad\xc6\x16\xac\x3a\x9d\x58\x14\x00\xf8\x2c\x12\x70\xcf\xa8\x4c\x20\xf2\x01\x69\x68\x4f\xe2\xe0\x02\xf9\x01\x3c\x72\x0a\x00\x58\x65\xde\x00\xce\x72\x0f\xa3\x6b\xe4\x90\x77\x44\x16\xa1\xc1\x8d\xc2\x86\x28\x3b\x0f\xc2\x84\x6e\x23\xa7\x8c\x0b\x3d\x8a\xf5\x05\xce\x73\x60\xe6\x5e\x15\x30\xb9\xb0\x21\xe4\x32\xc9\x11\xbf\x04\xc9\xc4\xe5\xbe\x0a\x31\xf6\xf2\x6c\x98\xe7\x5d\x8a\xf0\xc9\xb1\xa7\x78\x2b\xa4\x35\x60\xa0\x41\xaa\x5c\xac\x8a\x04\xcd\x19\x61\xa1\x39\xc9\x4d\x57\xd4\xd6\xbd\xab\x95\x68\x81\xae\x5e\xf7\x1e\x99\xa4\xc9\x9c\x7b\x89\x99\x17\x1c\x35\xec\x35\x7b\xd8\xcb\x2a\xec\x11\xa6\xa9\x18\xc8\x
f5\xf7\x5e\xf0\x0d\xef\xa4\x7b\x11\x46\xc1\xb8\x81\xea\x69\x7b\x89\x20\xce\xb0\x43\xe1\x4b\x48\xfc\xaf\xf9\xc1\x74\xdc\x57\x5a\x07\x3b\xba\xee\x5b\xe8\x20\xf3\xd5\x2a\xae\x93\x10\xc5\x9d\x81\x67\x25\x81\x20\xf2\x64\x9f\x89\xe0\xee\x7b\x17\xb3\xfa\xc3\x82\x3b\x8d\x96\x97\xa7\xd2\x4c\xee\xbe\xee\x75\x0a\xf3\xb2\xba\xd0\x96\x81\x7f\xe2\x31\xab\xb3\xfc\xdf\xf6\x82\x5f\xf6\x66\xcc\xc4\x03\x3e\xf8\xae\xd9\x3e\x69\x0e\x64\xa2\xe8\x66\x89\x85\x60\x52\x0f\xe3\x98\xb8\xed\xf5\x67\xf4\x0e\x0d\x54\x85\x98\x7d\x13\xf7\x9c\x88\x35\x28\xd6\xe9\x03\xa7\x6b\x54\xf4\xa9\x73\x7f\x50\xff\xe4\xf6\xec\x75\xb0\x24\x5e\x57\x61\x7d\xf3\xc3\xbf\xb7\x12\xbc\xdf\xeb\xeb\xa8\x92\x6f\x20\xac\x2b\x98\x15\x33\x63\xa3\x5c\xe5\x26\xca\x29\xdd\x11\x59\x33\xcd\xda\x72\x4d\x26\x69\x32\xa9\x77\x3c\x3c\x26\x60\x39\x59\x43\x64\x68\xb2\x82\x3a\x47\xcb\x09\x84\x1b\x27\x45\x15\x73\x0f\xc9\xb0\xdc\xe8\x02\x05\xa6\xa6\x5f\xeb\xa4\x79\x1e\x2d\xc5\xfd\x23\x67\xdc\x10\x8f\x67\x8f\x63\x37\x6d\xc4\x80\xe6\x0a\xaf\x52\x73\xd9\x4f\x56\x98\xbb\x90\xfc\xbf\xf3\x82\xff\xe2\x5d\xb9\x2e\x79\x88\xbb\x63\x63\xd4\xeb\xa1\x58\xcc\xd8\x17\xa3\xec\xc0\x06\x0f\xa3\xc5\xa9\xd4\x81\xfe\xef\x8d\x04\xa7\x4b\xd7\x78\x2b\x8d\x1b\xb9\x1d\x26\xa9\x10\x37\x34\x91\x1a\x2e\x67\xe5\x87\xb6\xa7\x8d\x3c\x37\x8a\x6c\xc9\x75\x64\xfd\x75\x85\xfd\x96\xc7\x1e\x61\xcd\xe0\xf3\x73\xa7\x73\xff\x43\x5e\xb0\x6e\x4f\x60\x79\x91\x4e\x93\x94\xdd\x16\x66\x45\x14\xc6\xe8\x05\x42\x7b\x1e\x72\x23\x90\xa6\x82\xea\x06\xc0\xdc\xc2\xb9\x8f\x5f\x47\x82\x1d\x87\x12\x2d\xb8\x98\x2d\x69\x12\x9c\xab\xbc\x19\xc9\xd5\x98\x17\xa2\x63\x9b\xc1\x07\x20\xdb\x9a\x71\xb9\x9a\x39\x30\x8a\x6f\x91\xda\xbe\x6c\xa6\xff\x1a\x2f\xb8\x60\xa0\xa6\xd5\x14\xb3\x20\xa7\xed\xce\xd1\xb0\xd3\xb9\x83\x3b\xdd\x81\x6c\xea\x4e\xb7\xb0\x50\xa6\x31\x9f\xbc\x25\xc2\x46\x55\x41\x25\x6d\xa7\x82\xff\xe5\x11\xec\xd8\xa5\xf1\xa2\x82\x87\xf3\xaf\xae\x09\x9e\xb5\xc1\xfd\x12\x75\x85\xca\x34\x68\x76\x11\x67\x39\x04\x83\x2c\xd9\xbc\x87\x14\x53\x5b\xf7\xae\x81\xbf\x16\x00\x55\x6c\x4e\x34\xd7\xbd\xbd\xed\xf0\xc2\x40\x5a\xe3\x2f\x5d\xcd\xde\x5b\x61\x7b\x54\xd2\x82\xff\xb6\x0a\x3b\xbd\x2d\xd6\xc4\x21\x95\xb8\x8d\x0a\x0c\xbe\xe0\x69\x70\x05\xad\x72\x90\x80\x20\x5a\x42\x1b\x7c\xa1\x30\x60\x68\x2a\xb4\xe2\x3c\xa4\x69\xf3\x13\xe9\x5a\xc2\x1b\x00\x9e\x05\x22\x66\x3f\x7c\x87\x6e\xc2\xdf\xf0\x04\x28\xa2\xb9\x4a\x20\x8f\x56\x45\xdc\x9b\x80\xd4\x56\xc8\x79\x52\x80\x4a\x4a\x9b\xbc\x7d\x76\x7a\x1e\x6b\x31\xa7\x99\xc5\x90\x29\xb3\x6b\x15\xcc\x1b\xb2\x64\x39\xfb\xe1\x84\xc0\x1e\xf0\x98\xdd\x9f\xfe\x1b\xf4\xb1\xf4\x3e\xcf\xba\xae\x43\x6e\x3a\x1d\x91\x59\xee\xea\xc2\xe1\xf5\xd5\xdc\xc6\x8e\x17\xd3\xe2\x06\x96\x6a\x86\xaa\x13\xa8\x13\x75\x6d\x82\x89\x91\xd5\x24\x2c\x78\x3b\x4a\xd4\x67\xdd\x13\xe9\xbd\x63\x6c\x37\x31\x6c\xfa\x7f\x3b\x1a\xbc\x6b\x54\xd1\x6d\x3a\x4a\x67\x6e\x67\xb9\x60\x37\x50\x5d\x00\x2c\x09\x08\x23\xc2\xb8\x0e\x59\x23\xce\xbc\xa4\xda\x73\x64\x91\xda\x5f\x00\xf2\xc2\x85\xa8\xdd\x6d\x97\x6e\x85\xf5\x2c\xcd\x31\x73\x5d\xd5\xc0\x56\xf3\x27\x6a\x1c\x36\x81\xc1\xe5\xa2\xfe\x50\xa7\xa4\x15\x8a\x29\xea\x69\x52\x7a\x59\x6b\x0d\xea\x66\xcd\x20\x3c\x74\x28\x8c\x23\x97\xb6\x93\x4c\x11\xfd\x9c\xd6\xf2\xe0\x55\xe3\xfc\x64\xb6\x9c\x56\x2d\x6e\x52\xf8\x6e\x2e\x25\x75\x3d\x03\x84\xeb\x50\xb9\x0f\x1a\xa6\x92\x51\x82\x37\x1b\xb8\x29\x01\x7b\x95\x3c\x40\x84\x35\xce\xe7\x85\x20\xb7\x69\x23\x5a\x8d\x1a\xdd\x50\x75\x04\xb7\x31\xba\x09\x3f\xce\x65\x03\x21\x1a\xdf\x56\xba\x86\x98\x6a\xc0\x81\x9b\x36\xd5\xfb\x50\x2f\x39\xe1\x53\xf0\xef\x0f\x99\xea\xf4\xb0\xea\x74\x3a\xaa\xdd\x78\xe0\x31\xc3\xe8\xc9\xb7\x41\xde\x31\x9c\x4d\x55\x8a\x34\xf6\x37\x15\xb6\xd7\x9a\xa0\
xfe\x9f\x55\xd4\x7a\xf9\x6c\xc5\xba\xae\x95\x09\x30\x52\x5d\x81\xf5\x22\x97\x6d\x8d\xcb\x25\xd3\xb0\x6c\x58\x07\x91\xc6\x8f\x97\xbe\x6c\xe9\x28\x4b\x82\x1f\x50\x0e\xd3\x30\xee\xb4\x42\x65\x9b\x02\x33\x96\x12\x1a\x62\x21\x7d\xaa\xc8\x52\xc8\xa5\x4f\xc2\x25\x48\x72\x95\x67\x28\x9b\x88\x02\x4d\x8a\xe8\x17\x24\x4d\x95\xc6\xc1\x39\x81\xc9\xe9\x41\xc2\x50\xd6\xa4\x8e\x38\x6d\x39\x8f\xd3\x64\x19\x34\x5d\xbb\x4c\x2a\x00\xa7\x71\x64\x65\xbb\xba\x0b\xff\x3f\x55\x58\x69\x17\xf0\xbf\xed\xb1\xdb\xb7\x35\x86\xc7\xe5\x9a\x25\xe3\x55\x99\x6a\xf5\x7e\xcf\x2d\x9d\xab\x48\xea\xd4\x5e\x82\xc6\x30\x9d\xe2\xa0\xe0\xca\x88\x0c\xf2\x63\xa1\xc8\x6a\x8d\xd0\x51\xab\xce\x36\x9a\x40\xd8\x36\x2c\xac\x9c\xaf\x09\x79\x96\x40\xc6\xec\x3a\xb1\x90\x12\x46\x07\x9e\xa0\x2d\xd9\x51\x63\x1f\x1d\x63\xc1\xe6\xd9\xc4\xfe\x6b\xc6\x82\x9b\x4b\xd7\x50\xf1\x28\xba\x9d\x58\x94\xd5\x34\x8a\x75\x34\x09\xc5\xb5\x75\xaf\x12\x75\x9c\xad\xf5\xd3\xa3\xec\x28\xdb\xa3\x88\x5e\xfc\xeb\x82\x7f\xbf\x40\x40\x0f\x8e\x61\x51\x99\xde\x6d\x1d\xe3\xad\x15\x56\x89\x3a\xfe\x1b\x2a\xc1\x4b\x2b\x0b\x88\xa8\x52\x7e\xbc\xc6\xcf\x10\x88\x1f\xa4\xe8\xa4\x9d\xa5\xb0\xbe\xc2\xf7\x1f\x3c\x74\x43\xed\x80\xfc\xff\xa9\x1b\x27\xa4\x26\x94\xac\x4c\x22\x3a\xdc\xfe\x83\x47\x6f\xaa\x1d\xba\xfe\x08\xdc\x3b\x78\x74\x02\x52\xb5\xad\xfb\x20\x55\xeb\x72\x92\xed\xdf\x7f\xe8\xd0\x11\x2a\xe4\xd0\x11\xb9\x7b\xce\xae\x1e\xc5\x25\x92\xa7\x04\x92\x29\x4f\x76\x5d\xa4\xb3\x45\x64\x1b\x3a\x6c\x51\x18\x46\x1c\xf3\x4e\x1c\x16\x52\x82\xe5\x35\x3e\x1d\xe7\x69\x55\x13\xb6\x5b\x0a\x9c\xd4\xd3\xd3\x44\x24\x05\x29\x6d\x70\x6f\x12\x20\xe4\xaa\x3a\x00\x14\xbe\x8e\x49\xfb\x76\x17\xad\xb1\x3d\x49\xda\x10\x90\x78\xbc\x12\x3c\xd3\x80\x8b\x9e\x4d\x1b\x88\xa4\xa2\x29\x4d\x4d\x97\xa1\x0d\xc5\xe5\x68\xd2\x61\x2d\x56\xb0\x2c\x76\x09\x38\xac\x21\xc9\xd6\xfe\x70\xcc\xc6\x0b\xbd\xa4\x9e\xb5\x85\xbc\xf5\xf2\xca\x79\xac\xfe\x53\x7e\x81\x9c\x35\x68\xb5\xd4\xa9\x5b\xaa\xc6\xec\x1d\xbb\x9c\xd9\x0b\xb5\x51\x07\x8e\x79\x6d\xc5\xf2\x9f\xb7\x2b\x58\x30\x3f\x2d\x9b\x24\x2a\x5a\xfa\x46\x3d\x4d\xf2\x22\x0b\x35\x4b\xb3\x2c\x4f\x23\xbf\x62\xf6\xd9\x5c\x37\x29\xa2\xb6\xc0\x60\x09\x17\xc8\x6f\x8c\x7d\xbd\x82\x34\xaf\xf3\x04\x18\xe8\x7f\xb1\x12\xfc\x52\xc5\xbe\x02\x8a\x7a\xae\x62\x61\x50\x39\x21\x28\x47\x85\x70\x09\xc7\xe1\x86\x0a\xd7\x53\xc7\x74\x18\x2a\xe7\xe3\x7c\x56\xca\x05\x85\x6c\x5b\xba\x6b\xe0\x87\x96\x74\x0b\x55\x90\x01\xf0\xc7\xaa\xa0\x2e\x83\xa1\x8a\x15\xc4\xd3\xa6\x53\x94\x53\xff\x28\xe7\x10\x4e\xa9\x11\x69\x10\x40\x52\x93\x6a\xd8\x0f\xd7\xf8\x74\xd2\xd3\x18\xac\xa4\xd2\xe8\x70\x65\x50\x0e\x70\x3f\x41\x3b\x3b\x81\xf7\x36\xc8\x96\x55\x7b\x57\xe9\x6c\x61\xf5\xb6\xd4\xca\xf7\x16\x69\x4c\x89\x79\xb9\xff\x96\x4a\xf0\x61\xcf\xba\x80\xa7\xb1\x4e\x47\x00\x30\xc9\x7e\x60\x61\x86\xf9\x63\x8e\xc6\x13\x70\xe0\x06\x27\x47\x37\x49\x34\x33\x73\x7f\x57\xaa\xcc\xc3\x86\x31\xb2\x41\x70\x07\xa8\xcf\x18\x32\xae\x66\x26\x1d\x1f\x69\xfc\xb0\x3a\xc6\xb3\xa3\xa2\x3b\x8a\x52\x0f\x3b\xba\xc4\x14\x9b\x64\x8f\xdd\x9c\xe4\x5b\x37\xf5\xa2\x77\xed\xd0\x68\xda\x71\x7f\x77\x58\xa4\xed\xa8\xce\xd8\xdb\x18\x3b\x65\xef\x67\xaa\x31\x99\x58\x8e\x20\x56\x96\x0e\xf6\x77\x85\x71\xd4\x00\x30\xa3\x27\x8b\xa5\x56\x9a\xae\x1c\xb7\xa9\xc4\x20\x29\xf0\x8b\xe3\xc1\xb9\xcd\x1f\x73\xcf\xa5\x1b\x3f\x3f\x98\x08\xe5\x6b\x3b\x44\x28\x97\xed\x31\x7a\x8d\xa6\xa6\x7d\x89\x17\xec\x3b\xbd\xb5\xd1\xb0\xa7\xe3\x02\x9b\x63\xb3\xc3\xd5\xa2\x4b\x9b\x46\x3b\xce\xfc\x1d\xae\x59\xd3\xb8\x8b\xde\x0b\xbc\xcd\x1d\x54\xa1\xff\xac\x21\x5c\xa9\x83\x27\x61\x39\x65\x7d\x53\x81\xc5\xd8\x67\xaf\x61\x87\x36\x74\x60\x19\x8a\x2b\xcb\x4d\xe5\xbf\xf6\x9a\xe0\xc5\x63\x83\xef\x71\x91\x74\xdb\xb0
\x09\xe4\x8e\xdf\xc9\x32\x04\x1b\x2a\x00\x3a\x9d\x29\x1b\x30\xc1\x9f\x87\x06\x9c\x19\xf7\x66\xed\x3c\x28\x9b\x95\xfb\x3c\x15\x83\xfc\x5e\x72\x6d\xec\xcb\x4b\x9e\x29\x44\x3d\xdf\xdc\xc7\x05\xcb\xc7\xf2\x73\x0d\x69\xb4\xeb\x33\x96\xbb\xdf\xf9\x19\x10\xa0\x79\x2b\x5d\x9b\x6a\x45\x0d\x45\xa6\x96\x83\x9a\x5d\xa4\xfc\xee\x6e\x54\x5f\x89\x7b\x3c\x16\x05\x38\x2e\x92\x06\xf6\x47\x26\xc2\x5c\x9f\xf0\x29\xb4\xc0\x26\x18\x9f\xd1\x19\xf7\x67\xcf\x2d\xf0\xdb\xcc\x07\xb5\xeb\x03\x91\x44\x30\x21\x3d\x83\x83\xa3\xd3\xf2\x86\xa8\x47\x88\xa9\x1f\x2a\xc8\xb6\x30\xca\x05\x1e\x3f\xa1\xb0\x86\xe8\x74\x8b\x5e\x95\xd7\xc3\xba\xc5\x78\x3f\x95\x89\xd5\xb4\x6e\x79\x94\xea\x69\x96\x89\x7a\x91\x10\xf9\x64\x5d\x64\xb2\x7e\x03\x08\xd1\xf0\x71\x20\x73\x73\x28\xd4\x42\x82\x3f\xa4\x82\xf8\x5a\xd8\x43\xfd\x5b\x4a\xb0\x61\x95\x56\x14\x85\xb3\x33\x34\xb4\x03\x21\x6c\xee\xdd\xc3\xee\xb0\x64\xc6\xad\x97\x19\xff\xc3\x5e\xa2\xc0\x63\xee\xf5\xd8\x89\xad\xfb\x0c\x06\xcf\x16\x08\x3c\xbd\x09\x8c\xbb\xfd\xe6\x7f\x3d\xf0\x3a\xb2\x1a\xf3\xd4\x34\x14\x6a\x8d\xbd\xc3\x40\xc4\xbc\xc9\x63\xb7\x6d\xa3\x3a\x43\x1c\xce\xc1\x5d\xe4\x78\x86\x68\x9d\x38\x46\xb5\x54\x61\x65\xa3\x87\x18\x4e\xe9\xda\x6f\x39\x60\x65\x87\x7d\xeb\x79\x27\x74\x76\x27\x74\xf6\xe1\x84\x19\xe8\x6c\xbe\xc5\x9e\xf1\xef\x98\xd4\xe0\xc9\x83\xd6\x6a\x29\xe2\xc3\xec\xc0\xce\xda\xa2\x9d\x97\x7d\xf2\x2a\x76\xad\xb5\x00\xb3\xa5\xb0\xae\x0f\xe7\xc7\x91\x9a\x72\x2e\x8d\x85\xff\xfa\xab\x82\xef\x79\xd6\x05\xc2\x22\x20\x6a\x70\x0a\x91\x03\xfe\xd8\x30\xc6\x2f\x12\xb0\x3f\xc6\xf3\xa1\x6f\x02\x53\x42\xd1\x76\x61\x65\x43\x41\xba\x82\xec\x4f\xb9\x80\x43\x2e\x8b\xbf\x2d\xa2\x2d\x31\xe3\xd6\x47\xe9\x6a\x39\xde\x7e\xf5\x60\xed\xe0\x0d\xf2\x8f\x66\xb8\x8a\x3e\x18\x68\xc6\xa0\x06\x4f\xad\x1e\xb4\x0b\xc4\x65\x44\x74\x17\x60\xab\x14\x19\x9a\x96\x01\x9c\x1f\xcb\x3e\x74\xa8\x64\x43\x18\x97\x27\xdb\x47\x28\xc2\xb3\x28\x4d\xc0\x57\xba\x5e\x61\xd7\x0f\x15\x6c\x4e\xbf\x4e\xbb\x6f\x06\x5f\xf6\x4a\x57\x28\x48\x4c\x53\xf9\x62\x68\x52\xc9\x7e\xd7\x4a\xd7\xe0\x64\xde\x8d\x62\x75\x5a\x55\xfe\x1f\x58\x15\x56\x33\xc1\xa4\x3e\xe0\x1b\xca\xc2\x9e\x58\xef\xcb\x2d\xce\xe1\x36\x4f\xc2\x65\xb2\x0b\xa3\xeb\x8a\xec\x94\x20\x9f\xf0\x15\x6d\x90\x2f\xd2\x76\xc7\x9c\xa4\x4d\x29\x3b\x62\x75\x47\xac\x3e\x8c\x87\xb1\x96\xa5\x57\x3d\xfd\x72\xe3\xaa\x1f\xbd\x41\x58\x35\x7b\x9e\xa6\xaf\x5f\x0b\x8e\xe0\xe2\x40\x7d\x09\xe0\xae\x75\x74\xf3\xf0\x95\xba\x75\x48\x51\x47\xa2\x98\x62\x2f\x7a\xe9\xe6\x7b\xc8\x69\xff\x49\x3a\x8e\x70\xa8\xa8\xd4\xa7\x32\xab\x7a\x03\xd3\x16\xbe\xb7\x8b\xfd\x98\xa9\x98\xc1\x99\x9f\x94\x5d\x88\x0a\xaf\xee\x50\xe7\x01\xb3\xd1\x74\xa5\xe8\x50\x81\x1e\x16\x2b\x70\xee\xbf\x69\x57\xf0\x94\xe1\xb7\x1d\xc1\x41\xe9\xdc\xc6\xe7\x9d\xdb\x4f\xca\xde\x76\x0b\x2a\x99\x86\x7f\x71\x8c\xfd\x95\xc7\xc6\xe0\x55\xff\x2b\x1e\x7b\xf6\xb0\xae\xbf\xa2\x2d\x04\x37\x5c\xb0\x8a\x15\x76\xb5\xd5\x3a\xbc\x63\x96\x2f\x89\x36\xf8\x20\x0f\xf9\xe2\x14\xbc\xb4\x68\x37\x13\xf7\x08\x3c\x7c\xc2\x16\xb2\x68\xf9\xc4\xa6\x56\x0f\x2e\x82\x93\x4e\x71\x1b\xd7\xd8\x27\x46\xb4\x5e\xfe\xbe\x11\xb6\xf2\xd0\xb4\x18\x15\xf8\x5f\xa9\xd0\x78\x6d\xb7\xd1\xf0\x96\xd3\x6a\xca\x1e\x26\x17\x26\xe0\x8c\x97\xf9\xe1\xcb\xc5\x76\xb2\xa8\x1d\x66\x3d\x23\xda\xa2\xe5\x44\xca\x26\x6b\x6f\x93\xaf\x2d\xea\xaf\x15\x61\x72\x4f\xa8\xc2\x3b\xd4\xa6\x73\x68\xf3\x0f\x0d\xac\xf0\x80\xaf\x85\x49\xaf\x80\xbc\xcf\x94\xe2\xaf\xc3\x64\x0b\x35\x60\x9f\xd9\xd3\x47\xc7\x8f\x59\x9d\xea\x00\xb5\x90\xae\x88\xc4\x09\xe4\x7d\xdd\x9e\xe0\x44\xdf\x55\xe5\xb8\x36\xa1\xbb\xe0\x01\x95\x8f\x71\xb7\x50\x9d\x41\xee\x2c\x9e\x4f\xed\x66\xaf\x1a\x63\xe3\x61\xb7\x01\xc6\x88\xdc\x7f\xc1\x58\xf0\xf5\xd1\x69\xf
5\x13\x2d\xfd\xf4\xcb\xc2\xac\xcd\x79\xbd\x95\xe6\x42\x9f\xdc\xac\x4f\x29\x26\x27\xd4\x47\xda\x9d\xb0\x88\x96\x62\x04\xf5\xc7\xa0\x1a\xf9\xbc\xd5\x0e\x34\xda\xcb\xdf\x35\x3e\x9d\xd8\xb8\xb8\xa0\x4c\xf5\x9c\x2b\x89\x49\xff\xce\x0d\x96\x56\xa9\x44\x38\xee\x86\xa6\x09\xe4\x14\x80\x6f\x80\x75\x86\xee\xd4\xf8\xb4\xda\x15\xfb\x0b\x81\x63\x3f\x3a\x89\x44\x61\x22\x55\x6a\xa6\x5c\x8a\x3b\xc7\x79\xbe\x8a\x16\x30\xa1\xe2\xd9\xac\x96\x0f\xe8\x3e\x1e\x59\x61\xaa\x0a\x9f\x05\xc6\xb3\xaf\xfc\x22\xe5\x22\xc9\xbb\x99\x30\x5c\x5a\x76\x2d\x4d\x50\xb5\xfe\x4c\xb8\x06\xa0\x38\x40\xfb\x66\x3f\x6a\x49\x17\x64\xb1\x2f\x7d\x51\x91\x3e\x02\xcb\xbe\xbe\xa7\xc7\x55\x80\xe2\x12\x14\x59\x57\x04\x55\x6b\x96\x45\x39\xe5\x98\xd9\x08\x06\xba\x4c\xea\x58\x0b\x7d\xd6\x32\xa7\x6c\x10\xe9\xb6\xcc\xae\x76\xbe\xed\xdf\x15\xdc\x3e\xed\x56\xc6\x12\x3f\xd4\x31\x58\xa1\x35\x79\x2c\x29\xd1\xb2\x84\x7c\x25\x49\xd7\x12\xb0\x1b\xb8\x31\xb8\xb7\xb3\x31\x30\xc9\xf9\xb7\x06\x87\x30\xb6\x7a\x68\xc1\x75\x39\xd2\xc9\x3e\xd0\x93\x20\x18\xde\xc5\x81\x5e\x63\xa3\xb2\x78\x3f\xdd\x00\xf7\xa4\x7f\x9d\x9f\xcf\x45\x36\x93\x34\xd3\xe0\x46\xf9\x97\x5a\xce\xea\xea\x40\x7e\x19\xca\x32\x51\xab\x86\x3d\x7f\xcc\x46\x9f\xb4\xd8\x54\xd1\x19\x17\xa5\xc9\x71\xad\xd1\x6b\x44\x19\xff\xb3\xa3\xc1\xd2\xc6\x8f\x94\xe2\x01\x01\xd5\x86\x92\x03\xcc\x7b\xf6\x99\x03\x67\x3e\xf9\xda\xd1\x89\xbc\xee\x8d\x4a\xe5\x6e\xdd\xa3\xed\xca\x11\x3e\x9f\x19\x61\xa7\xf5\x3e\x76\x5b\x70\xbd\x0b\x22\xac\x41\x76\xaa\x0a\xba\x61\x01\x92\x3b\x4e\x85\x71\x2e\xaa\xfc\x7c\x02\x43\xea\xf8\xc7\x6f\x61\xf0\x35\xff\x68\x30\xb1\x40\x81\x4a\x43\xaa\xaa\x0b\x77\xde\x7f\xad\xc7\xfc\x38\xcc\x8b\x85\x2c\x04\x58\xd1\x34\x59\x88\xda\xc2\xff\x89\x4b\x87\x0e\x3d\xb1\xa0\x80\x0c\x0a\x85\x21\xaa\x3f\xcd\x0b\xfd\x19\x05\xd4\x21\x1b\x4a\xdb\x2b\xec\x2d\xb0\xa9\xd4\xd8\x53\x0c\xe6\xea\xd9\x60\x7a\x9a\xb7\xba\xed\x30\x81\xc0\x04\x80\x9a\xa7\x7b\x6a\xe6\x1a\x38\xd6\xdc\xb2\xf1\x99\x8f\x39\x4d\x3e\xc5\x76\xa1\x1d\xd8\xbf\x39\x98\x42\x0b\x38\x58\x85\x55\xf4\x93\xae\xed\xbe\x9c\xda\x31\xb8\x9c\xaf\x56\xd8\xbe\x01\x93\x10\x1d\x01\x67\xc2\xce\x1d\xa2\xa7\x1d\xf7\x1f\xae\x04\x3f\x8c\x3f\x72\xa2\xa6\xa4\xa4\x57\xfd\x74\x6d\xdd\x1b\x59\x11\x3d\x17\xc4\xaf\xc2\x7e\x94\xc9\xab\xfe\x0f\x07\xfe\x02\xf1\x6c\x02\x7f\xab\x2c\xc9\xa9\xcc\x0b\x3c\xa2\x51\xff\x8f\x41\x6a\xc3\xc2\x93\xed\xa2\xd8\x36\x03\x63\xba\x2a\x32\x29\x44\xa7\x08\x3f\x66\x52\x2e\xc6\x49\x82\xe4\x99\x02\x1f\xc2\xd4\xb5\xf0\x8f\x5d\x8d\x39\x8b\x7d\xf2\x54\x70\xe3\x20\xf6\x49\xdd\x62\x8b\x80\x72\x43\xfe\xc9\x5f\xdb\xcd\xaa\x1b\xf5\xf3\xd9\xb4\x21\xf0\x07\x31\x00\xbc\x76\x77\x10\x0f\xbd\xeb\x86\x62\x3a\xb1\xe1\xa9\xb1\xf4\xd8\x23\x83\x46\x1f\x8c\x1f\x53\xe1\x83\x6a\xae\xc8\xd2\x6b\xeb\xde\xb8\xf6\xa9\xac\x7b\x30\x0c\xeb\xde\x23\x65\xe7\xc6\xa2\xc0\x62\xee\x28\x8d\xec\xdf\x8c\xb1\x0f\x7b\xac\xef\x19\xff\xe7\xbc\xe0\x75\xde\x1d\xa5\xab\xbc\x21\xea\x71\x98\x69\x22\x0a\xd9\x61\xfd\x44\xae\xa6\xbe\x60\xf2\x87\x00\x45\xad\xeb\x39\x45\x92\x93\x4a\x1e\xc8\xbb\x70\x40\x2d\xa5\x92\x6a\xbc\x8c\x08\x83\x8b\xea\x61\xae\x4e\x20\x34\xcc\x1d\x9a\x6c\xad\xe0\x69\x67\x2d\x1a\x58\x7d\xba\x4c\xfa\x67\xa0\x53\xc5\x72\xf2\xea\xe6\x5f\xfc\x09\x66\xfa\xd8\xcf\x02\x88\x44\x42\x86\xd1\x41\xdf\xc6\x3b\x57\xb4\x02\xbf\xe0\xb1\x47\x28\xa5\x58\xd9\x86\xde\xec\x05\x2f\xd7\x29\x1a\xca\x40\x54\xae\x4f\xf9\xfe\x36\x6a\xd5\x4c\xb3\xa5\xa8\xd1\x90\x2a\x47\x82\x53\x4d\x2e\x28\xb4\xfc\xd8\x35\xc6\x5b\xa8\xbf\x38\x34\x0a\x1e\x1b\xe9\x46\x0d\xa0\x6c\x3d\x3f\x73\xa2\xaf\x6a\x16\x69\xc3\x43\x51\x9d\x2f\x8f\xb3\xfd\x83\x90\xeb\x10\xce\xfb\xdc\xbc\x43\xe3\xf1\xbe\xf1\xe0\x90\xc3\xe2\xa1\x1f\xe3\x1d\xd9\x93\x
39\x70\x3a\x10\x78\xb7\x3e\x5e\x39\x8b\xec\x5b\x7b\xd8\x27\x3d\xb6\xab\x99\x03\xbc\xd2\x07\xbc\xe0\x1d\xde\xa9\x28\x16\xe8\xc2\xc3\xb8\xde\x22\x45\x3a\x10\x8b\xe6\x1a\x29\xb0\xad\x67\x4c\xb0\x1d\xa9\xfe\x40\xba\x44\x28\xde\xc9\xb2\xe6\x3b\x3d\x79\xa1\xc6\x03\x71\xa1\x38\x12\x54\x79\x70\xa1\x99\xcb\x7f\x92\xa2\x99\x07\x08\x82\x43\x8c\x68\x60\xb5\xca\x74\xe8\x2b\xbe\x40\x59\xdd\x0a\x51\xca\xe9\xb5\x0b\x6c\x8f\xdc\xf6\xce\x25\x71\xcf\x8f\x83\x67\xda\x09\xe2\x4d\xa9\x17\xf0\xfd\xf2\xf6\xd4\x5a\x16\x15\x62\x02\x59\x57\x21\x49\x15\x94\x5a\xb0\x77\x02\xd6\x0a\x1a\xd9\xd4\x4d\xa2\xb8\x96\xa3\x85\xbd\x7e\x46\xf6\x42\xee\x2a\x8a\x9f\xf7\xd8\x38\x12\xef\xce\x89\xa6\xff\x29\x6f\x03\xa3\xb1\x41\xb7\xab\x87\x71\x39\x04\xef\x45\xde\xbc\x2a\xa5\x1c\x2b\x87\xdc\xc1\x14\xf7\x2e\x85\x6a\xba\x24\x85\xb3\x32\x0a\x9a\x31\x97\x3a\x74\x3d\x13\x70\x9e\x08\xe3\xbc\xc6\x75\xe8\xb5\x81\xe1\x52\xc1\xd7\x44\x0e\xae\x11\x71\x8a\x42\x6a\xff\xa2\x51\x63\x2f\xf5\x18\xc3\x29\x03\x91\x8c\xcf\x0d\x3a\x77\xe9\x5f\x6a\x75\x80\xa6\x31\xa9\x35\x0d\x5b\x94\x99\xea\x10\xbf\x3c\xa7\xde\x43\x9f\x3a\x1c\x00\x21\x48\x8e\x18\x4f\x06\xb8\xdc\xed\x91\xfd\xc2\x08\x7b\x84\xa9\x0d\x8a\xb6\x8f\x8d\x04\xef\x1d\xb9\xcb\xbd\xd8\x17\x60\x98\x76\x74\x95\x68\x01\xd0\x97\x74\x05\x55\xff\x58\x4c\xcc\x0e\x17\xb6\xb6\xa4\xcf\x96\xf8\x9a\xed\x34\x01\xc8\x12\x50\xc1\xda\x79\xf9\x54\x03\x1d\x23\xeb\x42\x30\xa5\x4b\x82\xb7\x23\x79\x9c\x20\xc5\xdd\xae\x0e\xee\x97\xd1\x72\x0b\x58\x07\x93\x42\x2c\x67\xda\xe5\x5f\x70\x6b\x0c\xd0\xbc\xa0\xb1\x3f\xa4\x12\x92\x45\x0d\xe1\xc4\xd6\x53\x06\x4b\x37\xc3\xb7\x8b\x94\x07\x74\x0b\xd6\x51\x2f\xed\xea\x68\x56\x8c\x66\xd4\xad\xcb\x07\xf4\xd3\x59\x73\xd3\x4e\x00\xef\x64\x62\x12\x82\x10\xfb\xdb\xa2\xba\x88\x28\xf3\x9d\x21\xfd\xa3\x71\x27\x80\xb4\x84\x92\x74\x72\x35\x82\x13\xbc\xff\xfe\xf1\xe0\x77\x3c\xf5\x8b\x8b\xd5\x08\xf5\xc2\x4e\x4a\x4a\xb1\x54\x8b\x20\xac\x52\xa5\xf5\x15\xa9\x75\xca\x90\xa2\x84\x4e\xf9\x79\xd8\x14\xc0\x25\xa2\x03\x4d\x4d\x9e\x67\xe8\xd8\x71\xc0\xbf\x25\x87\xd4\x00\xa1\x41\x9e\x89\x5c\x78\x98\xe3\x49\x15\xa1\x3d\x8c\x5a\x27\x65\xde\xec\xb9\xf9\x05\x1a\xe4\x5a\xad\x36\xd5\x49\x1b\xf9\xd4\xcd\xb2\xae\xb2\x63\x6f\x99\x52\x6f\x95\x8c\x94\xdf\xd9\xb5\xe3\x4a\xb9\x5c\x57\x4a\xc6\xae\xc6\x44\x3a\x0c\xb8\xce\xfd\x70\x38\x6d\xea\x06\xe7\xb2\x13\x76\x19\xc1\xbf\x77\x7e\xaa\x30\x1f\x75\xc4\xde\x71\xdf\x5c\x86\xfb\xe6\x79\x76\x2c\x5d\x71\xb9\xfe\x9b\xc3\xe6\xef\x92\x61\x02\x42\x9d\x01\x68\x30\xd7\x11\x2c\x51\x5d\x76\xc3\x45\xef\x29\x9b\x3b\x55\x8e\xfa\x47\x86\xc1\x3f\x29\xa1\x34\xd0\x7d\xf2\xd6\xab\x1c\xd4\x3b\xc3\xc3\x82\xe2\xad\x8f\x9b\xeb\x5b\xff\xcc\xb8\xb9\x7e\x6f\x7c\x47\x64\xed\x78\x7f\x1f\x3e\xf1\xf1\x2f\x90\x18\xee\x01\x15\xdb\xf7\xd6\x0d\x9a\xb3\xa9\x24\x79\x68\xd8\xe1\x7e\xdf\x84\xfe\xfd\xa6\xc7\x6e\xbd\xf4\xda\x7e\x7f\x51\xc4\x5d\xf4\xa2\xcd\xb7\x84\x53\xfe\x89\xcb\x63\xe9\xa2\x2d\xe2\xab\x3e\x3b\x7f\xa9\xde\xd8\x92\x23\xf6\x84\xee\x7d\x48\xe5\x7f\xa9\x1f\xcc\x6e\xf4\x40\x29\xde\x88\x22\x26\xd7\x42\x42\xbc\x8f\x32\x27\x3d\x12\xa1\x64\xd6\xbd\x31\x68\xf3\xba\x37\x06\x5b\xc3\xba\x37\x06\xa7\xab\x75\x6f\x0f\x35\xcd\x35\xdf\xbf\xe8\x51\xec\xed\x1e\x80\xe1\xd3\x6d\xff\x75\x1e\x7b\xd6\x95\xf1\x45\x97\x5a\x7f\x5c\x7f\x23\x38\x6c\xbe\xa7\x37\x20\xeb\x12\x9d\xe5\x0d\x9d\xc2\xf1\xb9\x13\x35\xf6\x55\x8f\x61\xdb\xfc\x3f\xf2\x82\xcf\x79\xc8\xc3\x4c\x47\x5c\x79\x98\xd6\xc4\xcc\x74\xb4\x02\x10\xec\x92\x4b\x18\x03\xc4\x4b\x17\x71\x37\xa5\xb0\xb3\x6e\xd2\x10\x19\x5f\x04\x1e\xae\xa9\x9b\xa1\xcc\x5b\xa6\x6a\xb5\xda\x22\x19\x54\x10\x6c\xb8\x68\xb9\xe7\xe8\x61\xc3\xc8\xf7\x6b\x36\xee\xac\xcd\x17\x6f\x86\x31\xa9\x75\xe2\x6e\x16\
xc6\xb7\xd4\xa8\xf8\xc5\x09\xe7\xc4\xf5\x7e\x8f\xe1\xd8\xf9\xef\xf2\x58\xe3\x41\x19\x0a\x53\x41\x38\x2c\x06\xb7\xe1\x69\x3f\x27\xe3\x36\x39\xa3\x71\x6a\xc9\x6d\x59\x2e\x17\xb2\x08\x68\x9f\x42\xa9\x63\xd9\x03\xa3\xec\x07\x60\x63\xcb\x56\x05\xf9\x75\x4e\x01\x98\x82\xff\xf2\xd1\xe0\x79\xa3\x03\x6f\x95\xfd\x74\x24\xd4\x09\x84\xc1\x25\xf2\x30\xba\x11\x75\xe9\xb9\x8e\x48\xc0\x11\x89\xba\x8a\x89\xa9\x57\x9f\x22\xf8\x1f\x32\xae\xd1\x79\x4f\x89\x02\x6e\x74\xa2\x2a\xb4\xaf\x6a\xe8\x65\xa1\xc9\x89\x05\x07\x11\x01\xe6\x9a\xf5\x00\x20\x99\xac\x85\xbd\xdc\x7c\xac\x6c\x5b\x6c\x38\x61\x92\x3a\x3a\x52\xd9\xa9\x16\x2f\x4c\xaa\x57\x27\xbb\xd8\x27\x93\xf8\xb5\x45\x85\x81\x26\xdf\x5b\x04\x87\xb5\x5a\xba\x4f\xbb\xee\x19\x35\x6c\x6e\x2d\xc5\xd6\xdf\x75\x78\x1e\x7e\x2f\xd6\x00\x21\x60\x03\x3f\x48\x11\xe6\x2b\xf9\x14\x42\xb2\xd8\xa2\x33\xec\x44\x53\x38\x98\x06\x60\xa8\x7c\x61\xd2\x9e\x83\xd7\x76\xb2\x6e\x12\x25\xcb\x20\x6e\xbb\xb9\x6a\x86\xbc\xe2\x36\x04\xe6\x0a\x39\xb1\x5c\x3b\xdc\x1b\x21\xd6\x27\xed\x08\xff\x95\x5e\x70\x9f\x87\xf6\x1f\x33\x17\x6c\xe7\xca\x90\x85\x0c\x07\x7a\x8c\x8d\x9a\x94\xfb\x87\xd6\x83\x27\xa1\xb0\x46\x8d\x4f\x53\x42\x3e\xd9\xce\xe4\x90\x2d\x52\x34\xd5\x22\x8c\xf0\xa2\x36\x94\x34\x16\x9d\x05\xf8\xed\x5d\x4c\x0b\x4b\xff\x6b\xbb\x82\xbf\x18\x53\xbf\xfa\xf0\x8a\xe2\x58\x01\xc7\xe0\xfd\xcd\xa4\x8f\xd2\xba\x8d\x75\x4d\xa5\x11\xcb\x9d\xb1\x4b\xe8\x18\x80\x62\x24\x47\x1f\x57\x00\x4d\x66\xfd\x11\xf9\x9a\xac\x01\xce\x2c\xf9\x7d\xc5\x24\xd8\x83\x90\x01\x4b\x95\x57\x4a\x6a\x94\xf3\x00\x92\xa3\xe3\x68\x45\x04\x00\xbc\x0e\x46\x9f\x1c\x90\xc3\x97\x52\x20\xef\x4e\xec\x67\x4a\x25\xe4\x55\x6b\x39\x42\xfd\x44\x83\xc7\xe2\x42\x54\x4f\x97\xb3\xb0\xd3\x42\x2c\xf3\x1a\x0f\xee\xe8\x2b\x21\x47\x1a\x70\xe5\xc4\x0f\x56\x03\x8a\x7b\x45\xaa\x08\x1a\x26\x88\x3e\x26\x98\x06\xc2\x01\x79\x76\x9a\xa9\x32\x26\xe8\x15\xe5\x8a\x8b\x29\xd9\x00\x5b\x17\x00\xca\x42\x20\xe7\x41\x20\x77\xed\x80\x32\x75\x30\x9c\xc7\x29\x34\x4a\xac\x42\x15\x8b\x3b\x88\x7f\x34\x88\x37\xa3\x2c\x87\x50\xe8\x27\x4e\xf3\x5b\x00\x82\x94\xdf\x42\x20\x0e\xfb\x31\xaa\xe2\x89\xd3\xa8\xfe\xab\xfe\x81\x66\x25\x29\xcf\xbb\x4d\x20\x0c\x20\x74\x31\x78\x35\xcd\xf0\xdd\x09\x05\xaa\x86\xd1\x36\x10\x58\x42\x14\x99\x56\x23\xa9\x8d\x4e\x1d\x21\x9e\x86\x28\xff\x55\x1d\xd5\xec\x53\xfd\x2b\x95\x95\x03\x55\xbe\x7a\xa8\xca\x57\x0f\xca\xff\x81\xea\x02\xbf\x0e\xc8\xbf\x0e\x57\xf9\xea\x61\xd0\x66\xe4\xa5\x43\x50\x23\x7c\x0e\xfe\x3c\x54\xe5\xcd\x34\x3d\x88\xff\x3d\xe0\x04\x74\x3c\x9b\xb5\x58\xf3\x41\xde\x81\x68\x49\xb0\x2f\x8e\xba\xe0\x88\xc3\x12\x27\x89\x04\x48\x1b\xe3\xfd\x37\x8f\x06\x8f\x2b\x5f\x54\x91\x9f\x96\xff\xb3\x48\x39\x3d\x55\x8b\xc5\x72\x58\xef\x91\x36\x38\xc0\xd7\xe9\xc2\xc7\x8f\xb0\x27\x90\x73\xf0\xc6\xe0\xb1\x8b\xf2\x8f\x45\x25\x07\x6c\x15\x40\x13\xfd\xcd\x91\xe3\xc8\x16\x29\xe7\x6c\x67\xdf\x6d\xc1\xf5\x8b\xfa\x97\x53\x96\xe3\xe5\xdb\xb0\xc0\x26\xf1\x90\x3c\x33\xb8\x73\x51\xfe\xb1\x58\x0e\x5b\xd7\x44\x16\xb8\x6c\x0d\x6e\x0b\xf2\x30\x84\x49\xcf\xb6\x9a\x52\x86\xbb\xc5\x96\x44\xdf\xf9\xa4\xc7\x46\x3b\x69\x56\xf8\x1f\xd2\x20\x45\x6f\xf3\x66\x9a\xb6\x93\x02\xcd\x37\x59\x61\xa7\xd0\x45\x2a\x1c\x52\x01\x29\xac\x61\x4e\xa1\x86\x03\x96\x1f\x3d\x72\xe4\x30\x6c\x11\x4b\x61\x7d\x65\x4d\x9e\xe9\x54\xc0\x15\xd0\x06\xd4\xf8\xa2\x2c\x75\xd1\xda\xd8\x43\x8a\x52\xea\x18\xb6\x7e\xbe\xff\xe0\xe4\xd1\xeb\xaf\x3f\x7c\x7d\x95\x47\x89\xdc\x10\xa2\x55\x31\x51\x02\xd5\xdd\xcd\x36\x06\x2a\x01\xb0\xff\x9c\x00\x67\xd0\x57\xf7\x92\xdd\xc1\x57\xbd\xf2\x55\x6b\x87\xa2\xf8\x7f\x8c\x1c\x05\x0a\x10\x82\x55\x21\x95\x1d\x0c\x58\xf2\x40\xd3\x49\xb5\xbe\xa2\x99\x8f\xe0\x25\x02\x38\xd9\x2f\x3b\x80\xd6\x77
\x15\xe3\x32\x28\x43\x6a\xb2\x93\xa5\x72\xaf\x16\x8d\xc9\x8e\xc8\x26\x73\x51\x4f\x93\xc6\x84\x85\x10\x6e\xf9\x83\x10\x81\x47\xee\x23\xcb\xb8\x73\x2e\x89\xa6\x3c\x0e\xa3\x25\x0d\xc5\x8d\x81\x4b\xb1\xe1\x8d\x6a\xeb\x1e\xc3\xaa\x9f\x05\x27\xbf\x8f\x37\xa7\xb1\xc4\xbb\xfa\x70\xc6\xff\xd7\x28\xbb\x83\x59\x6f\xf8\x8f\x0f\x0e\x98\x5f\x83\xd6\x85\x02\xac\x49\x38\xcc\xb6\x28\x4d\xec\xf9\xf5\xc7\x15\xb6\x47\x21\x2b\xf8\x9f\xaf\x5c\x92\x31\xf8\x74\xb8\x24\x62\x15\x98\x12\xbc\xa4\x92\x5b\x50\x0c\x66\x97\x98\x14\x49\x3d\x6d\x48\x11\x0f\xf8\x81\x40\x0f\xa6\x8c\x09\x16\xa6\x08\x00\x4f\x68\xb0\x07\xad\xee\x2e\x47\xab\x52\x34\x63\x5b\x20\xe2\x15\xf2\x37\x90\xc0\xa4\x13\xe6\x04\x35\x1b\x26\x04\x5d\x0a\x6b\xb0\x13\x66\x61\x5b\x14\x22\x53\x3d\xaf\x91\x69\x30\xe2\x4f\x83\x35\x29\xfc\x2c\xf3\x00\x79\xa1\x14\x33\x8f\xfc\xd6\xb3\xbb\x14\x98\x67\xf5\xb7\x03\x75\x5b\xa4\x7c\x39\x84\xe1\xa7\x62\x6a\xec\xd3\x1e\x1b\x30\xa2\xfe\xbb\xb7\x93\x14\x5d\xd3\xea\xcb\x9d\xdd\x10\x8e\xf1\x41\xdc\x5f\xa6\xea\x6c\x07\x3b\x8b\xa6\x80\x42\x88\x72\x67\x84\x05\xe9\x95\x89\x58\xac\x86\x09\x41\x6a\xef\x87\xf0\x94\xbb\xe9\x63\x13\xec\x57\x1f\x65\x53\x5b\x6d\x21\x69\xdb\x7f\xf5\xa3\x82\x9f\xf5\xd4\x2f\xeb\x18\xad\x15\x78\x0c\x0a\xed\x25\x45\x4b\x14\x51\xdd\x3a\x09\x42\xd8\x4e\x2b\x04\xe4\xd9\x44\x41\x62\x20\x24\x09\x40\x72\x85\x59\x94\x76\x73\xcd\x5e\x86\xd6\x58\xe4\xe2\x21\xc5\xb4\x1d\x12\x04\x78\xaa\x80\x95\xd3\x26\x7f\x8e\x31\x62\x55\xb9\xaa\xd8\x73\x5d\xf7\xd2\x9f\x3e\x82\x7d\x65\x94\xed\xa9\xa7\x49\x11\x25\x5d\xe1\xff\xc1\x68\xf0\x99\x51\xf5\x4b\x39\x35\x72\xa1\xb9\x78\x10\x6e\x51\x14\x00\x22\xd1\x8e\xb4\xfc\x35\x58\x57\x80\x6b\xa0\x83\x54\xab\x7d\x49\x99\x14\x19\x49\xb3\xb1\x15\xe6\x38\x1b\xf1\x88\xa3\x61\xa1\x2c\x4e\x82\x28\xe7\x69\x27\xbc\x9b\x50\xd1\xa8\x4a\x6a\xee\x45\x79\x0e\x37\x50\xeb\x72\x36\x17\xc7\x90\x19\x16\x4a\xa3\x85\x5d\x07\xb4\x19\x88\x54\x2a\xb2\x48\x28\xe8\x74\x71\xa1\xd0\x19\xa3\xaa\x26\xa6\xbb\x8f\x63\xaf\x20\xe0\x0c\x20\x62\x63\xf0\x04\x66\xe8\x1b\x5c\x23\x82\xf5\x14\xaa\xcb\xa8\xa5\x75\x27\x68\x48\xb6\x1b\x83\xc1\x1b\x5c\x2d\x48\x08\x01\x0f\x79\x53\xac\x49\x75\xac\x0b\x22\x5f\x8e\x29\xae\x74\x95\x66\xee\x86\xc1\xe0\xc9\x4f\x87\x04\xc3\xd9\xd3\x02\x9f\xd1\x23\x89\x5d\xa9\x56\xae\x66\x02\x52\x3d\x45\x3d\x4d\x66\x03\x50\x47\x31\xf6\x29\x17\x55\xde\x4d\x00\xb3\xaf\x97\x76\xb1\x3e\x84\x3e\x4f\x5d\x89\x21\xae\x18\x87\xa7\x90\x82\x29\xae\xd0\xd9\xd4\xbf\x34\xc6\xfc\x4c\xb4\x31\x2c\x61\xa6\x10\xed\xe3\x69\x37\x29\xfc\xdf\x18\x83\x2d\xfe\xe8\x91\xe0\x17\xc7\xfa\x6f\x6b\xd9\xae\xa7\x17\x10\x51\xde\xdd\x05\x8d\x02\x66\x1a\x55\x1a\x46\xc1\x3d\xbc\xe3\x5a\x52\x67\x77\x35\xea\xaa\x5d\xfa\xd4\x42\x57\x71\xe6\x28\x8a\x9f\x06\xc9\x64\x29\x8d\x09\x26\x1c\x85\x73\x6e\xe5\xcf\xd9\x00\x6f\x54\x73\x55\xa9\x9c\xd3\xb1\x54\x47\x91\x53\x90\x34\x8d\x40\x2c\x9a\x05\xca\x58\x78\x20\x6d\x47\x45\x61\x32\xfa\x73\x91\x45\xa1\xc6\xb4\x73\x2a\x1a\xe5\x5c\x03\x09\xec\x17\x11\x6d\xbc\xe8\x80\x36\xcc\x56\xf5\x56\x37\x59\xa1\x74\x4a\xc3\x45\x6a\xa8\x05\x20\x20\x13\x1e\x9a\x30\xed\xc9\x04\x75\x1d\x4e\xc7\x72\x9b\xb0\x21\xda\xda\x70\x49\x2d\x51\xae\x87\x34\x6e\xa8\x9c\x87\xd5\x83\xb5\x83\xd7\xab\x78\x01\x40\xf8\xd3\xdf\xc0\x29\x0f\xbe\x2d\x39\x8c\xb2\x11\x3a\xce\x6a\xd0\x4c\xb9\x4e\x6e\xf5\x6d\x08\x27\xba\x0e\x97\x5e\x74\x0f\x05\x1e\x1b\x0e\xed\x1a\x3f\x4e\x6c\xf8\xa4\xe3\xc9\xef\x66\x02\xc4\xe6\xb0\xb2\x31\x2e\x03\x44\x43\x46\x3f\x80\xd9\xcd\xd5\xf9\xde\x3e\xd2\x1f\xdb\xf6\xca\x91\xe0\xbe\x91\x79\x0b\xf1\x5f\xc7\xf2\xdb\xd8\xe6\xfb\x72\xcb\x31\x65\x07\xb8\x19\x07\x8e\x9d\x49\xab\xe0\x14\x6c\x5a\x7f\x0d\x08\x06\x22\x40\x79\xec\x60\xbd\x92\x9c\xa9\x7
1\xdc\x33\x55\x90\x68\x41\xa1\x09\xa1\x96\xaf\x56\x91\x80\x4f\x8c\x1a\x46\x37\x69\xa7\x0d\x34\x82\x01\x5c\x1b\x89\x0c\x3a\x7b\xf5\x5b\xdd\x55\x20\xd7\x9c\x08\x1b\x93\x72\x3f\x7a\xd0\xdd\x24\xf5\x34\x41\x3d\xb7\xde\x9b\xa4\x50\xed\xc9\x30\x69\x4c\x6a\x21\x5d\xef\xd9\x92\xe8\x1b\x1e\xa8\x7f\xcd\xd3\x51\xb2\xe2\xff\x89\x17\x7c\xde\x53\xbf\xf0\x90\x2d\x4f\x31\x25\xd7\x9a\x1e\x87\x2d\xb5\x97\xb1\x13\x27\x67\xe7\x4e\x1e\x9f\x5e\x38\x79\xc2\xf6\x90\xa0\xf5\xa3\x48\x3b\xbc\x93\xa5\x9d\x70\x39\x34\xa5\x93\x05\x0f\xa8\xc4\x0e\x80\x6a\x02\x78\x9d\x8e\xec\x90\x4a\x5f\x1c\x26\x89\x30\x00\x5d\xed\x74\xd5\x30\x90\xa9\xd7\x1c\xb1\xfb\x9d\xbd\xec\xdf\x0e\x88\x30\x3b\x9b\x36\x84\xff\x87\x7b\x83\x08\x90\xe7\xa0\xd9\x6b\x69\xb6\x22\x10\x5d\x4d\x96\x68\xea\x5d\xe3\x27\xe5\xa9\x02\x6e\x40\x13\x10\x67\x59\x85\x67\xf5\xa7\xd9\x20\x28\xc7\x7e\x70\xa6\x44\x09\x17\x45\xbd\x31\xe1\xaa\x1d\x2f\x67\x3b\x2e\xe2\x1d\x17\xf1\x0e\xf5\xd2\x15\x75\x12\xff\xb2\x72\x12\x7f\xd0\x63\x13\x9b\xc6\x98\xca\x85\x0f\x0e\xe1\x17\x7b\xf3\x65\x7e\x41\x1b\xdd\x9a\xd0\x24\x1f\x94\xaa\xcb\xda\x82\xa4\xc6\xa3\x0d\xfb\x7b\xe3\x3a\xfe\x4b\x6f\x0b\x78\x7b\xd0\x06\x74\x13\x7f\xdc\x3b\x93\x82\x1e\x57\x47\x8e\xdb\x74\x89\xf4\x7d\x97\xf3\x19\xdb\xf2\x7d\xb0\x67\x95\x9a\x7e\xd1\x3b\xbd\xb9\x77\x79\xc2\xdf\x37\x88\xe3\x5c\xf6\x42\x99\xf5\xe5\x1f\xaf\x1a\x98\xed\x31\x77\xdb\x89\x59\x1d\xf6\xed\x04\x8a\x7f\xf6\xaa\xe0\xc5\x9e\x13\x29\x3e\x17\x36\xd2\x9c\xdf\x16\xa7\xf5\x15\x7e\x42\x80\x75\xad\x8d\xda\x10\x90\x98\x84\x79\xa1\xfc\x12\x08\xbe\x84\x93\x05\x70\x8a\xe7\x6e\x3b\x41\xb1\x03\x1a\xa7\x93\xa7\x6b\x89\xc8\xf2\x56\xd4\x21\x84\x09\xf0\xdd\x4b\xd9\x3a\x7f\xf2\x74\x94\x74\x2f\xc8\xdd\x4b\xaa\xdc\x51\xb2\x5c\x5b\xf7\xf6\xb4\xd3\x24\x92\x5d\xba\xee\x8d\x45\xed\x70\xd9\x35\x08\xfd\xf4\x5e\xf6\x3c\xb0\x11\xa6\xb1\x7f\x21\x58\x81\x83\x11\xd4\x56\x5e\x01\x43\x90\xb1\xf9\xc9\x6d\x62\xa9\x31\x70\x3c\xc9\x04\x96\x6b\xa8\x10\xac\xf2\x54\xb6\xd4\x98\x9a\x3b\x39\x7d\xe2\xcc\x49\x39\x52\xad\x74\x6d\xb2\x48\x27\xbb\xb9\x98\x8c\x8a\x92\xcf\xd4\xc4\x94\xbf\xc3\x0b\x5e\xef\x5d\x5e\xd4\x38\xef\x0b\x4a\xbf\x72\xb5\xb6\xfc\x60\xdf\x71\xe2\xd1\xbf\xe6\x6d\x01\x0d\x56\x07\x9e\x53\x28\xfa\x2f\x58\xa1\xe8\x51\xee\x58\xde\x4a\xb9\xc3\x14\x9a\xde\x4c\x33\x39\x29\xce\xe7\x52\x63\x9c\x69\x9a\x4c\x44\x15\x9a\x0c\xf9\x4b\x19\x41\xbd\xe8\x81\x4b\xa2\xf8\x4a\x75\x01\x7b\xbe\x47\x69\x96\xf7\x04\x6d\x33\x61\xc0\x9e\xd1\x37\x61\xc2\x46\x3b\x4a\x1e\x8c\x29\xf3\xbe\x8a\x4e\xa4\x78\x67\x25\x58\xaf\x94\x13\x29\xdc\x70\x74\x58\x68\xf2\xd8\xbd\x16\xe2\x21\x84\x92\x2c\x16\xa2\xce\x31\x7e\xb2\x94\xdc\x5b\xce\xb7\x88\xf2\x6d\xa4\x5c\x60\xa3\x8e\x5d\xa9\xc4\x8b\xed\x66\xc9\x91\x7f\x5c\xf5\xe6\xb5\xd9\x52\xc3\x8d\xdf\x45\x19\xe0\x47\xc1\xd3\xcd\xd0\xc1\x25\x1a\xbb\x2b\x3f\x52\xef\xf6\xd8\x6e\x9a\x92\xfe\x4f\x7b\xc1\x2b\xbd\x3b\xf0\x87\x3a\x41\x23\xeb\x71\x0a\x59\x64\x70\xdd\x99\xe2\xd6\x5c\x9a\x12\x45\x7d\xaa\x2e\x3a\xad\x29\x3d\xc3\xaf\x7c\x6d\x7f\x4a\xea\x33\x24\x31\xfd\x7b\xbd\x40\x4c\x5b\x27\x5e\x39\xab\x8e\x8b\x4e\x8b\xab\x27\xae\x5c\x0d\x86\xa6\x61\x7f\x73\x84\x3d\xc6\x12\x24\x62\x15\xf4\x4a\x13\xc1\x2f\x92\x62\x5e\x64\x91\xc8\xfd\x4f\x8d\x04\x77\x5b\xbf\x35\x5b\x8e\x9d\x53\x08\x82\x04\xee\xa6\xc0\x2e\x0e\xe0\xdb\x14\xab\xa5\x4f\xd6\x6b\x61\x3e\x15\xc9\x23\x6f\xa7\x23\x12\xc2\x8d\x2e\xa2\xa4\x9b\x76\xf3\x18\xf9\xe9\xf2\x54\x2e\xaa\xa8\x2d\x6a\xeb\xde\x18\x20\xad\xaf\x7b\x8f\x94\x1b\xd9\x39\x52\x16\x16\xa2\x92\x1f\xee\xe7\x2a\x2c\x61\xf8\xa8\x2f\x94\x2f\x6a\xb6\x3e\xd8\x34\x45\xfc\x58\x90\x83\xaf\x4c\x4e\x54\x6d\x20\xf6\x36\x96\x97\x96\x08\xb3\x62\x09\xd8\xbb\x65\x6d\x1c\x2b\xc2\x2f\x78\xac\x
\xf7\xcb\x6f\x21\x96\x61\x33\x8c\x00\x15\x2b\x2f\xc2\xac\x5c\xc7\xa8\xa9\x8a\x50\xf3\x58\x14\x3c\xcd\x30\x9c\xaf\x2a\xf7\x38\xd8\xc2\xa8\x4e\x3a\x88\x6a\x49\xa8\x84\xd5\xef\x53\x69\xee\x06\x3b\xfe\xb8\x94\xdc\xf5\x7a\xda\xee\xcc\x66\xa9\x54\xc1\xfc\x78\x2b\x82\xdb\x79\x25\xb8\x09\xd5\x3a\xb8\xa6\xd7\x37\x6e\x7e\x4a\x4f\x74\x55\x53\x30\x15\x48\xed\x94\x7d\xdd\x63\xbe\xad\xdd\x52\x4a\xf8\x17\xbc\xe0\xdd\x56\xd8\xb2\x4a\xe4\x36\x92\xa6\xd0\xc0\x59\xb4\xb4\x60\x8c\xa3\xa4\x24\xbc\xab\xfd\xa1\xf4\xc2\x11\x38\x9d\x2c\x6a\x87\x59\x4f\xae\x65\x1c\x09\x47\x5e\x25\xa9\xfa\xb4\x63\x9a\x45\xa8\x8c\x9e\xb3\x20\xcc\xe1\xef\x07\xd8\xbf\x22\x39\xe1\x2c\x93\xff\xe6\xb1\xdd\x79\x2f\xaf\x17\x71\xee\xff\xaa\x17\xbc\xc4\x9b\xc7\x1f\x70\xde\xb1\x4c\x78\xda\x46\xd9\xe0\xf4\x38\x5a\xa2\x55\x78\x1a\x28\xf5\xb3\x3a\x5a\xa5\x9b\x98\x18\x49\xf5\xfc\xfe\x72\xa7\xab\x35\x32\xc1\xdb\xd1\x72\xab\xd0\xf3\x3e\x0e\xbb\x49\xbd\xe5\xd4\xfe\xb1\x6c\x82\xed\xdb\x7c\x02\xc0\x97\xd8\x9b\x46\xd8\x8f\x0c\xb5\x28\xdf\x21\x34\x68\x9f\xff\xed\x4a\x50\xed\xbb\x4a\x36\xf2\x9c\x40\xe3\xc1\xd7\x81\x0f\x11\xa4\x9e\x73\x50\xfa\xaf\x15\xf6\x02\x8f\x4c\xc3\xff\x31\x48\x07\x52\xbc\x6f\xd7\x55\x26\x4f\x6c\xab\x91\x58\x9b\x22\x40\x99\x49\xd9\xa5\x93\xf8\xc5\x7c\x0a\x46\x62\xaa\x3f\xf1\xef\x1c\xdb\xa3\x62\xb4\xfc\xe3\xc1\xf5\xf3\x94\xdb\x62\xc7\xed\x92\x89\x1c\x58\x07\x10\x39\x5f\x19\x8d\x09\xb4\xcc\x59\x84\x77\x21\x42\xe0\xb9\xe0\x36\xb9\x90\xa8\x2f\x2c\xab\xb3\x06\x0d\x05\xf3\x9a\x54\x48\x74\xc0\xec\xff\xcf\xde\x9f\x80\xd9\x71\x95\x77\xc2\xf8\x53\xb7\xb5\x1e\xd9\x2c\x35\x84\xc9\x3f\xf9\xcf\xcc\x49\x79\x88\x24\x4f\x77\xb5\x24\xef\xb2\xb1\xd3\xea\x6e\xd9\x8d\xa5\x56\xd3\xdd\xb2\x43\x1c\x06\x55\xdf\x7b\xba\xbb\xac\xba\x55\xd7\xb5\x74\xeb\x9a\x90\x00\x36\x4b\xc2\x4e\x58\x95\x10\x08\x81\x4c\x86\x21\x90\x61\x86\x84\x75\x60\x42\x18\x08\x84\x30\x84\x6f\x12\xf8\x12\xb2\xf3\x65\x27\x1b\x49\xc0\xf9\x92\xef\x7b\xce\xfb\xbe\x67\xab\x5b\xb7\x17\xb5\x6c\xf3\xcd\x23\x9e\x07\xab\x6f\x2d\xe7\x9c\x3a\xeb\xbb\xfc\xde\xdf\x8b\x68\x2a\x7a\xee\xbc\x70\x81\xa9\xaf\x69\x35\x52\xc4\x2e\x08\x10\xdb\x27\x96\x61\x88\xfb\x14\xa8\xf2\x87\x5e\x70\x57\xe3\x9d\x0d\x93\xa2\x16\xe8\x41\x41\xb5\x24\xa2\xf7\x5c\x0d\xf7\x65\x1e\x7b\xa9\xc7\xf6\xa1\x0d\x70\x66\xce\xff\x81\xad\xa0\x5d\xe8\x61\x6c\x41\x30\xa3\x5e\x76\xb9\x1f\x06\x5b\x82\xef\xf1\x99\x39\x52\x59\x06\x5a\xc6\xde\xb6\x8b\x1d\xb6\x2a\x54\xd0\xf3\xb5\xa3\x21\x3a\x75\x67\xb3\x8e\xd0\x94\x20\xfe\xdf\x8c\x04\xcf\x68\xb8\xae\x08\x25\x1d\xa6\x10\x8e\x0c\xaa\x35\xb7\x9c\xe5\x8e\x76\xbb\xe5\x8d\x23\xec\x75\x23\x6c\x37\x10\xb3\xfb\x3f\x32\xa2\xa0\x79\x5f\x6f\x9d\x26\x9e\x55\x42\xc7\x65\xcb\x3a\xcb\x02\x79\xb5\x31\xce\x51\xab\xe0\x93\x0b\x33\xbc\x93\xc7\x6b\x42\x1d\xd3\x56\xde\x3f\xc8\xa8\x85\x0e\xe7\x09\x27\xf6\x2b\x2e\xf0\xb0\xd1\x8e\x71\xb0\xad\x53\x38\xa6\x7e\x4b\x3e\xd6\xce\x40\x93\x34\x41\x6e\x54\x70\x1b\x0e\xc1\x92\x97\xeb\x31\xa6\xbc\x23\x58\xae\xc3\x94\x82\xb8\x75\xa7\xfd\xba\xfe\x62\x15\x7d\xcf\xdd\x2c\x5d\x31\xbc\x1d\x80\xc3\xcb\xac\xe8\x52\x6c\xbd\x42\x32\x1b\xd0\xc0\x00\xf6\x96\xde\xd1\xbb\xa1\xe9\x3f\xd5\x71\x36\x02\x0b\xdd\x4a\xc0\x35\x08\x0a\xa1\xb5\x53\xff\xd1\x6e\x87\x89\xb4\x29\x48\x5b\xcd\x84\xd3\x40\x88\x4f\x21\x27\xef\xdd\x1d\x7c\x66\xa4\xe9\x8e\x15\x9f\x4e\xbe\x4f\xe4\xd9\x87\x7e\xd6\xd3\x07\xc9\xf5\x39\x70\x13\xc9\x67\x0c\xe4\x6d\x54\x0a\x2f\xce\x21\xaf\x2d\xe8\x72\xd8\x70\xde\x8d\x2a\x0f\x2b\x58\x01\xe4\x29\x48\x91\x64\x0e\x94\x0d\xea\x2d\xa3\x7c\x45\x94\xfc\x90\x08\x57\x42\x3e\x39\x77\x56\xee\x53\x44\xf1\x1c\x72\x18\x49\xd2\xb6\xf5\xa9\xb7\x26\x72\x98\x72\x65\xb6\x82\x9b\x9c\x63\x73\x00\xd0\x67\x6e\x0e\x66\x2
c\x3f\xe4\x7c\x41\x4a\x48\x2a\x69\x80\x54\xdd\x97\xaa\x38\x01\x49\xa7\xfe\x7d\x69\x07\x4d\x07\xda\xe2\x85\x1d\xae\xa5\x09\x38\xbd\xc9\x20\x95\x15\xb6\x17\xbc\xcc\x78\x9a\xe5\xdd\x28\x91\x72\xd7\x98\xfc\x66\x55\x61\x55\x28\x97\x46\x20\x67\x55\xa0\xc2\x59\x38\x87\x60\x90\x2c\x15\x3c\xc0\xa6\x06\x14\x6d\xa0\x61\xaa\x05\x9c\x41\x70\xd8\x5c\xf4\xf6\xe0\x43\xce\xea\xfd\x84\x67\xc3\xa6\xd3\x06\xd8\xb4\x03\x25\x8f\x53\x0e\x03\x56\x67\x90\x7a\x1e\xa3\xc2\xfd\x82\xdd\xb2\x2d\x16\x73\x9c\x5f\x8b\xd8\xfc\x1b\x69\x44\x0b\x87\xc5\x9a\x2e\xa2\x8c\xa8\xc4\x07\x8c\x54\xc3\x2e\x62\x9f\x76\x99\xd1\x96\x00\x69\xa1\x5c\x4b\x93\x79\x96\x3e\x23\x5b\x22\x53\xfb\x5b\x76\x05\xb7\x38\x57\x06\x8e\x85\x3a\xa7\x5c\xc4\xdb\x79\x96\xf2\xfb\xb3\x25\x77\xe3\xfb\xe2\x08\xfb\x51\xa2\x3d\x5b\xa8\x80\x30\x61\xb9\x4a\x80\xf6\xec\x79\x97\x4e\x7b\x76\x62\xc6\x32\xf2\x03\xe4\x64\x9d\x22\xd0\x5d\x2e\xb4\xfb\xb3\x25\x29\xb7\x53\xad\x49\x9f\x2b\xba\xab\x4e\xc8\x7e\xd2\x63\x7b\xa2\x76\x19\xaf\x09\xff\x0d\x5e\x30\x66\xc4\x4f\x50\xea\x28\x05\x28\x7d\x66\xd2\xd7\x08\xb7\xfb\xb3\x25\x97\x8c\xe9\x7a\x76\x6c\x0b\x21\x20\xb5\x40\x89\x47\xbc\x6b\x86\xda\xeb\xf7\xfb\x7b\xa3\x32\xeb\xc6\x6d\xc6\xde\xe0\xb1\x27\x41\xcf\xe1\x01\x23\xa0\xdf\x5e\xe0\x5d\x7a\xc7\x9d\xdc\x46\xc7\xc9\x3b\x4e\xe7\xd1\x31\x27\x3b\xef\x55\xfb\xec\x5c\x53\xfa\x2b\x4f\x26\xe2\xc2\x90\x08\xbd\xaf\xec\x0d\x1e\xf2\x86\xdf\x77\x31\x07\x10\x60\x18\xb7\xed\x38\x3d\x3a\x4e\xdc\x64\x80\x0a\x5b\x8c\x5e\xea\x71\x7d\xbc\xe1\x5e\x00\x20\x21\xd1\x26\xe1\xa0\x97\x54\x2b\x71\x1a\x5e\xf4\xf6\xe0\xf1\xe9\xcc\xd2\x8f\xed\x61\x67\x18\xdd\xf0\xa7\x83\x9b\xa7\xf0\x84\x6d\x58\xe4\xea\xec\x45\x7d\x07\xd7\x99\x0e\xa1\x73\x56\xfb\x2f\x99\x88\xa8\x0f\x4a\x05\xe7\x71\x8a\x88\xb2\x93\x46\x5a\xc5\x77\x44\x4f\xa4\x78\xf8\xca\x51\xb9\x87\x82\xcd\x20\xb7\x51\x2d\x52\x4c\xc7\xe5\x4e\x05\x07\xcf\x90\x28\x7c\x9c\x4f\x5f\x28\xf3\x08\xdc\x32\x10\x98\x44\x3b\x77\x0c\xd6\xf2\xf0\x5d\x35\xa4\xae\x8d\x2a\x7a\xa1\x4d\x64\xb0\x16\xac\x9a\x22\x1f\xc3\xb0\xbf\x5f\x6d\xd9\x51\x5c\x1f\x6f\x5d\x42\x14\xd7\xcb\x5a\xa6\xe5\x26\x9e\x4b\x65\x10\x56\x21\x2c\x96\x60\x4f\x7e\x75\x12\x6a\x61\xf8\x9a\x02\xb9\x80\xfb\x2f\x2a\x34\x3c\x02\x27\x2e\x0d\x4d\xe1\xa6\x1b\x46\xde\xbc\x18\x92\x2b\xb9\xb5\xc4\x76\xde\x1e\x14\xa8\x86\x34\xa4\xe0\x5d\x24\xa7\x06\xaa\x0e\xa1\x4d\xf4\x51\x92\xd0\xdf\x94\xae\x29\x2a\x0a\x73\xea\xd7\x5a\xc5\x1e\xde\xc7\xbe\xb3\x01\x2b\x36\x9f\x25\x02\xd0\x2d\x7f\xb8\x37\xf8\x2e\xf5\xa3\x89\x1a\x1a\xb0\x56\x8d\x98\x95\x8f\xef\x61\xcb\x96\x77\xfe\xfb\x76\xe6\x9c\xff\xce\x0d\x08\x6c\xaf\x60\x63\x76\x8a\x8d\x79\x96\x82\xc6\xcc\x05\xdf\x3e\x23\xff\x70\x01\x4b\x30\xc8\xf6\xe1\x79\x98\x1d\x64\x4f\x1b\xba\xf2\xec\x49\x74\x05\xde\x72\xe9\xf0\x96\x47\xbc\x95\xcd\xb1\x20\x53\xfe\x09\xcd\x80\x05\x1d\xbf\x21\x0d\x96\x5a\xca\x75\xf6\xdd\x4d\x29\xaf\x8e\x7e\xcb\x53\x5e\xfd\x7f\x83\x57\xea\x5b\x98\x98\xeb\xa1\xbd\x8d\x44\x29\x2a\x70\x64\x2e\xcb\x4b\xff\x77\xf6\x04\x37\xd8\x17\x70\x2d\x97\x55\x2f\x21\xd1\xce\x00\x88\x23\x2e\xa5\xb9\x04\x63\x16\xe5\xd8\xc9\x7f\x9d\x41\x7b\xed\x1e\xb6\xc4\xf6\xf5\xf2\xac\xcc\xda\x59\xe2\xdf\x13\xcc\x48\xa1\x67\x66\x8e\xab\x4b\x46\x4e\x83\x32\xb4\xc0\x75\x76\x6a\x6e\x94\x2f\x4e\xce\x41\xe6\xce\x85\xc9\xc5\x39\x37\x7e\x7e\x72\xce\x91\x84\x3e\x3a\xc2\x0e\x44\xbd\xde\x9c\xaa\xe7\x67\x47\x82\xb7\x8e\xc8\x9a\xc0\xfe\x41\xac\x00\xc3\xaa\xb4\xbc\xf2\x98\x84\xb4\x30\xf9\xc5\xac\x70\x37\x4c\xdd\x51\xf4\xd3\x32\xba\x10\xf2\xb3\xe9\x58\x2f\x17\xcb\xf1\x05\xc5\xab\x02\x07\x71\x2e\x28\x6a\x43\x56\x31\x33\x31\x3b\x61\x4a\x52\x90\x25\x7c\xf8\x50\x04\x88\x63\x3e\x7f\x72\x72\xec\xc6\xeb\xae\xbb\x01\x15\x6f\x9c\xb2\x
eb\xeb\xeb\x61\x1c\xa5\x51\x98\xe5\x2b\xe3\x18\x6d\x02\xd9\x82\xc6\xa9\x08\xd0\x72\x8b\xc3\x21\x9f\xcd\xd2\x31\x5d\xbe\xfa\x3a\x7d\x82\x49\x21\xb8\xd6\x44\xf2\xf5\xf3\x6e\x1f\x2c\x05\x69\x3f\x6c\x67\xdd\xf1\x6e\x7f\xac\x0d\x89\x6c\xc7\x54\x19\x56\x02\x16\x2e\x77\x06\x3b\x49\x68\x5c\xf0\x95\x2a\xca\x3b\x46\xf0\x25\x6c\xd4\x84\xe9\x7f\x9d\xae\x05\xe0\xe3\x80\x22\x40\x16\x07\xf9\x0a\x49\xbc\xce\xf8\xbd\x5b\x19\x94\x7f\xca\x0b\x5e\xef\xb9\xa4\x07\x7a\x9c\x48\xbc\x72\x13\xaa\x1c\x94\x0f\x1e\x34\x19\x5f\x29\xba\x9b\x32\x87\xc8\xb3\x86\x9a\x37\xe7\x4c\xaf\x88\x4f\xcd\x2e\x3c\xe7\xd4\xc4\x89\xe9\x53\x21\x57\xc2\xa2\x0e\xbc\x94\xb2\x56\x8f\xa6\x3e\xd9\x89\x9d\xe6\x3e\x9d\x02\x79\x6f\x50\xc6\x42\xc4\xbe\x58\xa1\xb4\x9a\xba\x16\xd7\x51\xcd\x51\xc7\xd8\x6d\x9b\xec\xc4\x1b\xe6\x46\xf3\xbf\xb9\x3f\xf8\xbe\x0d\x9f\xa8\x27\xa1\x6a\x08\xa0\x1c\x9a\xcb\x32\xbc\xe8\x3d\x91\x5e\x20\x06\xf6\xe2\xa2\xf7\x44\xa2\xe2\x37\x57\x2c\x68\x9e\xb3\xe2\xbf\xb2\x8f\xfd\xdf\x9e\x03\xdc\xfb\x0b\x2f\x78\xb9\xd7\x76\x62\xff\x50\xe2\x55\xe1\x99\xea\x86\x93\x7f\x0c\x06\xde\xb4\xca\x18\xeb\xe4\xc9\x8e\x16\x95\xd1\x21\x9c\x93\x48\x2e\x49\x06\x2a\xab\x7c\xb9\x42\xbb\xa2\x74\x6c\x04\xcf\x64\x67\xd8\xe9\x6d\x18\x7c\x86\x0e\x8d\x86\x06\xca\xc9\xfc\x04\xea\x40\x34\x0e\x15\xfe\x9b\xbd\xe0\xb4\x7b\x49\x93\x8e\x4b\xe5\x5e\xaa\x52\x6e\x0e\x00\x63\x3a\x53\xeb\xcc\xe9\x0d\xe7\x13\x9e\xce\x6e\xdd\x96\xcd\xea\x28\xd9\xac\x70\xaa\xb0\x8b\x1e\xab\x8f\xb7\xff\x62\x1d\xa1\xfe\x40\xed\x16\x58\xa2\x69\x3a\x99\xb9\x9e\xab\xbb\x60\xa4\xe9\xd4\x4c\xe3\x4e\xd3\xc1\x86\x0a\x5f\x5d\x08\xa1\x39\x44\x9d\x4f\xb3\x96\xca\x3b\x3c\x56\x9f\x7a\xfe\xab\x75\xe3\x9e\x5b\xbb\x55\xcf\x1b\xb1\xc3\x06\xb6\xa3\xa4\x8d\xa1\x70\x9b\x35\xf3\x6b\x1e\xbb\x1a\xad\x41\x11\x99\x82\x7e\x6b\x07\xa6\xa0\xb7\x79\x4e\x59\xce\x54\xd1\x76\xa0\x61\xe9\x6b\xe1\x9f\x0e\x1a\x47\xf4\xe7\xcb\x4f\x1e\xb5\x26\x93\xa8\xad\x2c\x10\x33\xb2\x04\xac\xe2\xd9\x72\x49\x26\x7c\xf7\x7d\x18\x79\x80\xcc\x75\x42\x76\x81\xf9\x2a\x42\xf1\x4e\xa4\x9c\x92\x32\xd2\x92\xc2\x65\x9c\x1e\xbc\xa9\xbe\xa2\x9b\xc1\x84\x6f\xcb\x09\xb4\x62\xee\xea\x78\xc7\xc6\xc9\x6e\x75\xf5\x4f\xec\x62\xdf\xb5\x51\x58\xb6\x9c\xd5\xc2\xff\xc6\x48\xf0\x4e\xcf\xbd\xa6\xe3\x8d\x88\x66\xdf\x42\x0f\x5b\x18\x23\x6d\x91\xce\x96\x61\xa7\xe9\x0a\xd9\x07\x5a\xab\x77\x15\xf7\xd4\xb0\x94\x77\x1d\xb5\xde\x25\xed\x93\x4f\xc5\x05\x77\x9b\x73\x6f\x14\x4b\x55\x28\xac\xcb\xb8\xcb\x6c\x2f\x19\x34\xfd\xfb\xd8\x8d\x5b\x0f\x47\x87\x42\xe7\xf1\xcd\xe0\xbb\xa6\x9c\x2c\x10\x91\xb6\x91\xea\x2f\x65\x19\x63\x3a\x85\x52\xc7\x8f\x36\xd8\x41\x9a\xab\x5a\xd4\x2f\x07\xff\xb6\x5e\x9b\x29\xd8\xaa\x70\x99\xed\x5d\xc7\x6f\xbe\x84\x0f\xa3\xde\x1a\xfc\x30\x2a\xd2\xaa\xe7\x8f\x0f\x34\x46\x57\x39\xe5\x55\x85\xff\xdf\x0f\x04\xcf\xa8\x5d\xb3\x11\xeb\x58\xcb\x72\x8d\xb1\xbd\xb0\x02\x82\x63\x2b\x51\xa7\xf1\x51\xec\x06\xca\xaf\x8b\xde\x55\xb9\x00\x30\xcb\x64\x56\xa5\xe5\x45\x6f\x37\xa0\x64\x2e\x7a\x7b\xe1\xdf\x99\x29\x97\xf7\x94\xb1\x05\x76\x40\x17\x36\x33\xe5\x4f\x05\x37\x4d\x5a\xe0\x88\x99\x29\x9d\xe9\x0e\x33\x61\x1e\xec\x64\xed\xf3\x22\x3f\x3e\x3e\x7e\x9b\x7e\xed\x39\x71\xe7\xf6\x83\x8e\x98\xf2\x20\xc3\x6a\xfd\x07\x82\xce\xa2\x46\xea\xd4\xd0\x35\x3a\x42\xfc\x52\x12\x14\xaa\x0c\x84\xc8\xac\x60\xd7\xfd\xb1\x16\x73\xba\xc0\x7f\x8f\x66\xc4\x7c\x13\xc0\x0b\x2d\x41\x29\xee\xea\x64\x99\xaa\x59\xab\x51\xc1\x97\xe4\xf9\x40\x65\xc8\x25\x65\x2c\xfe\x4e\x2e\x6c\x53\x50\x47\x1e\xa3\x16\xc0\x05\x59\x53\x10\x08\x5b\xf2\xbe\x28\x55\x91\xc4\x1f\x38\x9b\x95\x9a\xa9\x14\xc5\x5d\x6b\xc7\x07\x0b\x43\xad\xc4\x90\x9f\xa8\x2c\xb9\x42\xd5\x03\x79\x0d\x28\x51\x48\x99\xf1\x95\x28\x5f\x92\x1d\x6d\xac\x68\
x24\x4f\xa3\x21\x04\x8c\xa3\x2b\x42\x1e\x2f\xbd\x9e\xe8\xf0\xa8\xe4\x37\xc8\x5d\xef\xce\x49\x77\xa3\xbb\x85\xa9\xc9\xe2\x87\xc1\x77\xcd\xe0\x9f\x03\x88\xb6\x83\x05\x0e\xab\x33\xf0\x7d\xb6\x1f\x4e\x10\xd8\x0a\xb7\x82\x23\x72\x17\x5b\x70\xcb\xd4\x40\x12\x19\xbb\x42\x3c\x88\x68\x8d\xc7\x08\xfd\xa7\xac\x2c\x2c\x27\x41\xfe\xfe\xe0\xd9\x46\x5c\xaf\x0b\xdc\xd3\x0e\x36\x48\x63\xe4\xf1\x61\x18\x30\xed\xab\x46\x6a\xd3\x41\xe3\x8d\xfd\xb9\xf7\x30\x5c\x75\xfe\xe9\xe0\x0e\x93\x3f\xd6\x86\x81\xb8\x13\x8b\xcc\xa5\x68\x22\x8a\x3a\x71\x2a\x0a\xf0\x56\x2c\xd5\x30\x59\x3f\xd7\x62\x7b\x69\xf6\xf9\xef\x68\x05\xaf\x6f\x6d\xb3\x70\x78\xb7\xea\x51\xd1\x7c\x46\x0e\x40\x94\x80\x25\x30\x2a\x14\x7e\x6d\x49\xb4\x33\x98\xff\x79\x25\x28\x49\x21\xbd\x37\x27\x5f\xab\x79\xfe\x8d\xd7\x07\xe8\x2f\x85\x6d\x91\x5f\x57\xae\x77\x77\x75\x9b\x05\x94\x01\x98\x8e\x98\xa0\x79\x92\x15\xa2\xa0\x33\xb0\x14\xdd\x5e\x96\x47\x79\x9c\xf4\x43\x3e\x53\xa8\x9c\x10\xd0\x26\x28\x36\xcd\x06\x5a\x65\x14\x23\xab\xcb\x12\xb6\x1b\x4a\xf4\xdb\xdb\x9f\x75\x47\x36\x9e\x75\x6a\x0f\xb6\x26\xdb\x7f\xdc\xed\xb8\x4d\x55\xb9\xa7\xb2\xa8\x73\x02\xf3\x05\xe6\x2a\xff\xe1\x3f\xef\x0a\x5e\xed\x35\xdc\x18\xc8\x05\x63\xa9\x48\x49\x16\x75\xc6\x28\xf1\xa0\x9c\xa5\xf8\x02\x28\x74\xc7\x55\x22\x42\xb0\xff\x02\x27\xa7\x3a\x2b\x74\x6c\x92\xe5\xc4\x46\x6e\xdf\x28\x75\x0b\x71\xcf\xfe\xdf\x1a\x61\xcf\x65\xfb\x56\xb3\xa2\x84\x05\x94\x05\x4b\x77\xd1\xdf\x2a\x3c\x43\xd6\xb0\x41\x9b\x2c\x9a\x85\xa9\xd9\x05\xda\x24\x0f\x95\xfd\x5e\xdc\x8e\x92\xa4\xcf\x27\xee\x5d\x70\x5f\x2f\x0e\xdb\xab\xe8\xb9\xac\x15\xf7\xfc\x2a\x58\x9d\x99\xdb\x7e\x85\x1a\xe8\x63\xd5\x77\xe7\xe4\xb4\x9c\x72\x67\x7a\x22\x5d\x28\xa3\xf6\xf9\x8d\x2a\xff\x1d\x8f\xed\x96\xfa\x73\xe1\xff\x2f\x2f\x78\x40\xea\xea\x03\x69\x80\xda\x59\xde\x21\x94\x13\x76\x30\x3c\x0f\x50\xc1\x42\xce\x6e\xc8\x49\x84\x4a\xb8\x4a\x48\x19\xa7\x4d\x23\x82\x1b\x4c\x6a\x12\xae\xc6\xa5\xad\x4e\x8d\xb3\x31\xf6\xef\xb6\xc0\xf7\x9a\x97\x0b\x94\x37\x6d\x4b\x0e\xe3\x17\xee\xb2\x49\x9a\x4c\x39\xca\x06\xba\xa0\x91\x4a\x52\xb2\xf2\xbf\x38\x12\xbc\xc7\xd3\xe9\x97\x7a\xda\x52\x6a\x01\x9a\xe4\x0e\xac\xd3\xe7\x46\x49\x42\xe6\x73\xc2\x23\xc6\x44\x8e\xcd\xd7\x05\x40\x0d\x8f\xf0\x43\x71\x28\x42\x1e\x97\x07\x11\xeb\x3b\x96\xf5\x0e\x87\x7c\x82\xa7\x55\x92\x6c\xa1\x82\x34\xd3\xe5\x53\x41\x05\x22\x6a\x75\x59\x17\xbd\x3d\x58\xd7\x45\x8f\xf5\xb4\xa7\xcd\x99\xe3\xff\xe4\xb1\x17\x7a\xcc\xba\xeb\x97\x5b\x24\x66\x51\x58\x45\xd9\x39\xc1\x1d\x13\x08\x1d\x32\x99\x84\x45\xde\x1d\x6d\x24\x72\x75\x6d\x41\xd8\xbe\x90\xad\x31\x6a\xaa\x9f\x28\xa9\xe4\xde\x7b\xb1\x9f\xea\x85\xc0\xf7\x2b\x9f\x85\x5b\x5a\x5a\x6b\xd7\xa8\x9a\x70\x39\x84\x70\x1d\x1d\x3b\x7a\xe4\x88\x7b\xa6\xff\xd8\x2e\x16\x36\x82\xde\x50\x2f\x6f\x42\xbe\xfd\xf1\xa3\x87\x7c\x7b\xc5\x08\xfb\x11\x8d\x7c\x7b\xbe\x46\xbe\xfd\xe9\x15\xe4\xdb\xd6\x90\x6f\x71\x72\x79\xf1\x6e\xaf\xbf\xde\xcd\x43\xb5\x9d\x80\x73\xff\x4b\xd7\x05\xff\xbe\x76\xcd\xb6\xd4\xa7\x26\x64\x58\x11\x08\x22\xb7\x9b\x43\x73\x82\x78\x01\xa1\x80\x9c\xb1\x1d\xd9\x68\x74\x9c\xab\xd0\xcf\x86\x98\x50\x17\x52\x74\x8c\xfd\xdc\x2e\xf6\x2f\x75\x55\x98\x81\x85\x5c\x28\x85\xff\xba\x5d\xc1\x3f\x8c\x4c\x34\xdf\x54\xa4\x8b\x39\x0e\xb4\x86\xfc\xe8\x8d\xe9\x5c\xed\xc5\x73\xca\x17\x83\x07\xb7\xfa\x66\x71\xa1\x27\x37\xa9\xd0\x0a\x2c\x46\x91\x5b\x6e\xf6\x0a\x1b\x02\x80\x75\xe5\xca\xa1\x45\x0b\x35\x62\xd8\x5d\x5c\xaa\xe1\x2c\xea\x7a\xbe\xa9\xd4\xc1\x01\xc2\xf0\x42\x09\x0e\x2e\xc4\xb4\x61\x74\x20\x40\x01\xb0\xdf\xda\xd2\xa9\xdc\xd0\x33\xcb\x16\x09\x76\x47\x0f\x95\x83\xb0\xb5\xe0\x65\x2a\x9f\x8e\x6e\x95\x1c\x41\x1d\x6e\x01\x34\x8e\x1d\x81\x51\x92\xea\x09\x8d\x6b\x2c\x31\xa1\x1b\xb9
\x58\x47\xa5\xf2\x93\x68\xef\xb6\xaa\xd8\x34\x15\xb3\x65\xdb\x8a\x0e\x28\xa4\x51\x9c\x54\xb9\x5c\x34\x44\x33\x6e\x38\x29\xcf\xdd\x77\x90\x26\xeb\xc1\x67\x9f\xdb\x88\x2b\xee\x27\x3c\xe6\x4c\x29\xff\x15\x1b\xa6\x5b\xda\x70\x59\xa8\x58\x77\xab\xb8\xe0\x0e\xfb\x97\x86\x0a\x10\x02\xd4\x30\x47\x0b\x73\x6a\x20\xbf\xa6\x22\x0b\x65\x7f\xa4\xdc\x13\xbf\xed\x05\xff\xd3\xab\x73\x32\x0f\x2c\xac\x90\x83\x77\xd2\x08\x80\x08\xd2\x7a\xa0\x8a\x12\xb2\x0e\x89\x70\x25\x1c\x45\xc5\x8d\x3a\xce\x51\xb4\x47\x29\x27\x7a\x60\x3d\x11\x34\x41\x9e\xa8\x42\x34\x84\x3b\x45\x34\x3d\x9d\xe5\x2b\x51\xaa\xbc\xc6\xfa\xeb\x1c\x55\xea\x2b\xfb\xd9\x93\x75\xe0\x83\x8e\x1a\xf8\xd4\xfe\x9d\xb3\xde\x04\x2f\xdc\x3f\x5b\x2f\x98\x77\x44\x3b\xee\xd8\xea\x94\x89\xe2\x52\x13\x50\x9e\x04\x0a\xa5\x61\x14\x7f\x5b\xff\x32\x24\x34\x3a\x42\x8a\x1e\x57\xb2\x8b\xcd\x03\xa4\x11\x37\x0a\x8d\x53\x16\x3a\x5b\x9e\x2e\x09\xed\x77\xfa\xe8\x8f\x0b\x13\x4b\xc4\x33\x8d\x19\xd1\xe8\x14\x70\x0d\x16\x03\x25\x17\x2a\x8f\x2b\x27\x66\x24\x5e\xb4\xb3\x9e\xe5\xee\x1e\x95\x3b\x0d\xb2\xae\x15\xe7\xe3\x5e\x61\x7f\x78\xc8\xd8\xc9\x2c\x57\x5c\x5a\xa3\x43\xbb\xa6\x6f\xc4\x3e\xb0\x4b\x98\xde\x20\x70\x76\x5d\xa0\x09\xf2\x2a\x85\x80\xfe\x40\xce\x8b\xe0\x08\xd2\xa3\x1e\x0d\x6e\xe5\x98\x88\x24\x06\x70\x51\xe9\x74\x1a\xe8\xab\xe8\x1b\x3d\xce\x83\x81\x09\x12\x1c\xe7\xcf\x65\x9c\x07\xd0\x61\xd3\x17\xa4\x42\x05\x7b\x4d\x70\x9c\xdf\xc7\x38\xe7\x70\x57\xfe\x2f\x38\x2f\xfa\xc1\x71\xab\x09\xa3\xea\x06\x9e\x3a\x50\x54\x30\x9b\x95\x33\xa9\xb9\x85\xa8\x19\x5d\x16\x5c\x3b\xa2\x6f\x73\xd9\x76\xfa\xfb\xd9\xf0\xef\xf3\x98\xfc\xeb\x79\x10\x63\xae\x28\xcd\xec\x14\x2b\xe0\xe5\xdb\x7e\x5f\x36\x49\x97\x81\x48\xd7\xe2\x3c\x03\xff\x2c\x76\x67\x2f\xcf\x3a\xd8\xa3\x45\x19\xad\xc4\xe9\x4a\x70\xeb\x63\xd7\xad\x76\x6b\x1a\x7b\x76\xb3\x6e\x85\xd6\x5b\x3d\xab\xbe\xa1\xb9\x7f\x15\x13\xdc\xce\xa2\x7b\x70\xf1\x8c\xc3\xe2\x05\xb8\x1b\x4d\x79\xd0\xf5\xc8\xe9\xae\x38\xbc\x42\xc6\xac\x03\x06\x3c\xac\xa0\x1f\x39\x3b\x8d\xa2\xc2\x53\xab\xdf\x24\xab\x0d\xd9\x9f\xec\x66\x4f\xc0\x8a\xf5\xe6\xf6\xeb\xbb\x2f\xc3\xe6\xf6\x9a\xdd\x67\x9c\x52\xb7\xb2\xb3\xe9\xcd\x2c\x76\xb6\x8d\xd5\xa8\x30\x3b\x8f\xda\x59\xdc\x36\xcb\xe9\xa8\x73\x02\xea\x94\xa9\x20\x4d\x43\x41\x49\x07\x5b\x83\x04\x8e\x62\x9d\x7e\xc1\xd6\xb8\x5e\xb7\x4a\x0c\x9c\x23\x03\x12\x37\x7a\xdb\xe3\x65\x2e\x62\xf4\xaf\x6e\xb4\xbb\x92\x42\x49\xcf\x1c\x32\x6d\x51\xce\x79\x22\xb1\x54\x39\x6c\xb3\xdc\x6a\x61\xed\x19\xcc\x6b\x7b\x98\x23\xad\x60\x66\x7d\x45\x1b\x0d\x82\xa0\xca\x63\x1f\xf1\x43\x49\x7c\x1e\xec\x8c\x9a\x35\x79\x3e\x4b\x92\xa5\xa8\x7d\x1e\xde\xe7\x73\x59\x67\x2e\xcf\x2e\xf4\x55\x66\x18\x2c\xee\xb0\xda\x26\x1b\x3e\x19\xa2\x84\xed\xa1\xd1\xeb\x56\x01\x05\xec\xe1\x94\x42\x5c\xaf\x1c\x8b\x53\x93\x67\x55\xa4\x1d\xca\xb5\xda\x8d\xfa\xb0\xc5\x37\x0b\x0c\x52\x54\x54\xd8\x58\xf4\xf4\xe1\xa8\xef\x6c\xae\xbf\x7a\x1f\xf3\x73\x11\xa7\x6b\x19\x02\x60\x88\x02\xe3\x9b\x7b\x83\x3f\xdb\x3b\x78\xbd\x31\x77\x63\x5c\xe8\x26\x1a\x69\x46\xca\x89\xa2\x63\xf4\x25\xb4\xdf\x83\xe9\x33\x47\xce\x4d\x05\x0b\x32\x1f\x4a\xb3\x15\x04\x8f\x09\x25\xb3\x5a\x4c\x12\xb3\xb2\xe1\x44\x20\x31\xb3\x3c\x2b\x44\x47\x74\x82\x90\x31\xb8\x7e\x7c\x50\x22\x25\x6b\xb0\x6a\x8a\x85\x92\xc5\x20\x8f\x8d\x9b\x20\x8f\x06\xac\xa3\xa1\x68\x53\x6c\x54\xf2\x44\x44\x94\x8c\x58\x05\xaf\x46\x09\xfa\x60\xad\xef\x75\xc7\xd4\xd4\x53\x5b\xd8\x18\xa3\x23\x1f\x2c\x81\x59\xa5\x40\x96\x93\x18\x55\x05\x14\x14\x4c\x31\x88\xe2\x2d\xc8\x24\x0c\x81\xd6\x68\x3f\x36\x3a\x41\x94\x24\xa1\xd2\x7b\xc8\x16\x67\xf2\x7b\xe1\x74\x94\x25\x5d\xdb\xad\x8a\xf2\x5a\xc8\x7e\xd0\x11\xdd\x5e\x56\x8a\xb4\x1c\xd5\xc9\x7e\x55\xd8\xae\x3a\xf9\x20\xdd\x6
f\x2f\x17\x6b\x71\x56\x15\x49\x5f\xb7\x37\x24\xe6\xd2\x6b\x6b\x4e\x17\xab\x5f\xcc\x94\xd2\x99\x2e\x56\xaa\x28\x8f\xd2\x52\x68\xd5\x5d\x5c\x88\xda\x25\xba\x58\x43\x7e\x2d\x80\xd2\x9b\x0b\xc8\x45\x51\x61\x88\xd2\x72\x95\x43\xdf\x60\x6f\xa9\xfb\xb4\x26\xb0\xd9\xa3\xaa\x53\x70\x3a\x35\xd6\x2c\x0b\x3f\xaf\xb6\x4b\x59\xf7\xba\xd3\x73\x55\x21\x9c\x5e\x23\x87\x6f\x2e\x94\x46\x2a\x77\x85\x38\x8d\xbb\xf1\x83\x62\x4b\x5d\x20\xab\x28\x33\x13\x2b\x6f\x76\x30\x1c\xd2\x28\x49\x78\xb7\x2a\x23\x83\x4d\xb1\x9a\xac\xa2\x61\x46\x89\x8c\x55\x29\x8f\x34\x81\xdc\xdd\x83\x84\x1c\x73\x38\x42\xef\xd0\x9a\x72\xc4\xfb\xaf\xed\x62\xbb\x81\x25\xd3\xff\xc3\x5d\xc1\x4b\x76\xcd\xcb\x3f\x2d\x2b\xc1\x3a\xc8\xce\x46\xff\x07\x41\x3b\x32\x34\x8e\xc5\xb8\x95\x2a\xdd\x91\x56\x79\x3b\xca\x05\x59\xee\xd1\x12\xd3\x70\x03\x93\x2a\x68\x3c\x00\xc4\xf5\xab\x8d\xeb\x39\x51\xda\x7f\x0e\x97\x0d\x0a\xf9\x5d\xd9\xba\x58\xa3\x98\x71\xe8\x7d\x9c\xa7\x62\x4d\x9e\x58\xf7\xe8\x9e\xd0\xf6\x00\xbd\x04\xe4\xf6\xa1\x6c\x20\x83\x77\xc1\xa3\xd7\xab\xcc\x36\xab\xa4\x72\xdc\x2f\xc0\x11\xa2\x49\x74\x69\x83\xc9\x45\x5b\x4a\x2f\xca\x1f\x28\x05\x17\xf9\x21\x6a\x78\x92\x3e\xc7\x2c\x44\xaa\x48\x5c\xb5\xa3\x3b\x68\x25\xcc\x5f\x50\x09\x68\x17\xca\x6c\xd3\x8d\x86\x34\x4b\x69\xc9\x54\xd2\xc4\xf0\xe6\xd4\xd3\xf8\x00\x2d\x79\x47\x35\x3f\xcd\xee\x66\x33\x97\xa8\x7b\xcb\xd1\xbb\x37\x2e\x57\xcf\xe8\x19\xc4\x7e\xbc\xc5\x9e\x20\xb7\xcb\xac\x2a\x17\x44\x3b\x4b\x3b\x85\xff\xc3\xda\x11\xfc\xb7\xde\xa2\x73\xab\x1e\x36\x87\x37\x8d\x8d\x44\xab\xd7\x13\x7a\x4f\x54\xcf\x80\xd7\xad\x18\xad\x4d\xc9\x24\xd1\x5b\x7a\xbc\x92\x66\x39\x26\x66\x52\x76\x0f\x73\x1f\xec\x1b\x8e\x53\xb9\x6e\xd6\x58\xb4\xea\x42\x37\xae\x72\x6c\x2e\x89\x72\x5d\x88\x94\x1f\x85\xfe\xbe\xee\x08\x2f\xf0\x63\x9c\xd3\xdb\xba\xec\x58\xfc\xde\xe3\xb1\xab\xa9\x2a\x3a\x9d\x7f\xdc\x0b\x5e\xe9\x9d\xb4\x2f\x39\x06\x0b\x27\x9c\x40\xe4\x79\x96\x5b\x58\x78\xeb\x0c\x52\xa8\x78\x39\x9b\x56\xa3\xb4\x23\x27\xd2\x98\xb1\x16\x99\x93\x77\x06\xfa\x45\x76\x8b\xac\xd4\xe5\x4c\xc2\x7b\xce\x06\xf2\xc6\xbd\xec\x00\x2c\x59\x6a\xee\x4b\xf6\x06\xdf\xdc\x63\x5d\x70\xad\x2b\x52\x43\x82\xed\x26\x40\xdb\x18\xd1\x3e\x59\x32\x65\xda\xce\xba\x98\xcc\x06\xe7\x75\xb3\x74\x30\x2d\x0f\x0d\xd4\xaa\xa6\x1f\xa8\xe2\xb5\x28\x91\x1a\x4e\xc8\xd8\x18\x87\x5b\xc7\xa9\xb8\x48\x67\xa3\x50\x02\x5a\x5c\xea\x13\x47\x7b\x4a\x2c\xcb\x5d\x0e\xfb\x8d\xa3\x67\xc7\x76\x32\x91\x42\x99\xb2\xf5\x49\xbd\x16\x47\x3c\xea\xf5\x8a\xf1\xb5\xa3\xa3\xea\x0f\x98\xfd\x28\x3b\x8b\x0b\xa5\x48\x41\x49\x33\xd7\x97\xaa\x52\x77\x03\xb6\x0b\xcd\x72\x1d\x7e\x4e\xa5\x43\x2a\x8e\xdf\x17\xc8\xc2\x82\x67\xcb\x42\x15\xe6\x5c\x5e\x5d\x3b\x2a\xaf\xe9\x3d\xf7\x38\xbf\x2f\xb0\x9a\x17\x3c\xfb\xdc\xa8\xf5\xd5\x65\xe6\x34\x89\xc3\x77\xd5\x1b\x44\x1a\x00\x6d\x70\x0d\x5a\x00\xf6\xab\xee\xe7\xc1\xce\x8d\x97\x55\x7f\x14\x76\xdc\x74\x82\x26\xcb\x38\x45\x22\x66\xf0\xc8\xa5\xd8\x61\x64\x06\xd1\x19\x97\x64\xcb\xc8\x20\xf9\xa8\x77\x3f\xc8\x94\xdf\x72\xdd\xbf\x24\x54\x88\x11\xd1\x98\xe0\x8b\xd0\xda\xe6\x31\x71\xce\x76\x5c\x11\xf6\xca\xfc\xd2\x08\x3b\x20\x15\x98\x69\x20\x7c\x2b\xfc\x4f\x8f\x04\x1f\x1a\x59\x30\x17\xf0\x80\x1b\x22\xdc\x4b\x9d\x13\x68\xea\x90\x2e\x4e\x2e\xc3\xb6\xd4\xdd\x31\x15\x88\x5e\x89\xc7\xf9\xd9\x14\xcc\xc7\xa3\x7c\x36\x4b\xc5\x28\x70\x1a\xe2\xdf\x67\xd2\xa9\xbc\x3f\x5f\xa5\x46\x1e\x05\x23\x89\x5d\x28\x3f\x7d\x76\x61\x11\xfc\x92\x94\x1b\x1a\x4e\xd7\xb4\x1d\x27\x31\x99\xb5\x21\xfe\x71\x54\x8a\xee\x40\x22\xad\xfa\x55\x8b\x62\xf2\xb8\x42\x69\x39\xe2\xcb\x15\x20\xcc\x8b\x52\xf4\x94\xda\x68\xf6\xbf\xf6\x6a\x14\xa7\xda\xbf\xe1\x34\x42\x7e\x3c\xc6\xcc\xa7\x46\x36\xac\xd2\x0e\x08\xa4\xf3\xea\x78\xd5\x
16\x9e\x0e\x7e\x95\xa6\xbc\x34\xa1\xf8\x55\x99\x8d\xe9\x26\xa1\xa4\xdf\xd7\x4b\xc5\x68\x14\xd4\x09\x6a\x14\x9e\xfe\x74\xd5\x87\x10\xd8\x90\x75\x85\xbb\xe7\xd2\x4d\x67\xd3\x7d\xd1\xd5\xec\xc6\x4b\x03\x25\xfb\x9f\xb9\x2a\xf8\x55\x6f\x18\x54\x34\x6e\x60\xef\x20\x7f\xd9\x50\x74\xb8\x52\x38\xe5\x95\x6e\x54\x92\xab\x1f\xfd\x81\x8a\x34\x1f\xa0\xb6\x1c\xdc\x8a\x8a\xa3\xd1\x04\xe0\xab\xe1\x57\x12\x13\x82\xba\x2d\xb9\xd2\x3d\x88\x15\x10\xda\x00\x2f\x1d\x87\xd3\x23\xfb\xd9\xc3\x2d\xb6\x4b\xde\xf5\xff\xd1\x63\x77\x5d\x0e\x24\xf7\x42\x4f\xb4\x83\xf7\x78\xb2\x4c\x8d\x55\x27\xb2\x35\xd3\x45\xf2\x2a\x71\x45\x56\x1a\xee\x6f\x61\x56\x1f\xed\x08\x1f\xd9\xa2\xb1\x28\xed\x8c\x21\x60\x25\x64\xaf\xf6\xe4\x7c\x01\x68\xe3\x8b\x36\x74\x9f\x6c\x37\xda\x20\xb8\xa9\xd0\xf9\x99\x6c\x2c\x64\x73\xae\x26\xab\x0b\xae\xc4\x6c\xee\x34\x66\xf3\x4a\x60\xe5\xa5\xf3\x86\xff\xae\xcd\x1b\xfe\x45\x8f\x3d\xfd\x12\x6c\xbc\x68\x95\x84\xe0\xe4\xd7\x78\x86\x29\x54\x23\xc5\xec\x68\xe5\xc7\x8e\x4b\x5c\x55\xf4\x88\xd7\xdd\x3c\x76\xf4\x19\xfe\x5d\x3a\x76\xd4\x5a\xfa\x3a\x5a\x74\xc8\xd2\x77\x82\x47\x71\x97\x60\xec\x8b\xfb\xd9\x4d\x5b\xea\xb7\xd3\x08\x0f\x01\x06\xe7\x62\x3a\x2d\xf3\xbe\xff\xc6\xfd\x41\x31\x78\x19\xa7\xf3\x7a\x96\x9f\x5f\x4e\xb2\xf5\xb1\xb8\x23\xc5\x29\xb8\xbf\x20\x4a\x7d\x7e\xa3\xc8\x38\x10\x79\x69\xd3\x4d\x94\x2a\x8f\x51\x21\x1c\xcc\x82\x73\x58\xfc\xe8\x3e\xf6\x16\x8f\xed\xc3\xe7\xee\x39\xea\xbf\xfa\xd2\xd8\xe4\x4f\xd2\xfb\xc1\x33\xd5\x5f\x14\x5a\x50\x6a\x2e\xbc\x67\x2c\x9c\x99\xd5\x0d\x26\xcc\x76\x64\x2c\x2d\x1a\x99\x16\xe8\xb2\x80\x54\x22\x64\xf7\xb2\xbd\x78\x96\xe6\xfe\xa9\xe0\x0e\xec\xb0\x9c\xa0\x11\x26\x58\x55\xbb\x7f\xa9\xe3\xf0\xfc\xa5\xf5\x5f\xa8\x9e\x70\x64\x88\xf7\x79\x6c\xbf\xb6\xbd\xf8\xef\xf0\x82\x37\x78\x67\x7a\xb5\xd0\x0c\x95\xac\xd4\x4a\x47\x0b\x67\x7d\x22\xa2\x8e\xce\x6c\xd9\x30\x88\x44\x3e\x84\x8c\xd1\xc4\x40\x2d\x05\xed\x35\x9b\x30\x58\x6b\xf1\x88\x9f\x91\x7a\xdd\xc1\x89\x5e\x2f\xe9\x1f\x84\x71\x3e\x88\xc9\x0c\x5d\x00\xfb\x4f\x7b\x6c\x97\xd4\xba\xfd\xb7\xec\x20\x9a\x66\x49\xc7\xd0\xc4\x5d\x51\x94\x51\x17\xd8\x8b\x14\x58\x56\xf7\x16\x5f\x17\x39\xf2\x0d\xf1\x99\x52\x9d\x28\x84\x83\xb5\xe9\x1c\x9c\x5e\xa3\x2f\x60\x6f\x70\x4f\xba\x97\xb4\x82\x6f\x7a\x1b\x9f\x74\xd6\x01\xe7\xce\x63\xdd\x43\xb5\x99\x8c\xa9\xba\x70\x2a\xc5\x05\x0f\x60\x51\x8c\x53\x69\x01\xbf\xbf\x2a\x4a\x0e\x1e\x10\x18\xc9\xac\x47\xa4\xed\x56\x2b\xa0\x58\xf8\x3a\xc8\xf5\xd5\x16\x45\x11\x21\x26\xa6\xcc\xa3\xf6\xf9\x7a\xeb\x22\xab\x1d\xca\xa1\x11\x97\x96\x6d\xcc\x15\x01\xb5\x36\x53\xcf\x3d\xcd\xb0\x7f\x81\x6d\xe5\x35\x5e\xf0\x90\x77\x52\xff\xd6\xf1\x5a\xb1\x5c\x17\x00\x27\xcf\x72\x2d\x5e\x75\xe2\x65\x0c\x97\x56\x23\xa4\x16\x52\xda\x31\xca\xe3\x22\x00\x25\x4c\x50\x1a\x98\x96\x29\x84\x47\x07\xfa\xc0\x14\x3c\x6e\xad\x36\xbb\x89\x3f\x33\xc1\xb6\x7b\x1a\xf8\x5f\xff\x9e\x20\x31\x3f\x31\xa5\x16\x9d\x0e\x08\x89\x4d\x12\x0b\xc4\x63\x2c\xa6\x1a\xe0\xae\xc4\x68\xd2\x45\x5d\xfc\x26\xb9\x8c\xe4\xb3\xb8\xa8\xdc\x6d\xec\x1f\xee\x60\x6f\x1f\xb1\x73\xe8\xbc\x6e\x24\x78\xf1\xc8\x96\xb3\xe8\x00\x25\x59\x53\xfe\x1c\xae\xe1\xa6\x8e\xdf\x5b\x68\x4b\x80\x92\xa2\x02\x0a\x6c\x0a\x6c\xc0\x04\x58\x39\xf4\x0d\x25\x2b\x46\x69\x96\xca\x09\x52\x93\xe3\xc0\x99\xe0\x7c\x33\x46\x4f\x53\xfc\x27\x2a\x65\x04\x96\x28\x33\x27\xd1\xd0\x18\xce\x53\x10\x7d\x32\x07\x92\x87\xf3\x26\x2b\x84\x05\x84\x45\x5d\x0d\xbe\x2a\x64\xac\x31\xfa\xb7\x41\x7e\xd9\x2e\x8d\x80\x6e\x9d\x23\x81\xfc\x97\x16\x1b\xa9\xe2\x8e\xff\x9f\x5a\xc1\x5b\x5a\x67\x67\xa6\x54\xa7\x10\x16\x51\x1e\x01\x31\x65\x58\xc5\x4f\xb3\x09\xc2\x6c\x48\x18\x2c\x58\x83\xab\xa6\xa8\x39\x03\xbf\x24\x94\x9b\xd4\xa0\x75\x78\x00\xce\x1c\xbd\x94\
xf5\x2e\x43\x0e\xde\x14\xbb\x1f\x2c\x6d\x26\xe9\x40\x96\xf2\xb9\xb3\x8b\x96\x07\x20\x64\x6c\x4e\xa5\x03\xd4\xb5\x11\xb1\xd1\xbc\x88\x3a\x63\x72\xad\x6d\xbf\xbf\x6c\xda\x85\x2a\x76\xe5\xb6\x8f\xb7\xd8\x01\x18\x11\x6c\x81\x14\x7d\x5f\xdb\x9a\x30\x17\xe8\x30\xac\xd2\xa2\xcc\x2b\x90\x91\x3a\xc0\xd4\x49\x56\xd9\xa8\x07\x39\xf2\x15\x48\x23\xaa\xed\xaf\x2a\x8c\x4f\xee\x69\x7d\xb0\xce\x80\x1e\x50\x66\x19\x42\xdf\xe0\x5d\xe8\x24\x9d\x2d\x2b\xca\x97\xe2\x32\x97\x3b\xa5\x91\xf0\x16\xa5\xae\xaf\x3c\x4d\x0f\x54\x22\xef\x83\xa5\x04\x86\x52\xfb\x49\x7b\x9a\x0d\x00\x0e\x1b\x4c\x69\x00\x78\x5c\xb2\xbb\x6f\xbb\xdf\xac\x7e\xd9\x88\xc6\xe9\x2d\x2d\x76\x80\x7c\x1b\x72\x57\xf0\x5f\xd1\x0a\xfe\x9f\x01\xd4\x9a\x72\x7e\x98\xd4\x5a\xda\x4d\x99\x64\xe9\x8a\x3a\x73\x30\x2e\x4a\x59\x6d\x3b\x98\xa4\xb9\x8a\x8b\x55\x6b\x57\x43\x23\x87\x4a\x05\x8f\xa0\x03\xb3\x83\xa4\xd6\x46\x4e\x95\x16\x61\x2d\x01\x22\x90\x48\x82\xc4\xd7\x47\xe4\x5b\x0e\x58\xec\x34\x5b\x87\xd2\xe4\x36\x8c\x73\x3c\x2e\xf8\x4a\x06\x82\x4e\x46\x26\x7d\x48\x69\xa8\x89\x9b\x29\x1b\x5c\x96\xab\x14\xcb\xa4\xaa\xb8\x5e\xb0\x11\xf6\x64\xb5\x3c\x16\x95\x50\xe0\x7f\x69\xe4\xd2\x65\x8c\x1f\x1f\x99\xac\x97\x47\xb4\x19\xfa\xe7\x80\x9a\x46\x5f\x84\xa4\xf8\x28\x8d\x18\x4d\x6d\x3d\x2a\x8c\x40\x45\x27\x76\x93\x53\x93\x3e\x7b\x35\xea\xf5\x44\x5a\x8c\x11\x1b\x24\x3a\xcd\xa2\x76\x9e\x15\x05\x2f\x44\x2f\xca\xa1\x53\xcc\xb2\x26\x7e\x56\xd4\xfe\x54\xe7\x97\x3a\x46\x4d\x55\xa9\xdb\x8c\x12\xeb\xfc\xc9\xc9\xeb\xae\xbb\xee\x16\x38\x87\xd5\x46\x12\xa7\xfc\xec\xe2\xe4\x16\xf7\x89\xd9\x0a\x79\xc1\xc0\x54\xfc\xe8\xeb\xd4\x6a\xbd\xb2\xdf\xde\x2b\xa5\x90\x14\x62\xaf\xf2\xc2\xff\xfc\xde\xe0\xa5\x7b\xd5\x59\x80\x27\x1e\x75\x9c\x8b\xde\x43\xf8\x49\xc7\xe8\xc4\xe4\xf5\xea\x53\x04\x9b\xd0\x2a\x8c\x23\x9c\x2b\xf9\x05\x23\x05\x50\x02\x69\x67\xdd\x5e\x96\xc2\x19\x0a\x20\x1c\x79\x30\x61\x1c\x22\x29\xdb\xb2\x24\x5d\x8f\xec\x1e\x8d\x26\x84\x56\x38\x33\x2b\xab\xc3\x0c\xd3\x2c\x1d\x03\x34\xba\x2c\x27\x16\x85\x8b\x48\x6e\x03\x2e\x22\x21\x1b\x2a\xc5\x3e\x9e\xd4\xdd\xa1\xf6\x44\x02\x02\x10\x10\x9f\x1e\x54\x49\x02\x61\x46\x85\x9c\x9f\x81\x99\x15\x17\x7c\xf6\xcc\x22\x17\x29\x70\xbc\x75\x6c\xf1\x30\x4e\xcb\x3c\xeb\x54\x72\x5b\x28\xe2\x95\x14\x8c\x66\x69\xc9\xf3\xb8\x38\x0f\xb1\x3b\x65\xd5\x3e\xcf\xcd\x58\x84\xd6\xdf\x64\xdb\x40\x4c\x3e\x6c\x0d\xa3\x50\x77\x04\x90\x1b\xd8\x64\xac\xd4\x2b\xf2\xab\xc8\x39\xcf\x63\xd3\x5b\xba\x38\xed\x71\x32\xdf\xa5\xdc\xc9\x1a\xb5\x1f\xa3\x83\x41\xe9\x37\x11\x2f\xe2\x52\x41\x37\xec\xa4\x83\x66\xf4\xec\x31\x55\xe3\x8c\x2a\x9f\xa9\xd8\xc6\x93\xc7\x85\x8e\x50\x46\xc3\xaa\xec\x94\x28\xe1\x87\xac\xf4\x44\xa3\xe6\x1c\x52\x16\xf0\x2c\x47\x3c\xc8\x61\xd9\x7a\xd9\x9b\x64\xf4\x1e\xde\x90\xc8\xfe\xf2\xa8\x74\x9b\x31\x4a\x68\x0a\xe2\xde\x8b\x20\xae\x35\xc9\xda\xe7\x43\x7e\x2f\x79\xb4\xf5\x58\x42\x0f\x41\x7b\xcd\xc0\xc8\x93\x6e\x39\x17\x80\x17\xa1\xbd\xa5\x2b\xcf\x08\xd0\xbb\xbb\x85\x48\xd6\x28\x76\x40\x1d\x89\x6b\x55\x22\x65\x14\x82\x98\xe8\x22\x51\xcc\x28\xec\xa6\x6d\x80\x0b\xdf\x46\xb2\xab\x9f\xdf\xcb\xae\x52\x62\x11\x1c\x79\xef\xdc\x1b\xbc\x71\xef\x9d\xd6\x15\x15\x5e\xa0\xa8\x5e\x90\x19\xc7\xe5\x44\x50\x48\xfd\x32\xd3\x32\x96\x1b\x88\xca\xcf\xcc\x9e\x7a\x16\x9f\x39\x09\x4f\x43\xa9\x38\x8c\xab\x51\x41\xfe\x34\x91\xea\x94\xfa\x0d\x11\x23\x18\xa8\xa6\x61\xd9\xb9\x28\xab\xdc\xca\x24\x4d\x56\x36\x25\xb5\x9a\x63\x13\xa0\x4d\xfa\x35\x8c\x30\x1d\x8c\x29\x56\xf9\x0d\xda\x59\x77\x09\xc2\xdf\x48\x02\xa2\x0f\x80\xa4\xc1\x17\x28\x4d\x93\x4a\xfb\x8f\xaf\xaf\x12\xd3\x28\x9c\xe1\x56\x9c\x02\x78\xcd\x38\xdd\x34\xdf\x6b\x0c\xa6\x4b\x82\x97\x79\x95\xb6\xed\x7d\x3f\x11\xe9\x4a\xb9\xaa\x99\xd3\xa1\x56\x47
\xb4\xef\x46\xa4\xa5\x62\xe5\xd4\xbc\x2c\xb5\xc6\x00\x10\x5a\xb5\xce\x33\x5e\x5b\x6d\x17\xd2\x82\x30\xf4\x8b\xb8\x20\x4f\x95\x51\xfb\x70\x85\x7e\x91\x5b\x15\x76\x35\x8f\xf8\xf5\x47\x6e\xe1\x63\x0a\x31\x03\xf8\x6b\x78\x86\x90\x8d\xf4\xd8\xb1\x23\x47\xf9\x24\x25\xc0\xca\x72\x7e\xc3\x91\x23\xd8\x97\xf3\x22\x2a\xb2\x94\x4c\xa7\x84\x5d\x50\xb8\x39\x80\xe6\x38\x93\xa5\x6d\xfb\x59\x97\xb3\x2a\xd5\x66\x1f\x94\xfb\x93\x24\x2b\x75\x9a\x6d\xdb\xca\x8a\x12\xa4\x14\x40\xfb\xfc\x90\x9a\xb1\x49\xdf\x82\x82\x41\x01\x0a\xb0\xa7\x8b\x9d\x97\x6f\x8c\x21\x38\x62\x55\x44\x1d\x91\x1f\x0e\x19\x9b\xa0\x64\x0d\xca\x25\xae\x56\x83\x43\xdc\xfd\xa8\x9f\xc5\x1a\x7a\xd6\xee\xdb\xa2\xd8\x0b\x5b\x6c\x0f\xa2\x1d\xfd\x7f\xf0\x82\x2f\x7b\xa7\xa3\x1e\x1e\x13\xb0\x5f\x9c\x17\x7d\xdc\x57\xc8\x86\x34\x10\x5c\x06\x9b\x0b\x04\x35\xa0\xd8\x29\xbb\x63\x25\xcb\xe5\xcf\x43\xa0\x3e\x92\x3f\x35\x01\xa4\xa7\x91\xbb\x23\xe5\xa9\x33\xd9\x81\x0d\x49\x8c\x0a\x69\x2f\xf3\x0c\xd3\x63\x63\x19\x10\x43\x7a\x09\x42\x3b\x7e\xde\x46\xf2\xfa\x9f\x7a\x6c\x5f\x21\x92\xe5\x53\x71\x7a\xde\xff\x8a\x17\x7c\xde\x5b\xa0\x5f\x78\x24\x9e\x9d\x3f\x55\x17\x1f\x2d\xf5\x70\x0b\x82\x17\x63\x53\xd3\x73\xf3\xd3\x93\x13\x8b\xd3\x53\x36\xa5\x19\x82\xd2\xcb\x0c\x42\xd4\x7b\xd1\x4a\x64\x4a\xd7\x84\x5a\x47\xc3\x63\x47\x78\x2e\x12\x11\x15\x42\xcf\x54\xbd\x28\x7b\x49\x94\xa6\x5a\x1a\xb5\x84\x86\xa3\xe1\xb1\xa3\xea\x35\x47\xf8\xfe\x2b\x8f\x7d\x87\x12\x69\xee\xcc\xa3\xb6\x98\x13\x79\x0c\x59\x55\x00\x1d\xf4\x05\x4f\x71\xc8\x7c\xd8\x9b\xd5\x08\x3b\x02\xcf\x68\x95\xb5\xa6\x23\xc3\x8e\x2d\xcb\xc2\x40\x1b\x4d\x3f\xa2\xc4\xba\xd8\x6c\xaa\xaa\x89\x5a\xd6\x52\xbd\xa5\x08\x6a\x51\x16\x1f\x14\xba\x54\xb8\xab\x4e\x0f\xa4\x44\xaa\x62\x35\xcb\x4b\x91\xca\x35\x64\x7a\xdc\x25\x31\x3a\xc0\x9e\x3c\x50\xa0\xff\xca\x03\x97\xae\x6f\xfc\x1a\x9b\x6a\x6a\xe0\xfc\xc9\x49\x0e\x12\x7a\x47\xf1\xae\xe1\x4e\x53\x6a\x59\xc6\x36\x38\xea\x73\x06\xc5\xdc\xba\x4e\x46\x3a\xb2\xbd\x9b\xca\x9e\x89\x74\x4f\xeb\x4e\x42\x55\x01\xf4\x2c\x33\x0b\xe5\x0a\x18\xb5\xcd\x0d\x9d\x38\x17\x00\xc4\x29\x44\x89\xc0\x02\x14\x69\x60\xd3\xc3\x63\xc9\x78\x89\x0b\x8a\xe3\xd3\x53\x4b\xc9\xe2\x87\xd2\x8c\x4b\xfd\x54\xe4\x7c\x2d\x26\xf1\x47\x0e\xa5\x83\x42\x21\x22\x7b\xc8\x38\xa4\xb3\xfe\x2f\xa1\x5d\xeb\xf0\xe0\x36\x6a\xcd\xf8\x51\x44\x10\x3b\x92\x64\xa1\x45\x39\xb4\x23\xf1\x89\x02\x9a\xa0\x0e\xc6\xfa\x83\x9a\x45\x06\xf8\x56\x47\x9d\x6e\x5a\x92\x52\x97\xec\xeb\x33\xaa\x9a\xc6\x99\x46\x89\x11\xf5\xf9\xae\xd4\x34\x30\xd6\x51\xfe\x23\xd2\xfe\x14\x3a\x56\x27\x69\x47\x60\xc4\x28\x8f\x12\x29\xd9\xad\xac\x22\xca\xd2\x9d\xaa\xdc\xa8\x28\x94\xf3\x00\x1f\x50\xbd\xdc\xcb\xe3\x2c\xd7\x26\x7f\xd9\x4d\x35\x98\x4e\x84\x09\xf5\xd0\x69\x4a\xe8\x17\x30\x7c\x62\xb2\x03\xa3\x3b\xc5\xa9\x83\x89\x5b\xac\xa7\x92\x92\xe3\x53\x22\xfe\x1d\x83\xaa\xad\xf9\x65\x53\x8c\x90\xdc\x5c\xcf\x1c\xa4\x65\x49\xc8\xc6\xa3\x70\x81\x51\x69\x55\xda\x90\xbf\x4a\xd6\xc5\x23\xbe\x1a\xe5\x9d\xa6\x4a\x0e\x2d\xcc\xdc\x79\xf7\xcc\xa9\x53\x87\x07\xaa\x43\x21\x17\x6a\x69\x27\x22\x4a\xab\xde\xa8\xad\xc6\xc9\x6f\xd7\x1b\xcb\xc4\xdc\x0c\x78\x42\xe1\x06\xec\xdd\x6d\x30\xbe\xa4\xa2\x5c\xcf\xf2\xf3\x80\x1e\xc7\x85\x3f\xea\xec\x65\x10\x2a\x50\x82\x50\x22\x05\x1a\x3d\x5d\x6d\x0f\xc6\x28\xaf\xd2\x12\x62\x32\x01\xf4\x92\x22\x16\x33\x03\xea\x3b\x32\xcb\xc3\x28\x22\xb4\x5b\x9e\x98\x1d\x81\x1f\x2a\xdc\x71\x97\xf3\xde\xdd\x33\x3b\x26\x71\xa3\x9c\x84\x83\x8b\x3d\x1b\x88\x56\xd1\xa2\xaf\xde\x04\x86\xdb\x03\xb6\xb6\x89\x6c\x64\x5d\x7c\xd4\xac\x04\xaf\xf5\x18\x33\x8c\x64\xfe\x43\xfa\x24\x4a\x27\x78\x21\x5b\x26\x07\x90\x50\xdf\xce\x71\x1c\x99\x64\x7c\x36\xa3\x99\x52\xdf\x91\x8b\x0e\xe0\x57\x5b\x3a\xad\x9
d\xb3\xe3\x1b\x23\xec\x89\x90\xef\x53\x33\xed\x16\xfe\x1f\x8e\x04\x1f\x69\xa9\xec\xe6\xca\xd6\x8d\xac\xf1\x16\x81\x9a\x15\x51\x3c\x71\xea\x94\x7e\xce\xd6\x4e\x21\x7c\x06\x46\x8e\x96\xab\x3b\x13\xd5\x01\x51\x23\x34\xb2\x55\x1b\x63\x83\xb0\x28\x01\x22\x4b\x7a\x22\x5d\xdb\xa6\xdb\x30\x96\x09\xa8\x80\x50\x06\x99\xa1\xd4\x52\x6f\x5a\x24\x0e\xea\xa2\xe5\x93\x02\xbf\x55\x25\x94\x0b\xc8\xf8\xa5\x5c\xce\x74\xed\x1a\x35\x85\x38\x3a\xe7\x34\x9b\x64\x13\x97\x82\x0d\x70\x06\x65\xeb\xba\xea\x23\xde\x77\x6f\x92\xec\x79\x8f\xbf\xab\x8a\x3b\x8c\x3d\xb4\x8b\x5d\xdd\xb5\xbd\xad\xfe\x5f\x8f\x04\x6f\x1a\x71\x1c\xb0\x1c\xf2\xb4\x5a\x0e\x74\xdb\x3b\x66\x48\xee\x61\xa6\x90\x17\x4d\x73\xb6\x38\x24\x0e\x10\x19\x86\xa5\x18\xb3\x6f\x37\x2b\xe4\x81\x2d\xa5\x2d\x0d\x99\x59\xcd\xaa\x42\x9c\x17\xa2\x17\xa7\x2b\x78\xcc\xa2\xbb\xca\xf8\x27\x50\x7f\x49\x0f\x96\x1a\x61\x47\x27\x56\x95\x76\x44\x0e\xb0\x09\xeb\xc4\x0d\xf9\x84\xf1\x63\x93\x6c\xaf\xe4\x87\x83\x05\x1c\xd9\xa3\xce\x8c\xd2\x17\x33\x13\xfe\x4b\x51\x47\x6a\x15\x62\x4a\xdd\x5e\x54\xae\xa2\x4b\x34\x68\xc7\x63\xed\x4e\xa0\xd3\x1e\x5b\xbd\x11\x6b\xb2\x23\x95\xe9\x58\xf5\x9d\x82\x15\xe8\xc6\x81\xc6\x51\x33\xe5\x9b\x9d\xd0\x99\x54\x33\xec\x4e\x36\x7d\x09\x93\x6a\xd0\xb9\xce\xfe\xae\x45\xb1\xe7\x7f\xde\x0a\x7e\xbb\x35\x3b\xe8\xb7\x33\x09\x3c\xb5\xd1\x1d\x38\x9c\xb4\xca\x0d\x4d\x46\x7b\x37\x20\xad\xc9\x6c\x6f\x49\x08\x45\xd6\xb5\x59\x20\xba\x90\x2c\x51\x7e\xb2\x12\xcf\x20\x7c\x51\x9f\xf3\xa2\xb6\xcd\xc9\x53\xa8\x27\x35\x88\x3c\x96\x72\x27\x3a\x01\x6c\xe7\x70\xa8\x95\x4e\x4c\xf3\x17\x03\x0a\xd7\xe2\x4d\xd2\xce\x2a\xa3\x26\x92\x42\xe7\x60\x13\x4d\x7f\x5e\x16\xb7\xdd\x86\xec\xdf\xaf\xd9\xcd\x9e\xa8\xba\x44\xf9\xf5\xff\x69\x57\xf0\x97\xbb\x26\x52\x9e\xf5\x22\x60\x13\x01\xd9\xac\xc4\x08\x19\x87\x45\xca\x00\xcc\x6a\xde\xfe\x5a\x0c\xa3\x56\x64\x97\xfa\xd4\xd3\xe0\x85\x32\x87\x35\x8c\x9d\xda\xba\x61\xbb\x56\x14\x9c\xa0\x83\xa8\xd7\x65\x1f\x66\xbd\x32\xee\xc6\x45\x19\xb7\x65\xaf\xa1\x3b\xbc\xdd\x1f\x55\xbe\x3d\x59\x26\x90\xd1\x19\x7b\xc3\x3a\xa8\xc0\x06\xed\xe1\xa6\xc1\x82\xbc\x80\x0e\xbd\x8c\xed\x37\x90\x53\xb0\x94\xa3\x46\x20\x0a\x05\x12\x2e\x54\xe7\x40\x6a\x6a\xe4\x42\xab\x52\x13\xe8\x06\x40\x83\xcc\x36\xf5\xa0\x33\xad\x6b\x69\x54\x88\x1b\x41\xab\x26\x48\x49\xed\x2a\x89\xf2\x8d\x5b\xb6\x35\x07\xc4\x3d\x4e\xe8\x46\x49\x26\x1e\xd3\x68\x6b\x18\x64\xfb\x1f\x75\xd9\xc3\x1a\xa8\x31\xda\xe1\x00\xd4\x09\xc1\xa8\xc5\x80\xb5\xe4\x33\xfb\xd8\x35\x36\x94\xb3\xd7\x2b\x20\x09\x03\xda\x2d\x16\x04\x31\x52\xf9\x6f\xdf\x17\xdc\x51\xbf\xb8\x51\x9a\x28\x45\x78\x66\xde\x09\x2f\x7a\xfb\x14\x67\xae\x03\x3e\xf8\xd4\x5e\xf6\x25\xc8\x12\x43\x64\xbc\x9f\xd5\x64\xbc\xef\xf7\xea\x34\xbc\x16\xd5\x6b\xd2\xe7\xd9\x12\x3a\x43\x07\x49\x79\xb7\xcd\x3c\x29\xb7\xe4\x24\x8b\x3a\xc8\x41\x49\x66\x9a\x71\xcb\x7c\x63\x2e\x8f\x5f\xb3\xbe\x1a\x95\x63\x71\x31\x16\x8d\x35\x3e\xe0\x26\xd0\xf6\xd8\x93\x75\x46\x33\x4d\x38\xdc\x53\x9f\x78\x9f\x4b\x5c\x69\x72\x9f\x69\x7a\x61\xc8\x8a\x83\x27\xa6\x8e\x1e\xed\xc6\xa9\x9c\x82\x7d\x32\x71\x1c\x36\xd6\x0b\x05\x95\x2e\x44\x8d\x21\xfc\x1b\x2e\x83\xf6\x9f\x78\xc1\xec\xbc\x3b\x7c\x84\x64\x35\x4d\x40\x02\x5d\x93\x81\x31\xb2\x4b\xb7\xe8\xf3\x50\x0e\xb5\x0f\xab\x9b\xd9\x8d\xec\xfa\xe1\x78\xe1\x81\x49\xa6\x99\xae\xa5\xd0\xb3\xb1\x28\xb3\xd7\xdf\x5d\xf6\x7b\x52\xe8\xd9\xb2\x29\xff\x65\x1e\x7b\x0a\xa8\x24\x10\xe7\x6c\xd1\x3e\x17\x6a\x14\xfe\xfd\xe2\x20\x37\xb1\xa1\xfa\xa4\x68\x70\x87\xb6\x8b\xae\x91\x44\x2e\xb5\xb4\x52\x74\x7b\x89\xc5\xb7\xad\x06\xb0\x3e\x10\x3f\xd4\xc8\x73\x1c\x2b\xcd\x60\xf1\xcc\x20\xcf\x71\x2e\x96\x13\x15\x69\xd0\xa0\x0f\xd4\x17\x05\x41\x04\xac\xa5\xe7\x34\xe0\x7b\xd9\xd5\x30\x
a5\x74\x37\xdc\xa9\xba\xe1\x98\xdb\x0d\x38\xf1\xf4\x44\xdc\x7c\x8e\xbd\x7c\x97\xb3\x99\x0c\x23\x5b\x93\xda\xc6\x3b\x3d\x15\xd5\x9f\x2b\x84\xd0\x00\xfd\x5a\x03\x51\x9c\x32\xea\x80\x1c\x82\xf1\x3c\x8a\x2c\x59\xca\x9f\x13\xb3\x53\x0a\x2e\xb8\x98\xf5\xb2\x24\x5b\xe9\xdb\x15\x23\x1c\x51\xc7\x0b\x80\x6f\xb0\x5a\xa2\x8d\x1f\x1c\x12\xb5\x96\xba\x28\xa9\x9f\x6d\x41\x8e\xb6\x3a\x77\x86\xff\x02\x2f\xb8\xd1\x64\x94\x73\xbf\xc3\x69\xea\x12\x7e\xe5\xc1\x42\x85\xe3\xdb\x6b\xe6\x36\x76\x9c\xdd\xbc\x2d\xe2\xba\x79\x53\x36\x7b\x8d\x47\x51\x74\x24\xd7\x5f\x42\x9b\x34\xc4\xf3\x72\xb5\xe9\xab\xfb\x1d\x6a\x3a\x4a\xf0\x43\x79\x61\x10\xb6\x31\x9f\x25\xe2\x44\x0c\x66\x1a\x48\x1c\xf5\xbe\xfd\xc1\x87\xbc\xe6\x7b\x4d\x79\xa4\x06\x9f\x84\x20\xc9\x5e\x2e\xb4\x17\x63\xed\x68\x78\xf4\x26\x88\xf9\x8e\xd6\x32\x9c\xd6\xc3\xf2\xdd\x8c\xaf\x1d\x6d\x28\xf1\x14\x78\x3b\xe5\xd9\x4d\x34\x01\xca\x58\xb8\x44\xf2\x86\xaa\xe6\xd8\xb1\xb0\x31\xa5\xd5\xef\xed\xb9\x12\xb6\xb0\xd3\xb0\x85\x07\x55\xaa\xa9\x07\x82\xef\x6e\x48\x35\xd5\x30\x0f\xb6\x3e\x8d\x37\x99\x97\x57\x42\x26\x76\x10\x32\xf1\x18\xe5\x72\x7b\xc4\x7b\x70\xf3\xb8\x85\x7b\xfd\xb3\xdb\xc8\x79\xd5\xbc\x11\xb8\x19\xb0\x28\x88\xe1\xa3\xfb\xd9\xbd\xe6\x5b\x4c\x7c\xe6\x98\x06\x99\x99\xcf\x72\x1e\x00\xc6\x62\xc8\x93\xa3\x48\x38\xa7\x74\xf7\x90\xdc\xfd\x7b\xfb\x82\xf9\x8d\x1f\xb1\x58\x56\x28\x86\xc4\xc8\x20\xc3\xde\x74\xf6\xa7\x7f\xde\xcb\xde\xd9\x72\x84\xc3\x37\xb4\x82\x59\x3b\xbb\x0a\x55\x40\x65\x4b\x39\xc0\xd2\xa1\xa2\x02\xd8\x11\x51\x40\x1c\x5e\xa1\x59\x8d\x09\xbb\x9f\xad\x0e\x9b\x09\x97\xa9\xf7\x1c\x81\xf2\x3b\x06\xa9\x74\xbb\x51\x4f\x4a\x93\x85\xbf\xdf\xdf\x3b\xc6\x49\xa0\xe4\x43\x49\x77\xf7\xf8\xbb\xba\x51\x8f\xb1\xdf\x1a\x61\x4f\x40\x2c\xa8\x26\xa0\xfc\xe5\x91\xe0\xdd\x23\xee\x35\x74\x18\x01\x16\x59\x53\x24\xca\x4d\xca\x69\x2b\x09\x98\x10\x22\x00\xdc\x0c\x1a\xdc\x1d\xf2\xc5\x3c\x6a\x9f\x37\xb1\x17\x86\x89\x11\x53\x49\x45\xbc\x1b\xaf\x90\x08\x08\x16\x29\x20\x67\x45\x88\xaa\x7e\x36\x4e\xb9\x28\xdb\x24\x0c\x69\x0f\x5c\xb7\x42\x3f\x19\x90\xec\x9a\x52\x2c\x6b\x68\x3b\x4a\xb9\xec\xc5\x62\xd5\x79\x02\xb8\xa6\x31\xe4\x5a\x19\x20\x0e\x89\xb4\xa8\x72\x64\xad\xe5\x59\xd2\x71\x80\xd7\x89\x58\x06\x18\x21\xf1\xd1\x1e\xd6\x26\x82\xd4\xf6\x6e\xe4\xa2\x28\x07\x58\x30\x69\xf3\x23\x8b\x6e\xc8\x75\xb7\x5a\x2e\x2b\xc7\xe7\x7a\x4e\xce\xc0\x50\xbd\x7f\x8e\xaf\xaf\xc6\x90\x58\x4c\xf4\xc9\xdf\x61\x5b\x88\x37\x62\x8d\xfc\xba\xc7\xae\x8e\x20\x4a\x59\x74\x00\xfc\xee\xff\xbe\xc7\x3a\x8f\xf2\x54\x85\x8a\x82\x07\x9d\x7a\xa1\x0b\x95\x25\xd2\xb2\xaf\x46\xed\xb2\x02\x83\x28\xc6\xc7\x28\xc0\x02\xd4\x0d\x21\x0f\xd9\x9a\xdc\x44\x8d\xfd\x63\x38\xe6\x07\x66\x08\x74\x1c\x7b\x3d\x63\x53\x5b\x60\xe4\xa8\xf3\xc3\x3a\xcc\x1f\x20\xbd\xfd\xca\xfe\xe0\xee\xcd\x1e\x72\x0f\xee\x8d\x9e\x6e\x96\xa6\x7e\x7f\xef\x15\x69\x6a\xa7\xd2\xd4\xab\x3d\x25\x4e\xbd\xd8\x0b\x9e\x76\x6a\x2b\x63\x61\xaf\x9a\x79\x36\xc7\x66\xb7\x4d\xe8\xb2\xe1\xf4\xb9\x22\x64\xed\x40\xc8\xfa\xbc\x1d\x97\xfa\x8b\x97\x16\x81\xa8\xc5\xac\x17\x7a\x5a\xce\x82\x45\xfa\x98\xc5\xa0\x5a\x1f\xf7\x88\xf7\x42\x6f\x73\x79\xee\x39\xfe\xb3\x4d\x1c\x6a\xe3\xa4\xab\x89\x74\x9b\x6d\x4d\xf5\xf4\xa6\x3f\xcf\x9c\xbc\x50\x0d\xb6\xd1\x9e\x68\xfb\xaf\x66\xc1\xf5\xee\xa5\xe6\x10\xff\x06\x73\xa8\xd2\xc8\xdd\x0c\xc8\xfb\xd9\x43\x2d\xf6\xc4\x9a\x89\xcf\xff\x4b\x6d\x15\xfd\xb2\x77\x3a\x4e\x6b\x64\xea\x0a\xcd\x24\x25\x01\x62\x53\xe0\xa9\x58\x4f\xfa\x0a\xf2\x0f\x26\x2a\x13\x48\x82\xa6\x1d\xc5\x3d\x05\x70\x6c\x4c\x4e\x65\x70\x09\xed\x3c\x2a\x56\xc1\x23\x07\xce\xba\x92\x50\x33\x16\xa9\xa1\xb6\x14\xba\x7c\x13\x47\xf8\x21\x59\x99\x66\xbe\x6b\x78\x81\x47\x05\x2f\xb2\x2c\x95\xff\xc6\x14\x17\x10\
x75\xfa\x87\x1d\x6b\xd2\x4f\xb6\x2c\xa3\xf0\xeb\x34\xc5\xd3\x3f\x0f\x18\x85\xed\x74\x3d\xe8\x19\x37\x66\x60\x93\x75\x92\x32\xfd\xd7\x63\x4e\x14\xd9\x92\xb8\x40\xc9\x0d\x1e\x14\x79\x86\x4e\xc8\xd4\xc2\x32\xda\x1f\x78\xf4\x5b\xc7\xba\xfc\x4f\x2d\xa6\x67\x91\xff\x57\xad\xcb\x40\x3b\xfa\x9f\x5a\x36\x25\x68\x44\x74\xa9\x10\x90\xc4\x33\x90\x53\xb5\x71\x94\xa6\x93\xc9\x9c\xe9\xf0\x74\x84\x48\x2b\xd9\x8c\xb8\xb4\x52\x6e\xda\x8c\x6c\x4b\x96\x63\xde\xc0\x0e\x6c\x7b\x23\x9f\x29\xeb\xf9\x3a\x6d\xe3\xab\x31\xad\x6d\x77\x88\xb6\xc4\x28\x7b\x0d\xfc\x3b\xa6\x21\x9e\xec\x47\x5b\x6c\x9f\xaa\xdb\x7f\xf1\x56\x12\xcb\xcf\x65\x9d\x45\x7a\x01\x58\x41\xbe\xe0\xa9\x9f\x6a\x3a\xdb\x0e\x3d\x37\xe9\x26\x7c\xa9\x0e\xf6\x90\x9d\x45\x8b\x3b\x06\x5a\xe2\x6a\x79\x39\x6e\xc7\x08\xe5\xa7\x15\x22\x85\x46\x74\xd3\x0d\x01\xe4\x5e\xa6\x59\x7b\x4d\x2f\xeb\x8c\xa9\x8e\x60\x5f\x6e\xb1\xa7\x6d\x92\x44\x87\xd4\xdb\xf7\xb6\x82\xdb\x06\x2f\x6f\x39\x83\x8e\x6b\x9f\xfd\x7d\x8f\xfd\x8e\x27\xd7\x06\xe6\xe7\xf9\xa2\x17\xbc\xcc\x53\x39\x79\x8c\xd4\x69\x65\xe4\xaf\xa5\x9e\x51\xd1\x17\x6e\x15\x52\x1b\xdb\x6e\x5a\x1e\xd4\xdb\xdc\xe2\xb7\xe1\x20\xd9\x20\xeb\x10\xfb\xec\x2e\x36\x6a\x3d\xea\xe8\x18\x68\xc2\xa2\x27\x4f\x44\xed\xf3\x22\xed\xf8\x17\x77\x05\x53\xee\x25\x3b\x89\x43\x92\x68\xf1\x47\x25\xb6\x58\x89\xd7\x44\xaa\x3f\x0e\x9c\xae\x59\x5e\xcb\x2c\xf4\xc9\x11\xf6\x0d\xf0\xd9\xa1\xe8\xe5\xff\x99\xc7\x8e\x6f\xfa\x39\x8b\xfd\x9e\xe8\x9c\xca\xda\x51\x52\xcb\xf4\x1d\xbc\xdb\x9b\xb7\xe0\x63\x51\xca\xf5\x03\xb6\xe2\x69\xc1\x8c\x0d\xda\xcc\xd2\x68\x20\x38\x90\xb4\x49\x35\xee\x16\x64\xc9\x46\xa8\x59\x69\x15\xe9\x43\x67\x55\x9c\x61\x61\x92\xfb\xe2\x4e\xa3\x68\xcd\xf4\x61\xc0\x66\xd8\x01\xeb\x2d\xff\x78\x30\xb6\xe0\xd0\x0c\xa6\x83\x99\xc4\xdb\x42\x97\xec\x60\x97\x9f\xa7\x8b\x82\x4c\xd9\xe9\x56\x49\x45\xaa\x32\x4e\x42\x39\x68\x65\x1e\xce\xa4\xe5\x99\x7c\x01\xca\xab\xb7\x04\x72\x17\x0d\x6f\x09\xfb\xd0\x01\xf6\xaf\x1b\x72\xf0\x5b\xc6\x2f\xff\x8d\x07\x82\x97\xda\x26\x79\xb2\xc3\x53\xac\x27\x50\x02\x8c\xf2\x24\x5b\x81\x98\x6c\x10\xc7\x28\x41\x0c\xd2\xf7\x21\x21\xa8\x8d\x4e\xb0\xda\x21\x77\x28\x2e\xa5\x46\xc4\x78\x59\xc6\x36\x9e\xe5\x0d\x26\x38\x77\x12\xbe\x7a\x3f\xfb\x8f\x2d\xf6\xc4\x68\x65\x25\x17\x2b\xb0\x21\xc9\xca\xfc\xd7\x6f\xb4\x0d\xab\x2f\x9c\x70\x5f\x0a\x7e\xd7\xab\x5d\xa9\x47\xf7\x58\xf9\xaa\xcd\xf2\xa1\x84\x0f\x4b\x55\x9c\x20\xfe\x01\xbf\x56\x7b\xca\xac\x2f\x40\xd4\xdc\x60\x1d\x84\xcb\xa5\x94\x2f\xf8\xbe\xdc\xb5\x2d\x93\x8c\x42\x55\x41\x22\x0e\x80\x3c\xeb\xb0\xa7\x32\xa3\x57\xd4\x89\x50\x94\x59\xb7\x67\x00\x0b\x16\x42\xed\x8a\xda\x7c\x85\x3b\xe9\xf1\xd3\x51\x57\x2d\x15\xf5\xfb\x77\xca\x9c\xb4\x91\x2b\x80\x3d\x57\xf1\x11\xe7\xc1\xf5\xb8\x36\x28\x4b\x6f\x92\x40\xe7\xd9\xdb\x52\xd3\x42\xdd\x7a\xe2\x36\xb5\x97\x98\x12\x1f\xf1\xee\xdf\x5c\x6f\xbd\xd3\x9f\xbe\x34\x3f\x44\x5d\x3f\x7d\x1d\x73\x36\xef\x06\x09\xd3\xff\xeb\xfd\xc1\x8c\xf5\xdb\x3e\xf9\x0d\x60\xc1\xc0\xe5\x00\xd3\xda\x8b\x05\xc9\x5a\xbd\x5c\xa8\xbc\x77\xbd\xac\x46\x93\xf7\xdf\xf7\x5d\x59\x0f\x3b\x58\x0f\xbf\x66\xdb\x6c\x3e\xb9\x63\x2e\xb1\x87\xbc\x0d\x96\xc4\x63\x06\x77\xff\x73\xcf\xd2\x86\x7e\xc7\xbb\x04\x6d\xe8\x5d\x9e\x35\x55\xcd\x79\x64\x74\xce\xba\xe6\x63\x9c\x05\xb6\x1a\x18\x3e\x16\x04\x89\x57\x0e\xd4\x1d\x1e\xa8\x8f\x78\x0b\x9b\xef\x96\x47\xfc\x50\xef\x96\x41\xa0\xb7\x45\x6b\xe2\xd4\xb7\xc5\x0f\xee\x62\xb7\x5a\x53\x8d\xf8\xb6\x95\x76\xb4\x50\xf5\x08\xfe\x13\x25\xc8\xd4\xbb\x40\x88\x31\xca\xe2\xe1\x7f\x73\x24\xe8\x6c\xfa\x94\x4b\x89\x44\xf7\x10\x5f\x84\x19\xef\xf0\x29\xe5\xa1\x21\x12\x11\xfb\x61\x77\x3b\x7d\x78\x84\xbd\xac\xc5\xf6\x40\x9a\xc7\xc2\x7f\x7e\x2b\xf8\xa8\x87\x7f\x6b\x57\x90\x0a
\x54\xa4\xcb\xd9\x32\xe4\x85\x53\x8d\xc4\x2e\x2a\x42\x2e\xc5\x4c\xc8\x0e\x04\x53\x06\xe9\xbb\x32\x0e\x91\xf8\x26\x85\xc5\xe0\x9b\x28\x82\xc2\xf5\xbe\x79\x0e\xb3\x4e\xea\xa0\x09\x08\xe6\x86\xa4\xbd\x48\x8d\x9c\x76\x4c\xce\x2f\x38\x45\x4e\x57\x45\x39\x5f\xa5\x13\xae\xb2\x7b\x1d\x3b\xba\x41\xea\xde\xda\xf8\xcc\x4c\xcd\xcb\x4a\x59\xc9\x76\xc9\x63\xdc\x4f\x82\xe7\xe4\x24\x2a\xbb\x5d\xad\x77\x83\x4e\xdc\x26\xa2\xfd\xa8\x6c\xea\x13\xcd\x36\x43\xba\xe2\x82\x68\x57\x79\x5c\xf6\x27\xb3\xb4\x14\x17\x5c\x1e\x97\x5f\x7b\x32\xfb\x77\x76\x36\x4d\x9d\xf1\x52\xb7\x6f\x2e\x8f\x33\x78\x3b\x89\x8a\xc2\x7f\xfb\x93\x83\xdf\xf3\xac\x48\xd8\x31\x34\x38\x0e\xd0\x0b\x3a\x6f\x61\x30\x9b\x06\x29\x2d\xf5\xad\xcc\x9a\x06\x8a\x34\xee\xbc\x13\xd6\x8a\x50\xd3\xaf\x1b\xf5\x40\xdf\x82\x7d\x30\xc2\x28\xbb\xb8\xec\xf3\x36\x3c\x05\x8a\x28\xed\x0d\xfa\x16\xd9\x0e\x15\x2f\xcb\xa2\x0e\x9b\x27\xfd\x0c\x52\xfb\x01\x5e\x9a\x9e\x0c\x2f\x7a\xbb\xe1\x09\x67\xc6\x7e\xf8\x09\xec\xa7\x47\xd8\xd5\x2b\x49\xb6\x14\x25\x64\x1e\xf5\x5f\x37\x12\xbc\x68\xc4\xb9\x64\x31\xf0\x3b\x3c\xd1\xee\xe7\x58\xa9\x60\x2c\x73\x31\x51\xb9\x51\x41\xfa\x03\x00\x80\xa0\x4f\x84\x4e\xc6\x75\xde\x1e\xd9\x74\xb7\x0b\x28\x0a\x37\x4b\x45\xad\x46\x45\x03\x1e\xe5\xe7\xb1\xaa\x73\x4e\xb3\xcf\xd9\x39\x23\x96\x6b\x01\x40\x4e\x49\xa2\x20\x9a\x00\xbd\x4e\xe2\xbc\x5e\xd8\x60\x8c\x11\x71\x0a\x74\xa3\x24\x91\x3b\xb6\x26\x1f\x2b\xaa\xf6\x2a\xc7\x97\xf5\x87\xd7\xab\x53\x67\x20\x4c\xea\x21\x9d\xe4\xe6\xd0\xbe\x22\xa9\x5d\x91\xd4\x1c\x49\xed\xe1\x16\x7b\x52\x2f\x17\xa2\x0b\xe7\x14\x65\x5e\xf8\x4b\x2f\xf8\x3d\x6f\xae\x76\x55\x6d\xbc\xf4\x0b\xd6\x1e\x3d\x92\xae\xe0\x32\x84\x79\x2f\x0f\xa7\xdc\x4c\x3f\x7e\x06\xf3\x9b\xce\xe2\x12\xa2\x52\x4f\xc9\x87\xe6\xf4\x33\xb6\x53\xa5\xe9\x09\xb9\xf2\x20\x38\xb9\x1e\x48\x2e\xb7\xe2\x31\x32\x3d\xad\xd8\x01\x1a\xb3\x59\x3a\xa7\x5b\xa7\x8b\x59\x16\x91\xec\x91\xb1\x15\x4d\x7c\xa8\x88\x1a\x3d\x86\x3b\x9b\xff\x13\xda\xbd\xf6\x52\x6f\x71\x80\x0d\xb0\xbe\xa7\x28\xaf\x12\x9c\xcb\x80\xc9\x30\x8f\xc0\xa6\x04\xfd\x92\x8b\xb6\x88\xd7\x4c\x16\xfd\x3e\x6e\x52\xae\x85\x30\x2e\x68\xaf\xc6\xd3\x29\xce\xd1\x53\xd7\x13\x6d\x17\x5b\x7d\x45\xd8\xdc\xa1\xf5\xe6\x65\x1e\x3b\x80\xea\x2f\xcc\x6e\xff\x07\x82\xcc\xfa\x49\xe6\x3e\xc3\xcc\x47\x5b\x23\x25\x72\x42\xd4\x0d\x71\xdc\x14\x1c\x82\xbd\x12\xe8\x70\xcc\x1c\x9a\x36\xcd\x13\xeb\x54\x93\x5b\x75\xe8\x88\xbe\xf1\xe6\xa2\xef\x49\x7f\x4a\x8b\xbe\x03\x12\x82\x91\x84\xed\xd3\xa1\x11\x9f\xf8\x81\xfd\x36\x1d\xe9\xa6\x42\x0d\x40\x79\x1e\xde\x1f\xdc\x38\x70\xb5\x09\x82\xed\x7e\xb2\x28\x9a\x51\x3b\x9f\xbc\x82\xda\xd9\xf1\x04\xee\x2b\xd0\x4e\x2f\xf8\xb7\xb1\xc2\x40\xeb\xc0\xeb\xba\x90\x29\x1c\x04\xf4\xd3\xd9\xad\xec\x96\xa1\x22\xf8\x66\x33\xe2\x8a\xfc\xb0\x03\xf9\xe1\x33\xb6\xfc\xf0\xd1\x1d\xa2\x73\x7e\xb0\x19\x9c\xf3\x2d\xcb\x0f\xbf\xc5\x6d\x6b\x28\xb4\xfa\x6f\x3c\xb6\x78\xa9\x88\x4b\x9c\xc8\xcf\x58\x38\x33\xbb\x00\xeb\x7e\x2e\xcf\x7a\xc5\x99\x7c\x42\xae\x08\xff\xcd\x5e\xf0\x06\xaf\xf9\x9e\xed\x7d\x8e\xec\x88\x5c\xa9\x32\x10\xab\x17\x30\xfe\xd6\x5e\xa7\x9c\xa2\xb0\xe2\xe4\x72\xac\xdd\x0e\xf9\xe9\x28\x96\xba\x08\x70\x0b\x00\x64\x57\xe4\x71\x94\x90\xdd\x97\xf7\xaa\xbc\x97\xc9\x0d\x94\xfd\xf0\x13\x1a\x71\x47\x26\x0b\x29\xe0\x8e\xfe\xe0\xea\x60\xd6\xbd\x34\x14\x77\x64\x93\x46\x50\x86\x11\x4d\xf8\x6e\x8a\xb0\x11\x49\x17\x3d\x6d\xc8\x73\xf6\xf1\xaf\x5d\x75\x05\x9c\x04\xe2\xd8\xf7\xb0\x3d\xbd\x48\x9e\xea\xfe\x8d\xc1\xa1\x19\x0b\x81\x4f\x21\xfe\x26\xa5\x13\xc4\xa8\x47\x24\x00\x58\x3a\xd9\xdb\x46\xd8\xbf\xec\xe5\x19\xb8\xa9\xa7\x44\xd4\x91\xe2\x84\xea\xcf\x87\x47\x54\x7f\x7e\xad\xb5\x08\x69\xc3\x2f\x40\x9f\x2a\x76\x21\xbb\x47\x23\xbb\x2e\xc5\x8
5\xa7\x0a\xb6\xe8\xaa\x06\x72\xf0\x2e\x61\x4e\x3a\x15\x30\x67\x95\x62\xf9\x1e\xa1\xa3\xe4\xef\x38\xad\x9c\xe4\x9e\xf8\xaa\x93\x59\x0b\x98\x6c\xb8\x0e\x1c\x50\x64\x81\x73\xb5\x8f\x9c\xbe\xd0\x86\x44\xa9\xb2\x5b\x0b\x78\x8c\x5c\x97\x55\xbe\x1c\xb5\x8d\x9d\xc6\x6a\x10\x65\xa4\x81\x7c\xa1\x24\x60\xab\x0f\xb4\x73\xb7\x8a\xa2\x8c\x91\xa7\xa6\x53\xe5\xea\x14\x42\xbe\xaa\xe6\x01\x71\xa6\xc6\x8d\x47\x8e\xd4\x72\xe9\xbd\xd6\x0e\x4b\x7e\x91\x9e\xe3\xbd\xd9\x01\xc0\x99\x94\xf7\x77\x04\x36\xab\x51\xe7\xb9\x70\x33\xa7\x51\x9f\xf0\xd8\x53\x72\xb1\x16\xcb\x6d\xee\xae\xb8\x28\xb3\xbc\x7f\x2a\xee\xc6\xa5\xff\x2e\xdd\xc0\xd7\x79\x6e\x20\x67\x96\xd8\xd1\xa0\x05\x52\x2f\xc8\x95\x04\xe8\x0a\xa0\x65\xc8\x29\xb1\xf1\xa3\xf6\x11\x47\xdc\xaf\xf8\x7b\xcf\x02\xae\xfd\x89\x77\x19\x80\x6b\xef\xf0\x4e\x39\x99\xbd\xb5\x99\x28\xe4\xd3\x17\xb0\xf9\x4e\x27\x60\x3e\x76\x50\xd4\xa2\x5c\x45\x47\x5a\xa8\x33\x35\x2d\x01\x92\x25\x25\xd3\x08\xf2\x6d\x59\x4f\x98\x29\xb5\x75\x58\x1a\xfb\xa0\xfc\x70\x15\xa3\xfc\x6e\x8f\x5d\xb7\x69\x98\xb4\xb5\xc9\xd3\x7b\xc1\xc9\xc5\xfa\xfa\x50\x56\xd1\x0c\xb3\xce\x66\x00\x01\x8b\xda\x44\x61\xe9\xaa\xea\xa9\x58\x87\x2f\x0a\x1f\xf1\x46\x37\x8e\xa1\xbe\xda\x3f\x80\x13\xe5\x6e\xd1\x2f\x18\x7b\xc0\x72\xef\x88\x4b\xf0\xee\x1c\x6d\xf0\x43\x6e\xe8\xde\x09\xd9\x5b\x47\xd8\xbf\xd9\x24\xe4\xd4\xff\xc7\x56\xf0\x11\x6f\x62\x20\xc8\xd5\x81\x90\x55\xa9\x9b\xe8\x05\xe6\xa5\x5c\x1a\x29\x90\x3f\x80\x7d\xcf\x40\x1d\xc1\x03\xba\x26\x80\x5d\x57\xe8\x08\xda\xe2\x56\x6c\x64\x5c\x00\xcb\x67\xad\x86\x33\xf3\x9a\xa0\x54\x33\x40\xda\x6c\xd7\x64\xa1\x18\x0c\x73\x96\x2a\xd3\x93\xd3\x5a\xe4\xb1\xab\x3e\xbd\xd9\x63\x6f\xf0\xd8\xe0\x43\xfe\xf3\xbd\xe0\x36\x65\x81\x0f\xf9\xb0\x80\x5f\xac\x05\x33\x8e\xca\x3f\x61\xc2\x9f\x99\x87\x13\xc9\xe8\x08\x37\xb0\xeb\xd8\xd1\x6d\x05\xfb\xca\x36\xb0\x57\x8f\xb0\xeb\xad\xe7\x96\x93\x6c\x9d\xce\x0f\xe3\x73\x41\xb8\xd1\x44\x1b\x80\xa1\x0b\x15\x7c\x95\xff\x1b\xad\x60\xbe\xf1\x0e\x39\xec\x3b\x72\xe6\x25\x70\x1e\x98\xb4\x60\x24\x37\xc9\x97\xc6\x22\x7c\x0b\xa4\x4e\xa9\x12\x80\x1a\x78\xd1\x33\x69\x2c\x2e\x7a\xc0\x73\xe3\x46\xe3\x7a\xac\x4f\xf4\x37\x0f\x04\x9d\x73\xf2\x8f\x73\x1a\x48\x4c\x56\x19\x1d\xe9\xef\x36\x4f\x05\x38\x01\x4f\x50\x70\x6d\x60\x92\x8e\xe6\x62\x25\xca\x3b\x09\x00\xce\x96\xa1\x18\xe3\x18\x71\x0c\x4e\x1d\x3b\xc9\xc6\xbd\xc1\x33\xce\xe9\x5f\x4e\x23\x34\x94\x6d\x93\x96\x0c\xa9\xe5\xa1\x5d\xec\x98\x35\x24\x44\x0f\x67\xab\x77\x77\x2d\x2e\xce\x11\x44\x6e\xbe\x4a\x04\x90\x98\xf8\xbf\x32\x12\xfc\x54\xab\xe9\x8e\x1b\xb3\x23\x15\x0c\x6b\x8e\xc3\x29\x41\xfc\xf9\x4b\x08\x30\x2c\x34\x4b\x1d\xb1\xfb\x69\xf6\x9c\xdb\x56\xb3\xa2\xbc\x7d\xfc\xb6\x5e\x54\xae\xde\x7e\xc7\x6d\x85\x90\x9a\x47\x2f\xca\xcb\xdb\xf9\xd8\xed\xea\x75\x8e\x84\xfd\xf8\x5f\x79\x53\xf3\x2c\x54\xb9\x14\x46\x72\x64\xaa\x06\x21\x06\xf8\x29\x6f\xb9\xf9\xc6\xd1\x21\x4c\x94\x6e\x72\x58\x48\xd4\x5d\x94\x56\x4e\x7b\x8b\xba\x31\x89\x8a\x92\x1f\x1c\xc7\xe4\x41\x16\x7b\x3b\x92\x71\x1f\xbc\xe3\xa0\x1c\xf6\x83\xd7\x1c\x0c\x2f\x7a\xbb\x65\xfb\xdd\x25\xfa\x2a\x8f\xbd\xdc\x63\x78\xc3\x7f\xa1\x17\x5c\x3f\x51\x37\x94\xc8\x3b\x2a\x65\x44\xcf\x56\x5d\x4d\xb7\xd9\xcb\xf1\x0e\xf6\xf4\xe1\xba\xe2\x26\x63\x3a\x17\x95\xab\xec\x2d\x8c\x7d\xff\xce\x74\x27\x37\x64\x6d\x32\x4b\xaa\x6e\x6a\x02\xd7\xfc\x8f\xec\x0f\xee\xde\xf8\x11\xcb\xf9\x03\x76\xa3\xaa\x6b\x56\xb0\xc8\x31\x8b\x66\x2f\xc7\xf9\x13\xd2\x6a\xbd\xe8\xed\x92\x9a\xf4\x45\x6f\x9f\x54\xa0\xe4\x97\xb8\x6c\x31\xfb\xd8\xbb\x3c\xa6\xef\xf9\x6f\xf1\x82\x57\x79\xea\x17\x59\x25\x80\xe1\x01\x33\x66\x41\x7c\xe4\xa1\x38\x14\x21\x89\xa3\xa0\x9d\xa9\x9c\x17\x87\x55\xce\x9c\x42\xe5\xd8\x57\xd9\xdc\xe5\x24\x89\xda\xab\x
[binary data omitted: this hunk of the patch consists solely of an escaped binary/compressed blob with no human-readable content]
\x71\xea\x3f\xe0\x05\x3f\xed\xdd\xad\xd5\x7a\x5b\x25\x51\x1b\x78\x98\x24\x19\xf2\xca\xf0\xb5\x9e\x14\xf2\x70\x7a\x87\xd7\x87\x86\x57\x05\x97\x99\x0d\x88\xce\x6c\x85\x28\xeb\xa0\x1a\x52\x4a\xcb\x0e\xaf\xdb\x38\x63\x74\xcc\x8e\x53\xbe\x70\xe7\x29\xbd\x80\x65\x81\x60\x1d\x75\x3d\x51\x2f\xf0\xd8\x58\x59\xf6\xfd\xfb\x82\x64\xcf\xad\x59\x5d\x7d\x16\x3a\x8a\xcb\x42\x24\xeb\x75\x1e\x26\x45\x66\x2c\x16\x13\x50\xc5\x09\x12\x7d\x90\x43\xb0\x4b\x1d\xfe\xca\x63\x57\xa3\x34\x98\x17\x52\xb1\x2d\xfc\xdf\xf3\x82\xa7\x39\x57\x06\xb3\x22\x91\x5e\x23\xa2\x9b\xf8\x29\xdb\x45\x6f\xd6\x8d\x3d\xe4\x33\x6c\x8a\x35\xf6\x28\x22\xf0\xc3\x0f\x7a\x8d\x5d\x64\xc4\xd5\xfe\x51\xac\xc6\x7e\x85\xc4\x47\x3c\xc6\xe8\x6c\x39\x1f\xe7\xfe\x4f\x78\xc1\x5b\xbd\x39\x4b\xf2\xd1\x3d\x1e\x91\x45\x07\x95\x13\x98\x13\xae\x61\x61\x68\x12\x4d\x14\xda\x16\xaa\x96\xba\xec\x9b\x3a\xed\x87\xda\xbe\xa3\x75\xbc\x68\x78\x42\xc5\x1d\xd8\x7f\x46\xcb\xf6\x57\xd4\xd8\x01\x78\xd0\x7f\x51\x2d\xf8\x1b\x6f\x3e\x6b\x6d\xaa\x57\xc1\x54\x72\x29\x9a\xb8\x52\xb5\xa1\x94\x62\x80\xb5\x48\x9d\xad\x4d\x56\xcb\x46\xdc\x96\x73\x36\x11\x5b\x22\xa1\x06\xd9\x76\xc5\x32\xd3\x5d\x91\xe5\x5c\x9e\xe9\xf3\x38\x1a\x6a\x27\xc8\x30\xa5\x9e\xa8\x50\xf8\x44\x6e\x8b\x60\xc8\x9a\x17\xdd\x24\xeb\x63\x86\x3d\x18\x2a\x0d\xd0\x96\x0b\x14\xfc\x25\x8f\x8d\x89\x74\xcb\xff\x4d\x2f\x58\x54\x22\xbf\x5a\xbc\x93\x4d\x72\x58\xca\x8f\x96\x59\x4f\x64\x93\x6c\x62\x2f\x32\xeb\xae\x30\xdf\x93\xcf\x50\x8e\xd2\x7e\x26\xed\x8f\x78\xec\x88\x56\xda\xfc\x8b\xde\x1e\x6c\x34\xa7\xd5\xe3\x41\x32\xdb\xb2\x2c\x87\xe0\x1c\x32\x23\x45\xee\x41\x92\x36\x20\xcb\x81\x5d\xbb\xe8\x66\x69\x01\xda\x9c\x6d\x38\xa0\x22\x71\xcf\xaa\x3c\xec\xb2\xd7\x28\x8b\xd0\xcb\xbd\xe0\xdf\xd9\x26\xa1\xa1\x30\x6c\x15\xb8\x38\x7f\x76\xe5\x39\xa7\x67\x6f\x5b\x38\x4d\xf1\x91\xd6\x2c\x51\xe1\x35\x86\xa7\x29\xe4\xbd\x34\x7e\x6e\x8f\x6c\x82\xc7\xf4\xbb\x93\xbb\x2d\x9a\xaf\x8d\xb3\x43\xad\xac\xd3\x09\xd3\xc8\xff\xf3\xf1\xe0\xd3\xe3\x0b\x69\x99\xf7\x31\x47\x11\x86\x19\x51\x63\xf4\xa1\xc5\x6c\xf3\x1b\x22\x49\x50\x0f\x88\xac\x95\x36\x51\xf0\x85\xb3\xab\xcb\xcf\x5a\xba\x73\xf1\xec\xaa\x0e\xea\x8b\xdd\x6d\x84\x4e\x1b\x91\xdc\x58\x71\x0e\xda\x2c\xb3\x8f\x3f\x76\xd7\xec\xf2\x73\xce\xce\x9e\x59\x98\xa4\x03\x53\x37\x04\xf5\xd5\x38\x12\x6c\x9d\xcc\x9a\xd2\x64\x77\x54\x13\x7b\x47\x4a\x5c\xe3\xcc\xa1\x29\x1f\xa7\xdd\x5e\xa9\x12\x40\xb5\x94\x4a\x35\x2c\xb2\x6c\xa5\x5d\xad\xa2\x9f\x96\xe1\x05\x75\x88\x12\x45\x2b\xec\x6a\x92\x34\x1e\x65\x3d\xf9\xf1\xc7\x3f\xbe\xce\x63\x71\x92\x3f\xde\x7a\xb1\xc9\x17\xe8\x59\xab\xbd\x68\x40\x15\x5b\xc0\x9e\xae\x5b\x2b\xf7\xd9\x76\x98\x47\x60\xee\xcc\xd6\x07\xe0\x69\xa9\x81\xa4\xf3\x00\xbc\x42\x79\x19\xc6\x07\x3c\x65\xc7\xe9\xbd\xa2\x55\x36\xe4\xd1\xac\x61\xd9\xed\x70\x49\x89\x06\x4d\x93\x46\x98\xb7\x01\xa9\xdd\x3a\x87\x5f\x93\xf7\xe0\x4c\xd8\x08\xf5\x53\x71\xda\x08\x1b\x30\x49\x46\xf8\x6b\x5f\x52\x63\x8f\x84\x69\xb3\xd4\x4b\x12\xcc\x70\xf4\xff\xca\x0b\xfe\xc0\x5b\x04\xa9\xdd\xed\x25\x09\xc5\xb6\x34\xf9\x9d\x88\xae\x30\x0b\x6a\x45\x9d\x9f\x95\x1d\x56\xe7\x8b\xeb\x67\xb3\x72\x49\x99\x9d\xed\xe3\x01\x3e\x28\xa7\xde\x49\xca\x8c\x2e\xc3\xf6\x00\x9a\x4f\x96\x3b\x05\x18\xf2\xac\x87\xc8\x90\xe3\x6e\x1f\xd7\x40\x49\xb2\x9b\xf0\xb7\xdd\x13\xaf\x3e\xc0\x0e\x74\x21\xe8\xf4\x65\x07\x82\x97\x69\x15\x1d\x2e\xc9\xd6\x60\x20\x69\x65\x14\xcd\x82\xbc\x05\xa8\x6f\x18\x8c\x0a\x8e\xe5\x76\xbc\x45\x71\x48\x24\xca\x0c\x45\x56\x45\xc8\x0a\x9c\xe1\x28\x4b\xa6\x95\xa5\xa9\x20\xd1\x18\xda\x39\x6f\x85\xd4\xa4\x21\xed\x02\x92\x5c\x3a\x61\x1e\x83\xf5\x43\x17\x16\x26\x28\x2c\xb0\x83\xfb\x83\x55\x9a\xbf\x73\x61\x85\x9f\xbd\x73\x55\x6a\xc5\x5b\x42\x85\x5d\xc
0\x7d\x68\x16\xa5\x19\x50\xc4\x2c\x9f\x4d\xfb\x78\x53\x7b\x0d\xc0\xeb\x80\xa6\x87\xd4\xc9\x0c\x0b\xa6\x9b\xf0\xbf\x40\xb6\x32\x07\xff\x00\x04\x81\x3b\xf5\xd7\x29\x7d\x60\x60\x89\xe5\xf2\xd1\xbd\x49\x6d\x7f\x68\x94\x74\xad\x36\x2d\x65\x79\xf9\xa0\xf7\xe4\x91\x71\x2d\xff\xd2\xff\x17\x0d\x53\x49\xf9\x06\x6b\x48\xf9\x58\x66\xad\x2c\xd9\x0b\x53\xd6\x83\xde\xf4\x2e\x1b\xeb\xb7\xf9\x8f\x70\x3f\xb0\x8f\x1d\xf6\x37\xc7\xd9\x11\xd0\xa0\xef\x4c\x5b\xc2\xff\xf8\x78\xf0\xb3\xe3\x77\x5b\x82\x68\x48\xd9\xd3\x68\x05\x49\x46\x89\x3e\x78\x44\x90\x92\x34\x15\x09\xc1\x89\xc5\x18\x44\xbf\x26\x44\xca\xb3\xae\x50\x47\x2a\xed\x33\x28\xcb\xb0\xb5\x41\x87\x3b\x3a\x62\x14\x98\xfc\x64\x4a\x2c\xca\x5c\x84\x1d\x05\xd1\xd9\x01\x86\xc3\xae\x3c\x0c\xa2\x41\x41\x9f\x01\xb1\x30\x5e\x10\x82\x11\x5a\xe8\x54\x8b\x14\xf7\x24\xe5\x56\xd5\xcd\xd7\xa8\x5a\x8e\xd7\x02\x8e\x9d\x75\xed\xd1\xd7\x06\x3c\xc5\xb2\x47\xf9\xfc\xf8\x45\x52\xab\x64\x79\xda\xe7\x98\x6a\x2a\x47\xac\x6a\x1a\x69\x37\x2c\xd8\xc3\x4c\x81\x54\x14\xd0\x63\xc0\x72\x2c\xea\x96\x51\x09\x3a\x5a\xd5\x14\xba\x3a\xa2\x84\x0f\x2c\x9d\x2e\x59\xc5\xd9\xa7\x67\x6d\x2f\xb4\xc2\x3b\x13\x14\x8d\xeb\xc8\xaa\x60\x2f\x1b\x4a\x97\x54\x56\xd0\xa1\xc3\x9f\xb5\x7b\xc9\xc3\x2e\xba\xdb\xaa\xcf\x7a\xce\x31\xeb\x7f\x8c\xb1\xc7\x55\x58\xd4\x70\x0f\xf8\xf8\x58\xf0\xa1\xb1\x45\x45\x69\xb8\x91\x6d\x63\x34\x42\x85\xa9\xcc\xe0\xae\xeb\xb0\x80\x26\x04\x78\xd9\x64\x1b\x03\x81\x0d\x95\x76\xbc\x21\x20\x59\x67\xdc\xcb\x5e\xa1\x3f\x99\x51\xda\x0d\x99\x6a\xa1\xe7\xc9\x54\xd6\xe4\xa7\xc2\x24\x59\x0b\x5b\x9b\xab\xd9\xe9\xac\x5d\xdc\x99\x2e\x80\xd7\xd2\xa9\x0b\x98\x1b\x5a\x1b\xbd\x74\x73\xc0\x01\x95\xb5\x79\xd6\x2b\xa5\x26\x42\xf6\xdb\xaa\x06\xaf\x53\x18\x0e\x4e\x41\xe5\xcc\x36\xa5\x88\x0b\xb1\x4d\xd8\x8a\x1c\xff\xa0\xc0\xd8\xe5\x17\xb6\x69\xed\xf8\xf4\x75\x37\xa1\xa1\x4e\x6e\x88\x37\x4d\xf3\x24\x4e\xa5\xac\x87\xd9\x06\x23\x2b\x97\x49\x27\x94\x87\x10\x77\x8f\x95\x1d\xbd\x9b\x86\xf9\xa5\x71\x36\x1e\xe6\xed\xc2\xff\xdc\x78\xf0\xc1\xf1\x59\x52\x1e\x34\x6a\x89\xd0\xfa\x66\xa5\x32\x39\x77\x66\xfe\x8a\x16\x79\x45\x8b\x1c\xa9\x45\x7e\xa2\xc6\x78\x15\x5c\x67\xd8\x11\x45\x37\x6c\x21\x2c\xea\xdb\x6b\xc1\x0d\xce\x95\x9d\xb1\x51\xa5\x5a\xc4\xf5\xb3\x6e\xd8\xcf\x17\x3d\xf6\x5b\x1e\x63\x60\x3d\x8f\xef\x13\x79\xe1\x7f\xd2\x0b\xde\xe3\x9d\xd2\xbf\x29\xf6\x3c\xeb\x86\xf2\x38\xa6\x13\x21\xed\xf8\x7a\x95\x47\x41\xd8\x81\x10\x0b\x14\xa6\xc8\xf9\x81\xae\x55\x05\x2d\x43\x72\x16\xf9\xd8\xf6\x35\x0a\x0a\xfc\x54\xe4\x8d\x56\xd2\x93\xff\x62\x94\x80\x6c\x51\x31\x35\xa2\x33\x3f\x79\x95\x13\x5d\xeb\x10\x6f\x2a\xce\x4d\x4c\xa3\xfd\xe1\xab\x82\xbf\xaa\xd9\x57\x06\xf1\x13\x5b\x70\xd1\x25\xdb\xac\xdb\xc1\x2f\x64\x1e\xa3\x5b\x7c\x05\xb2\x45\xe4\x2a\x39\x1f\xe3\x25\x28\xa1\xe9\x36\x32\x2e\x1a\xa4\xf7\x35\xe0\xf6\x79\x0e\xf3\x95\x42\x0c\x5c\xb7\x96\x66\xc5\xa5\x4c\x13\xee\x54\xd7\xec\x1d\x56\xd2\xa0\x0e\xbc\x22\xd3\x32\xe9\x24\x03\xed\x24\x6b\x31\xa4\x02\x02\x9d\x85\xa9\x82\xa3\x4f\xa4\x62\x5b\x37\xcf\x90\x67\xc9\x25\x0e\x91\xe2\xd4\x45\x56\x84\x8c\x52\x52\x35\x6e\x29\xb0\x06\xd0\x46\x8a\xdd\xe1\xcc\xc7\x9f\x39\x7c\x05\x89\xea\x0a\x47\xc0\x37\x0e\x10\xea\xd3\x36\x20\xd4\x03\xde\x65\xf2\x82\x04\x2f\xf2\x34\x26\x14\x76\xe6\x44\xf1\xf5\xc7\x85\x62\x5f\xf6\x88\x9e\xe4\xf3\xde\x08\x84\x9a\x9d\x45\x23\xd0\x94\xbc\xc1\x83\x5d\x26\x76\x29\x4a\xaa\x08\x88\xe1\x9d\x87\xbd\x75\xb2\x41\x0d\xb9\xa7\xa2\x3a\xfb\xa0\xd7\xda\x1d\xfc\xea\x56\xff\x16\x07\xfc\xca\xe0\x5e\x59\x8d\x1f\xe0\x09\xb5\x1b\xc5\xd8\xa7\x8f\x38\xd1\x62\xf9\x5a\xd8\x32\x69\x6d\x59\x22\xfc\x9f\x38\x12\x7c\xda\x93\x7f\xe1\x9a\xd3\x9b\x54\x54\x97\x0a\x6b\xdc\x52\x89\x8f\x14\x71\x30\x84\x70\x
a8\xb3\xf2\xf5\xae\x02\xd6\x53\xd9\x55\x78\xa8\x94\x65\xdf\x26\x37\x82\xb4\x2d\x95\x57\x85\x86\x2d\x95\xb6\xad\x99\xe6\xcc\x8d\x40\x01\x17\x6e\x21\x30\x3f\xd4\x0f\x73\xf1\xe3\xfb\x30\xe0\x40\x85\x66\xcf\x40\x49\xc4\x15\x85\x31\x8f\x10\xd7\x80\x0a\x17\xf1\x6c\x62\xa1\xc7\x8f\xbb\x42\xfa\xcb\x07\xaf\x08\xe9\x2b\x42\xfa\x1b\x27\xa4\x37\x2c\x19\xfd\xaf\x2f\x57\x44\xff\x8b\x11\x12\x9a\xf5\xd9\x81\x1c\x90\x4b\xbb\x41\x03\x17\xe9\x46\x96\x44\x18\xdf\x81\x71\x4b\x66\xf9\x6a\x12\x36\xb9\xae\x6c\x8d\xf4\x3a\x76\x7c\x44\x0c\x9b\x23\x42\x6c\x0c\xd1\x8d\xdd\xe5\xd9\x82\x3f\xd7\x20\x31\x25\x3f\xea\x12\xd3\x0c\x20\xfb\xed\x28\x0a\x18\xfb\xd4\x51\xf6\x78\xab\x46\x6b\x61\xd9\xda\x30\xa4\xdc\x79\x96\x3e\x3d\x5b\x83\x43\xc7\x3b\x8e\x06\xcf\xb2\x7e\x5b\x47\x0e\x65\xd3\xb8\x37\x5b\x23\xe7\x0a\x10\x07\xc6\xc0\xe6\x9a\x6d\xa2\xab\x8f\x68\xe9\x52\x0d\x74\x13\xb6\xca\x1e\xa2\xf4\xf4\x52\x00\x4b\x46\xda\x04\x71\xd1\x3b\x7a\x6f\xb6\xb6\x2a\x3a\x5d\xb9\xaa\x1d\xd9\xf3\x56\xc6\x7e\xc1\x63\xdf\x65\xe2\xca\x9e\x9e\xad\x15\xb7\xc7\xf2\x50\xd1\x3f\x1d\x77\xe2\xd2\x7f\xb3\xa7\xd2\x16\x5e\xe6\x49\xe5\x1b\x29\xd5\x10\x00\x5a\xbd\xc4\xe5\x50\x00\x71\xeb\xbd\xd9\x1a\x62\x03\x09\x79\x6a\xb3\xb3\xb0\x61\x89\xc8\xb5\x93\xf1\x28\x2e\xe4\x12\xec\xc5\xc5\x86\x4e\x40\x50\x69\xe3\xfc\x3e\x91\x67\x08\x3f\x66\x7b\x8f\x5d\xc3\xc2\x09\x37\xf8\xfe\xcd\x1e\x3b\x54\xf4\x8a\xae\x48\x23\xff\xd5\x5e\xf0\x1f\xbc\x55\x6d\xb5\x2a\x45\x42\x38\x61\xc6\x53\x8a\x6c\xb1\xf0\x38\xc2\xa7\x3f\xb7\x27\x97\xac\xee\xe7\x02\xd8\xea\xa2\x4c\xa0\x21\x41\x9e\x52\x11\x9f\x23\xc9\x45\x18\xf5\xb9\x0a\xf6\x35\x2f\x34\xf9\x70\x62\x97\x1b\x36\xf0\xc9\x1a\x7b\x54\x2b\x4b\x91\x11\xac\x45\x70\x8d\xfe\x4f\xd7\x82\x1f\xa9\x11\x40\xbe\x1a\x75\x79\x36\x10\x21\x44\xa2\x2b\xfe\x30\xf3\x21\x8c\xaa\x7b\x7a\xb6\x06\x41\x27\x71\x64\x51\x11\x9e\xe4\x0d\x1e\x40\xbe\x73\xc0\x8f\xd1\x91\x60\xf2\xa4\x4a\xfa\xa5\x69\xa6\x71\x9b\x4c\xe9\x49\xff\xc9\xf2\xcd\x53\x80\xc7\x15\x9c\xd4\xc0\x5c\xd6\xf7\xf3\x9e\xec\x93\x62\x33\x86\x0c\x1a\x9e\x8a\x0b\x70\x8d\xc7\xeb\x60\xac\x8f\xb3\x1e\x00\x0e\xc9\xf3\x4e\x3a\x51\x9a\xc9\xd0\x17\x25\x94\xbd\x2c\xba\x49\xd8\x12\xc1\x49\xb9\x1d\xb7\x44\x62\xb3\x58\xd2\xa1\x1e\x66\x3a\x1a\x29\xe1\xd9\x02\x27\x35\x82\x4b\x89\x6d\x9e\xa5\x62\x00\x4a\xee\x3b\x30\x60\x71\x68\xbe\xbe\x5e\xcf\xd7\x17\x0f\xcc\x57\x7c\xe1\xeb\x3d\x57\x67\xdc\xb9\xfa\x52\x8f\xd9\x8b\xd1\xbf\x8f\xdd\xb0\xa3\x2c\x73\x25\xc7\xd3\xcd\x5b\xa0\x41\xde\x6a\x26\x8e\x12\x15\x4e\x6a\xb5\x26\x6d\xd9\x80\x2a\xc3\x14\x02\x9f\x0b\xcd\x85\x26\xbb\x87\x69\x11\xe1\x3f\x23\x98\x97\xbd\xa5\x7e\x03\x13\x6e\x8e\x1c\x4c\x9d\xb0\xac\xf3\x42\x08\x93\x97\x96\x36\xb7\xe3\xcd\xb8\x2b\xa2\x38\x6c\x66\x79\x7b\x4a\xfe\x9a\x92\x8f\x3b\xa6\xbd\x8f\x79\xec\xb1\x2a\x16\x6b\x5e\x84\x51\x12\xa7\x62\x45\xb4\xb2\x34\x2a\xfc\x77\x79\x8a\x59\xf5\x35\x3a\xa7\x8e\x47\xf4\x90\xfc\x78\x81\x0f\x52\x18\x12\x16\xa2\xdb\x89\x50\x5f\x9d\x18\x0c\xcf\xaa\xca\x11\x5a\xbe\x91\x34\xaa\x4f\xf4\xa1\x4d\xce\xcf\xc8\xc7\x68\xb4\xad\x95\xa4\xbb\x89\x42\x27\xc3\x42\x4d\x90\x2c\x15\x85\x3b\x68\xff\x6d\xac\x3a\xa7\x58\x65\x47\x18\x56\xb2\xb7\x8d\x05\xbf\xe4\x0d\x5f\xb7\x88\xc9\x4c\x4a\xc5\x43\xc5\x4d\x36\x2a\xff\xee\xa1\xa0\x27\xfb\xb5\x6f\x1a\x7a\xb2\xa7\x59\x19\x2b\x4f\x0e\x1a\xa3\x33\x56\x46\x65\xa9\x7c\xf2\x70\x25\x43\x99\x15\x70\xe9\xff\xe8\xe1\xe0\x56\xeb\xb7\xb5\x39\x87\x18\x03\xaa\x83\x9c\x09\x18\x44\xc7\x3e\x18\xe7\x2b\xa5\xa9\x5c\xf4\x8e\xe8\xd0\x4c\xa7\x67\x1f\x38\xc4\x4e\x52\xc7\x1e\x0f\x9e\x80\x09\x6d\x2e\x67\x8c\xea\x6c\xf5\x15\x67\x81\x95\x56\x9e\xf1\x46\xf0\xec\x33\x2a\x7b\x59\x84\x51\x43\xb1\x38\xa0\xa1\x09\x2e\x41\x8e\xb1\x71\x62\xf3\
x63\x98\x80\x0c\x69\x53\x5a\x78\x4d\x36\x77\xdb\xca\x5e\x04\xbb\xed\x1a\x44\x37\x5f\x08\x36\xc1\x3b\x62\x65\xef\x51\x6a\xd6\x00\x09\x9b\x6d\x4c\x57\xc9\x5b\xda\xbe\x46\xd1\xcb\xee\x97\x83\x80\x1f\xc3\x27\x27\x0a\x48\xc8\x9e\x74\xda\xfe\x0b\x35\x76\x94\x6a\xb1\x70\xa1\x9b\xe3\x91\x69\x41\x59\xf1\xbb\x0f\x61\xa5\x6e\x13\x1b\xe1\x96\x94\x33\x71\x27\x4e\xc2\x1c\x75\x81\x15\xfc\x34\x38\xbc\xab\x96\xd0\x43\xe5\x6b\xd8\xa5\x47\x54\x3d\x64\x17\x60\xb8\x17\xd5\x2b\x04\x16\x05\x52\x07\xc5\x85\x56\xd2\x2b\xe2\x2d\x77\xf6\x3c\x97\x99\x59\xe9\x47\xc1\xdd\x83\x03\x69\x9c\x46\x4e\xbc\xfa\xce\x1d\x85\xc9\xa8\x76\x16\xdd\xc4\xc9\x09\xe7\x93\x9f\xf5\xd8\xb7\xe1\x37\xf3\xac\x1b\xb6\x11\xf2\xf5\x23\x5e\xf0\x93\xde\xe0\x55\x1b\xc5\x44\xaa\x45\x70\x1f\x33\x24\xba\xf4\x94\x8d\x1a\x02\x58\x5b\x4e\xd8\x95\x72\x7d\x61\x82\xc5\x76\xd8\xe7\x61\x9e\xf5\xd2\x88\x2c\xb7\x3a\x5c\xf5\xcc\xc0\x87\xcf\x66\xa9\x50\x2e\xa5\xe6\x40\x44\x1f\x00\x09\xc5\x29\x9f\x69\xce\x4c\x3b\xed\x7a\xff\x98\x4d\xba\x5f\x6d\xfa\x21\x24\x8d\xdb\xc2\xd6\xa6\xd4\x53\x5f\x30\x16\xcc\x55\xde\xb1\x67\x4e\x08\xa8\x72\x28\x43\x15\x1d\x1c\x1a\x30\xe8\xe1\xca\x44\xb8\x77\xd4\xd8\x05\x92\x2a\xdd\xa0\x75\xd6\x4a\x03\xb5\xcc\x20\x94\x97\x85\x06\x75\x95\xa4\x05\xc2\x07\xfc\x3d\xca\x69\x05\xe4\xbf\xda\xea\xa2\x72\xb1\x95\xcd\x9a\xac\x0b\x76\x57\xfc\xa8\xc7\xc6\xbb\x59\x2e\xcf\x0d\x23\x20\xa8\xdd\x1e\x72\x3b\x60\x29\xcb\xcb\xa0\xb5\x04\xe9\xd7\x43\x09\xb1\xba\xda\xb3\x18\xfd\xa1\x70\xff\xf0\x07\xaa\x7a\xb1\x95\x83\x85\xb0\x99\x95\x1d\xdd\x64\x3f\x73\x90\xcd\x58\x95\x52\x9e\x14\xa5\x70\xa1\xc4\x9d\x05\x7f\x7d\xc7\x50\x57\xbe\xe0\x60\xf0\x3b\x5e\xf5\x3d\x97\x28\x9a\x56\x0a\x28\x65\x16\x01\x38\xfa\xff\xa3\x26\x5f\x86\xd8\xd6\x34\xdb\x46\x24\xa1\x25\x79\xd6\x2c\x4a\x91\x2a\x44\x72\xb2\x5d\xa9\x17\xf8\x56\x1c\x72\xc5\xde\xa6\xae\xe6\x75\x30\x50\x01\x51\x13\xdf\x46\xd4\x21\x05\x8b\x59\x64\x0a\x1e\x4b\x63\x70\xa6\x44\xb2\xb7\x70\x21\x6c\x49\xed\xdb\xe2\x55\xa1\xaf\x61\x66\xb3\x35\x9b\xfe\xf3\x38\xfb\xc8\x18\xfb\xb6\xd8\xc2\xc0\x82\xc3\xeb\xbb\xc6\xf6\x90\x34\xa7\xdb\x54\x9a\x37\x83\xbf\xa8\x0d\x16\x66\xb1\xc3\x93\x25\xc0\x0e\x42\x4a\x85\x3c\x66\x86\x39\x1e\xc2\x30\x60\x23\xe4\x5d\x5d\xb4\xea\x68\x3b\xfe\x9e\xc0\xa7\xa8\xfd\x36\xe2\xc1\xe0\xa2\x36\x78\x01\x30\x0a\x8a\x44\x6f\x6e\x65\xf1\x4c\xdc\x56\xa4\x7c\x08\xcf\x04\xdc\x5b\xba\xaa\xc0\x3f\x9f\x50\x5e\xbe\x48\x54\xdc\xc3\x88\x4f\x23\x3d\x54\x55\x9f\x54\x49\x9a\x06\xc6\x09\x57\x21\x96\xad\xf5\x09\x3f\x88\xac\x9c\x8a\x01\x70\xc7\x8a\xb3\x33\xec\x31\xdd\x81\xef\x4a\xc1\xe0\x5f\x1f\x1c\xb3\xf5\xb9\xe1\x4e\xd5\x5d\xee\xac\xf2\xb7\x1e\x64\xcf\x36\xc3\x2d\x27\x65\x0a\xf1\x33\x0d\xc3\x76\x65\xa3\x0b\x99\x07\x40\x6d\xee\x15\x65\xd6\x51\x09\x93\xf3\x7a\x06\x69\x90\x21\xff\xbf\x1e\x08\x9e\xb5\xeb\x53\x36\xd8\x52\x19\xc6\x49\xa1\xc7\x4e\x9d\x5b\x0d\xff\xbf\xb2\x91\x52\xee\x70\xd9\xef\x8a\x8b\x1e\x91\x7d\x3b\xb3\xfd\x13\xe3\x4c\xb0\x43\x14\x4d\xe1\x3f\x3b\x38\xa3\x02\x2b\xe0\x5c\xb8\xd1\xeb\x84\x08\x74\x0c\x7b\xbc\xbe\x87\x7e\x44\x08\x80\xa7\xaa\x60\xe0\x1c\xc4\x72\xc0\x3c\x81\x6a\x38\x5d\x78\x2f\x3b\x88\x47\x14\xff\x7c\xb0\x82\x7f\xe1\x47\x30\x54\xb7\x2e\xd7\x66\x63\x3b\xcb\xa3\xba\xb1\x10\xd2\xa1\xc6\x34\x54\x35\x70\xa2\x18\xf9\xad\x67\x6b\x62\xf3\xa5\x60\xae\x70\x38\xca\x0d\x37\xb9\x53\x20\x18\x2b\xa5\x3c\x58\x05\xfd\xf1\x14\x46\xfe\x9c\x4b\x37\xd3\x6c\x3b\x1d\x50\x23\xa0\x3b\xfd\x38\xf8\xd7\xf2\x5f\x55\x2e\xfc\x3d\x5c\x2a\x01\xb0\x21\x9a\x3c\x5f\x00\xce\x7b\x38\x8d\xd7\xd1\xa5\x3e\x0b\x01\x4f\x14\xb0\xb4\xaa\x02\x5c\x14\xba\x86\x01\x25\xf5\x65\x73\x57\x75\x6b\x57\xe3\x8e\xf0\xbf\x7f\x84\x3b\x66\x84\xf1\x52\xbe\x1b\xac\x0c\x97
\x47\x3d\x0a\x69\x89\x76\x23\xac\x4e\x56\x2a\x88\x14\xa3\xd4\x91\x40\xfa\x06\x4a\x47\x93\xfd\xed\x0d\x95\xc7\xc6\x05\x05\x56\x68\x92\xe2\x3f\x79\x43\xf0\xa2\x03\xb3\x29\x1f\xbe\xa5\x90\x93\x9c\xdc\x78\x07\x26\xb8\x14\x9d\x6e\x96\x63\xb8\x25\x52\xce\xc1\x0e\x2e\x67\x63\x37\x8b\x88\xce\x52\xe4\x0d\x4c\x44\x85\xbe\x6d\x95\xf1\x16\x82\xb1\xa9\xcc\xbe\x48\xac\xf5\xda\x6d\x70\x82\xe8\x3a\xd8\x29\x77\x10\x46\x9e\x66\xc6\x58\x2e\x8f\xe5\x78\xf0\x96\x1f\x6a\xf7\xc2\x3c\x4c\x4b\x21\x0a\x1d\xcd\xd6\x77\xa0\x1f\x4c\x8a\xbc\xca\xd9\x82\x94\x2c\xc8\x74\xd8\x46\x57\xba\xac\x2c\x6c\xdd\x98\x3d\x0e\x74\x81\x76\x28\x9a\x6c\xd8\x70\xd5\x78\x2b\xec\x15\xa2\xa0\xf7\x21\x14\xb6\x25\x44\xc4\xd1\xf8\x4e\x95\xa5\x7c\xa0\x38\x4b\x35\x32\xa1\xea\x43\xb1\xa5\x90\xb0\xab\x9a\xad\x60\x21\x74\x67\xaf\xf5\x29\xc7\x25\xe9\x73\x15\xaf\x6b\x92\xa1\x55\x84\x42\xdf\xe4\xb7\xc1\x5b\x72\xe7\x2e\x35\x22\xa2\x6e\x85\xf5\xa1\xa2\xb7\xa6\x6a\x3b\xd8\x83\x61\xb7\x2b\xc2\x7c\x10\xa3\x8f\xcb\x93\x34\x41\xde\xed\xcc\xeb\xa8\x36\x06\x0a\xa5\x18\x9e\x5e\x85\x03\x3d\x58\xa9\x4e\xbe\x77\xe6\x4a\x8e\xe0\xb7\x6a\x8e\xe0\x95\x08\x7b\xfb\xdc\x02\x47\xa6\x37\x7b\xc1\x7f\xf6\x6c\x9d\xa8\x4a\xe8\xec\x9c\x8c\x03\x4b\x11\x4e\x25\x6a\xfa\x52\xea\x4d\xd8\xc9\xd2\xf6\x40\x26\x73\x1d\xd0\x01\x6c\x81\x23\x97\x7e\xc5\x07\xdd\xd4\xad\x7f\x37\x8c\xeb\x90\x5c\x02\xac\xc3\x4d\x03\x17\x54\xe4\xa4\xc2\x42\x05\x06\xe2\xaa\xba\xb0\x3f\xab\xb1\x47\xa3\xf1\x51\x0b\x12\xd0\x26\x3f\x5b\x0b\x3e\x56\x43\x3e\x07\xa2\xf0\xad\x4c\x65\x82\x3d\x73\x29\x8b\x40\xf5\x37\x6c\x90\x55\x1d\x8d\x5f\xa1\x14\xf1\xaa\x07\xf4\xda\xef\xe9\x4c\x53\x13\xc9\xc6\x8f\x2d\x2e\xcd\xd5\xf9\xd2\xe2\x3c\x00\x9d\x4f\x6a\x3d\xd0\x12\x43\x2a\x4f\x11\x68\xae\x49\x54\x54\x7d\x28\x2e\xd4\x37\xb6\x37\xc2\x12\x22\x29\xad\x0f\x85\xb9\xe0\xc5\x46\x98\x5b\x34\xdc\xc0\xd5\x70\x36\x53\xe1\x65\xd5\x01\xf1\x30\x49\x14\xda\x18\xba\x95\x49\x65\xb7\x07\xfc\x9f\x67\xd2\xe9\x95\xd0\xe0\x2b\xa1\xc1\x0f\x6f\x68\xf0\x95\xf4\xc6\x2b\xb3\xef\x1b\x37\xfb\xbe\x05\x92\xea\x7f\xd1\xc1\xd1\x7a\xdf\x25\xe3\x68\xdd\x67\xe0\xb3\x34\xf0\xe3\x6e\x5a\x48\xf5\x89\xad\x57\x08\x5e\x74\x11\x33\x5c\x17\x49\x81\x1b\x0a\x16\x42\x93\xf2\xc8\x2d\x9a\xdd\x3b\x00\x09\xf5\xec\x3d\x23\x42\x4d\x2f\x19\xdc\xa6\xbd\x29\x4e\xff\x1c\x01\x66\xbe\xf5\x81\x06\x92\x21\xc0\xbd\x87\x73\x0e\x7d\xeb\xe1\xba\x5c\x49\x23\xbd\x92\x46\x7a\x25\x8d\xf4\x4a\x1a\xe9\x65\xa4\x91\x7e\x33\x40\x43\x75\x6c\xb8\x99\xf3\xfb\x42\x9b\x39\xae\xff\xdc\xbb\x11\x66\x73\x10\xeb\xf6\xe1\xdc\x74\xae\x20\x1c\x7e\x6b\x20\x1c\xc6\x0a\x04\xeb\x7c\xb0\xf2\x30\x40\x60\xd9\x9f\xea\x2b\xc8\x92\x6e\x30\x85\x14\x79\x7b\x9e\x6e\x97\x89\xaa\xc1\x7e\xa6\xc6\x26\xf6\xc8\xe9\xe0\xff\xc7\x5a\x50\x54\xdd\x30\xee\x65\x91\x66\xbd\xf6\x86\x13\x92\x50\x66\x3c\x11\x88\x51\x4d\xe2\x64\x20\x4e\x85\xd2\x48\x08\x68\x64\x38\x92\xc6\x8d\xb1\x78\xc9\x37\x4d\x84\xe5\x7b\x6b\xec\x36\xab\xe7\xd6\x93\x6c\x9b\x02\xe7\x4d\x52\x45\x1e\x67\x79\x5c\xf6\x4f\x8b\x2d\x91\xcc\xd9\x70\xc4\xa6\x53\xbf\xea\x05\xf7\xee\xe1\x39\xd3\xc7\x4e\xe7\x22\xf0\x4b\x6c\xd9\xfa\x02\xca\xa3\x69\x74\xa9\xd0\x80\xf0\x60\x20\x4c\xab\xca\x6b\xb5\xc1\xd6\xa9\x43\xff\x4d\xf0\x8c\xf3\xf2\x8f\xf3\xca\x44\x6a\x1b\xa5\x55\x79\x0e\x28\x1b\x55\x93\x3e\x61\x8d\xaa\x22\xb6\x71\x96\xd4\x07\xaf\x66\xd7\x8c\xca\x61\xa3\xec\x32\xff\x55\x57\x07\x3f\x32\x66\x5d\x70\x03\xbd\x72\xc8\x1e\x5b\xeb\x59\x89\x07\x2a\x8a\x2e\x2e\x9b\x1c\x22\x4f\x80\x53\x5a\x75\x1d\xa6\xae\xed\x10\xa7\x05\x51\x4f\x73\x98\xd9\x6d\x3f\xd6\x4e\xb2\xb5\x30\xb1\xa6\xa1\x2c\x37\x8c\xa2\x82\x6f\x6f\x64\xce\x20\x6c\xc5\x21\x5f\xe9\xe1\x6c\xc1\x10\x77\x5d\xb8\xfd\xd8\x5a\x9f\xc4\xba\x75\xbb\x3
4\x8e\xb4\x26\xb7\x13\xec\x0a\xc4\x26\x6b\xcb\x5d\xcb\xae\x2c\x84\xb6\x84\x5b\x82\x0b\x24\x68\x88\x69\x0e\x58\xd5\xbc\xec\xcc\x3c\xaa\xc2\x5e\x13\xf4\x2e\x7a\x87\xe4\x88\x2c\x8b\x75\x67\x56\xfd\xd6\x61\xf6\x52\x8f\x1d\x2e\xa8\x63\xfc\xe7\x05\x4f\xd1\x9d\x84\xa9\x4c\xd6\xa0\xd2\xdc\xa5\x25\x87\xab\x39\x53\xa0\xf7\x70\x7f\x1f\xb6\x77\x67\x5a\xd1\x37\xaf\xa4\x0d\x5e\x49\x1b\xfc\xe7\x90\x36\xf8\x07\x1e\x53\xab\xd1\xff\xbc\xb7\xd7\x45\xb2\x8c\x6f\x04\xef\xf4\xe8\xaf\x91\x02\x54\xc5\x8c\x5d\x92\x0c\xc5\xbd\xc4\xfa\x4c\xa5\xe1\x7d\x96\xc4\x93\xc8\xd1\xed\x97\x8b\xb2\x97\xa7\xe6\xfc\xf4\xa0\x97\xed\x9e\xa3\x78\xda\x7f\x7a\x63\xf7\x1c\x44\x6e\xa5\x31\x92\xe4\xab\xca\x66\x64\x3f\x58\x63\xc7\xf7\x34\x54\x4b\xb9\xb0\x78\x09\xbf\xe4\x05\xcf\x76\xae\x68\x67\xf7\x7a\x2f\x59\x8f\x13\x08\x39\x11\xeb\x72\x5a\x02\xd8\x09\xa1\xb3\xf3\x63\x38\x81\xeb\x3c\x12\x89\x90\xff\x8a\xb2\xd5\x9c\x04\xfb\x42\x98\xe7\xb1\xd4\x9b\x7a\x03\xe1\xa7\x3d\xd6\x60\x63\xbd\x38\xf2\xff\x55\xf0\x9d\x6e\xc2\x13\x65\xbf\x9c\x5b\x9c\x77\xb6\xe3\x79\xf6\x48\xb5\x34\x95\x5c\x9c\x09\x1e\x5f\xf9\xea\xb2\xfb\x9c\x5d\xca\x1f\x3e\x72\x27\x1a\x13\xcc\x2e\xf1\x3f\xfa\xc8\xe0\x46\x93\x6b\x82\xb2\xdf\xd5\x1e\xc0\xd8\xb2\x0e\xb1\xc9\x51\x41\xc1\xe9\x85\x4e\xe1\x50\xed\xbb\xff\x11\xec\xb5\x63\x8c\xad\xc5\x69\x98\xf7\xe7\xe5\x5a\x7a\xe9\x58\xf0\x07\xb5\xdb\xf4\x6f\x2b\xe0\x74\x43\x70\x7c\x8e\x23\xe0\xc1\x02\xb1\x0a\x61\xef\x03\x4a\x89\x62\x33\xed\x6e\x84\x69\xaf\x23\xf2\xb8\xc5\x5b\x1b\x61\x1e\xb6\x4a\x08\x37\x98\x68\x4c\xd4\xf9\xc4\x73\x26\xe4\xe4\x9e\x68\x4e\x34\xb9\xfd\x9d\x50\x9b\xa1\xe0\xd4\xc2\x31\x31\xb2\xa5\x99\x78\x49\x7b\xa7\x65\x70\x6e\xf5\x54\xe3\x26\xa4\xb6\xb6\x62\x79\x8a\x32\x23\x6f\x82\x55\xb2\x26\xa9\x97\x3a\x69\x02\x64\x46\x14\x37\x93\xa5\xa2\x82\xf2\xab\xae\xad\x8f\x22\x05\xf6\xbb\x48\xa1\xfe\x43\xd0\x0f\x76\x2f\x99\x8c\x9a\xfc\x1c\x39\xc6\x74\xfc\x2c\x59\xeb\x40\x55\x83\x58\xfd\x27\x72\x1d\x96\x0a\x3b\xd1\x26\x52\xb5\x37\xef\xf7\x1e\xcd\x1e\xe5\x8f\xcb\xc6\x5a\x63\x6f\x0d\x0f\xfb\x54\x8d\x8d\x83\x80\xfb\x68\x2d\xf8\x91\xda\xf0\x70\x0c\x8f\xf8\x43\x32\x2a\x14\x2c\x04\xdd\x94\x66\x69\x03\xfb\x7a\x60\x50\xa0\x78\x65\xee\xb1\x7a\x1b\x3a\xa1\x72\x48\x76\x19\x0c\x9b\x7f\x6d\xb0\xbc\x7d\x0d\xc9\xfd\x03\x9e\x2e\xbb\x47\xff\xc8\x63\x47\xe2\x4e\xa7\x57\x86\x6b\x89\xf0\xbf\xe0\x05\xbf\xe2\x2d\xaa\x9f\x75\x79\x84\x76\x2c\xa5\x22\x2d\x7a\xb9\x9a\x80\xb0\xa0\x4c\x6b\xdc\x6c\xaf\x21\xca\x07\x7e\x0c\xf4\x4a\xda\x98\xd5\x66\xa2\xbc\x96\x9d\x2c\x52\xe9\x4e\x76\x00\x88\xfa\x2e\x9a\x2d\xe4\x64\x1a\x78\x9e\x13\x25\x35\xda\xc9\xe9\x34\x8e\x07\xf4\x34\x4e\x5c\x7b\xd0\x15\xe5\xe2\x0a\x70\x8c\x03\x1c\x73\x45\x5d\xbf\x3c\x75\xfd\x41\xef\x19\xbb\x2b\x48\x4d\xbf\xae\x15\xa4\x20\xd0\x9a\x90\x96\x14\xae\x1e\xc4\xd8\x67\xc6\x9d\x24\x20\xed\x1a\x2d\xba\x1b\x22\x17\x77\xc5\x79\xd9\x0b\x93\xf9\xb8\xd8\x74\x68\x33\xdf\x30\x1e\x3c\xc1\xa1\x0e\xde\x5a\x81\xe7\x55\xda\x84\xaa\x7e\xf3\xa2\xc7\xf0\xd2\x50\xf2\xe7\x17\xc6\xd8\x47\x3c\x76\x70\xbd\x58\xed\x77\x85\xff\x53\x5e\xf0\x4e\xcf\xa2\x1e\x86\xf8\x79\xe5\x0d\xb6\x78\x1e\x2d\x5b\x27\x3e\x43\xc1\x63\xc6\x16\x09\xb9\x70\xa4\x73\xa5\x6d\xae\xec\xa2\x0b\x17\x9a\x3c\x10\x17\xca\xeb\x82\x3a\x0f\x2e\xac\x17\xf2\x9f\xb4\x5c\x2f\x82\x26\x5f\xec\x68\xca\x7e\x90\x25\x86\x68\x1b\x5f\x90\x62\xd9\x4a\x11\x75\xd4\xad\x0b\xec\x91\x94\x37\x85\x5e\x95\xc5\x79\x5f\x04\xcf\x5c\xc1\x4b\x04\xe8\xc1\x6f\x0b\x0b\x11\xf1\x33\x06\x40\xfd\xd8\xca\xd2\x6d\x67\x26\xe5\x86\x01\xc6\xe1\xc5\xf9\xca\xa8\xda\x15\xbb\xe0\xb3\xe1\x40\xf6\xeb\x5d\xec\x51\xc5\xe0\x03\xfe\x6c\x70\xdd\x3e\xbf\x9d\x0e\x96\x3b\xc7\xac\x11\xf3\xaf\x0f\x8e\xa1\xe3\x46\xee\x40\x
71\x24\x67\x28\xa8\x91\x03\xe3\xbd\xd5\x89\x36\x9d\x8c\xc0\x03\xec\xba\x3d\xe4\x43\xcd\x25\x61\xdc\x71\x26\xd6\xd7\xc6\x83\x37\xd6\x76\x7d\xcc\x31\x33\x20\x9f\x7b\x3e\x51\xf0\xa5\xbb\xe6\xaa\x0d\x42\x14\x97\xaa\xd2\x60\xa5\xb4\x45\x75\x32\xeb\xa5\x11\x5f\xba\x0b\x57\x3c\xe6\x58\x42\x4b\xd5\x93\x76\x2c\xe3\xec\x50\xea\xd2\x70\xbd\xe2\xa2\xce\x45\x01\xfb\x59\x98\x24\xfd\x3a\x0f\xf9\x76\x1e\x76\xbb\x52\xf3\x82\xfc\x4b\x95\x1d\xa1\x13\x44\xec\x2c\x39\xb9\xd7\x6d\x93\xa3\xb6\xc8\x3a\x22\x4b\x05\x17\x40\x92\x5c\x6a\xa8\xea\xc9\xe6\x45\xef\x48\x4b\x7e\xf8\xec\xa0\xcd\xef\x17\x6a\xec\xf7\x3c\x66\x6e\xfa\x9f\xf1\x82\x07\xbc\x39\xf5\x73\xd0\xfe\x37\x9c\x8a\x05\x8f\x8e\x4e\x7d\xec\x66\x26\x14\x4b\xf7\xe8\xbe\x8d\xb3\x34\x71\xa7\x4c\xbe\x55\x83\x22\x40\xae\x31\x97\xf0\x0a\xb4\xc6\xd9\x1b\x9f\x65\xa5\x7d\x9f\x09\x9e\x7a\xf7\x7e\x49\xa2\x95\x17\xab\xc2\x85\xf5\xde\x83\x6c\x72\xcf\x18\x66\xfe\x0b\x0f\x06\xf3\x83\x17\x55\x78\x5b\xb1\x03\x8e\xb8\x46\x7d\x34\x70\x8b\xee\x81\xe8\xdd\x07\xd8\x3b\xc6\x80\x05\x9f\xe0\x5a\xfc\x1f\x1a\x0b\x5e\x3a\x36\x67\xe0\x5b\x60\xee\xeb\x2d\x6c\x30\xc4\x58\x61\xbc\x58\x59\x97\x1b\x61\x1a\x25\xa4\x4b\x21\x48\x22\x2e\x07\xc2\x46\x91\x93\xdc\x30\x9b\x05\xeb\x49\xb8\x95\xe5\x45\x60\x4e\x3d\xba\x58\x2d\x63\x61\x6e\x98\xaf\x35\xf9\xa9\x2c\xe7\x44\xd7\x5c\x07\xb7\x80\xdc\x3e\xc1\xa6\x69\x4a\x5e\x32\x6c\x69\x6a\x5d\x41\x39\xb1\x7c\xab\x43\x2a\xa0\x5d\x28\x54\xd2\xf8\x9e\x07\x82\xce\xa3\xac\x13\xc6\x69\xa3\x9b\x8b\xf5\xf8\x82\xca\x79\x4f\xe5\x8e\x91\x0b\x74\x40\x1d\xbf\x7e\xda\x3a\x67\xc8\x89\x80\x4e\xa7\x3a\x17\xcd\x76\x93\x07\x61\xab\x23\x00\x33\x13\xc7\xa1\x61\xbe\x1d\x0c\x26\x24\x6a\x95\xdd\xf5\xba\xd6\x18\x33\x1c\x70\xfe\x5f\x7a\xec\xd4\x25\xe0\xe1\x99\x7e\x31\x4c\xdb\xef\xf4\xac\xde\x02\xad\x39\x89\xd3\x4d\x4c\xa0\x6c\x41\x66\xa0\xd1\x91\xe9\x44\x06\xe0\x26\x06\xc6\xde\x3d\x9a\x59\x19\x73\x6e\xf7\xda\x44\x2d\xf1\xd0\x2c\xd2\xb6\x78\x75\x9c\x14\x17\xca\x3c\xb4\x78\xef\x9a\xec\xc7\x1e\xc1\x9e\x6c\x35\x33\x8c\x88\xb8\x39\x17\xed\x18\x62\x85\xe2\x2c\x35\x36\xa9\x5e\x22\xee\x8e\xcb\x8d\x3b\x95\x49\xa4\xf0\xff\xf4\xea\xe0\x85\xde\xf0\x75\x6c\x74\xd9\xeb\x26\x30\xbb\xad\x1b\x52\x54\xeb\x70\x39\x30\xe0\x83\x5a\x28\x35\x2e\xed\xe5\xec\x84\x9b\x52\x33\xc8\x15\xb4\x29\x65\xd5\x62\x71\x10\x64\x89\xdc\xd2\x72\x76\xc3\x01\xce\x5d\x83\xff\xfd\x2a\xf6\x7e\x8f\x1d\x09\xbb\x31\xb0\x22\x17\xfe\x3b\xbc\xe0\xa5\xde\xec\xd2\x22\xfe\x54\x82\x74\x76\x69\x11\x35\x2d\x95\x5d\xae\x02\xee\xd6\x04\x52\xfa\x65\x4d\x3e\x71\xed\x04\xc7\xa5\x46\x8f\xc2\x81\x8b\xae\x92\xfe\x44\x9c\xc3\xe8\x10\xa5\xa5\x5c\x24\xb1\x95\x02\x94\xa5\xa2\x69\x7b\x5f\x76\x8c\xa9\xfc\xb0\xc7\x8e\x1a\x15\xbb\xf0\x7f\xcc\x0b\x5e\xe9\x19\x1d\xdb\xa9\xfa\x96\xba\xb6\x97\xca\xab\x87\x1f\xde\xea\xbf\xb9\xc6\x58\x66\xe6\xc6\xab\x6a\xc1\x5f\x7b\xee\x9c\x00\xab\x89\xb9\x22\x7f\xea\x29\xc7\x37\xb2\x6c\x93\xb7\x42\x79\x62\x46\x89\xdb\xe0\x73\xcb\x0b\xb3\xab\x0b\x75\x7e\x6e\x69\x1e\xfe\x9d\x5f\x38\xbd\x20\xff\x9d\xbb\xf3\xec\xd9\x85\xb9\x55\x9e\xe5\xfc\x5a\xcc\x92\x4f\x12\xac\x7c\x56\x38\x5f\x80\xf8\xa1\xb4\xaf\x72\xcc\xcd\xc7\x9c\x5a\x90\xa4\xa4\x7c\xb4\x87\xb3\x8b\x7e\x70\xdc\x0e\x39\xfd\xbe\xf1\xe0\x4f\xc6\x4c\xf0\x28\x49\x0a\xb4\xb7\x98\x21\xc5\x93\x53\xcf\xf5\xbd\x30\x66\x09\xed\x93\x7c\xa2\x9b\x45\xc5\x04\xef\x88\x30\x2d\x28\x4b\x1e\x2e\x4d\x25\x59\x5b\x5d\x2e\x29\xc0\xc5\xca\xd6\x43\x1a\x0e\x78\xfa\x5a\xf5\x58\x08\xf6\x27\x9d\x3a\xb6\xd6\x43\x83\x8b\xf5\x96\x2e\xdc\x79\xc7\x7e\xc0\x2e\x77\xaa\x68\x85\x89\x70\x9e\x94\x17\x06\x0b\xbc\x76\xaa\xba\x06\x2a\x7e\x27\xce\xdd\x37\x18\x5b\x5c\xe7\xdb\x71\x12\xb5\xe4\xb9\x78\x70\xb0\x2c\xbb\x0e\xf4\x1c\
xb2\x6c\x83\x3d\xc6\x2a\x3a\xca\x86\x6d\x49\x40\x76\x4e\x29\xb0\x6c\x5e\x74\x05\xba\x3b\x89\x95\x43\xa4\xad\x04\x09\x49\x50\xda\xd4\xdd\x66\x77\x08\x14\x01\x4f\x3b\x18\x28\xb0\xb7\x89\xf1\xd6\x31\x76\xa0\x68\x65\x5d\xe1\xbf\x6e\x2c\xf8\x8f\x63\xf0\xa7\xde\x33\x29\xd9\x19\xae\xe9\xa3\x74\x2f\x11\xc3\x38\x6a\x3c\x20\x9b\xbf\x3c\x20\x69\x98\xef\x28\xc0\x53\x71\x70\x6d\x60\x1e\xd0\xb3\x22\x2c\xd1\x89\x49\x58\xda\x0d\xf8\x4e\xe4\x40\x3a\x27\x89\x46\xf5\xd1\x9f\xd6\xa5\x83\x38\x52\x7e\x42\xd0\x38\x9c\x82\x9a\x4e\x45\x86\xbe\x6a\x40\x51\x77\xfd\xa2\xac\xbe\xf5\x7a\x09\x47\x18\x34\xec\x52\xe7\xe4\x42\x76\x67\x8b\x60\xed\x56\x9c\xa1\xd1\xa8\x44\x56\x3f\xca\x59\xd5\x0d\x11\x26\x4e\x9d\x79\xed\x38\xa9\xe0\xda\xc0\x51\x19\x7e\xf2\x40\x35\x1a\x7b\x16\x09\x93\xff\xff\x8a\x03\xc1\x4d\xce\x15\x63\x75\x35\x69\xd8\xb6\x7e\x89\x30\x1f\x69\x16\x89\xd1\xe9\xfd\xef\x1f\x67\x75\xca\x57\x7f\x7c\xf0\xd8\x55\x3a\x82\x40\xf4\x8e\x49\x52\xb7\xab\xfb\x7c\x8f\x3d\x2a\x09\x8b\xf2\x76\x11\xe6\xe5\x9a\x08\x4b\xc8\x34\xdf\xbc\xf4\x44\xf3\xe9\xd3\x3a\xab\x7c\x5b\xf0\x76\x06\xb8\xe3\x68\x7a\x43\x64\x79\xf4\x8e\x9b\xda\xb0\x97\x57\x67\xbb\x6f\x5d\x7a\x1d\x9e\x7c\x7a\x74\x66\xfb\xc8\xa4\xf6\x67\x1a\x78\x84\x33\xc1\xad\xb7\xf7\x3a\xe0\x54\x7b\x28\x10\x11\x9e\xae\x11\x11\x6e\x0d\x4e\x1c\x83\x28\xad\xc9\x4b\x45\x3c\x38\xad\x11\x0f\x6e\x0b\xae\x5f\xa9\xc6\x38\x00\x88\x05\x79\x75\x57\x8c\x83\x2f\x1c\x66\x37\xd9\x3a\x9e\xe3\x72\x1b\x70\xca\xcf\x42\xbc\xe6\xb2\xd8\x8a\xc5\x36\x9c\x92\xde\x70\x38\xf8\x61\x6f\x87\x9b\xb8\x61\x21\xb4\x58\xd7\x20\x55\x28\x1a\x22\x65\xaa\x6d\x72\x07\xb1\x25\x5b\xd7\x0a\xe0\xac\x5d\x95\x59\x43\x52\x20\x65\xd5\xd9\x2c\xdd\xed\x31\xb5\xfb\x16\xa2\x74\xd6\xc9\x57\x0f\xb2\x8f\x7a\xec\x00\x68\xbb\xfe\x87\xa5\x0e\xb5\x00\x8a\x6f\x2b\xcb\x91\xe5\x2e\xd2\xe7\xaf\x5e\x21\xf2\xe6\x62\xba\x9e\x35\x9f\x26\x4a\x78\xea\xd8\x24\xef\x88\x72\x23\xb3\xf0\x99\x64\x8f\xc9\x03\x4e\x2b\x2c\xb3\xbc\xc9\xf9\x4a\x9c\xb6\xcc\x99\x1f\x13\x90\xa8\xbc\xd0\x78\x49\xe3\x92\xa7\x42\x44\x10\x1e\x23\xd6\x13\xa4\x9d\x02\xb6\xa8\xe6\xfd\xde\x77\xb3\xef\xda\x79\x33\xb0\x3d\x0e\x67\xd8\x01\xd0\x3c\xfd\xf9\xe0\x98\xab\xbf\x92\xee\xda\xcf\x7a\x13\x52\x53\x16\x08\x9a\xb0\x9e\xe5\xa3\xb6\x99\xf7\x78\xec\xdb\x53\xab\x67\x75\x67\xfa\xaf\xf1\xd8\xfc\x8e\xc7\x9f\xea\x39\x73\xb6\xaa\xa0\xe0\x4c\xe5\x65\x0b\x81\xae\x4a\xf4\xa5\x0d\x03\x77\xe0\x4c\x1e\x59\x63\x3f\x1f\xae\xee\x6b\x3d\x36\xbb\xcf\xea\x56\xd4\x75\x65\xb7\x29\x36\xaa\xd6\x3b\xd5\xf8\x16\x74\xf9\xde\x18\x5c\x7b\x6e\x71\x7e\x07\x43\x02\x3d\x4b\x41\x60\xb9\xb3\x5e\xef\xf7\xd8\xb8\xbc\xe8\xbf\xc5\x0b\x7e\xc0\x3b\x57\x60\x70\xb1\x9a\xad\x55\x03\x2e\x55\x56\x79\x6c\x27\x72\x32\x1e\xc8\x97\x02\xad\xba\x05\x30\x71\x82\x3a\xc6\xd7\xc7\x80\x4e\x0a\x46\xf3\x6e\x2e\x08\x70\x20\xb8\x1b\x66\xf3\x3a\x87\xcf\x6d\x0b\x15\xeb\xa8\xf0\x95\xc0\xe4\xd1\xa7\x39\x67\x57\xf6\xcb\x35\xf6\x5d\xc3\x31\x0c\x2a\x7c\xc1\xff\xe5\x5a\xf0\x54\x1d\x59\xb0\xc7\x70\x39\x08\x33\x32\x01\x72\x17\xbd\xc3\xea\x28\x77\xd1\x03\x97\x54\x55\xc4\xdc\x17\x3c\xb6\xc4\xf4\x83\xfe\x7c\x70\xa3\x3a\xf0\x39\xeb\x45\x8b\x62\x3d\x78\x83\x61\x72\x76\xe3\x6e\x23\x17\xd8\xc9\xa0\xa1\x1c\x60\xa5\x85\x0e\xb3\xd7\x32\x20\x90\xef\x64\xd0\xa8\x32\xe3\xed\xa9\x8c\xff\x52\x77\x82\xf4\x0c\xd4\x57\x73\x6e\x65\x71\x3e\x8f\xb7\x44\x0e\xc2\xfa\xaf\x9e\x18\x1c\x77\xae\x68\x9c\x1c\x54\x24\x5b\xc6\xed\xc2\xf5\x73\xee\xe1\xf9\xbd\x4f\x64\xcf\x3f\xa8\xad\xe2\x73\x61\x37\x6c\xc5\x65\xdf\xff\xd3\x03\xc1\xef\x1c\x58\x5c\x1f\xcc\x04\x71\x1e\xd2\x8c\x24\x16\xed\xe8\xdc\xca\xa2\x06\xb8\x82\xaf\xf1\xed\x50\x0e\x35\x00\x81\x18\x04\x16\x8a\x2f\x88\x23\x4a\x9a\xa1\x92\x79\x4b
\x15\xad\x0b\xa4\x52\x22\x4d\x14\xab\x91\x42\xb3\x1c\x40\xf1\x01\x38\x56\x96\x39\xb7\xb2\xb8\x32\x50\x41\xa5\xa4\x82\xa2\xdf\x32\xd5\xd6\xd3\x51\x41\x95\x6e\x88\xd6\xa6\x4e\x11\x25\x14\x92\xb8\xd3\x11\x51\x1c\x96\x22\xe9\x23\x2c\x07\xd6\x01\x51\x69\xb1\x5a\xe0\x7d\x24\xe4\x7e\xb9\x1b\x83\x35\x51\x1e\x37\x11\x78\x78\x5b\x27\xca\x41\x05\x20\x8d\x62\x8d\xc2\x29\xa1\x15\xdd\xb0\x57\x08\x2b\x27\x65\xb8\xb1\x1b\x61\xc1\xbb\x3d\x42\x1d\x02\x2b\x33\x2f\x7a\x31\xd8\xb6\x76\x6e\x70\x93\xb1\xd9\x04\x7c\x64\x65\xbc\x25\x92\x7e\xdd\x2e\x9c\x5a\x89\xdf\xb0\xfd\x17\x68\x3d\xeb\xa5\x72\xc8\xb3\x1c\xed\xad\x88\xe7\xa5\x89\x09\xd6\x93\xb8\x2b\x8f\x0a\x09\x84\x8d\x6f\x63\x2e\xd2\xc0\xc8\xd9\x8b\x5d\x67\x34\xe9\x26\x40\x87\x57\x9b\xea\xe8\x0e\xe8\x18\x80\x5d\x88\xcf\xc8\x1a\xc0\xc1\x21\xdc\x0a\xe3\x04\x1a\xae\x41\x52\x2a\x7a\x40\x01\xc4\x80\xef\x1f\x06\x92\x12\x9f\x47\x26\x43\xfc\xc1\x41\x76\x75\x99\x6d\x8a\x74\x99\xbc\xcd\xfe\x67\x0e\x06\x5f\x3d\xb0\x6a\x5f\x72\xa6\x3b\xce\x74\xea\x52\xdc\xf6\xf1\x44\xae\xc0\x09\xc3\x16\x40\xf5\x72\x28\x15\x25\x70\x61\x90\x58\x6d\x87\x45\x06\xc7\x52\x8d\x1a\x67\x29\x1f\x80\x41\x75\x07\x86\x7f\xa8\x09\x53\x90\x48\xa2\x62\x95\xb5\x5c\x83\x62\xe8\x8e\xe1\xf2\x48\xb2\x84\xfd\x4e\x98\xaf\xad\x30\x49\x28\xba\xdf\xaa\x3d\x99\x6c\xbb\x61\x4e\xe3\x4d\x67\x69\x54\xb7\xd7\x33\x79\xaa\x95\x75\x76\x3e\x74\x92\x07\xad\xc2\x48\x26\xf2\xa3\x52\xe3\x67\xb1\xed\x4d\xac\x64\x70\x92\xff\x5b\xc6\x79\x70\x73\xd8\x8b\x62\x29\xea\x6e\xa1\x2b\x9c\x07\xf0\x44\x70\x92\xdf\x0c\x7f\xdc\x52\xc7\xab\xe2\x42\x37\x46\x63\x8d\x3c\x00\x14\x65\xd8\xe9\xca\x67\xcc\x65\x38\x0d\xc0\x75\xd9\xe0\xe5\x53\x73\x27\x4e\x9c\x78\x12\xbc\xfd\x3c\xf9\x9f\x66\xb3\xc9\x9e\xc7\xd8\xd9\xac\x14\x27\xf9\x2c\x7d\x55\x3e\x09\x67\x7d\x7b\x50\x2d\x7b\xb5\xb1\x7c\x83\x0d\xa9\xe4\x1d\xf0\x4c\xa6\xd4\xd7\x26\xbf\x08\x65\x74\x93\xaf\x66\x26\xbb\x0b\x16\x3b\x3e\x87\xe9\x7c\x50\xd7\x7e\x5d\x99\x02\x8a\x65\x41\x4b\x60\x90\xd5\xa9\xcc\xe3\x76\x5b\xe4\x15\xa3\xd5\x15\x79\x9c\xc9\x19\x97\x24\xfd\x8a\xc5\x41\xf3\x7c\xb7\xe5\xe1\x8c\x08\x34\xbd\x6a\x89\xec\x9d\xb1\xc0\xda\x8b\xec\x8e\x7c\xd0\xbb\x66\x47\x72\xcf\x23\xfe\xa1\xb0\xcc\x3a\x71\x8b\xb1\x3f\x3e\xc8\x1e\x83\x73\x5f\xa7\x10\x9d\xc9\x22\x51\xf8\xbf\x7e\x30\x78\xd5\xc1\xaa\x3b\x3a\xce\x60\x5b\x8a\x59\xb9\x3b\x1b\x37\x1c\x99\xca\x86\xf7\x1c\xf2\x32\x17\x03\x4b\x1f\x0f\x2b\x60\x70\xd3\xa3\x29\x0f\xfd\xc6\xb1\x16\xd4\x0d\x43\x2b\x2a\x60\x52\xb6\x59\x30\x48\x6a\x71\xb9\x1b\x2c\x48\x49\xe5\x17\xc1\x78\x1f\x0b\x0a\x55\xe1\x69\xf5\x8a\x5e\x98\xf0\xa5\xbb\xa6\x96\xee\x9a\xe3\x1d\xd1\xda\x08\xd3\xb8\xe8\x60\x1d\xd1\xcb\xd8\x81\xc4\x9c\x82\x07\x3a\x59\x3c\xa0\x2d\x06\x84\x47\x24\xea\xba\xe5\x61\x6e\xea\x45\xe0\x8d\x56\xfa\x86\xc6\xdd\x02\xe9\x3e\xb7\xb2\xe8\xb8\x3d\x8d\x85\x2d\xb1\x13\xb9\xca\xd8\xe4\x9a\x9b\x1b\x70\xc4\x03\x9d\x0d\x9c\xaa\xd4\xc3\x1b\xa1\x82\x69\x09\xb7\x65\x55\x94\x95\x6a\x4d\x00\xc2\x19\xc9\x3b\x98\x98\xed\x8c\x76\xfc\xb6\x90\x0a\x66\xb5\x58\xc2\x44\x5f\x48\x6b\xd2\x5e\xca\x53\x59\x8e\xee\xa1\x61\x95\xda\x71\x42\x59\xbd\x63\xe3\xbe\x5b\x73\x51\x8a\xaa\x76\x5c\x6e\xf4\xd6\xb4\x73\x53\x67\xf2\x34\x92\xac\x15\x26\xca\xa1\xd9\xdc\x28\x3b\x89\x69\xa6\x5c\xac\x0a\xef\x06\xce\xb2\x54\x23\x3c\xf7\x16\x10\x69\x45\xe7\x57\xb8\x8e\x3f\x1d\xc0\x3b\x92\xc8\x68\x90\xae\x42\xce\x1c\xb1\x25\xee\x7c\xb0\xdb\x85\x4c\xb7\x10\x25\x63\x1f\x3f\xc0\x1e\x81\x89\xb7\xca\x20\xe9\xff\xf4\x81\xe0\x9d\x07\xdc\x6b\xce\xb6\x56\xb9\x9a\x74\x92\x61\x98\xaa\xd4\x61\x2b\x48\xd6\x1e\x71\x35\x2a\x66\x87\x34\x8e\x50\x67\xcc\xf5\xa1\x7b\x52\x25\x02\x93\xba\x67\x2d\x1b\xfa\x54\x24\x4a\xc5\x18\x4f\x6e\x2e\x15\x2f\xa4\xf
c\x44\xf4\x20\x55\x1a\x0e\x3a\xeb\x61\x4b\xd0\x4a\x06\xdd\x8e\xc2\x94\xe0\x89\x50\xc3\xe0\x2a\x83\x11\x24\x59\x84\x71\x59\x58\xaa\x98\x2a\xad\x30\x88\xb6\x14\x23\x0c\x71\x83\x22\xa2\x49\xad\xb6\x74\xb3\xad\x2a\xd0\xdb\x86\x02\xbd\xe5\xad\x2c\xcb\xa3\x38\x85\x3e\x56\x0b\x72\xa0\x8f\x65\x15\xd0\xae\x36\xba\xaa\x5a\xb6\x0f\x0d\x04\xa4\x8f\xca\x01\x28\x4d\x8c\xb7\xd6\xf9\x97\xd1\xbd\x67\xd4\xa3\xb6\xdc\xdf\xcd\x06\xa0\x93\x66\x31\xcc\xcf\x86\x3d\xd3\xb8\xea\xf5\xea\xef\xaa\xe4\x3f\x20\xdc\x90\xda\xd6\x9d\x1a\xb2\x7d\xe4\xf3\x72\xf8\x46\xea\x83\x8e\x82\xf6\x5b\x35\x76\xf5\x7a\x01\xe7\x3b\x95\x0f\x5d\x0b\x3e\x54\x9b\xa7\xcd\x81\x44\x7b\x2f\x8d\x44\x9e\xf4\x2d\x0d\x4b\xed\x04\x90\x6d\xdf\x06\x3b\xfe\x76\x2a\xf2\x62\x23\xee\x42\x9b\xbb\x22\xd7\x1e\xa2\x75\x7b\xe0\x69\xb0\xf1\x94\xa6\x41\xc3\xc1\xe5\xab\x64\xa4\xda\x03\xf8\xa9\x15\xab\x62\xca\x38\x0f\x36\x03\xe3\xe0\x25\xc3\xe2\xa0\x00\x80\x28\x5b\xc4\xce\xad\xef\x03\x3c\x57\x8d\x2e\xae\x26\xf7\xfb\xf6\x10\x8f\xec\x5e\x0b\x4e\xee\x28\x7b\x44\x37\x8b\x16\xd3\xf5\xec\xce\x14\x09\x05\x7e\xf0\x68\xf0\x92\xa3\x83\x07\x40\xf7\x99\xfd\x48\x0e\xd3\x11\x00\x9c\x69\x09\xf4\x63\xc0\x0e\xd4\xcd\x22\x79\x5a\x86\x4f\x9c\x23\x00\xb4\xe6\xa4\x8a\xda\x45\x90\x11\xe3\xc2\x43\xc4\x02\xac\x19\xcd\xcd\xc1\x62\x6d\x38\x4f\xa9\x35\x23\x70\x01\xc5\xa7\x0d\x65\x46\x0f\xe9\xc4\xda\x11\x33\xd0\x64\x79\x1c\x0b\x11\xeb\x7b\xe8\xb0\x86\xcb\x0e\x3a\x6a\x58\x69\x1f\xac\x5f\x58\xec\x43\x73\x3f\x36\xb9\x83\xee\x8e\xd0\x05\x5d\x79\x94\x06\x0e\xf8\x0c\xdc\x1b\x48\xa0\x6f\x34\x79\x15\xac\x6b\x7f\x9f\xfa\x24\x1e\xae\x09\x7e\xa4\x42\xef\x97\xb5\x54\xab\x57\xbd\xbe\x3e\x34\x27\x1c\xf4\x08\x9a\xee\xa0\x72\xa1\xaf\xac\x9d\x67\xdb\xe8\x64\xc4\x84\x46\xb1\x1e\x5f\x70\x60\x7f\x9a\x95\x27\x0b\xa9\x7d\xa4\x61\x47\x04\x27\x41\x11\x01\xcb\xca\xa8\xe7\xc0\xc3\x64\x3d\x8c\x8e\xab\x9d\xde\xe8\x01\x97\x10\x2e\x86\x63\xf2\xc2\xb9\xc5\xf9\xc9\xca\xa7\xb5\xda\x10\x9c\xe4\x81\x6c\x61\xa0\x64\x8f\xb5\x61\xd8\xd0\xaf\x0e\xa6\x39\x1c\x6f\x46\xfd\x9f\x03\x0a\x3e\xa0\xb6\xd5\x2d\x32\x8c\x00\xa6\x6d\xc0\xd8\xe8\x3a\x12\x0d\xa1\xd8\x36\x3a\xbf\xa3\x99\xce\x34\x67\x6e\x50\x31\x19\x20\x75\x1c\x24\x7a\x9c\x63\x05\xed\xa3\x4a\x0d\x02\x9c\x07\xc8\x72\xb5\xd4\x66\xf4\x36\x1a\xad\x95\x26\x8d\xa3\xca\xd3\xce\xa0\x8b\x95\xba\x55\x22\xc2\x2d\xa1\xd7\x06\x8f\xe2\x42\x6f\x47\x53\x59\xce\xe3\x76\x8a\xe1\x41\x4a\x84\x35\xf9\x6c\x31\xd0\x80\xeb\x21\xec\x25\x9d\x18\x04\x26\xc4\x3c\x00\xfb\x63\xd0\x42\x5b\x9b\x03\x75\xdb\x32\xed\xa0\x8c\x20\x2d\x94\x9c\x9a\x7a\x67\xb4\x8c\x32\x16\xdb\x03\x41\x54\x65\x91\xf6\x0f\xd4\x31\x51\x1c\x1d\xf5\xa0\xfb\x87\x9c\x80\xb6\x00\x44\xc2\x84\xe3\xa8\x1d\x87\x6c\x49\x7b\xdc\x09\xff\x71\x8c\x3d\x2a\x1f\x3c\x55\xfa\x5f\x1d\x0b\xbe\x30\x36\x7c\xd8\xdc\xd1\x64\x81\x56\xb9\xf3\x43\x92\xe6\x3c\xed\x78\xf6\xe1\x93\xb6\x6a\x24\x9f\x02\xc7\x06\xd8\x86\xbb\x59\x81\x52\x07\xa1\xd7\x94\x00\x53\xd0\x07\x4a\x8b\xb7\x1a\x15\x0d\x51\xb5\xe8\x43\x3a\x1c\x9a\x43\x9b\xa4\x0d\x01\xa0\x93\x1d\xce\x0b\x75\x9b\x0b\x0d\x64\xa3\x2c\x74\xf8\x59\xd2\x15\x61\xe8\xc9\x6f\x59\x56\x70\x13\xa9\xca\x9e\x15\x44\xde\xa1\x4c\xd4\xf6\x36\x52\x08\x91\xe2\xca\x54\x34\x60\x06\xf4\xe9\x61\x3f\x9f\x5b\x33\xe0\x4f\x3d\x76\xf6\x92\xe1\xec\x17\x48\x3b\x9d\xcf\x5a\x80\x0c\x87\xac\x26\xaf\xf3\x82\x67\x55\xde\x51\x51\x87\xca\x4e\x8d\x3b\x8b\x21\x76\xd0\xe6\x6c\x9c\xf4\x84\x5c\x11\xd9\x45\xb8\xf6\xe6\x69\xe6\xb3\xb1\x5e\x9e\xf8\x8e\x9b\xea\xbb\xd8\x51\xcb\xad\xe8\xde\x7b\xd1\x21\xf6\xf4\x4b\x6e\xee\xdd\x62\x6d\x23\xcb\x36\xe7\x20\xb7\x00\xf2\x18\xde\x7f\x30\xb8\x79\xe8\xea\x20\x99\x61\x86\xa7\x8b\x90\x92\x12\xe0\x91\x6d\x7c\xe9\xa2\xf7\x38\x73\x
11\x7d\xa3\x2a\x5c\xcb\x69\xe8\x27\x0f\xb0\x07\x3c\x76\x15\x66\xb3\x60\x3c\xbf\xff\x7e\x8f\x3d\x7b\x27\xcb\xca\x25\x36\xcc\x2a\x3e\xb8\xcb\xfe\x98\x32\x61\xc4\x69\x51\xe6\x3d\x0c\x91\x80\x51\xb2\x1b\x28\x1f\xa0\x76\x41\x36\x13\xa1\x85\xc9\x77\xcf\xd3\x17\xce\x37\xd9\x0b\xc6\xd9\x8e\x6d\xf6\xbf\x32\x16\x7c\x6c\x6c\xa7\xbb\x8a\x40\x3e\x47\x3e\x72\x15\xeb\xd4\x85\xe9\x24\xaf\x9c\x9f\x1b\x78\xf3\xbc\x1b\xe8\x46\x95\xe0\xe2\x42\x57\xb4\x94\x41\x67\x76\x69\x91\x94\x62\x17\x45\x08\x21\xa7\xd4\x78\x91\x3c\x82\x6f\x92\x51\xa7\xb4\x2c\x43\x90\x48\x95\xea\x60\x5c\xfd\x55\x73\xf0\x89\xc9\xf0\x02\x25\x00\x64\xae\x9d\xbc\x60\x2a\x51\x77\x66\x09\xc4\x37\x87\x71\x62\xf1\x3b\x38\x01\xa0\x04\x7c\xa9\x02\xa7\x23\xdd\x44\x37\x04\xd4\x68\xa0\x0a\x7f\x44\xd7\x50\x8a\x14\x1d\xea\xa9\xc8\x09\xa4\x44\xd6\x4f\x40\x4c\x80\x3a\xa6\xc8\x8a\xae\xa8\x8a\x2a\x49\x69\x0f\xbc\xae\xf2\x28\x47\xf2\x6f\x1f\x65\x27\x46\x78\x5c\x9b\x2b\x22\x59\xaf\x08\x1b\xf0\xdf\x76\x34\xf8\x5b\x6f\x87\x9b\xea\x6c\xae\xf0\x32\x07\x08\x31\xc0\xfb\x29\x37\xee\xae\xc8\xa5\xe2\x0a\xf6\x87\x16\x5a\xc5\x01\xe9\x74\x3d\x4e\x12\x8a\x19\x0f\x11\xce\xde\x04\xc0\x63\xdc\x50\x10\x83\x04\xb3\xb0\x8f\x83\x26\xe7\xb2\x3a\x94\xdc\x26\x7b\x39\x4c\xc8\x7d\xa3\x4c\x19\xf2\xc3\x85\x81\x60\x02\x88\xb8\x35\xc1\xf1\xe0\x95\x91\xb7\xc8\x02\xf9\xec\x57\xd7\xf2\xa2\x07\xc4\xdb\x8e\x54\x78\xcb\x15\x8a\xfb\x2b\x30\x08\xdf\xc0\x4c\xc5\x3b\xac\x44\xc5\xa7\x5e\x66\x9e\x22\x04\x2e\x00\xb5\xfc\x5b\x3c\xb6\xb0\xe7\xe0\x8c\x9d\x44\x05\xb0\x2d\x3d\x13\xbc\xd8\x98\xa0\x3e\x32\x92\x82\xf4\x54\x21\x47\x1b\xbb\x17\xe5\x85\x9c\xb4\x14\x29\xa3\xc2\x85\xc0\xd0\xcf\xde\xe9\xe9\x20\xab\xd7\x7b\x6c\x6e\x1f\xd5\xad\xa8\x2a\x94\x13\x9c\x5b\xd1\xe4\x34\x04\x23\x00\xc9\xe8\x68\x1c\x32\x09\xdc\x46\x09\xb7\x81\x81\x55\x3b\x62\x23\xe1\x11\x1b\xf8\x41\xef\xb9\xbb\xe7\x0d\x9e\xf5\x4f\xeb\xbc\xc1\x91\x98\x0a\x3b\x74\xf6\x60\x5e\xe1\xef\x8c\xb3\x9b\x77\x8f\x19\xdb\x79\xe0\xfc\xd7\x8f\x07\xef\xd8\x49\xce\x7f\x33\xc7\x8e\x7d\xa5\x76\x25\x46\xea\xeb\x10\x23\xf5\x9e\x23\x0e\xcb\x38\x80\x78\x6e\x49\xf9\x02\x58\xb7\x28\x53\x97\xb2\xbc\xf4\x5f\x7c\x24\xb8\xd1\xbe\xe0\x92\xd4\xc1\x15\x45\x0c\x00\x94\x24\xf8\xe4\x4a\x12\xb7\xdc\xf8\xa0\x9f\x3f\xcc\x3e\x5f\x63\x47\xc3\x6e\x77\x29\xcf\xca\xac\x95\x25\x68\x31\x96\xfa\xa3\x05\x98\xcd\xbb\x74\xd7\x30\xb8\x4b\x15\xcf\x39\xb6\xa2\x29\x0c\x58\x58\x30\x29\xdb\x32\x40\x24\xe1\x9a\x48\x08\x63\xbc\xc9\xcf\x59\x09\x4e\xa0\x72\x70\x82\x76\x46\xbc\x26\xf9\x89\xc5\xd9\xb3\xb3\xa6\x24\x15\x2d\x80\x0f\x1f\x0b\x0b\xa9\x45\xf0\xe5\x53\x73\x8d\x1b\x4e\x9c\xb8\x1e\xe6\xb6\xdc\x1c\x4e\x4e\x4d\x6d\x6f\x6f\x37\xe3\x30\x45\x06\xe3\xb0\x28\xe2\x36\xe0\x11\x17\xca\xe7\xde\x80\x22\x26\x9b\x72\x2d\x34\x74\xf9\xaa\x75\x5a\x19\x90\xea\xcd\x40\x15\x15\x98\x60\xa7\xdf\xca\x3a\xdd\x30\xed\x37\x5b\x59\x67\xaa\xd3\x6f\xa0\xe2\xda\x50\x65\x38\x16\xe3\x07\xc6\x28\xcc\xe9\xc3\x63\xc1\x8f\x8f\xad\x3a\x99\x6d\xba\x0f\x67\x81\x8f\x25\x47\x92\x9a\xc1\xd1\xc2\x15\x09\x49\x67\x8a\x45\x8c\x60\xfd\xc8\x79\xe1\x3e\x1d\xcb\x79\x97\x6f\xc5\x8a\xc4\xca\x21\x9b\xa4\x3e\xa8\x2b\x14\xcd\xa1\xb0\x4f\x3a\x66\x37\xa1\x36\xdf\x73\x4f\x13\x3f\x74\x56\x13\x9f\x88\x18\xe4\xf2\x1a\x20\xc0\xd8\x6e\x7e\x60\x6b\x0c\x8b\xc2\x90\xa6\x58\xa9\x06\x27\xf9\xb5\x5a\xae\x38\xa9\x6c\x37\x9c\xb0\x33\xd9\x92\x2c\x6d\x37\xd5\xa3\x16\xc2\x86\x14\xfa\x39\xa8\x9e\x3b\x81\x6d\x00\xc4\x46\x63\x42\xbf\x0c\x20\xb5\xc8\xba\x92\x1a\x3c\xd3\xea\x97\x1d\x2b\xb7\x13\xb9\x60\x8f\xe3\xfb\x14\x01\xe6\xfd\x9a\x88\xfc\xd5\x40\x44\x6e\x53\x54\x2a\x6a\x19\x1a\x90\x61\xc4\x65\x03\xf0\xde\x75\xa0\x13\x55\x78\x3e\x39\x98\x54\x5f\xd9\xb1\x88\x16\x2e\xb3\xb8\
xa0\x39\x34\xb5\x6b\x85\x50\x68\x72\x97\xe1\x7a\x8d\x1d\x56\xb3\xd2\xbf\x2b\x58\x94\xf5\x5d\x5c\xda\x71\x29\xab\x6c\xf4\x73\xf3\x4b\x75\xbe\x3a\xb7\x04\x44\x3e\x2b\x73\xab\x4b\x4e\x17\xad\xce\x2d\x39\x3d\xf3\x43\x8f\x76\x92\x4b\xc3\x6e\xb7\xa0\x40\x47\x79\xe6\x38\x07\x4a\xdd\x7c\x28\x3a\x59\xba\x22\x4a\xff\xab\x7e\x70\x1b\x12\xc4\x64\xca\x55\x49\x56\xc3\x02\xec\xa9\x6b\x62\x23\xdc\x8a\x11\xfd\x2d\x82\xb7\xc0\x4c\x9e\x63\x69\xa4\x23\xba\x96\x92\x1f\xf4\xd9\x6f\x32\x76\xb8\x13\x5e\x58\xe9\xe5\x6d\xe1\x7f\x82\xed\x55\x5f\xeb\x95\x71\xd2\x8c\xd3\xb2\x28\xf3\xe6\x62\x5a\xde\x99\xaf\x40\x93\x82\xef\x83\x60\xb7\x4e\x78\x21\xee\xf4\x3a\xd6\xe0\xa6\xe0\xb5\xd6\xf0\xb8\x8a\x8f\xcd\x58\xac\x74\x3b\x91\xb6\x0c\x03\xde\x52\x5a\xbc\xa9\xc6\x30\x71\x1f\x23\xdf\x0d\xfd\xa3\x1f\x23\xbc\x18\xcd\x17\x2a\x75\xbb\x22\x4b\x7a\xa5\x26\xbf\x3f\x26\x2e\x9c\xe4\xd7\x4f\x22\xa4\x55\x57\xe4\x2d\x79\xce\x69\x83\x70\x51\xdd\x09\x20\x45\xf0\xdc\xcc\xf4\x13\x26\x49\x5a\xcb\x12\x49\xe7\x9e\xe6\xf1\x3a\x3f\x13\x5e\x38\x97\x9a\x46\xc4\x05\x9f\x6e\xf2\xd9\x81\x8f\xc1\x7b\x49\x8b\x48\x35\x41\xb4\x58\x9f\x5c\xeb\x73\xc8\x03\xc7\x31\xc2\x84\xce\x4e\x9c\x42\xff\x65\xeb\x7c\xc6\xcc\x20\xed\x38\x9d\x06\xbe\x52\xcc\xd2\x22\x4b\x1f\xae\x14\x72\x8b\x9c\x98\x7e\x42\x5d\x87\x17\x9d\x98\x7e\x82\xa6\x47\x07\xbc\xd5\xc1\x41\x19\xa0\x63\x55\x86\x47\x98\x5a\x38\x8d\x64\x57\x1f\x8b\x9b\xa2\x49\x2e\xe2\x26\xf5\xd1\x59\x28\x69\x45\xd1\xcd\x4f\x9a\x11\xc3\xd0\x0f\xf4\x0b\xc8\xb7\x15\xf5\x3e\xb9\x3c\x21\x67\x2f\x89\x14\xc3\x5d\x27\xcc\x37\x31\x66\x18\x61\xb0\x28\x9e\x8e\x8c\xa9\x20\x91\x0a\xd9\x4f\x49\xd8\x4b\xe5\x24\x6c\xab\x72\x0b\x9e\xa5\xaa\x81\xd0\x98\x26\x07\x7c\x71\x6b\xc2\xd0\x27\xcc\x18\x1d\x5b\x06\xea\x03\xd0\x25\x4a\x9e\x88\x10\xfc\x46\x29\x5c\x25\x32\xfe\x49\x5d\x41\x77\xba\xa9\xd8\xe2\x94\x62\x68\xa8\xde\xba\xd2\xb4\xab\xa8\x96\xad\x89\x56\x26\x77\xbf\x9e\x35\x43\x5c\x32\x7e\x55\x19\x93\xb3\x51\x58\xfe\x46\x29\x54\x91\x8c\x0f\xc4\x89\xdc\x9e\xf2\x30\x4e\x65\x47\x0f\x37\x50\x87\x92\x46\x49\x5f\x77\xb7\x53\x61\xb9\xf8\xe4\x09\x47\x45\xc5\x02\x16\x86\x5c\xf6\x08\xaa\x5b\xc0\x66\x8a\xfe\x38\xbc\x1c\x77\x28\x35\x10\x64\x75\x51\xc4\x6b\x71\xe2\x04\xce\x9a\x4c\x28\x92\xa1\x3a\x7e\x09\x27\x0e\x84\x79\xa6\x18\xe5\x8d\x10\x97\x90\x57\x14\xa6\x8a\x14\x25\x56\x90\xae\xc4\x15\x40\x76\x8f\xf5\x30\x4e\x88\x37\xb1\xb0\x18\x16\x01\xb5\xb8\x88\xb7\xac\xe2\xb5\xd2\x01\xd4\x70\x00\x85\xac\xa3\x22\xa9\x96\x7d\x38\x68\xa3\xd5\x05\xba\x13\x7a\x59\x49\x8e\xb8\xc8\x7b\x5d\xb4\xf7\x0c\x73\x06\xea\xe0\x50\xed\x64\x06\x63\xb9\x7c\x51\x4f\x0c\x94\xd2\x20\x3e\x07\xdc\xe2\x1f\x3c\xc4\x1e\xd1\x71\xe4\x83\xff\x8e\x43\x97\x2b\x5f\xbf\x72\xb0\x5a\xbe\x3a\x13\xb5\x30\xf2\x13\x88\x6b\xcc\xf4\xa3\x66\x97\x7a\x75\x5d\x96\xa8\x1c\x94\x27\x03\x95\xa0\x59\x82\x1a\x05\x89\x20\x5a\xd3\x96\x5c\xbd\x74\x69\x19\x91\x05\xd2\x91\x97\x90\x36\xab\x64\xf5\x80\xa8\xc6\x51\x92\x02\x74\x58\xa6\xce\xfc\x13\x91\xa9\xd0\xb3\x45\x99\x41\x90\x34\x4a\x13\xd5\xa9\x84\xc4\x85\x4b\x0d\xf1\xb8\x2a\xc5\x28\xbc\x0d\x1b\xe6\x60\x73\xb2\x42\x0c\x0d\xa2\xe2\x50\x58\xcb\x01\xb0\xb6\xd7\x05\xe1\x3b\xf0\x14\xaa\x58\xb2\x7a\x09\x80\xab\xdc\x99\x12\xf8\x86\x16\xd4\x90\x0c\xad\xe6\x61\x9d\xc7\x44\x2e\x47\x91\x4e\x52\x90\x97\x19\x85\x28\xba\x65\x4b\x8d\xbb\x57\x60\xa2\x2d\xf6\xa1\x6c\xa7\x92\xdc\x37\x62\xd5\xb3\x3c\x6e\x03\x7c\xf9\xce\x93\xd1\xfe\x3e\x57\x10\x00\xb1\x94\xcf\xc3\x8b\x82\xfd\xea\x38\x1b\x38\xd8\x66\x45\x2b\x4c\x00\xab\xe1\xb8\x3c\xd6\x1e\x6f\xde\xbe\x34\xbb\x82\x97\x28\x5a\xe8\x75\xe3\xc1\xdd\x83\x17\xc9\x6a\x88\x04\x1b\xc8\x1b\xa9\xfc\xb9\x70\x34\x91\x5b\x05\x30\x6c\xe0\xd1\xd6\x38\x06\xba\xa1
\xb2\x96\x6e\x85\x89\x49\xe1\x3c\x00\xf3\xf5\xa2\x77\x35\x3a\x4f\x69\xbf\x72\xe9\xc9\xc7\xd8\x27\x3c\xe6\x3e\xe0\x7f\x40\xab\xde\x3f\xec\x2d\xd9\x77\x06\xf2\x81\xb7\xe3\x34\xca\xb6\x61\x32\xc4\x14\x3c\x6e\x40\xea\xa9\xfe\x0a\x43\x44\xd5\xbd\xc9\xdd\x12\x95\x0a\xee\x40\xbf\xdf\x27\xf2\x0c\xe6\x12\x10\x58\xc1\xa5\x2c\xe7\xe2\xb9\xbd\x30\x01\x2c\xf7\x9b\xa6\xa7\xf9\xb1\x13\xd3\x72\x25\x4f\xba\x8a\xf8\xad\x94\x9e\x7a\x53\xf0\xc4\x55\xa2\x53\x56\x81\xcd\x2a\x37\x09\x33\x70\xa1\xdb\x15\x39\xe7\x40\xca\x2a\xf6\x9b\xbf\xad\x7a\x61\x8d\xa4\x9e\x0d\x5d\x18\x52\x08\xd0\xba\xf2\x36\xeb\x30\x5d\x88\xe0\x2a\x2d\x44\x29\xc5\x00\xba\x58\xee\xdc\x5a\xa7\x11\x6f\x1c\x77\x5c\x1b\xf4\xbe\x01\xc1\x48\x67\x31\xf0\x6c\x85\xbc\x63\xc8\x1e\x50\xf8\xff\x63\x2c\x38\xbf\xe3\x5d\xc7\x8e\xaf\xfd\x6a\x90\x49\xa4\x30\x0a\x32\x7a\x52\xf5\x18\x2a\x06\xce\x0b\x72\x72\xe5\xbd\xc4\x35\x6d\xfc\xcf\x1a\xfb\x07\x8f\x1d\x04\x30\xcb\xc2\xff\x4b\x2f\x78\x8f\x87\x7f\xc3\x3a\x82\xde\x22\x83\x22\x5d\xce\xd6\x79\x3b\x56\x1b\x0f\x05\xa7\x62\xa8\x0d\xe5\x91\x6d\xc3\xa4\x81\x40\x2c\xa9\xe1\x00\x3a\x8f\x5e\x19\xed\x98\x84\x4c\xd1\xeb\x76\x13\x8b\x93\x26\xc7\x81\x50\xf9\x23\x80\x0e\xe3\x9c\x50\x4d\x26\x3c\x4c\x56\x79\x0e\x83\xfe\x72\xfc\x4c\x27\xd8\x0c\x9b\xda\xd1\x6a\x35\x30\x18\x8b\xf3\xcb\xf2\xa3\x2c\x61\xd0\x2d\x7e\x14\xdc\x0d\xe9\xfe\xf1\x60\x3f\xcb\x96\x82\x5f\x2b\x8a\x5b\xa5\xea\x55\xe8\x16\x10\x31\x66\xd8\x54\xb8\x9e\xdd\x37\x85\x28\xdd\x04\xbd\xc3\x6c\x96\xea\xb4\xd9\x5b\x13\x8d\xb0\xdd\xce\x45\x1b\x92\x43\x6d\x07\xed\x00\x86\x4a\x73\x76\x49\xb9\xf7\x4f\xc7\x45\xe9\x7f\xe5\x50\x30\xe5\x5e\x72\x01\x20\xcc\x3d\x95\xb7\xd4\xbc\xe8\x1d\x88\x4b\xd1\x71\x65\xc8\x7b\x0e\x5e\x71\x21\x5d\xae\x0b\x49\x30\xec\x58\xff\x5f\xdb\x53\xf1\x4e\x76\x86\xdd\x51\x39\x15\x2f\x6d\xd8\xaf\x78\xaa\x2e\xc3\x53\xb5\x68\x79\xaa\x9e\xc2\x9e\x7c\x09\x9e\x2a\xb9\xc4\xce\x88\x32\x7c\xd0\xbb\x77\x77\x87\xca\xd3\xfc\x05\xe3\x50\x19\x18\xd4\x01\x97\x8a\xbb\x88\x07\x3d\x29\x9f\x3e\x34\xe8\x28\x1f\xd0\x4e\x4c\x0c\xce\x19\x51\xe6\x71\x8b\xa0\xf4\x2e\x1e\x0a\xfe\xb7\x57\x75\xc7\xf2\x1f\x51\xe8\x06\xe2\xa9\x00\xfe\x42\x07\x1e\xc4\xdc\xdd\x01\x4c\x42\xb9\xc5\x58\x36\x52\x9a\x42\xc7\xec\xe8\x34\x03\x71\xf3\xdc\x9e\xe8\x41\x08\x57\x2b\xc9\x7a\x11\x21\x23\xc0\x69\x53\x99\x56\xb3\x9c\x3f\x63\x69\x05\xe7\x57\x92\x85\xd1\x5a\x98\x84\x69\x0b\x69\xce\x40\x7f\xce\x7a\x25\xe4\x9f\xc8\x3d\x1a\x63\xe7\x26\x9b\x8e\x33\x27\x40\x20\xeb\x80\x90\x20\x0d\x52\x98\x28\x9b\x17\x3d\x86\x4d\x19\x82\xc9\x7b\xe1\x01\x76\x9a\x59\x37\xfd\x5b\x82\x19\xf3\xab\x8a\x25\x83\xfa\x24\x4e\x39\xe6\x5c\x0f\x80\x2d\xbc\xd3\x63\x8f\xc0\x47\x56\x44\x02\x54\x95\xfe\xab\x3d\x76\xeb\xa5\x4c\xb0\x70\x4d\x24\xaa\x90\x60\xc9\x2d\xd4\xd6\x85\x08\x89\xb1\x6f\xb4\xc8\x16\x31\x96\x89\x3c\x26\xd3\x9b\x45\x36\x81\x05\x35\xd9\xaf\x78\xcc\xc7\x3e\x9b\xdd\x12\x79\xd8\x16\xa0\x18\xf9\xef\xf7\xf6\xb1\x1c\x9a\x3a\xee\xe4\x19\xbd\x30\x2d\xe3\xb2\x1f\xf4\x86\xcb\xd4\x99\xce\x08\x35\xde\x15\x79\x43\x9e\x81\x50\x40\x49\xf5\x01\x51\xe4\xa9\x67\x8f\x01\xa2\xdb\x73\xa9\xbc\xc9\x26\x3f\xd3\x2b\x7b\x10\x29\x28\x2e\xc8\xa1\x8f\xb7\x48\x2d\x58\x85\xe2\xe0\x0b\x4d\xf6\x41\x8f\x1d\x2d\xcd\x05\xff\x47\x2f\xb3\x19\xf7\x5a\x85\x0d\xd4\x5f\xd7\xdb\x9a\x0e\xfb\xac\xb4\xdd\x3b\x4d\xf6\xa9\x43\xec\xda\x1d\xd1\xe1\xce\xe2\x2f\x3c\x54\x80\x13\xeb\xcd\x87\x82\x1b\x86\xae\x5a\x7e\xb3\x10\xcd\xeb\xf2\x78\x2c\x55\x12\x5e\xe6\xe1\xba\x9c\x13\x84\xea\xae\xa6\xfe\xe7\x0f\xb2\xd7\x8d\xb1\x43\x22\x8d\xa0\xd4\x57\x8c\x29\x0d\xf9\xef\x34\x6d\x79\x45\x0a\x36\xaa\x65\x00\xc8\x24\xcf\x94\x78\x48\xa7\xcf\x51\x51\x75\x0c\x1b\x92\x6d\xae\x5b\x0b\x51\xa9\x8d\x03\x8a\xb
4\xe5\x0a\x33\xc7\x76\x9d\x7b\x46\xcc\x34\xf2\x03\x3a\x74\x55\x3e\xa3\x1e\x90\x2b\xa1\xea\x19\x75\x1f\x86\x45\x2e\xdf\x88\x1f\xc3\x15\x3a\xa9\x5d\x70\x42\x55\xd8\xf8\xf2\xe1\x50\x92\xe5\xae\x4a\x6f\xbb\xec\x4c\xf0\x64\x9c\xf2\x59\xb0\x15\x15\xa0\x01\x82\xc5\x4a\xb7\x55\x25\xbe\x18\xa2\xe2\x53\xf4\xe6\xd3\xe4\xc3\x81\x33\x7c\x0b\x58\x8b\xc0\x3d\xfc\xfc\x74\x8d\x7c\x27\xef\xae\x5d\xae\xe5\xe8\x4f\x8c\xcf\x85\x80\xa7\x50\x12\x68\xdf\x9b\xb1\x70\x5b\xae\x2a\x4e\x4e\x1f\xec\x11\xec\x43\x55\x46\x88\x09\x7b\x9a\xcd\xd0\x1e\x1a\xc5\xe9\x4c\xfe\x32\x40\x4a\xc2\x40\x33\x72\xfb\xa0\xeb\x32\x8d\xe8\xc4\x8e\xd1\x72\x1a\x6f\x0b\xc2\x59\xcd\x94\xb5\x1d\x36\xc2\xb8\x1c\xf9\xec\xd9\x79\x2c\x4e\x45\xf7\xe3\x77\xa2\x26\x7b\x81\x67\xf9\x6f\x7a\xc1\x06\xb4\x5d\xbd\x76\x0c\x3c\x34\xe0\xab\x21\x37\xcd\xa4\x3a\xe6\xd2\x27\x61\x2e\x40\x61\xd5\x14\xc3\x95\xf1\xc6\x83\xee\x9d\x7f\x18\x67\x8f\xab\x40\x76\x5a\x0d\xe3\xb4\xf4\x7f\x77\x3c\x38\xbf\xaa\x88\xd2\xa0\x40\xb8\xee\x64\x83\x95\x19\x26\x41\x6e\x08\x1e\x20\x13\x50\xa0\xec\xa9\xda\x49\xa2\xa3\xf4\xca\x2c\x11\xb9\x3a\x86\x40\x59\xcd\x8b\xde\xd8\xa6\xe8\x5f\xf4\x0e\xe2\xcb\xce\xc2\xff\xdd\x31\xf6\x66\x8f\xd1\x1d\xff\x35\x5e\xf0\x12\x4f\x63\x8a\xe1\xb2\x40\xea\x21\x65\xe6\x82\xca\xa1\xdd\x4a\xa1\xcf\x67\xc3\xdf\x85\xc7\x14\x84\x18\x96\x80\x47\xc6\xb3\x99\x32\x69\xd5\xf9\x12\x84\x66\x9a\x2b\x14\x5f\xb1\x80\xa4\xe2\x03\xdc\x15\xb2\x05\xfe\x53\x82\x69\xb7\x72\x58\x9d\x4d\xd1\x57\x19\xa2\x80\x62\x17\xa1\x35\x10\x50\xaf\xec\x52\xde\xed\xb1\x23\x72\x2f\x9c\x8d\x22\x11\xf9\x6f\xf2\x2e\x1d\x28\x2a\x5a\x55\xc5\xd8\x41\x03\xd0\x72\xb9\xd7\x3a\x2c\x7f\x58\xc7\xed\xb0\xd0\x20\x80\x26\x09\x42\x71\xf1\x49\x4d\x49\xb7\x1c\xdf\x28\x9a\x6c\x41\xd9\x2b\x6e\x0e\xa6\x4c\x6b\xb7\xc8\x64\xa1\x3c\xcf\x94\x1d\x58\xda\xdd\xe1\x34\xfb\xbd\x87\x1d\xf7\x62\x65\x40\x04\x78\xbe\xe1\xec\xf8\xc2\xc3\xc1\x0d\x43\x57\xdd\xd0\x88\x44\x93\x15\x93\xde\x0f\x88\x85\x45\xe5\x11\xf2\xe3\x57\x8e\x90\x97\x7d\x84\x5c\x57\x47\xc8\x7b\x82\xc7\x9e\xae\xee\x7b\xfb\x6c\x79\x23\xbb\x9e\x9d\xd8\xd1\xcc\xb1\xf3\xf0\x5f\x39\x43\x5e\xc6\x19\x72\xcd\x3a\x43\xde\x75\x59\x67\xc8\xe0\xb1\x9a\x92\x01\x93\xd9\x14\x1d\xc3\x83\x5e\x7b\xf7\xc3\xe5\xbc\x7f\x9b\x3e\x5c\x9a\xa1\x1e\x38\x56\x0e\xad\xef\xc1\x93\xe5\x0b\x6a\xec\x89\xd6\x9c\x71\xf2\x01\xc8\x3c\x86\x38\xc5\x18\x96\xe8\xff\xba\x17\xdc\xe4\x5c\xd1\x1a\xa8\x13\x66\x8d\xda\x11\x6d\x25\x95\x20\xd7\xdf\xe7\xb1\xff\xe0\xb1\xab\xe4\xa9\xef\x36\x3a\xf5\xf9\xbd\x11\xb3\xd9\x30\x58\x9a\x17\x28\x64\xf2\x16\xfb\xda\x00\x3b\x8d\x55\x1f\x83\x7f\x27\x3f\xda\x50\x67\xcd\x26\x7b\xed\x21\x76\xfd\x25\x05\x6b\xfa\x5f\x3c\x18\x7c\xe7\x8e\x77\x2f\x7a\x87\x48\xf9\x75\xe9\x8e\x0e\xb2\x16\x53\x77\xfc\x67\x06\x4f\x9f\x25\x0d\x39\x2e\x74\xaa\x5c\x13\x00\xf9\x94\x8e\x8b\x91\xdf\x64\x50\x35\x2a\x75\x9d\xe0\x74\x74\x06\x9f\x9b\xd1\xf3\xc2\x1a\x3b\x18\x89\x34\x16\x91\xff\x77\x5e\xf0\x47\xde\x3c\xfc\x6d\xa3\x3d\x8f\xfe\x0a\xbe\x6b\x27\x08\x52\x5a\xeb\xe2\x3a\xe6\xeb\x85\xa6\xde\x06\xd7\x27\xd2\x5f\x31\xf9\xdd\x94\x5c\x6e\x50\xed\xa4\x8e\x93\x66\x3c\xeb\xc6\x29\xc8\xf8\xd4\x04\xb1\x66\xe6\x39\xab\x56\x4d\x4e\xb5\x97\x42\x85\x84\x42\x49\x75\xb7\xba\x0f\x9c\x13\x4e\x27\x7c\xb1\xc6\x1e\x49\xe1\xbc\x71\x86\x24\xd1\xfe\x27\x6a\xc1\x87\x6b\x0b\xee\x45\x72\x03\x2b\xd0\x46\xe5\x46\x07\x6c\x24\xa0\x26\xe3\x59\x0b\xe6\x52\x64\x3b\x8f\x9c\x99\x82\xae\x6c\xb5\xe9\x4b\x39\x92\x8b\xc4\xca\x68\x53\x60\x15\xc4\x75\x06\xbd\x65\x27\x00\x64\x52\xd0\xf6\x84\x49\x04\x1c\x28\x9e\xa6\x70\x9c\xf2\xa2\x1b\xe3\xe2\x8a\x4b\x04\xb4\x88\xd3\xa2\x94\x73\xb9\xce\x97\x6f\x9b\x9d\xd3\x84\x3d\x71\x81\x49\xbb\x16\xb3\x26\x11\xb9\xca\x0b\x
[hex-escaped binary data omitted: unrecoverable embedded blob from the patch]
\x23\x33\x8b\xe0\x65\xd7\xe1\x5d\x28\xa2\x15\xab\x4a\x54\xd5\x1a\x28\x3b\x69\x68\xc9\x58\x89\xc2\xd9\x86\x15\xa1\x79\x20\x34\x41\x3a\x15\x09\xa7\x3d\xae\x4f\x9f\x88\x7a\x65\x93\x9a\xa9\xe7\x1b\xaa\xa8\x39\x82\x3d\x0b\xb7\x3c\xc8\x02\xac\xab\x9d\x4f\x07\xab\x13\x61\xe3\x71\xe7\xad\xae\x16\xf8\xb2\x77\xce\x41\xe3\x2e\x72\x92\xcc\x6e\x88\x36\x03\x2b\x74\xcf\xca\x9e\x3e\x10\xe5\x5d\x05\x98\x98\x4d\x98\xeb\xf5\x44\x8d\xbc\x26\x8c\x00\x02\xab\x88\xb2\x2e\x4f\x4f\xf1\xbc\x2b\x42\xff\x97\x6a\x1b\xec\x86\xfc\x9e\xd9\xe1\x86\x82\xdf\xf6\x96\x46\xb4\xbf\x54\xe5\x20\xb1\x19\x83\x61\x2a\x9d\x87\x30\x7d\xca\x89\x9d\x58\x53\xaf\x67\x7d\x29\x89\xe2\x25\x27\x17\x55\x95\x7c\xb9\x6d\x45\x99\xe6\x21\x0f\x95\x56\x2c\x94\x38\xb1\x78\x95\x0d\x32\x9d\xa4\xaf\x66\x15\x56\x66\x83\x3c\xb2\x83\xf8\x7a\x02\xe7\x53\xde\xe2\x21\x4f\x5a\xdc\x7f\xd9\x44\x86\xbf\x55\x5b\x1a\xfe\x7d\xc9\x06\x4e\x69\x75\x85\xc8\x38\x65\x3d\x31\x24\x03\xce\x47\xe9\xc2\x0a\x23\x6b\x40\xb5\xdd\x15\x19\x4f\x6c\xa1\x96\xb2\xa7\x9a\x92\xcd\x9a\xad\x23\x29\x7a\x3c\x45\xa2\x63\xc3\x19\xad\xc0\x17\x38\x86\x92\xb1\x24\x2d\x16\x1d\x75\x97\x22\x99\x3e\x44\x4f\x0d\x7d\x80\xde\x81\x86\x7f\x51\x9a\x5c\xfb\x5c\x52\x96\x74\x30\x03\xf2\x27\x8f\xd6\x8f\x1e\x39\x72\xe4\xc8\x5b\x6d\xfa\xbc\x08\x2d\xb1\x7e\xf9\xb8\xda\x7a\x2c\xcf\x58\x94\xdb\x39\x1e\x50\xba\x0c\x0c\x68\x06\xdb\xd7\xdd\x97\x3f\x5b\x23\x6b\xe8\x17\xff\x43\x35\x32\xbf\xb1\x85\x34\xb6\xad\xb2\x58\xe8\xf7\xbc\xa5\xf1\xaf\x5c\xd2\xa6\x55\xc9\xea\xce\xe8\xf8\x56\x2b\xc4\xb5\x26\x95\xb1\x7c\xba\x65\x76\x29\xb9\x33\xc6\x2b\x3a\xe1\xc0\x12\x02\x3d\x60\x98\x02\x13\x2a\xe3\x2c\x5d\x46\x88\x04\x95\xa1\x22\xdf\xa4\xc2\x65\x60\xb3\x1b\x4a\x17\xf2\xde\xdd\x0e\x5b\x81\x93\xb6\xeb\x60\x41\x2d\xde\xbb\xe0\xff\xc9\xae\x57\x32\x77\xf7\x17\x76\x91\x0b\x26\x77\xf7\xd1\xed\xdc\xdd\x6a\xa5\xe0\x76\x56\xee\xe5\xcb\xca\xfd\xf4\x7e\xf2\xf7\x46\x44\x5c\xe6\x45\x38\xd3\x06\x7d\x32\xf0\xff\xe5\xfe\xe0\x4d\xf3\x22\xa4\x4c\x5d\x40\x23\x17\x33\x31\x44\x1b\x0f\x4d\xe0\x9d\x37\x37\x28\x78\x03\x39\x76\x60\x57\xb8\xe2\xff\x45\x42\x5e\xda\x45\x0e\x1b\x42\xe6\xd9\x42\xf6\x64\xc1\x3c\x33\x87\x0b\x1c\x2f\x9f\xd0\x38\x1b\xfe\x6f\xed\x0a\x5e\xda\x69\x83\x27\xa8\xfc\x03\x6c\x07\x00\x44\x34\xaa\x42\x5f\x20\xb5\x79\x22\x42\xbd\x39\x66\x2c\x8f\x32\xc5\xdb\x66\xfa\xc9\xcf\x4b\x83\x5b\x51\xe2\x6b\xc9\x34\x15\x6f\x30\x35\x08\x38\xa4\x62\xfd\x7a\x5b\xd3\x0c\xc6\x2c\xa7\x2b\x91\x80\xc3\x23\x98\x53\x1a\xde\x52\x97\x17\x96\xcd\x2b\x94\x22\xf3\x58\x94\xd1\x9e\x9c\x25\x33\x08\x5a\xaa\x65\x33\x46\x54\x3a\x90\x8e\x21\x57\x20\x96\x57\xac\xf2\xa8\xd3\xcd\xe5\x82\x6a\xf0\x06\x4c\x29\xe4\xf6\x95\xcd\xf6\x38\xcf\x01\x74\xdd\x2c\x0c\x6b\x26\x50\x15\x62\x9a\xca\x41\x73\x00\x51\x5b\x6f\x5d\xff\x3e\x34\x1d\x23\x87\xab\x0e\x28\xa2\x87\xea\xc6\x9a\x61\xd0\xc7\xe6\x80\x46\xb9\xae\x25\xd7\x0c\xcf\x30\x14\x3a\x3f\x46\x9f\xdf\x51\xee\xa5\xe8\xb2\x10\xb8\x86\x03\xfc\xb8\x40\xab\x20\xd9\x9c\xda\x4a\xe1\xfb\xba\x2c\xc3\x79\x55\xb5\x93\x96\xa5\x0a\x40\x49\x59\x5f\x20\x69\x71\xbf\x14\xdd\x45\x9e\xf6\x6e\x33\x4d\x1c\xcc\x0e\x95\x43\xab\x0c\x01\x78\x0d\x53\x29\x6c\xee\x8c\x38\x1a\xea\x76\x32\x4d\x6e\x59\xb7\xea\xf6\x01\xf8\x04\x1e\xce\xbb\x5d\x20\x7f\xb8\x93\x34\xc6\x0d\xef\x18\x69\x7f\x62\x67\xf0\xad\x1d\x4a\xb7\x98\x09\x70\xa6\x70\x8c\xc0\x1a\x46\xd2\x1e\xcf\xa5\xb1\x6f\x49\x40\x1e\xf5\x94\x4e\x91\xab\x15\x16\x8f\xda\x74\x4b\xa8\x22\xa1\xd1\xa7\x90\x64\x7d\x53\x3d\x68\x01\xe4\x2b\x9a\x5e\xba\x17\xa2\xc7\x91\xac\x91\x86\xf0\xa5\xd0\x85\x12\xe8\x49\x51\x7e\x63\x2a\x0f\xc6\xfa\xa0\x56\x56\x1d\x5a\x0f\x61\xb7\x15\x98\x81\x5c\x8a\x72\xa9\xb1\x01\xf4\x3f\x4f\x01\x0a\x88\xcb\x53\x2
7\xa2\x41\xf1\x95\x48\xf1\xa7\xca\x86\xe0\xcc\x2c\x8d\x02\xfc\xa4\x07\xca\xd4\x3d\x39\x54\xa6\x7c\x57\x8b\x68\xdd\x44\x4d\x40\x5a\x51\x87\xb8\x22\x26\xdf\x86\xb5\x5b\xce\x3c\x43\x7b\xa0\x0e\x33\xa8\xf8\x55\xeb\x54\x2e\x47\xa9\xaf\xcb\xa8\x1d\x2a\xa3\xea\x2e\x78\x23\xb9\x7e\x02\x32\xcc\xaa\x6c\x7d\x6c\x0f\xb9\x6e\x44\x76\xbd\x3a\x1c\xf9\x8f\xee\x09\x7e\xc7\x53\xff\xe8\xa8\x2e\x9e\x0e\xb5\xe9\xa5\xe6\x5b\xf9\x20\x44\x8a\x00\x18\x98\x77\x08\xb9\xd1\x8c\xa6\x22\xe6\x10\xf7\x06\x75\xd0\xef\xc7\x70\xe6\x10\x0d\x5a\xb2\x5d\xf3\x08\xbc\xd8\x80\x98\xc8\x68\x08\x8c\xb9\x90\x2a\x66\x7c\x1b\xea\x7d\x75\x84\x78\x2c\xdd\xe2\x89\x48\xa6\x74\x4a\x5b\x56\xc8\x43\x81\xc2\xe0\x90\xca\x01\x77\x19\x60\xca\x68\x5c\xf0\xc0\xdb\x72\xc1\x83\x80\xb3\xb3\xa5\x7c\x64\x17\xb9\x55\xc5\xa1\x8f\x06\xff\xe0\xb4\x65\x19\xa8\xd7\x23\x43\x87\xe9\x84\x43\xf1\x47\x3e\xe7\x91\x7d\x86\x8c\x03\xbd\xaa\xa7\x0d\x37\x87\xa8\x98\xaa\xa1\xf1\x32\xea\xc5\xa1\x5e\xb1\xac\xfc\x3f\x00\x53\xa4\x1f\xaf\x9b\x4f\x0a\xce\x65\x3c\x45\x1f\x2c\x50\xf8\x06\x9a\xc3\xd7\xf6\x28\x4b\x71\x36\x5e\x65\x3a\xa3\x92\xe7\xa5\x8d\xa9\xcd\x6d\xb0\x60\xe4\x78\xa7\xa9\x48\xab\x5f\xb1\x97\xf5\x23\x68\xdc\xff\xa4\x17\x3c\xe9\xcd\xcc\xcf\xc1\x7f\x16\x9f\xa4\x9c\x11\xb3\x73\x57\xbe\x4b\xa5\xb1\x55\xa2\xd9\x01\x4c\xd2\x02\x86\xcc\x67\x5a\x80\x4a\xa6\x6f\xad\x20\x20\x04\x63\x53\xfe\xb1\x91\x73\x7a\x56\xb1\x57\xa6\x91\x4a\x4e\x2b\xfa\xd4\x5e\xf0\x82\xff\xe2\x81\x4f\x4d\xb4\xc7\xce\x22\x22\x44\x65\xae\x97\x2e\xca\xac\xcf\x94\x4b\x12\x87\xbe\xee\x0e\x7c\xe0\x7e\x52\x60\x74\x9d\x35\xea\xc6\x19\x6f\xf9\x7d\x73\x9d\xd2\xae\x7c\xc4\x1b\x9f\xa8\xa7\xf6\x91\x43\x13\xe5\x9b\x42\xec\xfd\xaf\xf6\x06\xe7\x46\xff\xa4\xc3\x4c\x6a\x23\xb7\x08\x6f\x2d\x4a\xc7\xe1\xcc\xcf\x35\x63\xf1\x4f\xef\xd9\xf6\xce\x6f\xd5\x3b\xbf\xaa\x43\xf1\x49\x70\x70\x74\x24\x7e\x38\xd5\x75\xf2\xe2\xf3\xf1\xe2\xb2\xed\x8f\xde\x42\xb0\xfc\x13\x76\x3c\xf4\x89\x2d\xc6\x43\x3b\x97\x2d\x2a\xbe\xb4\xbe\x17\xfd\x0e\xff\xb6\x49\xd3\xf1\x46\x55\x61\xfd\xb3\xfd\xe4\x86\x11\x66\xc8\xcc\x03\x0b\x27\x62\x96\xe5\x51\x0b\xd0\xbd\x16\x72\x91\x72\x07\x0c\xec\x79\x12\x7c\xb0\xe6\xa0\x68\x94\xfc\xee\x74\x36\xca\x96\x1d\xe2\xf6\x99\x07\x16\x1a\x84\xcc\xc0\x1f\xf4\xc4\xb1\x05\x1a\xca\x3b\x36\xc8\xd9\x5e\x3e\x33\x8a\xb1\x5d\xb6\xfc\xb0\x3c\x5e\x55\x39\xdb\xab\x6f\x95\xe6\x0c\x78\xcb\x9b\xea\x7d\xe8\x61\x2a\xd9\xd4\xa9\x00\x77\xa1\x7e\xe8\x95\x20\x7b\x7f\x74\x2f\xf9\x7c\x09\x0c\xf6\xa9\x5a\xf0\xd4\x36\x30\xd8\x68\x60\x30\xb6\x9a\x71\x14\xc4\xa6\x14\x44\x40\xa6\xb5\x17\xf6\x93\x35\xb2\xaf\xcf\xd2\x1c\x16\xb2\x7f\xc1\x38\xb1\xdf\x59\x5b\x04\xcc\x2e\xf5\x8b\x16\x95\x75\x86\x73\xae\x4d\x45\x2f\xca\x73\xed\x9f\x0c\x4b\xd4\x23\x7d\x93\x1c\x40\xd5\x0a\xe6\x26\x96\x43\x76\x52\xa4\xfa\x27\xb9\xda\x0f\x67\x21\x3b\x5a\x87\xd7\xe0\x78\xe0\xc8\x97\x7d\x92\x66\xe3\xd1\xa0\x41\x17\xa2\x5e\x14\xb3\x34\x1e\xd4\xed\x3e\x96\xf7\x49\xe3\x4a\x37\x08\xf4\xdd\x47\x02\x7a\x50\x00\xf2\x37\x48\x73\xcc\xd9\x0a\x57\x5e\x69\x44\x4a\x47\x2b\xf3\x90\xeb\x66\xfe\xb6\x0d\x8f\xf5\x82\x17\xfc\x67\x85\x4c\x38\xa0\x41\x9e\x16\x1c\x8e\xe6\x88\x8e\x05\x25\x84\x1c\x85\xc9\xa0\x64\x99\xb6\x2b\x30\x59\x60\x1a\x42\x03\x6b\x8e\x5f\x00\x00\x5a\xa3\x13\x13\xb6\x2e\x10\x16\x9c\xd6\x33\x36\xcb\xff\x87\xbd\xe0\x03\xde\x39\x4c\x25\x2d\x2b\xbd\xfa\xa5\xaa\x0a\x47\xa8\x2a\x7a\x70\xa6\xc7\x1e\x16\x89\xb5\xfc\x87\x4b\xbb\x2e\xb5\x20\xff\x59\x8d\xfc\xfd\x51\xb9\xd1\xb0\xd7\xce\xcd\xa3\xc3\xdd\xff\xdd\x5a\x70\x9f\x7b\xc9\xce\x8e\xce\xab\x94\xf2\x70\xc4\xc5\xfb\xe9\xdc\xbc\xf2\x74\x66\xe8\xe4\x31\xa7\x7e\xd7\x7d\xf7\x3f\x3d\xf2\xa2\x47\xae\xca\xa3\x1e\x17\x45\xae\xeb\x06\xbf\x62\x52\x9c\x
ff\xa3\xe7\xfe\x54\x81\x4c\xd1\xa5\x83\xe6\xbd\x73\xf3\xa8\x84\xf4\x5b\xe5\x08\x2c\x0f\xc0\x59\x61\x79\xb7\xcd\xd9\xf9\xce\x23\xf4\x47\x7f\x94\xde\x7e\xc7\x2d\x37\xdf\x78\xe4\xc8\x41\x29\xf9\x47\x69\xc8\x06\x87\xa4\x5e\xd1\x26\xbd\xf6\x56\xdc\x71\x07\x0d\xf4\x4b\x82\x12\xa2\xcc\x1c\xb7\x8e\x1e\xb9\x45\xb5\x71\x03\xed\x8a\x22\xcd\x2a\x0b\xe2\x7b\xf5\x09\x2b\x90\x67\xa1\x2e\x09\xf1\xa2\x33\xff\x3f\xd4\x83\x69\xe7\x8a\x2e\x14\x32\xf8\x69\x50\x3e\x08\xc5\x4c\x58\xe6\x65\x9d\x94\xdd\xc1\x7e\x97\xbf\x6d\x8f\x6f\xd5\x1e\x7f\xac\x46\x76\x87\xe9\xe0\x6c\x91\xf8\xbf\x5c\x0b\x5e\xf6\x10\x53\x07\xc7\xa3\x5e\x45\xd2\xe9\x89\xd0\x44\xf8\x9d\x72\xac\x66\xa9\x15\x90\xd2\x49\x47\x9c\xa0\x24\xda\xfa\x54\x7c\x95\x72\x81\x44\x2b\x2a\x40\x9e\xf2\x0c\xf4\x5c\x62\xce\x84\xa6\x56\x1c\x06\x2e\x11\xb4\x5d\xa4\xe0\x43\xe9\xa7\xa2\x25\xd7\x42\xd2\x29\x0f\xe9\x2a\x3c\x3a\x0a\x60\x7a\x26\x8e\x01\x52\x5a\xbe\x18\x20\x9f\xb3\x9c\x75\xac\x22\x72\xd5\x1c\x0f\xd7\x88\xb8\x7c\xa3\x46\xfc\x4e\xca\x5a\x7c\x9e\xa7\x91\x08\xf5\xa2\xfe\x42\x4d\x97\x00\x7d\x1c\x76\xcb\xd0\x8a\xe7\x8d\x2a\x01\x56\xa4\x0b\xa6\xfe\xd7\x94\xeb\x4d\x02\xe4\x5c\x2e\xf5\x87\x79\x2a\xac\x79\xc1\x56\x68\xd4\xeb\xf1\x30\x62\x39\x8f\x07\x23\x72\xf1\x92\x28\x76\x77\x14\xf8\x1c\x39\x65\x91\x08\x4d\xe4\xaa\xf4\x58\x82\xca\xd1\x43\x54\x64\xb2\x93\xb6\x2b\x83\xc9\x27\xf5\x07\xa9\x97\xb4\xdd\x28\x6e\x03\xfb\xd9\xe3\x2c\x19\xd9\x47\x47\x93\x6c\x1f\xcd\x36\x7f\x34\xfb\x7c\x8d\x5c\x23\xd2\x7e\x97\x25\xb3\x50\x12\x29\xbf\x5a\xaa\xc1\xc7\x6a\xb3\x86\x96\x6c\x9a\xf6\x91\x68\x0c\x89\x58\x38\x9d\x4f\x45\x9f\x75\x40\x5a\x31\x6d\xa4\x6e\x7b\xa8\xf5\xc4\x5b\xbc\x66\x51\x42\x8f\x36\xde\xd8\xa0\x0b\x28\xbd\x28\x4a\xea\x75\xa6\xe4\xb6\xc9\x29\xf6\x04\xc6\xa6\x0d\x6c\x34\x87\xc1\x6e\x41\xe1\x0b\xf0\xd7\x80\xb6\xa3\x04\xca\xa9\x52\xf3\x2a\x16\x86\x60\x09\x1f\x4e\x79\x4f\xac\x94\x50\xd5\xbc\x2c\xf7\x33\x0f\x61\x08\xb7\x41\x4f\x44\x8a\x53\xd7\x74\x5c\xa4\xc3\x5f\x66\xca\x50\x79\x5e\xd2\x63\x34\x45\xde\x75\xe1\xee\x9f\xf3\xc8\x95\xf2\x6b\x45\x82\x08\x16\x99\xff\x6f\xbd\x4d\xa5\xf8\xcd\xdb\x8d\x04\xb1\x2e\x8b\x51\xd1\x5f\x80\xac\x06\x95\xc0\xd4\x0e\x87\x69\xb5\x1a\x4f\x52\x14\x68\x46\xcb\x4e\xf6\x45\x06\x85\xcf\x75\xca\xe8\x8d\x47\x6e\x95\x27\x52\x88\x96\xea\xac\x58\x3d\x76\x3a\x89\xaf\x41\xfe\xef\x0e\x72\x6d\xbf\x3a\x00\xfe\x8b\x3b\x82\x17\x76\x3c\xa0\x18\x88\xa5\x36\xed\x8a\x55\xda\x61\x69\x93\x75\xb8\x5d\x9c\x62\x54\x22\x32\x5d\xcb\x49\x1c\x39\xc4\x67\x2a\xd2\x36\x7e\x84\xf1\xdc\xa9\x54\x8e\xa2\xb1\x06\x90\xb5\x16\xec\xf3\xea\x2c\x05\x47\x58\x88\xa0\x1b\xb1\x90\xd6\xb3\x3a\x72\x98\x6c\x69\x6b\xfe\x75\x0e\xa3\x5e\xfc\x53\x3a\xf5\xac\xf2\xb2\x06\x9d\x69\x49\x73\x12\xd2\x9b\xed\x7d\xe1\x00\x7e\xc3\x01\x3a\xa5\x24\xd6\x95\xe8\xec\x36\x7a\xe0\x18\x6b\x2d\x77\x52\x51\x24\xa1\xbc\x0b\xc3\xde\x10\x75\x74\x07\x4e\x40\x6c\x55\x29\x38\xb7\x11\xfd\x05\x4d\xd3\xd2\x6d\xf4\xc0\x49\x91\x72\xab\x59\xda\x62\x59\x8b\xa9\xc8\x5c\x49\xf3\x8d\xed\x61\xac\x72\xb8\xc1\xb6\x69\xa3\xe1\x24\x0d\xfe\x8f\xd7\xae\xef\xef\x78\xf6\xb5\xfe\xbf\x7f\xed\xa8\xbc\x41\xc7\x24\xab\xb8\x39\x4a\x0f\x49\xa8\x48\xc7\xab\x95\x03\x6b\x3f\x3d\x1e\x56\x6e\xc2\x06\x87\x3b\x80\xfc\x4d\xe9\x48\xe4\x9c\x8d\x7d\xca\x26\x5b\xc2\x0f\x99\x9a\xe0\x95\xe5\xc7\x3a\xc4\x60\xea\x55\x53\x6b\xfe\xba\x81\x8e\x94\xcd\x6c\x7e\x6c\x26\x7c\x19\x5d\xe7\x65\xe3\x38\xff\x6d\xff\xda\xb8\x09\x1e\x7f\xcf\xda\x5f\x3d\xf9\x63\xd7\x13\x87\x77\xba\x04\x76\xde\x9c\x1c\x6d\xb6\x09\xfd\x05\xa3\x87\x6a\x2d\x64\xa8\x71\x43\x37\xf9\x33\x6b\x0e\x65\x91\x8b\xac\xc5\x62\x69\x5c\x6c\x74\x24\x36\xf0\xe0\xf5\xc3\xdf\x7f\x3d\xce\x0e\x9d\xbc\x41\xf3\x6e\xe0\x8d\xd8\x58\
x77\x27\x7e\xa4\x32\x3e\x56\x7e\xd9\x46\x96\xe9\x26\x56\xd7\xa8\x37\x95\xdd\x10\x22\x0d\x55\x92\xd3\xa6\x04\x77\x93\x0d\x6c\x42\xef\x0d\x61\x87\x4e\xad\x8b\x2a\x3a\xf9\x74\x40\x62\xc3\x06\x27\x62\xb3\x8f\x4e\xfc\xed\x95\x89\xb4\xc8\x26\x9d\xf5\xce\xe2\x7e\xd7\xbe\xcf\xce\x46\x2d\x89\x25\x27\x5b\xfd\x9b\x78\x76\xad\x61\x05\xb2\x08\x65\x45\x4d\xde\x88\xfa\xa0\xb2\x15\xe5\x04\xd9\x54\x8f\x4c\x63\x1b\x10\xb5\x21\xf2\xe5\x4d\xcc\xd6\xa8\x36\xf4\x4f\x22\x1c\x2a\x9a\x9c\x48\xd4\x36\xf8\x60\x75\x1c\x37\xf8\xf8\x26\xd6\x28\x4e\xf5\x9a\xb6\x9a\xba\x65\x1d\x39\x5c\x1f\xd8\x70\x62\x5b\xb1\xba\x3c\x36\xda\xf4\x5a\x1f\xb3\xf1\xb6\x36\x30\x98\x65\x56\xd9\x90\x04\x0d\xff\xb4\x11\x71\x18\xff\x3d\x93\xb6\xbb\x5e\x09\xf0\x44\xd2\xbc\xf1\x67\xab\x02\xbd\xf1\x16\x54\xdd\xf1\x7b\xa8\x93\xd0\xa5\x54\xf8\xca\xd1\xc6\x09\xf9\x97\xff\x97\xaf\x0f\x3e\xbd\x03\xfe\xd4\xf9\x1f\x18\x74\x04\x97\x2e\xdc\x0c\xc9\x76\x48\xfb\x38\x94\x77\x9f\xd3\x0e\x4f\x78\x0a\x59\x72\x21\x47\x56\x76\xc8\xcd\x43\xec\x1e\x84\xa9\x32\x61\x53\x1d\xf3\x83\x2e\x68\xdc\x8d\x38\xea\x45\x08\xaf\x98\xa3\xef\x06\x51\x01\xe1\xc4\x9a\x46\x9d\x8e\x3e\xbe\xaa\xaa\x65\x3c\x3c\xf3\x15\x11\xaf\xe8\xcc\x56\xf0\xec\x63\xab\xb4\x25\x92\xac\xe8\x59\xee\x67\x4c\xb7\x89\x07\x1a\x00\x23\x8f\x7a\xca\x13\x6a\xbe\x4f\x41\x97\x63\xe9\x88\xaa\xac\x4d\x79\x1b\x4e\xf9\x90\xfd\x22\x1b\x55\xe1\x1b\x40\xab\x8c\x07\xe0\x51\xc3\xde\x41\x02\x9a\x0a\x84\xe4\x51\x52\x00\x6b\x20\xdc\x8d\xd9\x5e\x38\xe0\x3a\x53\x94\xe5\xf4\xac\x29\x50\x5c\xb1\x31\xba\x9a\x1c\x59\x01\x75\xfd\x81\xaa\xfc\x5d\xe1\x75\xda\xe4\x59\x3e\xc5\xdb\x6d\x91\xe6\x75\x88\xac\x62\x6a\x21\x8b\x11\xff\xed\x82\xb7\x0f\x5e\xb2\x18\x55\xd2\xd8\x9e\x7d\x1d\x79\xce\x23\xd7\xe2\x94\x46\x49\x67\x2e\xc9\x72\x96\xb4\xb8\xff\xb4\x17\xfc\x9a\x37\x74\x59\xe7\x95\x94\x71\x2b\x2b\x19\x3f\x52\x37\xd5\x29\xa4\x56\x2e\xa9\x70\xf7\xd4\xf9\xc1\xc3\xed\x25\xa7\x76\xb2\xac\xbd\xc0\xd4\x33\x48\xca\xe3\xab\xfa\x73\xe5\x5c\x2a\x38\x38\x94\x80\x1c\x13\x65\x8f\x5e\x7f\x8b\x94\x97\x94\xb5\x72\x9e\xba\x49\x5c\x5f\xf2\xc8\x6e\x04\x56\xf4\x3f\xeb\x05\x9f\xf0\x14\x86\x63\xa4\x91\x4e\x95\x37\x86\x65\x50\xa4\x93\x1c\x56\x3c\x80\x29\xef\xb0\x54\x27\x58\xa2\xf7\x43\x5f\xd0\xb1\x0a\xc4\x3e\xab\x16\x76\xbf\xc2\x9f\xb3\xed\xbc\xdd\x46\x49\x73\x50\xd2\xbe\xec\x91\x3d\x29\x8f\xe5\x28\xcb\x6f\x5a\x3f\x69\xb7\xca\xc4\xf4\xa8\xa7\x1e\x37\x69\xff\xba\x00\x05\xc3\x2a\x2c\xd5\xe0\x2c\x20\xbe\x50\x54\xd0\x12\xbd\x7e\xcc\xcf\xab\xd5\x93\x35\xe8\x09\xb9\xae\x21\x92\x58\x5d\x27\x96\x22\x36\xbc\x3a\x52\xeb\x19\x7f\xac\x68\x53\xdd\x01\xb5\xb2\xc8\xa7\x3c\xf2\x1a\xa3\x61\xca\xd4\x20\xff\x83\x5e\xf0\x1e\x6f\xc4\x0f\xa3\xca\x70\x6c\x3e\x57\xb9\xd0\x39\x86\xfa\x15\x81\xd0\x0a\xc4\xdb\x4a\x6d\x54\x06\xc6\x95\x6e\xda\x88\x5a\xaa\x02\x2c\x5d\x5d\xba\xf2\x8f\x8b\x22\xb1\x39\x47\x2b\xbf\xe8\x8e\x5b\xbe\x7f\x95\x38\x9f\x65\x98\x9a\xde\x64\xad\xe5\x55\x29\x62\x72\xc8\x59\x1e\x21\xf0\x22\xee\x08\x6a\x46\xd5\xee\x05\x75\xe0\x4e\xa0\xe7\x09\x8f\x5c\x53\xb6\xac\x72\xa1\xde\xed\xad\x81\x87\xae\x65\x04\x5a\xc4\x07\x02\x56\x6d\xe2\x92\x77\x9a\x3c\x5e\xc2\x65\x7c\xc0\x0b\xde\xe5\xa9\xca\x76\xd0\xd1\xaa\x4c\xa6\xa2\xa6\xb5\xf6\x75\x11\x48\x5e\x61\xdd\x7b\xc1\x23\xbb\x33\x9e\x46\x3c\xf3\x1f\xf1\x48\x63\xec\x28\x56\xcc\xa4\x05\x78\x24\x78\x2b\x3e\x0a\x2e\x79\xc0\x1f\x00\x98\x6e\xf9\x71\x38\x14\xea\x67\x90\x4f\xb4\x2d\xac\x64\x09\xd9\xf7\x28\xc6\xf2\xe7\x03\x25\xc8\x6f\x2e\x12\x7c\xba\xb1\x1d\x9a\xdf\x6a\x68\xfe\x7d\x1e\x29\x4d\x21\xff\x17\x3c\x72\xfb\x26\xb6\x87\x53\x51\x2b\x15\xb2\x81\x60\xce\xb4\xa5\xd7\x0b\x98\xa6\x0a\xac\x59\xeb\x20\x90\x68\xa4\xb1\x17\x4d\x38\xb3\x87\x5a\xb4\x53\x53
\x2e\xfa\xac\x47\x76\x4a\xf3\xd8\x7f\xc6\x0b\x9e\xf0\xe4\x5f\xb8\xe7\x6f\x01\x7e\x87\x9e\x62\xe7\xa3\x1e\x8b\x69\xcc\x93\x4e\xde\xd5\xf7\xeb\xb6\x8f\x2e\x1f\xc3\xe0\x52\x1c\x35\x53\x06\x72\x59\x5a\x9a\xfd\x94\xf7\x99\x4a\xa5\xeb\xb2\x24\x2c\xa3\x3d\x45\x5f\x5e\xbb\xf9\xc6\xe5\x63\xce\xba\xf9\x64\x8d\xec\x33\xdb\x82\xff\x64\x6d\x13\x9b\xd4\x5f\x78\xe5\xbe\x62\x2a\x47\xac\x38\xbf\x35\xa2\x72\x68\x9a\x18\xd9\x4b\x70\x49\xb7\x18\xf0\xc2\xc1\xc2\x49\x14\x3f\x3d\x35\xdb\x88\x63\xa8\x1a\xf0\x64\xb5\x39\x9c\xe5\xfd\x38\x6a\xb1\x05\x9e\x1f\x1f\x75\x97\xf5\x7b\x56\x96\x50\x70\xdd\x0f\xbd\xe7\x34\x39\x40\x52\x4b\x7d\xc3\x5a\x72\x35\x2b\xf8\x78\x0b\x89\x97\x59\x4d\x99\x7d\xf0\x13\x1e\xd9\x09\xd0\x01\x1f\xf1\x82\x5f\xf1\x74\x7a\x64\x6e\x31\xe4\x59\xef\x3b\x78\x5a\x1a\xfd\x71\x9d\x3e\xc0\x80\xbf\xff\x50\x1d\xd4\x1d\x72\xd5\xb5\xf4\xe4\x61\xe8\x57\x87\xb6\x00\xfe\xf7\x12\x58\xb2\xce\x74\x7f\xd6\x23\x7f\xbb\xdc\x1b\x4e\x4a\xe1\x96\xcb\x20\xcb\x59\xaf\xef\x3f\xee\x8d\xa7\x5f\x5a\x63\x5d\xc1\x92\x8a\xc6\xb5\x7a\xe9\x77\xa4\xff\xe4\x91\x1f\x29\x5b\xbb\x97\xd9\x9f\xf0\xaf\xb6\xf0\x09\xdd\x31\x8d\x5e\xf2\x2f\xb8\xe8\xfd\xc3\xf5\xc3\x93\x37\xf9\x37\x68\x4f\x0b\x3c\x3a\xce\xc3\xe2\xba\x6e\xc9\x6f\xec\x26\xf5\x91\x38\x7c\x0d\x4c\xb5\x9c\xc9\x73\xd6\xea\xf6\x8c\x09\xe1\xbf\x63\x77\xf0\x47\xde\xe8\xdf\x5c\xbc\x54\x3b\xe3\xb5\x54\x38\x0c\x9e\x91\x9a\xf1\x6c\xd4\xe9\xe6\x34\x11\xab\x98\x14\x6d\xb2\xb8\xef\x57\x69\xcf\x0a\x63\x53\x3f\x40\x57\x22\x56\x42\xa3\xaa\xab\x69\xdd\x42\xbe\x5e\x45\x76\x09\x0c\x38\x43\xa6\x76\x94\xc4\x51\xc2\x4d\x22\x75\x94\x40\xa5\x27\x64\xce\xb6\x72\x70\x05\x18\xa8\x12\xf5\x36\x84\x2e\xb2\x8e\xce\xff\x62\x27\xf9\xcc\x0e\x72\x0d\xb6\xa4\x12\xd1\xfb\xbc\xe5\x3f\xb5\x63\x02\xb6\xd4\x32\x33\xbd\x7c\x32\xf8\xf3\x5a\xb5\x31\xab\x7e\x2e\x8e\x41\x74\x6c\xbc\xaf\x84\xb7\x78\x96\x31\x2c\x46\xc4\xef\xc6\x34\x22\xed\x8a\x50\x03\x6d\xd5\x2a\x31\x05\xef\xa9\xbe\xdf\xce\x9f\xaf\xa2\x1b\xf5\x45\xbf\x50\x06\x7b\xa2\x30\xbf\x65\x07\x8e\x2f\xcc\x9d\x8a\x3a\x2a\x2b\xab\xcd\x99\xd1\x2d\xa5\xc2\x4e\x59\x92\xc5\xa5\x74\x67\x9a\xc2\x7d\xec\xab\x31\xff\x69\xd4\x98\x54\x3b\xd5\xe4\x39\x9b\x8a\xa5\x09\x80\xa6\x5d\x86\x9d\xeb\x0a\x44\x68\x50\x50\xab\x3c\x55\xb9\x75\x3c\x41\x28\x92\xb1\x1d\x27\xa7\xc8\xdf\xec\x57\xde\x0b\x10\x00\x37\x05\x07\xed\xa2\xbe\xe1\x41\x35\x43\xee\x68\xc5\x5f\xd9\x43\x5e\x3f\xca\xd2\x4e\x56\xee\x67\xa9\x5a\x27\x2f\xef\x0e\x6e\xb7\x2f\x54\xf8\x62\xf1\x9a\x1e\x6e\x53\x66\xcf\x12\x8a\x0f\xb9\x52\xf8\xd4\x6e\x12\x91\xab\x31\xc3\xf6\x14\xeb\xdf\xc3\x07\x67\x79\xdb\xbf\x7f\x02\x11\x3c\x6e\x3d\xb3\xc0\x31\xef\x22\xf8\xbb\xf8\x97\xec\xc8\x32\x1f\x20\x83\x8c\xb9\xb1\x41\x7e\xad\x46\xf6\xc2\x5c\xc8\x97\xbc\xbb\x46\x6e\x9c\x70\x8b\x07\xec\x03\xf3\x92\xaf\x79\xe5\x5b\x0c\x53\x8d\xaa\x9d\x9d\xd6\x75\x01\xd6\x29\x5b\x1e\xf8\xea\xee\xbf\xaa\xd4\x71\xc9\x5c\x44\x9e\xdb\x9f\x3c\x70\xfb\x3d\x27\x1e\xbc\xf3\xc0\x5b\x97\xec\xdf\x60\x53\xc3\xfc\x4a\xfb\x86\x4c\xca\x57\x22\x42\x98\x70\xf5\x6f\xe6\x94\xe8\xa9\x1f\x10\xd9\x03\xf1\x99\xcd\xbf\x7d\x11\x56\xff\xcb\x1a\xe4\xf1\x1a\xb9\x46\xdb\x9d\x27\xf5\x40\x3d\x52\x23\x37\xaf\x3b\x50\x67\xed\xa7\xcc\x50\xfd\xbe\x35\x54\xc6\xb3\x63\x9d\x7d\xa1\x16\x65\x1a\x17\x81\xb1\x77\xd1\x61\x9a\x29\x56\x00\xe5\xb1\x39\x88\x17\x1b\xad\x7e\x51\x57\x37\x34\x7a\xbc\x27\xd2\x81\xf9\x97\xf7\xbb\xbc\xc7\x53\x16\x4f\x29\x65\x5f\x37\x8f\xe3\x63\xe6\x3f\x7c\xd0\x79\xc1\xf0\xd3\x87\xc6\x71\xe7\x36\xc8\x0a\xb9\x02\x01\x34\x94\xbc\xb6\xc9\xf5\xeb\x0e\xd0\x82\x7e\xc0\x0c\xce\x91\x11\xc2\xaa\x70\x39\x22\x1b\xcb\xd8\x08\x0c\xf9\xe3\xfd\xe4\xef\x8c\x68\xda\xd4\xde\xfa\xcf\xed\x0f\x4e\x95\x95\xb8\x2a\x7
[… remainder of the hex-escaped, compressed swagger.pb byte literal elided (uncompressed size 3,469,475 bytes per the file info below) …]\x00\x00\xff\xff\x78\xfb\xb7\x2b\xa3\xf0\x34\x00")
+
+func kubernetesapiV1_21_2SwaggerPbBytes() ([]byte, error) {
+	return bindataRead(
+		_kubernetesapiV1_21_2SwaggerPb,
+		"kubernetesapi/v1_21_2/swagger.pb",
+	)
+}
+
+func kubernetesapiV1_21_2SwaggerPb() (*asset, error) {
+	bytes, err := kubernetesapiV1_21_2SwaggerPbBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "kubernetesapi/v1_21_2/swagger.pb", size: 3469475, mode: os.FileMode(420), modTime: time.Unix(1658180420, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
+		}
+		return a.bytes, nil
+	}
+	return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+	a, err := Asset(name)
+	if err != nil {
+		panic("asset: Asset(" + name + "): " + err.Error())
+	}
+
+	return a
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
+		}
+		return a.info, nil
+	}
+	return nil, fmt.Errorf("AssetInfo %s not found", name)
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+	names := make([]string, 0, len(_bindata))
+	for name := range _bindata {
+		names = append(names, name)
+	}
+	return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() (*asset, error){
+	"kubernetesapi/v1_21_2/swagger.pb": kubernetesapiV1_21_2SwaggerPb,
+}
+
+// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+//     data/
+//       foo.txt
+//       img/
+//         a.png
+//         b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"}
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+	node := _bintree
+	if len(name) != 0 {
+		cannonicalName := strings.Replace(name, "\\", "/", -1)
+		pathList := strings.Split(cannonicalName, "/")
+		for _, p := range pathList {
+			node = node.Children[p]
+			if node == nil {
+				return nil, fmt.Errorf("Asset %s not found", name)
+			}
+		}
+	}
+	if node.Func != nil {
+		return nil, fmt.Errorf("Asset %s not found", name)
+	}
+	rv := make([]string, 0, len(node.Children))
+	for childName := range node.Children {
+		rv = append(rv, childName)
+	}
+	return rv, nil
+}
+
+type bintree struct {
+	Func     func() (*asset, error)
+	Children map[string]*bintree
+}
+
+var _bintree = &bintree{nil, map[string]*bintree{
+	"kubernetesapi": &bintree{nil, map[string]*bintree{
+		"v1_21_2": &bintree{nil, map[string]*bintree{
+			"swagger.pb": &bintree{kubernetesapiV1_21_2SwaggerPb, map[string]*bintree{}},
+		}},
+	}},
+}}
+
+// RestoreAsset restores an asset under the given directory
+func RestoreAsset(dir, name string) error {
+	data, err := Asset(name)
+	if err != nil {
+		return err
+	}
+	info, err := AssetInfo(name)
+	if err != nil {
+		return err
+	}
+	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+	if err != nil {
+		return err
+	}
+	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// RestoreAssets restores an asset under the given directory recursively
+func RestoreAssets(dir, name string) error {
+	children, err := AssetDir(name)
+	// File
+	if err != nil {
+		return RestoreAsset(dir, name)
+	}
+	// Dir
+	for _, child := range children {
+		err = RestoreAssets(dir, filepath.Join(name, child))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func _filePath(dir, name string) string {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2/swagger.pb b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2/swagger.pb
new file mode 100644
index 000000000..910436fc5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1_21_2/swagger.pb
@@ -0,0 +1,44195 @@
[… 44,195 lines of binary protobuf content elided: the embedded Kubernetes v1.21.2 OpenAPI document (swagger.pb); not meaningfully representable as text …]
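The generated file above exposes a small read-only asset API (Asset, MustAsset, AssetInfo, AssetNames, AssetDir, RestoreAsset, RestoreAssets). A minimal usage sketch follows; it is not part of the patch and assumes it compiles in the same package as the generated file (which already imports fmt). The exampleUsage name and the /tmp output path are illustrative only.

	// exampleUsage is a hypothetical sketch exercising the generated
	// go-bindata accessors shown in the vendored file above.
	func exampleUsage() error {
		// Enumerate the embedded assets; here there is exactly one:
		// "kubernetesapi/v1_21_2/swagger.pb".
		for _, name := range AssetNames() {
			fmt.Println(name)
		}

		// Load the embedded OpenAPI document as raw bytes
		// (3,469,475 bytes once decompressed, per bindataFileInfo above).
		data, err := Asset("kubernetesapi/v1_21_2/swagger.pb")
		if err != nil {
			return err
		}
		fmt.Printf("loaded %d bytes\n", len(data))

		// Write the assets back out to disk, recreating the embedded
		// directory layout under the given root.
		return RestoreAssets("/tmp/openapi-assets", "kubernetesapi")
	}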
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jF +x-kubernetes-group-version-kind#!group: "" +kind: Node +version: v1 +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ü( +;/api/v1/watch/namespaces/{namespace}/replicationcontrollers¼(Î +core_v1watch individual changes to a list of ReplicationController. deprecated: use the 'watch' parameter with a list operation instead.*.watchCoreV1NamespacedReplicationControllerList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjW +x-kubernetes-group-version-kind42group: "" +kind: ReplicationController +version: v1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ô) +@/apis/batch/v1beta1/watch/namespaces/{namespace}/cronjobs/{name})ë + batch_v1beta1®watch changes to an object of kind CronJob. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*"watchBatchV1beta1NamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jQ +x-kubernetes-group-version-kind.,group: batch +kind: CronJob +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J4 +20".pathname of the CronJob"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean —] +//apis/apps/v1/namespaces/{namespace}/daemonsetsã\ü% +apps_v1'list or watch objects of kind DaemonSet*listAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.apps.v1.DaemonSetList + +401 + + UnauthorizedRhttpsjM +x-kubernetes-group-version-kind*(version: v1 +group: apps +kind: DaemonSet +j +x-kubernetes-actionlist +"â +apps_v1create a DaemonSet*createAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B@ +> +<bodybody *, +*#/definitions/io.k8s.api.apps.v1.DaemonSetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Já +C +202< +: +Accepted. +, +*#/definitions/io.k8s.api.apps.v1.DaemonSet + +401 + + Unauthorized += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.apps.v1.DaemonSet +B +201; +9 +Created. +, +*#/definitions/io.k8s.api.apps.v1.DaemonSetRhttpsj +x-kubernetes-actionpost +jM +x-kubernetes-group-version-kind*(version: v1 +group: apps +kind: DaemonSet +*É, +apps_v1delete collection of DaemonSet*)deleteAppsV1CollectionNamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jM +x-kubernetes-group-version-kind*(group: apps +kind: DaemonSet +version: v1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ó +;/api/v1/namespaces/{namespace}/serviceaccounts/{name}/token³ "ˆ +core_v1 create token of a ServiceAccount*)createCoreV1NamespacedServiceAccountToken2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BM +K +Ibodybody *9 +7#/definitions/io.k8s.api.authentication.v1.TokenRequestJˆ +P +202I +G +Accepted; +9 +7#/definitions/io.k8s.api.authentication.v1.TokenRequest + +401 + + Unauthorized +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.authentication.v1.TokenRequest +O +201H +F +Created; +9 +7#/definitions/io.k8s.api.authentication.v1.TokenRequestRhttpsja +x-kubernetes-group-version-kind> +< +:#/definitions/io.k8s.api.core.v1.ReplicationControllerList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jW +x-kubernetes-group-version-kind42group: "" +kind: ReplicationController +version: v1 +"´ +core_v1create a ReplicationController*+createCoreV1NamespacedReplicationController2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BL +J +Hbodybody *8 +6#/definitions/io.k8s.api.core.v1.ReplicationControllerBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string J… +I +200B +@ +OK: +8 +6#/definitions/io.k8s.api.core.v1.ReplicationController +N +201G +E +Created: +8 +6#/definitions/io.k8s.api.core.v1.ReplicationController +O +202H +F +Accepted: +8 +6#/definitions/io.k8s.api.core.v1.ReplicationController + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jW +x-kubernetes-group-version-kind42group: "" +kind: ReplicationController +version: v1 +*ë, +core_v1*delete collection of ReplicationController*5deleteCoreV1CollectionNamespacedReplicationController2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jW +x-kubernetes-group-version-kind42group: "" +kind: ReplicationController +version: v1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string õ +?/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status±û +apps_v1(read status of the specified StatefulSet*%readAppsV1NamespacedStatefulSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JZ +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.apps.v1.StatefulSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jO +x-kubernetes-group-version-kind,*group: apps +kind: StatefulSet +version: v1 +Æ +apps_v1+replace status of the specified StatefulSet*(replaceAppsV1NamespacedStatefulSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB +@ +>bodybody *. +,#/definitions/io.k8s.api.apps.v1.StatefulSetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J  +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.apps.v1.StatefulSet +D +201= +; +Created0 +. +,#/definitions/io.k8s.api.apps.v1.StatefulSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jO +x-kubernetes-group-version-kind,*group: apps +kind: StatefulSet +version: v1 +Bú +apps_v14partially update status of the specified StatefulSet*&patchAppsV1NamespacedStatefulSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JZ +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.apps.v1.StatefulSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jO +x-kubernetes-group-version-kind,*group: apps +kind: StatefulSet +version: v1 +J8 +64"2pathname of the StatefulSet"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ß +/apis/autoscaling/ÈÅ + autoscalingget information of a group*getAutoscalingAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi +N +200G +E +OK? += +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup + +401 + + UnauthorizedRhttpsâ +/api/v1/nodes/{name}/statusÂÓ +core_v1!read status of the specified Node*readCoreV1NodeStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JS +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.core.v1.Node + +401 + + UnauthorizedRhttpsjF +x-kubernetes-group-version-kind#!group: "" +kind: Node +version: v1 +j +x-kubernetes-actionget + +core_v1$replace status of the specified Node*replaceCoreV1NodeStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B; +9 +7bodybody *' +%#/definitions/io.k8s.api.core.v1.NodeBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J’ +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.core.v1.Node += +2016 +4 +Created) +' +%#/definitions/io.k8s.api.core.v1.Node + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jF +x-kubernetes-group-version-kind#!kind: Node +version: v1 +group: "" +BÒ +core_v1-partially update status of the specified Node*patchCoreV1NodeStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JS +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.core.v1.Node + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jF +x-kubernetes-group-version-kind#!group: "" +kind: Node +version: v1 +J1 +/-"+pathname of the Node"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ä' +/apis/apps/v1/watch/deploymentsÀ'´ +apps_v1vwatch individual changes to a list of Deployment. deprecated: use the 'watch' parameter with a list operation instead.*)watchAppsV1DeploymentListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)version: v1 +group: apps +kind: Deployment +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‹^ +4/apis/rbac.authorization.k8s.io/v1beta1/clusterrolesÒ]½& +rbacAuthorization_v1beta1)list or watch objects of kind ClusterRole*'listRbacAuthorizationV1beta1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+[Vendored binary file content omitted: a protobuf-encoded Kubernetes OpenAPI v2 (swagger) document, not representable as text. The legible fragments are the standard generated operation descriptions (get/list/watch/create/replace/patch/delete/deletecollection) and their query parameters (allowWatchBookmarks, continue, dryRun, fieldManager, fieldSelector, force, gracePeriodSeconds, labelSelector, limit, orphanDependents, pretty, propagationPolicy, resourceVersion, resourceVersionMatch, timeoutSeconds, watch) for resources in the core, apps, rbac.authorization.k8s.io, coordination.k8s.io, storage.k8s.io, extensions, policy, events.k8s.io, authentication.k8s.io, flowcontrol.apiserver.k8s.io, and apiextensions.k8s.io API groups.]
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JE +CA"?path$name of the CustomResourceDefinition"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ð) +>/apis/apps/v1/watch/namespaces/{namespace}/statefulsets/{name})å +apps_v1²watch changes to an object of kind StatefulSet. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.* watchAppsV1NamespacedStatefulSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjO +x-kubernetes-group-version-kind,*group: apps +kind: StatefulSet +version: v1 +j +x-kubernetes-actionwatch +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J8 +64"2pathname of the StatefulSet"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ú +/apis/storage.k8s.io/À½ +storageget information of a group*getStorageAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi + +401 + + Unauthorized +N +200G +E +OK? 
+= +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupRhttps•' +#/apis/coordination.k8s.io/v1/leasesí&á +coordination_v1#list or watch objects of kind Lease*'listCoordinationV1LeaseForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J` +E +200> +< +OK6 +4 +2#/definitions/io.k8s.api.coordination.v1.LeaseList + +401 + + UnauthorizedRhttpsjX +x-kubernetes-group-version-kind53group: coordination.k8s.io +kind: Lease +version: v1 +j +x-kubernetes-actionlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean «\ + /apis/storage.k8s.io/v1/csinodes†\ý% + +storage_v1%list or watch objects of kind CSINode*listStorageV1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J] +B +200; +9 +OK3 +1 +/#/definitions/io.k8s.api.storage.v1.CSINodeList + +401 + + UnauthorizedRhttpsjU +x-kubernetes-group-version-kind20kind: CSINode +version: v1 +group: storage.k8s.io +j +x-kubernetes-actionlist +"æ + +storage_v1create a CSINode*createStorageV1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA +? +=bodybody *- ++#/definitions/io.k8s.api.storage.v1.CSINodeBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jä +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.storage.v1.CSINode +C +201< +: +Created/ +- ++#/definitions/io.k8s.api.storage.v1.CSINode +D +202= +; +Accepted/ +- ++#/definitions/io.k8s.api.storage.v1.CSINode + +401 + + UnauthorizedRhttpsjU +x-kubernetes-group-version-kind20group: storage.k8s.io +kind: CSINode +version: v1 +j +x-kubernetes-actionpost +*É, + +storage_v1delete collection of CSINode* deleteStorageV1CollectionCSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjU +x-kubernetes-group-version-kind20version: v1 +group: storage.k8s.io +kind: CSINode +j* +x-kubernetes-actiondeletecollection +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ñ& +/api/v1/componentstatusesÓ&Ç +core_v1$list objects of kind ComponentStatus*listCoreV1ComponentStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jb +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.core.v1.ComponentStatusList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jQ +x-kubernetes-group-version-kind.,version: v1 +group: "" +kind: ComponentStatus +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ä' +/apis/apps/v1/watch/replicasetsÀ'´ +apps_v1vwatch individual changes to a list of ReplicaSet. deprecated: use the 'watch' parameter with a list operation instead.*)watchAppsV1ReplicaSetListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)group: apps +kind: ReplicaSet +version: v1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ä +group: scheduling.k8s.io +kind: PriorityClass +version: v1beta1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean * +N/apis/networking.k8s.io/v1/watch/namespaces/{namespace}/networkpolicies/{name}®)„ + networking_v1´watch changes to an object of kind NetworkPolicy. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*(watchNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +j^ +x-kubernetes-group-version-kind;9group: networking.k8s.io +kind: NetworkPolicy +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J: +86"4pathname of the NetworkPolicy"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ð\ +%/api/v1/namespaces/{namespace}/events¦\ê% +core_v1#list or watch objects of kind Event*listCoreV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
[Binary content, not representable as text: a protobuf-encoded Kubernetes OpenAPI v2 (swagger) document carried in this patch. Recoverable information: it defines the standard operation and parameter schemas for built-in Kubernetes API endpoints, including core/v1 Event (list, create, deletecollection), /api/v1/namespaces/{namespace}/pods/{name}/exec (PodExecOptions: command, container, stdin, stdout, stderr, tty), autoscaling/v1 HorizontalPodAutoscaler (read, replace, delete, patch), batch/v1 Job (list across namespaces), deprecated watch-list endpoints for PersistentVolumeClaim, PodTemplate, ConfigMap, Event, APIService (apiregistration.k8s.io/v1), and Lease (coordination.k8s.io/v1), apiextensions.k8s.io/v1beta1 CustomResourceDefinition (read, replace, delete, patch), admissionregistration.k8s.io MutatingWebhookConfiguration (create, deletecollection), and rbac.authorization.k8s.io/v1beta1 Role (create, deletecollection). Each operation carries the standard Kubernetes parameter descriptions (dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy, allowWatchBookmarks, continue, limit, fieldSelector, labelSelector, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, pretty) together with the usual 200/201/202/401 response definitions and x-kubernetes-action / x-kubernetes-group-version-kind extensions.]
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ü +/apis/authorization.k8s.io/v1/ÙÖ +authorization_v1get available resources*getAuthorizationV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttpsù( +L/apis/flowcontrol.apiserver.k8s.io/v1beta1/watch/prioritylevelconfigurations¨(œ +flowcontrolApiserver_v1beta1†watch individual changes to a list of PriorityLevelConfiguration. deprecated: use the 'watch' parameter with a list operation instead.*>watchFlowcontrolApiserverV1beta1PriorityLevelConfigurationList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj{ +x-kubernetes-group-version-kindXVkind: PriorityLevelConfiguration +version: v1beta1 +group: flowcontrol.apiserver.k8s.io +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean —) +F/apis/policy/v1beta1/watch/namespaces/{namespace}/poddisruptionbudgetsÌ(Þ +policy_v1beta1watch individual changes to a list of PodDisruptionBudget. deprecated: use the 'watch' parameter with a list operation instead.*3watchPolicyV1beta1NamespacedPodDisruptionBudgetList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +j^ +x-kubernetes-group-version-kind;9group: policy +kind: PodDisruptionBudget +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ’) +0/api/v1/namespaces/{namespace}/configmaps/{name}Ý(á +core_v1read the specified ConfigMap*readCoreV1NamespacedConfigMap2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JX += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.core.v1.ConfigMap + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jK +x-kubernetes-group-version-kind(&group: "" +kind: ConfigMap +version: v1 +¨ +core_v1replace the specified ConfigMap* replaceCoreV1NamespacedConfigMap2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B@ +> +<bodybody *, +*#/definitions/io.k8s.api.core.v1.ConfigMapBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jœ += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.core.v1.ConfigMap +B +201; +9 +Created. +, +*#/definitions/io.k8s.api.core.v1.ConfigMap + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jK +x-kubernetes-group-version-kind(&group: "" +kind: ConfigMap +version: v1 +*ý +core_v1delete a ConfigMap*deleteCoreV1NamespacedConfigMap2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj +x-kubernetes-action delete +jK +x-kubernetes-group-version-kind(&version: v1 +group: "" +kind: ConfigMap +Bà +core_v1(partially update the specified ConfigMap*patchCoreV1NamespacedConfigMap2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JX += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.core.v1.ConfigMap + +401 + + UnauthorizedRhttpsjK +x-kubernetes-group-version-kind(&group: "" +kind: ConfigMap +version: v1 +j +x-kubernetes-actionpatch +J6 +42"0pathname of the ConfigMap"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ž +6/api/v1/namespaces/{namespace}/pods/{name}/portforwardÓ¥ +core_v1*connect GET requests to portforward of Pod*(connectCoreV1GetNamespacedPodPortforward2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsjW +x-kubernetes-group-version-kind42group: "" +kind: PodPortForwardOptions +version: v1 +j! +x-kubernetes-action +connect +"§ +core_v1+connect POST requests to portforward of Pod*)connectCoreV1PostNamespacedPodPortforward2*/*:*/*J7 + +401 + + Unauthorized + +200 + +OK + ² +stringRhttpsjW +x-kubernetes-group-version-kind42group: "" +kind: PodPortForwardOptions +version: v1 +j! 
+x-kubernetes-action +connect +JB +@>"<path!name of the PodPortForwardOptions"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JY +WUSquery7List of ports to forward Required when using WebSockets"ports2integer ° +B/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/scaleé +core_v11read scale of the specified ReplicationController*.readCoreV1NamespacedReplicationControllerScale2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J[ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jP +x-kubernetes-group-version-kind-+group: autoscaling +kind: Scale +version: v1 +Ü +core_v14replace scale of the specified ReplicationController*1replaceCoreV1NamespacedReplicationControllerScale2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BC +A +?bodybody */ +-#/definitions/io.k8s.api.autoscaling.v1.ScaleBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¢ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale +E +201> +< +Created1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale + +401 + + UnauthorizedRhttpsjP +x-kubernetes-group-version-kind-+group: autoscaling +kind: Scale +version: v1 +j +x-kubernetes-actionput +BŽ +core_v1=partially update scale of the specified ReplicationController*/patchCoreV1NamespacedReplicationControllerScale2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean J[ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jP +x-kubernetes-group-version-kind-+group: autoscaling +kind: Scale +version: v1 +J2 +0.",pathname of the Scale"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ï( +0/api/v1/watch/namespaces/{namespace}/limitrangesš(¬ +core_v1vwatch individual changes to a list of LimitRange. deprecated: use the 'watch' parameter with a list operation instead.*#watchCoreV1NamespacedLimitRangeList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jL +x-kubernetes-group-version-kind)'kind: LimitRange +version: v1 +group: "" +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ó( +-/apis/storage.k8s.io/v1/watch/csinodes/{name}¡(ß + +storage_v1®watch changes to an object of kind CSINode. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchStorageV1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jU +x-kubernetes-group-version-kind20group: storage.k8s.io +kind: CSINode +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J4 +20".pathname of the CSINode"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
+ +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ì' +/api/v1/watch/secrets²'¦ +core_v1rwatch individual changes to a list of Secret. deprecated: use the 'watch' parameter with a list operation instead.*%watchCoreV1SecretListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# +x-kubernetes-action  +watchlist +jH +x-kubernetes-group-version-kind%#group: "" +kind: Secret +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¹` +C/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurationsñ_Š' +admissionregistration_v1:list or watch objects of kind MutatingWebhookConfiguration*7listAdmissionregistrationV1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean J€ +e +200^ +\ +OKV +T +R#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jx +x-kubernetes-group-version-kindUSversion: v1 +group: admissionregistration.k8s.io +kind: MutatingWebhookConfiguration +"Û + +admissionregistration_v1%create a MutatingWebhookConfiguration*9createAdmissionregistrationV1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Bd +b +`bodybody *P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÍ +a +200Z +X +OKR +P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration +f +201_ +] +CreatedR +P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration +g +202` +^ +AcceptedR +P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jx +x-kubernetes-group-version-kindUSgroup: admissionregistration.k8s.io +kind: MutatingWebhookConfiguration +version: v1 +*²- +admissionregistration_v11delete collection of MutatingWebhookConfiguration*CdeleteAdmissionregistrationV1CollectionMutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
[Binary vendored file omitted: a protobuf-serialized Kubernetes OpenAPI v2 (Swagger) document that does not render as text in this diff. The recoverable fragments describe the standard Kubernetes REST operations (list, watch, get, create, replace, patch, delete, deletecollection) for MutatingWebhookConfiguration (admissionregistration.k8s.io/v1), CustomResourceDefinition (apiextensions.k8s.io/v1), IngressClass (networking.k8s.io/v1 and v1beta1), Role (rbac.authorization.k8s.io/v1), PersistentVolumeClaim, LimitRange, Namespace, and Node (core/v1), HorizontalPodAutoscaler (autoscaling/v1), ReplicaSet (apps/v1), PodDisruptionBudget (policy/v1), and PodSecurityPolicy (policy/v1beta1), together with the usual query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy, pretty) and their response schemas.]
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J1 +/-"+pathname of the Node"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
+ +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ð +/apis/discovery.k8s.io/v1/ÑÎ + discovery_v1get available resources*getDiscoveryV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps…) +E/apis/discovery.k8s.io/v1/watch/namespaces/{namespace}/endpointslices»(Í + discovery_v1ywatch individual changes to a list of EndpointSlice. deprecated: use the 'watch' parameter with a list operation instead.*+watchDiscoveryV1NamespacedEndpointSliceList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +j] +x-kubernetes-group-version-kind:8kind: EndpointSlice +version: v1 +group: discovery.k8s.io +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean å* +2/apis/apiregistration.k8s.io/v1/apiservices/{name}®*¥ +apiregistration_v1read the specified APIService*readApiregistrationV1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jy +^ +200W +U +OKO +M +K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j` +x-kubernetes-group-version-kind=;group: apiregistration.k8s.io +version: v1 +kind: APIService +® +apiregistration_v1 replace the specified APIService*"replaceApiregistrationV1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ba +_ +]bodybody *M +K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÞ + +401 + + Unauthorized +^ +200W +U +OKO +M +K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService +c +201\ +Z +CreatedO +M +K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceRhttpsj +x-kubernetes-actionput +j` +x-kubernetes-group-version-kind=;version: v1 +kind: APIService +group: apiregistration.k8s.io +*¡ +apiregistration_v1delete an APIService*!deleteApiregistrationV1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +j` +x-kubernetes-group-version-kind=;group: apiregistration.k8s.io +version: v1 +kind: APIService +B¤ +apiregistration_v1)partially update the specified APIService* patchApiregistrationV1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jy + +401 + + Unauthorized +^ +200W +U +OKO +M +K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceRhttpsj +x-kubernetes-actionpatch +j` +x-kubernetes-group-version-kind=;group: apiregistration.k8s.io +version: v1 +kind: APIService +J7 +53"1pathname of the APIService"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string í( +?/apis/events.k8s.io/v1beta1/watch/namespaces/{namespace}/events©(» +events_v1beta1qwatch individual changes to a list of Event. deprecated: use the 'watch' parameter with a list operation instead.*%watchEventsV1beta1NamespacedEventList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jW +x-kubernetes-group-version-kind42group: events.k8s.io +kind: Event +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ž +$/apis/authentication.k8s.io/v1beta1/åâ +authentication_v1beta1get available resources*$getAuthenticationV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps”] +'/apis/storage.k8s.io/v1beta1/csidriversè\™& +storage_v1beta1'list or watch objects of kind CSIDriver*listStorageV1beta1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jd +I +200B +@ +OK: +8 +6#/definitions/io.k8s.api.storage.v1beta1.CSIDriverList + +401 + + UnauthorizedRhttpsj\ +x-kubernetes-group-version-kind97group: storage.k8s.io +kind: CSIDriver +version: v1beta1 +j +x-kubernetes-actionlist +"— +storage_v1beta1create a CSIDriver*createStorageV1beta1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BH +F +Dbodybody *4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriverBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jù +E +200> +< +OK6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver +J +201C +A +Created6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver +K +202D +B +Accepted6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +j\ +x-kubernetes-group-version-kind97kind: CSIDriver +version: v1beta1 +group: storage.k8s.io +*Þ, +storage_v1beta1delete collection of CSIDriver*'deleteStorageV1beta1CollectionCSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +j\ +x-kubernetes-group-version-kind97group: storage.k8s.io +kind: CSIDriver +version: v1beta1 +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ¿+ +7/apis/apiregistration.k8s.io/v1beta1/apiservices/{name}ƒ+¹ +apiregistration_v1beta1read the specified APIService*$readApiregistrationV1beta1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J~ +c +200\ +Z +OKT +R +P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService + +401 + + UnauthorizedRhttpsje +x-kubernetes-group-version-kindB@group: apiregistration.k8s.io +version: v1beta1 +kind: APIService +j +x-kubernetes-actionget +Ì +apiregistration_v1beta1 replace the specified APIService*'replaceApiregistrationV1beta1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Bf +d +bbodybody *R +P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jè +c +200\ +Z +OKT +R +P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService +h +201a +_ +CreatedT +R +P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService + +401 + + UnauthorizedRhttpsje +x-kubernetes-group-version-kindB@group: apiregistration.k8s.io +version: v1beta1 +kind: APIService +j +x-kubernetes-actionput +*° +apiregistration_v1beta1delete an APIService*&deleteApiregistrationV1beta1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsje +x-kubernetes-group-version-kindB@group: apiregistration.k8s.io +version: v1beta1 +kind: APIService +j +x-kubernetes-action delete +B¸ +apiregistration_v1beta1)partially update the specified APIService*%patchApiregistrationV1beta1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J~ + +401 + + Unauthorized +c +200\ +Z +OKT +R +P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceRhttpsje +x-kubernetes-group-version-kindB@group: apiregistration.k8s.io +version: v1beta1 +kind: APIService +j +x-kubernetes-actionpatch +J7 +53"1pathname of the APIService"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ã( +9/apis/batch/v1beta1/watch/namespaces/{namespace}/cronjobs¥(· + batch_v1beta1swatch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.*&watchBatchV1beta1NamespacedCronJobList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jQ +x-kubernetes-group-version-kind.,group: batch +kind: CronJob +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ƒ) +E/apis/rbac.authorization.k8s.io/v1/watch/namespaces/{namespace}/roles¹(Ë +rbacAuthorization_v1pwatch individual changes to a list of Role. deprecated: use the 'watch' parameter with a list operation instead.**watchRbacAuthorizationV1NamespacedRoleList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj] +x-kubernetes-group-version-kind:8version: v1 +group: rbac.authorization.k8s.io +kind: Role +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
[non-text content elided: protobuf-serialized Kubernetes OpenAPI v2 (swagger) data from a vendored dependency, rendered here as mojibake. The blob contains repeated path, parameter, and response definitions for Kubernetes API resources — including core/v1 (Namespace, LimitRange, ConfigMap, Service), batch/v1 CronJob, apps/v1 StatefulSet, storage.k8s.io/v1beta1 CSINode, certificates.k8s.io v1/v1beta1 CertificateSigningRequest, networking.k8s.io/v1beta1 Ingress, node.k8s.io/v1beta1 RuntimeClass, autoscaling/v2beta2 HorizontalPodAutoscaler, authorization.k8s.io/v1beta1 LocalSubjectAccessReview, and rbac.authorization.k8s.io/v1 ClusterRoleBinding — with standard upstream descriptions of list/watch query parameters (limit, continue, fieldSelector, labelSelector, resourceVersion, resourceVersionMatch, allowWatchBookmarks, timeoutSeconds, watch), delete options (dryRun, gracePeriodSeconds, orphanDependents, propagationPolicy), and patch options (fieldManager, force). Not representable as text.]
If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J? +=;"9pathname of the ClusterRoleBinding"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‘) + +< +OK6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver + +401 + + UnauthorizedRhttpsj\ +x-kubernetes-group-version-kind97group: storage.k8s.io +kind: CSIDriver +version: v1beta1 +j +x-kubernetes-actionget +× +storage_v1beta1replace the specified CSIDriver*replaceStorageV1beta1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BH +F +Dbodybody *4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriverBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¬ +E +200> +< +OK6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver +J +201C +A +Created6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver + +401 + + UnauthorizedRhttpsj\ +x-kubernetes-group-version-kind97group: storage.k8s.io +kind: CSIDriver +version: v1beta1 +j +x-kubernetes-actionput +*† +storage_v1beta1delete a CSIDriver*deleteStorageV1beta1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J­ +E +200> +< +OK6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver +K +202D +B +Accepted6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +j\ +x-kubernetes-group-version-kind97group: storage.k8s.io +kind: CSIDriver +version: v1beta1 +Bÿ +storage_v1beta1(partially update the specified CSIDriver*patchStorageV1beta1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J` +E +200> +< +OK6 +4 +2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +j\ +x-kubernetes-group-version-kind97group: storage.k8s.io +kind: CSIDriver +version: v1beta1 +J6 +42"0pathname of the CSIDriver"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ”) +;/apis/storage.k8s.io/v1beta1/watch/volumeattachments/{name}Ô(‰ +storage_v1beta1·watch changes to an object of kind VolumeAttachment. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*#watchStorageV1beta1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jc +x-kubernetes-group-version-kind@>kind: VolumeAttachment +version: v1beta1 +group: storage.k8s.io +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J= +;9"7pathname of the VolumeAttachment"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
+ +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean •] +(/apis/node.k8s.io/v1beta1/runtimeclassesè\™& + node_v1beta1*list or watch objects of kind RuntimeClass*listNodeV1beta1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jd +I +200B +@ +OK: +8 +6#/definitions/io.k8s.api.node.v1beta1.RuntimeClassList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +j\ +x-kubernetes-group-version-kind97group: node.k8s.io +kind: RuntimeClass +version: v1beta1 +"— + node_v1beta1create a RuntimeClass*createNodeV1beta1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BH +F +Dbodybody *4 +2#/definitions/io.k8s.api.node.v1beta1.RuntimeClassBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jù +K +202D +B +Accepted6 +4 +2#/definitions/io.k8s.api.node.v1beta1.RuntimeClass + +401 + + Unauthorized +E +200> +< +OK6 +4 +2#/definitions/io.k8s.api.node.v1beta1.RuntimeClass +J +201C +A +Created6 +4 +2#/definitions/io.k8s.api.node.v1beta1.RuntimeClassRhttpsj\ +x-kubernetes-group-version-kind97group: node.k8s.io +kind: RuntimeClass +version: v1beta1 +j +x-kubernetes-actionpost +*Þ, + node_v1beta1!delete collection of RuntimeClass*'deleteNodeV1beta1CollectionRuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj* +x-kubernetes-actiondeletecollection +j\ +x-kubernetes-group-version-kind97group: node.k8s.io +kind: RuntimeClass +version: v1beta1 +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ”* +S/apis/rbac.authorization.k8s.io/v1/watch/namespaces/{namespace}/rolebindings/{name}¼)” +rbacAuthorization_v1²watch changes to an object of kind RoleBinding. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*-watchRbacAuthorizationV1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: RoleBinding +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J8 +64"2pathname of the RoleBinding"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean à' +/api/v1/watch/podtemplatesÁ'µ +core_v1wwatch individual changes to a list of PodTemplate. 
deprecated: use the 'watch' parameter with a list operation instead.**watchCoreV1PodTemplateListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjM +x-kubernetes-group-version-kind*(kind: PodTemplate +version: v1 +group: "" +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‚ + /apis/apiregistration.k8s.io/v1/ÝÚ +apiregistration_v1get available resources* getApiregistrationV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps‹ +#/apis/authorization.k8s.io/v1beta1/ãà +authorization_v1beta1get available resources*#getAuthorizationV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp + +401 + + Unauthorized +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceListRhttpsü* +H/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}¯*’ + networking_v1 read the specified NetworkPolicy*'readNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jb +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicy + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j^ +x-kubernetes-group-version-kind;9kind: NetworkPolicy +version: v1 +group: networking.k8s.io +í + networking_v1#replace the specified NetworkPolicy**replaceNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BJ +H +Fbodybody *6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicyBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J° +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicy +L +201E +C +Created8 +6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicy + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +j^ +x-kubernetes-group-version-kind;9group: networking.k8s.io +kind: NetworkPolicy +version: v1 +*¤ + networking_v1delete a NetworkPolicy*)deleteNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +j^ +x-kubernetes-group-version-kind;9kind: NetworkPolicy +version: v1 +group: networking.k8s.io +B‘ + networking_v1,partially update the specified NetworkPolicy*(patchNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jb +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicy + +401 + + UnauthorizedRhttpsj^ +x-kubernetes-group-version-kind;9group: networking.k8s.io +kind: NetworkPolicy +version: v1 +j +x-kubernetes-actionpatch +J: +86"4pathname of the NetworkPolicy"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string †) +F/apis/networking.k8s.io/v1beta1/watch/namespaces/{namespace}/ingresses»(Í +networking_v1beta1swatch individual changes to a list of Ingress. deprecated: use the 'watch' parameter with a list operation instead.*+watchNetworkingV1beta1NamespacedIngressList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +j] +x-kubernetes-group-version-kind:8group: networking.k8s.io +kind: Ingress +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
[binary vendored file content omitted: a protobuf-serialized Kubernetes OpenAPI v2 (Swagger) document pulled in as a vendored dependency. The blob enumerates standard Kubernetes API paths — e.g. /api/v1/namespaces/{namespace}/endpoints, /api/v1/watch/namespaces/{namespace}/secrets, /apis/apps/v1/namespaces/{namespace}/deployments (and /{name}/scale, daemonsets/{name}/status), /apis/batch/v1/cronjobs, /api/v1/namespaces/{namespace}/replicationcontrollers/{name}/status, /apis/events.k8s.io/v1/events, /apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}, /apis/storage.k8s.io/v1beta1/watch/namespaces/{namespace}/csistoragecapacities/{name}, /api/v1/watch/configmaps, /apis/autoscaling/v2beta2/watch/namespaces/{namespace}/horizontalpodautoscalers/{name} — together with their standard list/watch/create/delete/patch operations and query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, gracePeriodSeconds, orphanDependents, propagationPolicy). Not human-editable; carried verbatim in the patch.]
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Û) +1/apis/scheduling.k8s.io/v1/priorityclasses/{name}¥)ˆ + scheduling_v1 read the specified PriorityClass*readSchedulingV1PriorityClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jb +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.scheduling.v1.PriorityClass + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j^ +x-kubernetes-group-version-kind;9group: scheduling.k8s.io +kind: PriorityClass +version: v1 +ã + scheduling_v1#replace the specified PriorityClass* replaceSchedulingV1PriorityClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BJ +H +Fbodybody *6 +4#/definitions/io.k8s.api.scheduling.v1.PriorityClassBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J° +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.scheduling.v1.PriorityClass +L +201E +C +Created8 +6 +4#/definitions/io.k8s.api.scheduling.v1.PriorityClass + +401 + + UnauthorizedRhttpsj^ +x-kubernetes-group-version-kind;9group: scheduling.k8s.io +kind: PriorityClass +version: v1 +j +x-kubernetes-actionput +*š + scheduling_v1delete a PriorityClass*deleteSchedulingV1PriorityClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +j^ +x-kubernetes-group-version-kind;9group: scheduling.k8s.io +kind: PriorityClass +version: v1 +B‡ + scheduling_v1,partially update the specified PriorityClass*patchSchedulingV1PriorityClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jb +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.scheduling.v1.PriorityClass + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +j^ +x-kubernetes-group-version-kind;9kind: PriorityClass +version: v1 +group: scheduling.k8s.io +J: +86"4pathname of the PriorityClass"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ò( +A/apis/networking.k8s.io/v1/watch/namespaces/{namespace}/ingresses¬(¾ + networking_v1swatch individual changes to a list of Ingress. deprecated: use the 'watch' parameter with a list operation instead.*&watchNetworkingV1NamespacedIngressList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jX +x-kubernetes-group-version-kind53kind: Ingress +version: v1 +group: networking.k8s.io +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
+ +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ´' +/api/v1/watch/nodesœ' +core_v1pwatch individual changes to a list of Node. deprecated: use the 'watch' parameter with a list operation instead.*watchCoreV1NodeList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jF +x-kubernetes-group-version-kind#!version: v1 +group: "" +kind: Node +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean á` +path#name of the HorizontalPodAutoscaler"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ô) +L/apis/coordination.k8s.io/v1beta1/watch/namespaces/{namespace}/leases/{name}£) +coordination_v1beta1¬watch changes to an object of kind Lease. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*'watchCoordinationV1beta1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +j] +x-kubernetes-group-version-kind:8kind: Lease +version: v1beta1 +group: coordination.k8s.io +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J2 +0.",pathname of the Lease"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¦( +3/apis/discovery.k8s.io/v1beta1/watch/endpointslicesî'â +discovery_v1beta1ywatch individual changes to a list of EndpointSlice. deprecated: use the 'watch' parameter with a list operation instead.*6watchDiscoveryV1beta1EndpointSliceListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=group: discovery.k8s.io +kind: EndpointSlice +version: v1beta1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¼^ +@/apis/networking.k8s.io/v1beta1/namespaces/{namespace}/ingresses÷]§& +networking_v1beta1%list or watch objects of kind Ingress*&listNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Je + +401 + + Unauthorized +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.networking.v1beta1.IngressListRhttpsj +x-kubernetes-actionlist +j] +x-kubernetes-group-version-kind:8group: networking.k8s.io +kind: Ingress +version: v1beta1 +"© +networking_v1beta1create an Ingress*(createNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI +G +Ebodybody *5 +3#/definitions/io.k8s.api.networking.v1beta1.IngressBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jü +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.networking.v1beta1.Ingress +K +201D +B +Created7 +5 +3#/definitions/io.k8s.api.networking.v1beta1.Ingress +L +202E +C +Accepted7 +5 +3#/definitions/io.k8s.api.networking.v1beta1.Ingress + +401 + + UnauthorizedRhttpsj] +x-kubernetes-group-version-kind:8group: networking.k8s.io +kind: Ingress +version: v1beta1 +j +x-kubernetes-actionpost +*ë, +networking_v1beta1delete collection of Ingress*2deleteNetworkingV1beta1CollectionNamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +j] +x-kubernetes-group-version-kind:8group: networking.k8s.io +kind: Ingress +version: v1beta1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ©0 +'/api/v1/namespaces/{namespace}/servicesý/ò% +core_v1%list or watch objects of kind Service*listCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean JZ +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.core.v1.ServiceList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jI +x-kubernetes-group-version-kind&$group: "" +kind: Service +version: v1 +"Ò +core_v1create a Service*createCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> +< +:bodybody ** +(#/definitions/io.k8s.api.core.v1.ServiceBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÛ +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.core.v1.Service +@ +2019 +7 +Created, +* +(#/definitions/io.k8s.api.core.v1.Service +A +202: +8 +Accepted, +* +(#/definitions/io.k8s.api.core.v1.Service + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jI +x-kubernetes-group-version-kind&$group: "" +kind: Service +version: v1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string È' +/api/v1/watch/namespaces«'Ÿ +core_v1uwatch individual changes to a list of Namespace. deprecated: use the 'watch' parameter with a list operation instead.*watchCoreV1NamespaceList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? 
[binary content omitted: protobuf-serialized Kubernetes OpenAPI v2 (swagger) document — operation metadata (list/watch/read/create/replace/patch/delete, query parameters, and x-kubernetes-action / x-kubernetes-group-version-kind extensions) for core/v1, batch/v1, certificates.k8s.io, coordination.k8s.io, networking.k8s.io, node.k8s.io, rbac.authorization.k8s.io, and scheduling.k8s.io API endpoints]
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JF +DB"@path%name of the CertificateSigningRequest"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ä+ +K/apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name}ô*¢ +discovery_v1beta1 read the specified EndpointSlice*+readDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jf + +401 + + Unauthorized +K +200D +B +OK< +: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSliceRhttpsjb +x-kubernetes-group-version-kind?=kind: EndpointSlice +version: v1beta1 +group: discovery.k8s.io +j +x-kubernetes-actionget +… +discovery_v1beta1#replace the specified EndpointSlice*.replaceDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN +L +Jbodybody *: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSliceBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¸ +P +201I +G +Created< +: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSlice + +401 + + Unauthorized +K +200D +B +OK< +: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSliceRhttpsj +x-kubernetes-actionput +jb +x-kubernetes-group-version-kind?=kind: EndpointSlice +version: v1beta1 +group: discovery.k8s.io +*± +discovery_v1beta1delete an EndpointSlice*-deleteDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +jb +x-kubernetes-group-version-kind?=group: discovery.k8s.io +kind: EndpointSlice +version: v1beta1 +B¡ +discovery_v1beta1,partially update the specified EndpointSlice*,patchDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jf +K +200D +B +OK< +: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSlice + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jb +x-kubernetes-group-version-kind?=group: discovery.k8s.io +kind: EndpointSlice +version: v1beta1 +J: +86"4pathname of the EndpointSlice"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string  +6/apis/authorization.k8s.io/v1/selfsubjectaccessreviewsÔ +"Æ +authorization_v1 create a SelfSubjectAccessReview*,createAuthorizationV1SelfSubjectAccessReview2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BW +U +Sbodybody *C +A#/definitions/io.k8s.api.authorization.v1.SelfSubjectAccessReviewJ¦ +T +200M +K +OKE +C +A#/definitions/io.k8s.api.authorization.v1.SelfSubjectAccessReview +Y +201R +P +CreatedE +C +A#/definitions/io.k8s.api.authorization.v1.SelfSubjectAccessReview +Z +202S +Q +AcceptedE +C +A#/definitions/io.k8s.api.authorization.v1.SelfSubjectAccessReview + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jk +x-kubernetes-group-version-kindHFgroup: authorization.k8s.io +kind: SelfSubjectAccessReview +version: v1 +Jž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ¦ + /api/v1/componentstatuses/{name}ï +core_v1"read the specified ComponentStatus*readCoreV1ComponentStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J^ +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.core.v1.ComponentStatus + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jQ +x-kubernetes-group-version-kind.,group: "" +kind: ComponentStatus +version: v1 +J< +:8"6pathname of the ComponentStatus"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string á +/apis/node.k8s.io/v1/ÇÄ +node_v1get available resources*getNodeV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps°( +:/apis/rbac.authorization.k8s.io/v1beta1/watch/clusterrolesñ'å +rbacAuthorization_v1beta1wwatch individual changes to a list of ClusterRole. deprecated: use the 'watch' parameter with a list operation instead.*,watchRbacAuthorizationV1beta1ClusterRoleList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +ji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: ClusterRole +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ì( +./api/v1/namespaces/{namespace}/services/{name}¹(Ù +core_v1read the specified Service*readCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JV +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.core.v1.Service + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jI +x-kubernetes-group-version-kind&$kind: Service +version: v1 +group: "" +œ +core_v1replace the specified Service*replaceCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> +< +:bodybody ** +(#/definitions/io.k8s.api.core.v1.ServiceBž +›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J˜ +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.core.v1.Service +@ +2019 +7 +Created, +* +(#/definitions/io.k8s.api.core.v1.Service + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jI +x-kubernetes-group-version-kind&$group: "" +kind: Service +version: v1 +*÷ +core_v1delete a Service*deleteCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjI +x-kubernetes-group-version-kind&$group: "" +kind: Service +version: v1 +j +x-kubernetes-action delete +BØ +core_v1&partially update the specified Service*patchCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JV +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.core.v1.Service + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jI +x-kubernetes-group-version-kind&$group: "" +kind: Service +version: v1 +J4 +20".pathname of the Service"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string –] +4/apis/events.k8s.io/v1/namespaces/{namespace}/eventsÝ\û% + events_v1#list or watch objects of kind Event*listEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean JZ +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.events.v1.EventList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jR +x-kubernetes-group-version-kind/-group: events.k8s.io +kind: Event +version: v1 +"Ü + events_v1create an Event*createEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> +< +:bodybody ** +(#/definitions/io.k8s.api.events.v1.EventBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string JÛ +@ +2019 +7 +Created, +* +(#/definitions/io.k8s.api.events.v1.Event +A +202: +8 +Accepted, +* +(#/definitions/io.k8s.api.events.v1.Event + +401 + + Unauthorized +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.events.v1.EventRhttpsj +x-kubernetes-actionpost +jR +x-kubernetes-group-version-kind/-group: events.k8s.io +kind: Event +version: v1 +*Ê, + events_v1delete collection of Event*'deleteEventsV1CollectionNamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jR +x-kubernetes-group-version-kind/-group: events.k8s.io +kind: Event +version: v1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string —( +4/apis/networking.k8s.io/v1beta1/watch/ingressclassesÞ'Ò +networking_v1beta1xwatch individual changes to a list of IngressClass. deprecated: use the 'watch' parameter with a list operation instead.*&watchNetworkingV1beta1IngressClassList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=kind: IngressClass +version: v1beta1 +group: networking.k8s.io +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
+ +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ë' +)/apis/node.k8s.io/v1/watch/runtimeclasses½'± +node_v1xwatch individual changes to a list of RuntimeClass. deprecated: use the 'watch' parameter with a list operation instead.*watchNodeV1RuntimeClassList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jW +x-kubernetes-group-version-kind42version: v1 +group: node.k8s.io +kind: RuntimeClass +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean À( +)/apis/storage.k8s.io/v1/csidrivers/{name}’(ì + +storage_v1read the specified CSIDriver*readStorageV1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J[ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.storage.v1.CSIDriver + +401 + + UnauthorizedRhttpsjW +x-kubernetes-group-version-kind42group: storage.k8s.io +kind: CSIDriver +version: v1 +j +x-kubernetes-actionget +¹ + +storage_v1replace the specified CSIDriver*replaceStorageV1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BC +A +?bodybody */ +-#/definitions/io.k8s.api.storage.v1.CSIDriverBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
[Binary content omitted: a protobuf-serialized Kubernetes OpenAPI v2 (Swagger) document belonging to a vendored dependency. It enumerates API paths and their operations (get/list/watch/create/replace/patch/delete, plus deprecated /watch/ endpoints) for groups including core/v1, apps/v1, batch/v1, storage.k8s.io/v1 and v1beta1, networking.k8s.io/v1, policy/v1beta1, apiextensions.k8s.io/v1beta1, events.k8s.io/v1beta1, and authentication.k8s.io/v1, covering kinds such as CSIDriver, StorageClass, VolumeAttachment, CronJob, ServiceAccount, ControllerRevision, PodDisruptionBudget, IngressClass, ResourceQuota, Pod, Service, and Event, each with the standard query parameters (dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy, labelSelector, fieldSelector, limit/continue, resourceVersion, resourceVersionMatch, timeoutSeconds, watch). Git would normally record this hunk as a "GIT binary patch" rather than inline text.]
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean å +>/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/status¢÷ +apps_v1'read status of the specified ReplicaSet*$readAppsV1NamespacedReplicaSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JY +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jN +x-kubernetes-group-version-kind+)group: apps +kind: ReplicaSet +version: v1 +À +apps_v1*replace status of the specified ReplicaSet*'replaceAppsV1NamespacedReplicaSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA +? +=bodybody *- ++#/definitions/io.k8s.api.apps.v1.ReplicaSetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jž +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet +C +201< +: +Created/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)version: v1 +group: apps +kind: ReplicaSet +j +x-kubernetes-actionput +Bö +apps_v13partially update status of the specified ReplicaSet*%patchAppsV1NamespacedReplicaSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean JY +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jN +x-kubernetes-group-version-kind+)group: apps +kind: ReplicaSet +version: v1 +J7 +53"1pathname of the ReplicaSet"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ó) +E/apis/apps/v1/watch/namespaces/{namespace}/controllerrevisions/{name}©)ú +apps_v1¹watch changes to an object of kind ControllerRevision. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*'watchAppsV1NamespacedControllerRevision2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj +x-kubernetes-actionwatch +jV +x-kubernetes-group-version-kind31group: apps +kind: ControllerRevision +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J? +=;"9pathname of the ControllerRevision"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean æ' +#/apis/events.k8s.io/v1/watch/events¾'² + events_v1qwatch individual changes to a list of Event. deprecated: use the 'watch' parameter with a list operation instead.*&watchEventsV1EventListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jR +x-kubernetes-group-version-kind/-group: events.k8s.io +kind: Event +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ó +/apis/scheduling.k8s.io/v1/ÓÐ + scheduling_v1get available resources*getSchedulingV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp + +401 + + Unauthorized +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceListRhttpsŒ +"/.well-known/openid-configuration/åâ + WellKnownWget service account issuer OpenID configuration, also known as the 'OIDC discovery doc'**getServiceAccountIssuerOpenIDConfiguration2application/jsonJ7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsö' +*/api/v1/namespaces/{namespace}/pods/{name}Ç'É +core_v1read the specified Pod*readCoreV1NamespacedPod2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JR +7 +2000 +. +OK( +& +$#/definitions/io.k8s.api.core.v1.Pod + +401 + + UnauthorizedRhttpsjE +x-kubernetes-group-version-kind" group: "" +kind: Pod +version: v1 +j +x-kubernetes-actionget +„ +core_v1replace the specified Pod*replaceCoreV1NamespacedPod2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B: +8 +6bodybody *& +$#/definitions/io.k8s.api.core.v1.PodBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J +7 +2000 +. +OK( +& +$#/definitions/io.k8s.api.core.v1.Pod +< +2015 +3 +Created( +& +$#/definitions/io.k8s.api.core.v1.Pod + +401 + + UnauthorizedRhttpsjE +x-kubernetes-group-version-kind" group: "" +kind: Pod +version: v1 +j +x-kubernetes-actionput +*Á +core_v1 delete a Pod*deleteCoreV1NamespacedPod2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. 
The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J‘ +7 +2000 +. +OK( +& +$#/definitions/io.k8s.api.core.v1.Pod += +2026 +4 +Accepted( +& +$#/definitions/io.k8s.api.core.v1.Pod + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +jE +x-kubernetes-group-version-kind" group: "" +kind: Pod +version: v1 +BÈ +core_v1"partially update the specified Pod*patchCoreV1NamespacedPod2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JR +7 +2000 +. +OK( +& +$#/definitions/io.k8s.api.core.v1.Pod + +401 + + UnauthorizedRhttpsjE +x-kubernetes-group-version-kind" version: v1 +group: "" +kind: Pod +j +x-kubernetes-actionpatch +J0 +.,"*pathname of the Pod"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ­ +3/api/v1/namespaces/{namespace}/pods/{name}/evictionõ +"Î +core_v1create eviction of a Pod*!createCoreV1NamespacedPodEviction2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BF +D +Bbodybody *2 +0#/definitions/io.k8s.api.policy.v1beta1.EvictionJó + +401 + + Unauthorized +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.policy.v1beta1.Eviction +H +201A +? +Created4 +2 +0#/definitions/io.k8s.api.policy.v1beta1.Eviction +I +202B +@ +Accepted4 +2 +0#/definitions/io.k8s.api.policy.v1beta1.EvictionRhttpsj +x-kubernetes-actionpost +jS +x-kubernetes-group-version-kind0.group: policy +kind: Eviction +version: v1beta1 +Jž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string J5 +31"/pathname of the Eviction"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ª +;/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}ê§ +core_v1(connect GET requests to proxy of Service*.connectCoreV1GetNamespacedServiceProxyWithPath2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jU +x-kubernetes-group-version-kind20kind: ServiceProxyOptions +version: v1 +group: "" +§ +core_v1(connect PUT requests to proxy of Service*.connectCoreV1PutNamespacedServiceProxyWithPath2*/*:*/*J7 + +401 + + Unauthorized + +200 + +OK + ² +stringRhttpsj! +x-kubernetes-action +connect +jU +x-kubernetes-group-version-kind20group: "" +kind: ServiceProxyOptions +version: v1 +"© +core_v1)connect POST requests to proxy of Service*/connectCoreV1PostNamespacedServiceProxyWithPath2*/*:*/*J7 + +401 + + Unauthorized + +200 + +OK + ² +stringRhttpsj! +x-kubernetes-action +connect +jU +x-kubernetes-group-version-kind20group: "" +kind: ServiceProxyOptions +version: v1 +*­ +core_v1+connect DELETE requests to proxy of Service*1connectCoreV1DeleteNamespacedServiceProxyWithPath2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jU +x-kubernetes-group-version-kind20group: "" +kind: ServiceProxyOptions +version: v1 +2¯ +core_v1,connect OPTIONS requests to proxy of Service*2connectCoreV1OptionsNamespacedServiceProxyWithPath2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jU +x-kubernetes-group-version-kind20group: "" +kind: ServiceProxyOptions +version: v1 +:© +core_v1)connect HEAD requests to proxy of Service*/connectCoreV1HeadNamespacedServiceProxyWithPath2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jU +x-kubernetes-group-version-kind20group: "" +kind: ServiceProxyOptions +version: v1 +B« +core_v1*connect PATCH requests to proxy of Service*0connectCoreV1PatchNamespacedServiceProxyWithPath2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jU +x-kubernetes-group-version-kind20group: "" +kind: ServiceProxyOptions +version: v1 +J@ +><":pathname of the ServiceProxyOptions"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜J5 +31"/pathpath to the resource"path*string˜JÐ +ÍÊÇquery¬Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy."path2string û* +=/apis/rbac.authorization.k8s.io/v1/clusterrolebindings/{name}¹*¬ +rbacAuthorization_v1%read the specified ClusterRoleBinding*)readRbacAuthorizationV1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ja +F +200? 
+= +OK7 +5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jk +x-kubernetes-group-version-kindHFgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1 +… +rbacAuthorization_v1(replace the specified ClusterRoleBinding*,replaceRbacAuthorizationV1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI +G +Ebodybody *5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBindingBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J® +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding +K +201D +B +Created7 +5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jk +x-kubernetes-group-version-kindHFgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1 +*¿ +rbacAuthorization_v1delete a ClusterRoleBinding*+deleteRbacAuthorizationV1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjk +x-kubernetes-group-version-kindHFgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1 +j +x-kubernetes-action delete +B« +rbacAuthorization_v11partially update the specified ClusterRoleBinding**patchRbacAuthorizationV1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Ja +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jk +x-kubernetes-group-version-kindHFgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1 +J? +=;"9pathname of the ClusterRoleBinding"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ¿^ +5/api/v1/namespaces/{namespace}/persistentvolumeclaims…^ª& +core_v13list or watch objects of kind PersistentVolumeClaim*)listCoreV1NamespacedPersistentVolumeClaim2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jh +M +200F +D +OK> +< +:#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimList + +401 + + UnauthorizedRhttpsjW +x-kubernetes-group-version-kind42version: v1 +group: "" +kind: PersistentVolumeClaim +j +x-kubernetes-actionlist +"´ +core_v1create a PersistentVolumeClaim*+createCoreV1NamespacedPersistentVolumeClaim2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BL +J +Hbodybody *8 +6#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J… + +401 + + Unauthorized +I +200B +@ +OK: +8 +6#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim +N +201G +E +Created: +8 +6#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim +O +202H +F +Accepted: +8 +6#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimRhttpsj +x-kubernetes-actionpost +jW +x-kubernetes-group-version-kind42group: "" +kind: PersistentVolumeClaim +version: v1 +*ë, +core_v1*delete collection of PersistentVolumeClaim*5deleteCoreV1CollectionNamespacedPersistentVolumeClaim2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
[Binary content: vendored protobuf serialization of the Kubernetes OpenAPI v2 (swagger) specification. It enumerates the standard get/list/watch/create/replace/delete/patch operations, their response schemas (e.g. io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent, Status, APIGroup), and the shared query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, gracePeriodSeconds, orphanDependents, propagationPolicy) for paths under /api/v1 and the apps, certificates.k8s.io, discovery.k8s.io, extensions, networking.k8s.io, rbac.authorization.k8s.io, storage.k8s.io, and admissionregistration.k8s.io API groups.]
fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J1 +/-"+pathname of the Role"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ƒ) +N/apis/admissionregistration.k8s.io/v1beta1/watch/mutatingwebhookconfigurations°(¤ +admissionregistration_v1beta1ˆwatch individual changes to a list of MutatingWebhookConfiguration. deprecated: use the 'watch' parameter with a list operation instead.*AwatchAdmissionregistrationV1beta1MutatingWebhookConfigurationList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +j} +x-kubernetes-group-version-kindZXkind: MutatingWebhookConfiguration +version: v1beta1 +group: admissionregistration.k8s.io +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean É( +8/apis/autoscaling/v2beta1/watch/horizontalpodautoscalersŒ(€ +autoscaling_v2beta1ƒwatch individual changes to a list of HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead.*BwatchAutoscalingV2beta1HorizontalPodAutoscalerListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jg +x-kubernetes-group-version-kindDBgroup: autoscaling +kind: HorizontalPodAutoscaler +version: v2beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean è +0/api/v1/namespaces/{namespace}/pods/{name}/proxy³“ +core_v1$connect GET requests to proxy of Pod*"connectCoreV1GetNamespacedPodProxy2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jQ +x-kubernetes-group-version-kind.,group: "" +kind: PodProxyOptions +version: v1 +“ +core_v1$connect PUT requests to proxy of Pod*"connectCoreV1PutNamespacedPodProxy2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jQ +x-kubernetes-group-version-kind.,group: "" +kind: PodProxyOptions +version: v1 +"• +core_v1%connect POST requests to proxy of Pod*#connectCoreV1PostNamespacedPodProxy2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jQ +x-kubernetes-group-version-kind.,group: "" +kind: PodProxyOptions +version: v1 +*™ +core_v1'connect DELETE requests to proxy of Pod*%connectCoreV1DeleteNamespacedPodProxy2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! 
+x-kubernetes-action +connect +jQ +x-kubernetes-group-version-kind.,group: "" +kind: PodProxyOptions +version: v1 +2› +core_v1(connect OPTIONS requests to proxy of Pod*&connectCoreV1OptionsNamespacedPodProxy2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jQ +x-kubernetes-group-version-kind.,version: v1 +group: "" +kind: PodProxyOptions +:• +core_v1%connect HEAD requests to proxy of Pod*#connectCoreV1HeadNamespacedPodProxy2*/*:*/*J7 + +200 + +OK + ² +string + +401 + + UnauthorizedRhttpsj! +x-kubernetes-action +connect +jQ +x-kubernetes-group-version-kind.,group: "" +kind: PodProxyOptions +version: v1 +B— +core_v1&connect PATCH requests to proxy of Pod*$connectCoreV1PatchNamespacedPodProxy2*/*:*/*J7 + +401 + + Unauthorized + +200 + +OK + ² +stringRhttpsj! +x-kubernetes-action +connect +jQ +x-kubernetes-group-version-kind.,group: "" +kind: PodProxyOptions +version: v1 +J< +:8"6pathname of the PodProxyOptions"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜Ja +_][queryAPath is the URL path to use for the current proxy request to pod."path2string Ç] +-/api/v1/namespaces/{namespace}/resourcequotas•]Š& +core_v1+list or watch objects of kind ResourceQuota*!listCoreV1NamespacedResourceQuota2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J` +E +200> +< +OK6 +4 +2#/definitions/io.k8s.api.core.v1.ResourceQuotaList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jO +x-kubernetes-group-version-kind,*group: "" +kind: ResourceQuota +version: v1 +"ü +core_v1create a ResourceQuota*#createCoreV1NamespacedResourceQuota2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD +B +@bodybody *0 +.#/definitions/io.k8s.api.core.v1.ResourceQuotaBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jí +G +202@ +> +Accepted2 +0 +.#/definitions/io.k8s.api.core.v1.ResourceQuota + +401 + + Unauthorized +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.core.v1.ResourceQuota +F +201? 
+= +Created2 +0 +.#/definitions/io.k8s.api.core.v1.ResourceQuotaRhttpsj +x-kubernetes-actionpost +jO +x-kubernetes-group-version-kind,*group: "" +kind: ResourceQuota +version: v1 +*Ó, +core_v1"delete collection of ResourceQuota*-deleteCoreV1CollectionNamespacedResourceQuota2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjO +x-kubernetes-group-version-kind,*group: "" +kind: ResourceQuota +version: v1 +j* +x-kubernetes-actiondeletecollection +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ®' +(/apis/discovery.k8s.io/v1/endpointslices'õ + discovery_v1+list or watch objects of kind EndpointSlice*,listDiscoveryV1EndpointSliceForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Je +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.discovery.v1.EndpointSliceList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +j] +x-kubernetes-group-version-kind:8group: discovery.k8s.io +kind: EndpointSlice +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¤( +3/apis/rbac.authorization.k8s.io/v1beta1/watch/rolesì'à +rbacAuthorization_v1beta1pwatch individual changes to a list of Role. deprecated: use the 'watch' parameter with a list operation instead.*5watchRbacAuthorizationV1beta1RoleListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jb +x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io +kind: Role +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Í + /version/¿¼ +versionget the code version*getCodeVersion2application/json:application/jsonJ` +E +200> +< +OK6 +4 +2#/definitions/io.k8s.apimachinery.pkg.version.Info + +401 + + UnauthorizedRhttpsü +/apis/apiextensions.k8s.io/v1/ÙÖ +apiextensions_v1get available resources*getApiextensionsV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp + +401 + + Unauthorized +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceListRhttpsÀ( +1/apis/batch/v1/namespaces/{namespace}/jobs/{name}Š(Ï +batch_v1read the specified Job*readBatchV1NamespacedJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JS +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.batch.v1.Job + +401 + + UnauthorizedRhttpsjH +x-kubernetes-group-version-kind%#version: v1 +group: batch +kind: Job +j +x-kubernetes-actionget +Œ +batch_v1replace the specified Job*replaceBatchV1NamespacedJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B; +9 +7bodybody *' +%#/definitions/io.k8s.api.batch.v1.JobBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J’ +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.batch.v1.Job += +2016 +4 +Created) +' +%#/definitions/io.k8s.api.batch.v1.Job + +401 + + UnauthorizedRhttpsjH +x-kubernetes-group-version-kind%#group: batch +kind: Job +version: v1 +j +x-kubernetes-actionput +*ð +batch_v1 delete a Job*deleteBatchV1NamespacedJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj +x-kubernetes-action delete +jH +x-kubernetes-group-version-kind%#group: batch +kind: Job +version: v1 +BÎ +batch_v1"partially update the specified Job*patchBatchV1NamespacedJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JS +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.batch.v1.Job + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jH +x-kubernetes-group-version-kind%#group: batch +kind: Job +version: v1 +J0 +.,"*pathname of the Job"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ¼, +C/apis/certificates.k8s.io/v1beta1/certificatesigningrequests/{name}ô+Ô +certificates_v1beta1,read the specified CertificateSigningRequest*0readCertificatesV1beta1CertificateSigningRequest2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ju +Z +200S +Q +OKK +I +G#/definitions/io.k8s.api.certificates.v1beta1.CertificateSigningRequest + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jq +x-kubernetes-group-version-kindNLgroup: certificates.k8s.io +kind: CertificateSigningRequest +version: v1beta1 +Õ +certificates_v1beta1/replace the specified CertificateSigningRequest*3replaceCertificatesV1beta1CertificateSigningRequest2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B] +[ +Ybodybody *I +G#/definitions/io.k8s.api.certificates.v1beta1.CertificateSigningRequestBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÖ +Z +200S +Q +OKK +I +G#/definitions/io.k8s.api.certificates.v1beta1.CertificateSigningRequest +_ +201X +V +CreatedK +I +G#/definitions/io.k8s.api.certificates.v1beta1.CertificateSigningRequest + +401 + + UnauthorizedRhttpsjq +x-kubernetes-group-version-kindNLkind: CertificateSigningRequest +version: v1beta1 +group: certificates.k8s.io +j +x-kubernetes-actionput +*Ó +certificates_v1beta1"delete a CertificateSigningRequest*2deleteCertificatesV1beta1CertificateSigningRequest2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
[Binary protobuf blob omitted: a serialized Kubernetes OpenAPI v2 (swagger) specification containing path and parameter definitions for core/v1, autoscaling/v2beta1 and v2beta2, certificates.k8s.io/v1 and v1beta1, storage.k8s.io/v1 and v1beta1, policy/v1, authorization.k8s.io/v1, apiextensions.k8s.io/v1beta1, and admissionregistration.k8s.io endpoints. The raw bytes are not representable as text.]
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J; +97"5pathname of the ServiceAccount"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ÷( +K/apis/admissionregistration.k8s.io/v1/watch/validatingwebhookconfigurations§(› +admissionregistration_v1Šwatch individual changes to a list of ValidatingWebhookConfiguration. deprecated: use the 'watch' parameter with a list operation instead.*>watchAdmissionregistrationV1ValidatingWebhookConfigurationList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjz +x-kubernetes-group-version-kindWUgroup: admissionregistration.k8s.io +kind: ValidatingWebhookConfiguration +version: v1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean †_ +D/apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices½^»& +discovery_v1beta1+list or watch objects of kind EndpointSlice*+listDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jj +O +200H +F +OK@ +> +<#/definitions/io.k8s.api.discovery.v1beta1.EndpointSliceList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jb +x-kubernetes-group-version-kind?=group: discovery.k8s.io +kind: EndpointSlice +version: v1beta1 +"Ì +discovery_v1beta1create an EndpointSlice*-createDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN +L +Jbodybody *: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSliceBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J‹ + +401 + + Unauthorized +K +200D +B +OK< +: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSlice +P +201I +G +Created< +: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSlice +Q +202J +H +Accepted< +: +8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSliceRhttpsjb +x-kubernetes-group-version-kind?=kind: EndpointSlice +version: v1beta1 +group: discovery.k8s.io +j +x-kubernetes-actionpost +*ú, +discovery_v1beta1"delete collection of EndpointSlice*7deleteDiscoveryV1beta1CollectionNamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. 
The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=kind: EndpointSlice +version: v1beta1 +group: discovery.k8s.io +j* +x-kubernetes-actiondeletecollection +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ©^ +6/apis/rbac.authorization.k8s.io/v1/clusterrolebindingsî]Å& +rbacAuthorization_v10list or watch objects of kind ClusterRoleBinding*)listRbacAuthorizationV1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Je +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.rbac.v1.ClusterRoleBindingList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jk +x-kubernetes-group-version-kindHFgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1 +"Æ +rbacAuthorization_v1create a ClusterRoleBinding*+createRbacAuthorizationV1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI +G +Ebodybody *5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBindingBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jü +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding +K +201D +B +Created7 +5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding +L +202E +C +Accepted7 +5 +3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jk +x-kubernetes-group-version-kindHFgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1 +*‰- +rbacAuthorization_v1'delete collection of ClusterRoleBinding*5deleteRbacAuthorizationV1CollectionClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jk +x-kubernetes-group-version-kindHFkind: ClusterRoleBinding +version: v1 +group: rbac.authorization.k8s.io +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ð* +"<path!name of the PersistentVolumeClaim"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ½- +J/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/{name}î,ð +admissionregistration_v1/read the specified MutatingWebhookConfiguration*7readAdmissionregistrationV1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J| +a +200Z +X +OKR +P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jx +x-kubernetes-group-version-kindUSkind: MutatingWebhookConfiguration +version: v1 +group: admissionregistration.k8s.io +ÿ +admissionregistration_v12replace the specified MutatingWebhookConfiguration*:replaceAdmissionregistrationV1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Bd +b +`bodybody *P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jä +f +201_ +] +CreatedR +P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration + +401 + + Unauthorized +a +200Z +X +OKR +P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationRhttpsjx +x-kubernetes-group-version-kindUSkind: MutatingWebhookConfiguration +version: v1 +group: admissionregistration.k8s.io +j +x-kubernetes-actionput +*è +admissionregistration_v1%delete a MutatingWebhookConfiguration*9deleteAdmissionregistrationV1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +jx +x-kubernetes-group-version-kindUSgroup: admissionregistration.k8s.io +kind: MutatingWebhookConfiguration +version: v1 +Bï +admissionregistration_v1;partially update the specified MutatingWebhookConfiguration*8patchAdmissionregistrationV1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J| + +401 + + Unauthorized +a +200Z +X +OKR +P +N#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationRhttpsj +x-kubernetes-actionpatch +jx +x-kubernetes-group-version-kindUSgroup: admissionregistration.k8s.io +kind: MutatingWebhookConfiguration +version: v1 +JI +GE"Cpath(name of the MutatingWebhookConfiguration"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ä +E/apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}/statusšî +apiextensions_v15read status of the specified CustomResourceDefinition*1readApiextensionsV1CustomResourceDefinitionStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J +r +200k +i +OKc +a +_#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jl +x-kubernetes-group-version-kindIGkind: CustomResourceDefinition +version: v1 +group: apiextensions.k8s.io +ž + +apiextensions_v18replace status of the specified CustomResourceDefinition*4replaceApiextensionsV1CustomResourceDefinitionStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Bu +s +qbodybody *a +_#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J† +r +200k +i +OKc +a +_#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition +w +201p +n +Createdc +a +_#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jl +x-kubernetes-group-version-kindIGkind: CustomResourceDefinition +version: v1 +group: apiextensions.k8s.io +Bí +apiextensions_v1Apartially update status of the specified CustomResourceDefinition*2patchApiextensionsV1CustomResourceDefinitionStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J +r +200k +i +OKc +a +_#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition + +401 + + UnauthorizedRhttpsjl +x-kubernetes-group-version-kindIGgroup: apiextensions.k8s.io +kind: CustomResourceDefinition +version: v1 +j +x-kubernetes-actionpatch +JE +CA"?path$name of the CustomResourceDefinition"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string “( +//apis/networking.k8s.io/v1beta1/watch/ingressesß'Ó +networking_v1beta1swatch individual changes to a list of Ingress. deprecated: use the 'watch' parameter with a list operation instead.*1watchNetworkingV1beta1IngressListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj] +x-kubernetes-group-version-kind:8group: networking.k8s.io +kind: Ingress +version: v1beta1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. 
Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ²] +)/apis/storage.k8s.io/v1/volumeattachments„]¡& + +storage_v1.list or watch objects of kind VolumeAttachment*listStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jf +K +200D +B +OK< +: +8#/definitions/io.k8s.api.storage.v1.VolumeAttachmentList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +j^ +x-kubernetes-group-version-kind;9version: v1 +group: storage.k8s.io +kind: VolumeAttachment +"¥ + +storage_v1create a VolumeAttachment*createStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BJ +H +Fbodybody *6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachmentBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jÿ +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment +L +201E +C +Created8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment +M +202F +D +Accepted8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment + +401 + + UnauthorizedRhttpsj^ +x-kubernetes-group-version-kind;9group: storage.k8s.io +kind: VolumeAttachment +version: v1 +j +x-kubernetes-actionpost +*ä, + +storage_v1%delete collection of VolumeAttachment*)deleteStorageV1CollectionVolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj^ +x-kubernetes-group-version-kind;9kind: VolumeAttachment +version: v1 +group: storage.k8s.io +j* +x-kubernetes-actiondeletecollection +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ä) +2/apis/storage.k8s.io/v1beta1/storageclasses/{name}­)Œ +storage_v1beta1read the specified StorageClass*readStorageV1beta1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jc +H +200A +? +OK9 +7 +5#/definitions/io.k8s.api.storage.v1beta1.StorageClass + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j_ +x-kubernetes-group-version-kind<:group: storage.k8s.io +kind: StorageClass +version: v1beta1 +é +storage_v1beta1"replace the specified StorageClass*!replaceStorageV1beta1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BK +I +Gbodybody *7 +5#/definitions/io.k8s.api.storage.v1beta1.StorageClassBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J² +H +200A +? +OK9 +7 +5#/definitions/io.k8s.api.storage.v1beta1.StorageClass +M +201F +D +Created9 +7 +5#/definitions/io.k8s.api.storage.v1beta1.StorageClass + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +j_ +x-kubernetes-group-version-kind<:group: storage.k8s.io +kind: StorageClass +version: v1beta1 +*• +storage_v1beta1delete a StorageClass* deleteStorageV1beta1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J³ +H +200A +? +OK9 +7 +5#/definitions/io.k8s.api.storage.v1beta1.StorageClass +N +202G +E +Accepted9 +7 +5#/definitions/io.k8s.api.storage.v1beta1.StorageClass + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +j_ +x-kubernetes-group-version-kind<:group: storage.k8s.io +kind: StorageClass +version: v1beta1 +B‹ +storage_v1beta1+partially update the specified StorageClass*patchStorageV1beta1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jc +H +200A +? +OK9 +7 +5#/definitions/io.k8s.api.storage.v1beta1.StorageClass + +401 + + UnauthorizedRhttpsj_ +x-kubernetes-group-version-kind<:group: storage.k8s.io +kind: StorageClass +version: v1beta1 +j +x-kubernetes-actionpatch +J9 +75"3pathname of the StorageClass"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ý\ +/api/v1/persistentvolumes¿\Œ& +core_v1.list or watch objects of kind PersistentVolume*listCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jc +H +200A +? 
+OK9 +7 +5#/definitions/io.k8s.api.core.v1.PersistentVolumeList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jR +x-kubernetes-group-version-kind/-group: "" +kind: PersistentVolume +version: v1 +"‡ +core_v1create a PersistentVolume*createCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG +E +Cbodybody *3 +1#/definitions/io.k8s.api.core.v1.PersistentVolumeBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jö +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.core.v1.PersistentVolume +I +201B +@ +Created5 +3 +1#/definitions/io.k8s.api.core.v1.PersistentVolume +J +202C +A +Accepted5 +3 +1#/definitions/io.k8s.api.core.v1.PersistentVolume + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jR +x-kubernetes-group-version-kind/-version: v1 +group: "" +kind: PersistentVolume +*Ò, +core_v1%delete collection of PersistentVolume*&deleteCoreV1CollectionPersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsjR +x-kubernetes-group-version-kind/-group: "" +kind: PersistentVolume +version: v1 +j* +x-kubernetes-actiondeletecollection +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ê + /apis/apps/º· +appsget information of a group*getAppsAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi +N +200G +E +OK? += +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup + +401 + + UnauthorizedRhttps¿( +0/apis/batch/v1/watch/namespaces/{namespace}/jobsŠ(œ +batch_v1owatch individual changes to a list of Job. deprecated: use the 'watch' parameter with a list operation instead.*watchBatchV1NamespacedJobList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjH +x-kubernetes-group-version-kind%#group: batch +kind: Job +version: v1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean á* +F/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leases/{name}–*Ž +coordination_v1beta1read the specified Lease*&readCoordinationV1beta1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ja +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.coordination.v1beta1.Lease + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j] +x-kubernetes-group-version-kind:8group: coordination.k8s.io +kind: Lease +version: v1beta1 +ç +coordination_v1beta1replace the specified Lease*)replaceCoordinationV1beta1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI +G +Ebodybody *5 +3#/definitions/io.k8s.api.coordination.v1beta1.LeaseBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J® +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.coordination.v1beta1.Lease +K +201D +B +Created7 +5 +3#/definitions/io.k8s.api.coordination.v1beta1.Lease + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +j] +x-kubernetes-group-version-kind:8group: coordination.k8s.io +kind: Lease +version: v1beta1 +*¡ +coordination_v1beta1delete a Lease*(deleteCoordinationV1beta1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj] +x-kubernetes-group-version-kind:8group: coordination.k8s.io +kind: Lease +version: v1beta1 +j +x-kubernetes-action delete +B +coordination_v1beta1$partially update the specified Lease*'patchCoordinationV1beta1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." 
fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Ja +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.coordination.v1beta1.Lease + +401 + + UnauthorizedRhttpsj] +x-kubernetes-group-version-kind:8group: coordination.k8s.io +kind: Lease +version: v1beta1 +j +x-kubernetes-actionpatch +J2 +0.",pathname of the Lease"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ê& +/api/v1/endpointsÔ&È +core_v1'list or watch objects of kind Endpoints*#listCoreV1EndpointsForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.core.v1.EndpointsList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jK +x-kubernetes-group-version-kind(&group: "" +kind: Endpoints +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ×& +/api/v1/eventsÄ&¸ +core_v1#list or watch objects of kind Event*listCoreV1EventForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*JX += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.core.v1.EventList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jG +x-kubernetes-group-version-kind$"group: "" +kind: Event +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean ‹) +P/apis/admissionregistration.k8s.io/v1beta1/watch/validatingwebhookconfigurations¶(ª +admissionregistration_v1beta1Šwatch individual changes to a list of ValidatingWebhookConfiguration. deprecated: use the 'watch' parameter with a list operation instead.*CwatchAdmissionregistrationV1beta1ValidatingWebhookConfigurationList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +j +x-kubernetes-group-version-kind\Zgroup: admissionregistration.k8s.io +kind: ValidatingWebhookConfiguration +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ˆ) +5/apis/policy/v1beta1/watch/podsecuritypolicies/{name}Î(‚ +policy_v1beta1¸watch changes to an object of kind PodSecurityPolicy. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*#watchPolicyV1beta1PodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +j\ +x-kubernetes-group-version-kind97group: policy +kind: PodSecurityPolicy +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J> +<:"8pathname of the PodSecurityPolicy"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ì +/apis/apiextensions.k8s.io/ÌÉ + apiextensionsget information of a group*getApiextensionsAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi +N +200G +E +OK? 
+= +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup + +401 + + UnauthorizedRhttps„ +8/apis/batch/v1/namespaces/{namespace}/jobs/{name}/statusÇß +batch_v1 read status of the specified Job*readBatchV1NamespacedJobStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JS +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.batch.v1.Job + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jH +x-kubernetes-group-version-kind%#group: batch +kind: Job +version: v1 +œ +batch_v1#replace status of the specified Job*!replaceBatchV1NamespacedJobStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B; +9 +7bodybody *' +%#/definitions/io.k8s.api.batch.v1.JobBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J’ += +2016 +4 +Created) +' +%#/definitions/io.k8s.api.batch.v1.Job + +401 + + Unauthorized +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.batch.v1.JobRhttpsj +x-kubernetes-actionput +jH +x-kubernetes-group-version-kind%#kind: Job +version: v1 +group: batch +BÞ +batch_v1,partially update status of the specified Job*patchBatchV1NamespacedJobStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean JS +8 +2001 +/ +OK) +' +%#/definitions/io.k8s.api.batch.v1.Job + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jH +x-kubernetes-group-version-kind%#group: batch +kind: Job +version: v1 +J0 +.,"*pathname of the Job"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ü +T/apis/flowcontrol.apiserver.k8s.io/v1beta1/prioritylevelconfigurations/{name}/status£€ +flowcontrolApiserver_v1beta17read status of the specified PriorityLevelConfiguration*?readFlowcontrolApiserverV1beta1PriorityLevelConfigurationStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ju +Z +200S +Q +OKK +I +G#/definitions/io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j{ +x-kubernetes-group-version-kindXVgroup: flowcontrol.apiserver.k8s.io +kind: PriorityLevelConfiguration +version: v1beta1 + + +flowcontrolApiserver_v1beta1:replace status of the specified PriorityLevelConfiguration*BreplaceFlowcontrolApiserverV1beta1PriorityLevelConfigurationStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B] +[ +Ybodybody *I +G#/definitions/io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÖ +Z +200S +Q +OKK +I +G#/definitions/io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration +_ +201X +V +CreatedK +I +G#/definitions/io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +j{ +x-kubernetes-group-version-kindXVversion: v1beta1 +group: flowcontrol.apiserver.k8s.io +kind: PriorityLevelConfiguration +Bÿ +flowcontrolApiserver_v1beta1Cpartially update status of the specified PriorityLevelConfiguration*@patchFlowcontrolApiserverV1beta1PriorityLevelConfigurationStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. 
It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Ju +Z +200S +Q +OKK +I +G#/definitions/io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +j{ +x-kubernetes-group-version-kindXVgroup: flowcontrol.apiserver.k8s.io +kind: PriorityLevelConfiguration +version: v1beta1 +JG +EC"Apath&name of the PriorityLevelConfiguration"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ( +*/apis/policy/v1/watch/poddisruptionbudgetsá'Õ + policy_v1watch individual changes to a list of PodDisruptionBudget. deprecated: use the 'watch' parameter with a list operation instead.*4watchPolicyV1PodDisruptionBudgetListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jY +x-kubernetes-group-version-kind64group: policy +kind: PodDisruptionBudget +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ò) +:/api/v1/watch/namespaces/{namespace}/resourcequotas/{name}“)é +core_v1´watch changes to an object of kind ResourceQuota. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*"watchCoreV1NamespacedResourceQuota2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsjO +x-kubernetes-group-version-kind,*group: "" +kind: ResourceQuota +version: v1 +j +x-kubernetes-actionwatch +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J: +86"4pathname of the ResourceQuota"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¯) +3/api/v1/watch/namespaces/{namespace}/secrets/{name}÷(Ô +core_v1­watch changes to an object of kind Secret. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchCoreV1NamespacedSecret2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jH +x-kubernetes-group-version-kind%#group: "" +kind: Secret +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J3 +1/"-pathname of the Secret"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Í + /apis/batch/¼¹ +batchget information of a group*getBatchAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi +N +200G +E +OK? += +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup + +401 + + UnauthorizedRhttps‡* +A/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases/{name}Á)ú +coordination_v1read the specified Lease*!readCoordinationV1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.coordination.v1.Lease + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jX +x-kubernetes-group-version-kind53group: coordination.k8s.io +kind: Lease +version: v1 +É +coordination_v1replace the specified Lease*$replaceCoordinationV1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD +B +@bodybody *0 +.#/definitions/io.k8s.api.coordination.v1.LeaseBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¤ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.coordination.v1.Lease +F +201? += +Created2 +0 +.#/definitions/io.k8s.api.coordination.v1.Lease + +401 + + UnauthorizedRhttpsjX +x-kubernetes-group-version-kind53group: coordination.k8s.io +kind: Lease +version: v1 +j +x-kubernetes-actionput +*’ +coordination_v1delete a Lease*#deleteCoordinationV1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +jX +x-kubernetes-group-version-kind53group: coordination.k8s.io +kind: Lease +version: v1 +Bù +coordination_v1$partially update the specified Lease*"patchCoordinationV1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.coordination.v1.Lease + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jX +x-kubernetes-group-version-kind53group: coordination.k8s.io +kind: Lease +version: v1 +J2 +0.",pathname of the Lease"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string –* +//apis/policy/v1beta1/podsecuritypolicies/{name}â)• +policy_v1beta1$read the specified PodSecurityPolicy*"readPolicyV1beta1PodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicy + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j\ +x-kubernetes-group-version-kind97group: policy +kind: PodSecurityPolicy +version: v1beta1 +ú +policy_v1beta1'replace the specified PodSecurityPolicy*%replacePolicyV1beta1PodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BO +M +Kbodybody *; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicyBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jº +Q +201J +H +Created= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicy + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicyRhttpsj +x-kubernetes-actionput +j\ +x-kubernetes-group-version-kind97version: v1beta1 +group: policy +kind: PodSecurityPolicy +*¢ +policy_v1beta1delete a PodSecurityPolicy*$deletePolicyV1beta1PodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicy +R +202K +I +Accepted= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicy + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +j\ +x-kubernetes-group-version-kind97group: policy +kind: PodSecurityPolicy +version: v1beta1 +B” +policy_v1beta10partially update the specified PodSecurityPolicy*#patchPolicyV1beta1PodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicy + +401 + + UnauthorizedRhttpsj\ +x-kubernetes-group-version-kind97group: policy +kind: PodSecurityPolicy +version: v1beta1 +j +x-kubernetes-actionpatch +J> +<:"8pathname of the PodSecurityPolicy"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ú) +B/api/v1/watch/namespaces/{namespace}/persistentvolumeclaims/{name}³) +core_v1¼watch changes to an object of kind PersistentVolumeClaim. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.**watchCoreV1NamespacedPersistentVolumeClaim2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jW +x-kubernetes-group-version-kind42group: "" +kind: PersistentVolumeClaim +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JB +@>"<path!name of the PersistentVolumeClaim"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean  ) +6/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}å(ã +apps_v1read the specified DaemonSet*readAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JX += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.apps.v1.DaemonSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jM +x-kubernetes-group-version-kind*(group: apps +kind: DaemonSet +version: v1 +ª +apps_v1replace the specified DaemonSet* replaceAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B@ +> +<bodybody *, +*#/definitions/io.k8s.api.apps.v1.DaemonSetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jœ += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.apps.v1.DaemonSet +B +201; +9 +Created. +, +*#/definitions/io.k8s.api.apps.v1.DaemonSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jM +x-kubernetes-group-version-kind*(group: apps +kind: DaemonSet +version: v1 +*ÿ +apps_v1delete a DaemonSet*deleteAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. 
The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjM +x-kubernetes-group-version-kind*(group: apps +kind: DaemonSet +version: v1 +j +x-kubernetes-action delete +Bâ +apps_v1(partially update the specified DaemonSet*patchAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JX += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.apps.v1.DaemonSet + +401 + + UnauthorizedRhttpsjM +x-kubernetes-group-version-kind*(group: apps +kind: DaemonSet +version: v1 +j +x-kubernetes-actionpatch +J6 +42"0pathname of the DaemonSet"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ¨* +Q/apis/autoscaling/v1/watch/namespaces/{namespace}/horizontalpodautoscalers/{name}Ò)ž +autoscaling_v1¾watch changes to an object of kind HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*3watchAutoscalingV1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=group: autoscaling +kind: HorizontalPodAutoscaler +version: v1 +j +x-kubernetes-actionwatch +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JD +B@">path#name of the HorizontalPodAutoscaler"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
+ +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‚- +P/apis/autoscaling/v2beta1/namespaces/{namespace}/horizontalpodautoscalers/{name}­,Ë +autoscaling_v2beta1*read the specified HorizontalPodAutoscaler*7readAutoscalingV2beta1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jr +W +200P +N +OKH +F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jg +x-kubernetes-group-version-kindDBversion: v2beta1 +group: autoscaling +kind: HorizontalPodAutoscaler +Æ +autoscaling_v2beta1-replace the specified HorizontalPodAutoscaler*:replaceAutoscalingV2beta1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BZ +X +Vbodybody *F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÐ +\ +201U +S +CreatedH +F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler + +401 + + Unauthorized +W +200P +N +OKH +F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerRhttpsj +x-kubernetes-actionput +jg +x-kubernetes-group-version-kindDBgroup: autoscaling +kind: HorizontalPodAutoscaler +version: v2beta1 +*Í +autoscaling_v2beta1 delete a HorizontalPodAutoscaler*9deleteAutoscalingV2beta1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjg +x-kubernetes-group-version-kindDBgroup: autoscaling +kind: HorizontalPodAutoscaler +version: v2beta1 +j +x-kubernetes-action delete +BÊ +autoscaling_v2beta16partially update the specified HorizontalPodAutoscaler*8patchAutoscalingV2beta1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jr +W +200P +N +OKH +F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler + +401 + + UnauthorizedRhttpsjg +x-kubernetes-group-version-kindDBkind: HorizontalPodAutoscaler +version: v2beta1 +group: autoscaling +j +x-kubernetes-actionpatch +JD +B@">path#name of the HorizontalPodAutoscaler"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string È) +A/apis/events.k8s.io/v1/watch/namespaces/{namespace}/events/{name}‚)à + events_v1¬watch changes to an object of kind Event. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? 
[binary vendor artifact elided: protobuf-serialized Kubernetes OpenAPI v2 document. The legible fragments are the standard generated operation and parameter descriptions (list/watch/get/create/replace/patch/deletecollection, with query parameters such as allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy) for endpoints including /api/v1/namespaces/{name}/status, /api/v1/configmaps, /api/v1/namespaces/{namespace}/pods/{name}/binding, /apis/apps/v1/daemonsets, /apis/apps/v1/watch/namespaces/{namespace}/replicasets/{name}, /apis/batch/v1beta1/namespaces/{namespace}/cronjobs, /apis/batch/v1/watch/namespaces/{namespace}/cronjobs, /apis/storage.k8s.io/v1/watch/storageclasses, /apis/scheduling.k8s.io/v1beta1/priorityclasses, /apis/events.k8s.io/v1/, /apis/autoscaling/v2beta1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status, /apis/discovery.k8s.io/v1beta1/endpointslices, /apis/admissionregistration.k8s.io/v1/watch/validatingwebhookconfigurations/{name}, /apis/authorization.k8s.io/v1beta1/selfsubjectaccessreviews, and /openid/v1/jwks/.]
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean À) +;/apis/batch/v1/watch/namespaces/{namespace}/cronjobs/{name}€)Ü +batch_v1®watch changes to an object of kind CronJob. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchBatchV1NamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jL +x-kubernetes-group-version-kind)'group: batch +kind: CronJob +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J4 +20".pathname of the CronJob"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ž^ +8/apis/apps/v1/namespaces/{namespace}/controllerrevisionsá] & +apps_v10list or watch objects of kind ControllerRevision*&listAppsV1NamespacedControllerRevision2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Je + +401 + + Unauthorized +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.apps.v1.ControllerRevisionListRhttpsj +x-kubernetes-actionlist +jV +x-kubernetes-group-version-kind31group: apps +kind: ControllerRevision +version: v1 +"¡ +apps_v1create a ControllerRevision*(createAppsV1NamespacedControllerRevision2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI +G +Ebodybody *5 +3#/definitions/io.k8s.api.apps.v1.ControllerRevisionBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jü +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.apps.v1.ControllerRevision +K +201D +B +Created7 +5 +3#/definitions/io.k8s.api.apps.v1.ControllerRevision +L +202E +C +Accepted7 +5 +3#/definitions/io.k8s.api.apps.v1.ControllerRevision + +401 + + UnauthorizedRhttpsjV +x-kubernetes-group-version-kind31group: apps +kind: ControllerRevision +version: v1 +j +x-kubernetes-actionpost +*ä, +apps_v1'delete collection of ControllerRevision*2deleteAppsV1CollectionNamespacedControllerRevision2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jV +x-kubernetes-group-version-kind31group: apps +kind: ControllerRevision +version: v1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ü' +-/apis/autoscaling/v1/horizontalpodautoscalersª'ž +autoscaling_v15list or watch objects of kind HorizontalPodAutoscaler*8listAutoscalingV1HorizontalPodAutoscalerForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jq +V +200O +M +OKG +E +C#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=version: v1 +group: autoscaling +kind: HorizontalPodAutoscaler +j +x-kubernetes-actionlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean ü +D/apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas/{name}/status³À +flowcontrolApiserver_v1beta1'read status of the specified FlowSchema*/readFlowcontrolApiserverV1beta1FlowSchemaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Je +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema + +401 + + UnauthorizedRhttpsjk +x-kubernetes-group-version-kindHFgroup: flowcontrol.apiserver.k8s.io +kind: FlowSchema +version: v1beta1 +j +x-kubernetes-actionget +¡ +flowcontrolApiserver_v1beta1*replace status of the specified FlowSchema*2replaceFlowcontrolApiserverV1beta1FlowSchemaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BM +K +Ibodybody *9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchemaBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¶ +O +201H +F +Created; +9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema + +401 + + Unauthorized +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchemaRhttpsjk +x-kubernetes-group-version-kindHFkind: FlowSchema +version: v1beta1 +group: flowcontrol.apiserver.k8s.io +j +x-kubernetes-actionput +B¿ +flowcontrolApiserver_v1beta13partially update status of the specified FlowSchema*0patchFlowcontrolApiserverV1beta1FlowSchemaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean Je +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jk +x-kubernetes-group-version-kindHFgroup: flowcontrol.apiserver.k8s.io +kind: FlowSchema +version: v1beta1 +J7 +53"1pathname of the FlowSchema"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ­ +1/api/v1/namespaces/{namespace}/pods/{name}/attach÷ – +core_v1%connect GET requests to attach of Pod*#connectCoreV1GetNamespacedPodAttach2*/*:*/*J7 + +401 + + Unauthorized + +200 + +OK + ² +stringRhttpsj! +x-kubernetes-action +connect +jR +x-kubernetes-group-version-kind/-group: "" +kind: PodAttachOptions +version: v1 +"˜ +core_v1&connect POST requests to attach of Pod*$connectCoreV1PostNamespacedPodAttach2*/*:*/*J7 + +401 + + Unauthorized + +200 + +OK + ² +stringRhttpsj! +x-kubernetes-action +connect +jR +x-kubernetes-group-version-kind/-group: "" +kind: PodAttachOptions +version: v1 +Jœ +™–“querytThe container in which to execute the command. Defaults to only container if there is only one container in the pod." container2string J= +;9"7pathname of the PodAttachOptions"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜Jƒ +€~|query_Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true."stderr2boolean J +}{query_Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false."stdin2boolean Jƒ +€~|query_Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true."stdout2boolean Jð +íêçqueryÌTTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false."tty2boolean á& +/api/v1/servicesÌ&À +core_v1%list or watch objects of kind Service*!listCoreV1ServiceForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*JZ +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.core.v1.ServiceList + +401 + + UnauthorizedRhttpsjI +x-kubernetes-group-version-kind&$group: "" +kind: Service +version: v1 +j +x-kubernetes-actionlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ü +/apis/extensions/Æà + +extensionsget information of a group*getExtensionsAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi +N +200G +E +OK? 
+= +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup + +401 + + UnauthorizedRhttpsâ] +(/apis/policy/v1beta1/podsecuritypoliciesµ]®& +policy_v1beta1/list or watch objects of kind PodSecurityPolicy*"listPolicyV1beta1PodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicyList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +j\ +x-kubernetes-group-version-kind97version: v1beta1 +group: policy +kind: PodSecurityPolicy +"Á +policy_v1beta1create a PodSecurityPolicy*$createPolicyV1beta1PodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BO +M +Kbodybody *; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicyBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JŽ +L +200E +C +OK= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicy +Q +201J +H +Created= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicy +R +202K +I +Accepted= +; +9#/definitions/io.k8s.api.policy.v1beta1.PodSecurityPolicy + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +j\ +x-kubernetes-group-version-kind97version: v1beta1 +group: policy +kind: PodSecurityPolicy +*ì, +policy_v1beta1&delete collection of PodSecurityPolicy*.deletePolicyV1beta1CollectionPodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
[Binary blob omitted: this span is protobuf-serialized Kubernetes OpenAPI v2 (swagger) specification data pulled in with the vendored dependencies, and is not meaningfully representable as text. The readable fragments describe the standard read/replace/delete/patch/list/watch operations and their query parameters (dryRun, fieldManager, force, propagationPolicy, orphanDependents, gracePeriodSeconds, resourceVersion, resourceVersionMatch, continue, limit, timeoutSeconds, allowWatchBookmarks, watch, pretty) for resources including ResourceQuota, ValidatingWebhookConfiguration, PriorityClass, Deployment, EndpointSlice, StorageClass, NetworkPolicy, Ingress, LimitRange, Service status, and ReplicationController, across groups such as core/v1, apps/v1, admissionregistration.k8s.io, scheduling.k8s.io, networking.k8s.io, storage.k8s.io, discovery.k8s.io, and events.k8s.io.]
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JB +@>"<path!name of the ReplicationController"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ·' +-/apis/rbac.authorization.k8s.io/v1beta1/roles…'ù +rbacAuthorization_v1beta1"list or watch objects of kind Role*0listRbacAuthorizationV1beta1RoleForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.rbac.v1beta1.RoleList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jb +x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io +kind: Role +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ` +I/apis/autoscaling/v2beta1/namespaces/{namespace}/horizontalpodautoscalersÏ_ä& +autoscaling_v2beta15list or watch objects of kind HorizontalPodAutoscaler*7listAutoscalingV2beta1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jv +[ +200T +R +OKL +J +H#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jg +x-kubernetes-group-version-kindDBversion: v2beta1 +group: autoscaling +kind: HorizontalPodAutoscaler +"˜ + +autoscaling_v2beta1 create a HorizontalPodAutoscaler*9createAutoscalingV2beta1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BZ +X +Vbodybody *F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¯ +W +200P +N +OKH +F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler +\ +201U +S +CreatedH +F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler +] +202V +T +AcceptedH +F +D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jg +x-kubernetes-group-version-kindDBgroup: autoscaling +kind: HorizontalPodAutoscaler +version: v2beta1 +*—- +autoscaling_v2beta1,delete collection of HorizontalPodAutoscaler*CdeleteAutoscalingV2beta1CollectionNamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
+ +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jg +x-kubernetes-group-version-kindDBversion: v2beta1 +group: autoscaling +kind: HorizontalPodAutoscaler +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ó +/apis/networking.k8s.io/v1/ÓÐ + networking_v1get available resources*getNetworkingV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps¢_ +K/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/rolebindingsÒ^Ç& +rbacAuthorization_v1beta1)list or watch objects of kind RoleBinding*1listRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." 
fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jc +H +200A +? +OK9 +7 +5#/definitions/io.k8s.api.rbac.v1beta1.RoleBindingList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +ji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: RoleBinding +version: v1beta1 +" +rbacAuthorization_v1beta1create a RoleBinding*3createRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG +E +Cbodybody *3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBindingBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jö +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding +I +201B +@ +Created5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding +J +202C +A +Accepted5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +ji +x-kubernetes-group-version-kindFDversion: v1beta1 +group: rbac.authorization.k8s.io +kind: RoleBinding +*- +rbacAuthorization_v1beta1 delete collection of RoleBinding*=deleteRbacAuthorizationV1beta1CollectionNamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +ji +x-kubernetes-group-version-kindFDkind: RoleBinding +version: v1beta1 +group: rbac.authorization.k8s.io +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ‘ +%/apis/apiregistration.k8s.io/v1beta1/çä +apiregistration_v1beta1get available resources*%getApiregistrationV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttpsÓ( +5/apis/apps/v1/watch/namespaces/{namespace}/daemonsets™(« +apps_v1uwatch individual changes to a list of DaemonSet. 
deprecated: use the 'watch' parameter with a list operation instead.*"watchAppsV1NamespacedDaemonSetList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjM +x-kubernetes-group-version-kind*(group: apps +kind: DaemonSet +version: v1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ×( +6/apis/apps/v1/watch/namespaces/{namespace}/deploymentsœ(® +apps_v1vwatch individual changes to a list of Deployment. deprecated: use the 'watch' parameter with a list operation instead.*#watchAppsV1NamespacedDeploymentList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)group: apps +kind: Deployment +version: v1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean ž +R/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/statusÇÇ +autoscaling_v14read status of the specified HorizontalPodAutoscaler*8readAutoscalingV1NamespacedHorizontalPodAutoscalerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jm +R +200K +I +OKC +A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=kind: HorizontalPodAutoscaler +version: v1 +group: autoscaling +j +x-kubernetes-actionget +¸ +autoscaling_v17replace status of the specified HorizontalPodAutoscaler*;replaceAutoscalingV1NamespacedHorizontalPodAutoscalerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BU +S +Qbodybody *A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÆ +R +200K +I +OKC +A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler +W +201P +N +CreatedC +A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jb +x-kubernetes-group-version-kind?=version: v1 +group: autoscaling +kind: HorizontalPodAutoscaler +BÆ +autoscaling_v1@partially update status of the specified HorizontalPodAutoscaler*9patchAutoscalingV1NamespacedHorizontalPodAutoscalerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean Jm +R +200K +I +OKC +A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jb +x-kubernetes-group-version-kind?=group: autoscaling +kind: HorizontalPodAutoscaler +version: v1 +JD +B@">path#name of the HorizontalPodAutoscaler"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string À] +//apis/rbac.authorization.k8s.io/v1/clusterrolesŒ]©& +rbacAuthorization_v1)list or watch objects of kind ClusterRole*"listRbacAuthorizationV1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J^ +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.rbac.v1.ClusterRoleList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: ClusterRole +version: v1 +"• +rbacAuthorization_v1create a ClusterRole*$createRbacAuthorizationV1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB +@ +>bodybody *. +,#/definitions/io.k8s.api.rbac.v1.ClusterRoleBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jç +E +202> +< +Accepted0 +. +,#/definitions/io.k8s.api.rbac.v1.ClusterRole + +401 + + Unauthorized +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.rbac.v1.ClusterRole +D +201= +; +Created0 +. +,#/definitions/io.k8s.api.rbac.v1.ClusterRoleRhttpsj +x-kubernetes-actionpost +jd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: ClusterRole +version: v1 +*ô, +rbacAuthorization_v1 delete collection of ClusterRole*.deleteRbacAuthorizationV1CollectionClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjd +x-kubernetes-group-version-kindA?version: v1 +group: rbac.authorization.k8s.io +kind: ClusterRole +j* +x-kubernetes-actiondeletecollection +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ³) +7/apis/apps/v1/namespaces/{namespace}/replicasets/{name}÷(ç +apps_v1read the specified ReplicaSet*readAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JY + +401 + + Unauthorized +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSetRhttpsj +x-kubernetes-actionget +jN +x-kubernetes-group-version-kind+)group: apps +kind: ReplicaSet +version: v1 +° +apps_v1 replace the specified ReplicaSet*!replaceAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA +? +=bodybody *- ++#/definitions/io.k8s.api.apps.v1.ReplicaSetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jž +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet +C +201< +: +Created/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jN +x-kubernetes-group-version-kind+)kind: ReplicaSet +version: v1 +group: apps +*‚ +apps_v1delete a ReplicaSet* deleteAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)group: apps +kind: ReplicaSet +version: v1 +j +x-kubernetes-action delete +Bæ +apps_v1)partially update the specified ReplicaSet*patchAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean JY +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jN +x-kubernetes-group-version-kind+)group: apps +kind: ReplicaSet +version: v1 +J7 +53"1pathname of the ReplicaSet"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ‡' +/apis/batch/v1beta1/cronjobsæ&Ú + batch_v1beta1%list or watch objects of kind CronJob*'listBatchV1beta1CronJobForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J` +E +200> +< +OK6 +4 +2#/definitions/io.k8s.api.batch.v1beta1.CronJobList + +401 + + UnauthorizedRhttpsjQ +x-kubernetes-group-version-kind.,group: batch +kind: CronJob +version: v1beta1 +j +x-kubernetes-actionlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean £* +5/apis/networking.k8s.io/v1beta1/ingressclasses/{name}é)˜ +networking_v1beta1read the specified IngressClass*!readNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jf +K +200D +B +OK< +: +8#/definitions/io.k8s.api.networking.v1beta1.IngressClass + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jb +x-kubernetes-group-version-kind?=group: networking.k8s.io +kind: IngressClass +version: v1beta1 +û +networking_v1beta1"replace the specified IngressClass*$replaceNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN +L +Jbodybody *: +8#/definitions/io.k8s.api.networking.v1beta1.IngressClassBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string J¸ +K +200D +B +OK< +: +8#/definitions/io.k8s.api.networking.v1beta1.IngressClass +P +201I +G +Created< +: +8#/definitions/io.k8s.api.networking.v1beta1.IngressClass + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jb +x-kubernetes-group-version-kind?=group: networking.k8s.io +kind: IngressClass +version: v1beta1 +*§ +networking_v1beta1delete an IngressClass*#deleteNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=group: networking.k8s.io +kind: IngressClass +version: v1beta1 +j +x-kubernetes-action delete +B— +networking_v1beta1+partially update the specified IngressClass*"patchNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. 
It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jf +K +200D +B +OK< +: +8#/definitions/io.k8s.api.networking.v1beta1.IngressClass + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jb +x-kubernetes-group-version-kind?=group: networking.k8s.io +kind: IngressClass +version: v1beta1 +J9 +75"3pathname of the IngressClass"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ü+ +G/apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}+§ +policy_v1beta1&read the specified PodDisruptionBudget*.readPolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ji +N +200G +E +OK? += +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j^ +x-kubernetes-group-version-kind;9group: policy +kind: PodDisruptionBudget +version: v1beta1 + +policy_v1beta1)replace the specified PodDisruptionBudget*1replacePolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BQ +O +Mbodybody *= +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¾ + +401 + + Unauthorized +N +200G +E +OK? += +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget +S +201L +J +Created? += +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetRhttpsj +x-kubernetes-actionput +j^ +x-kubernetes-group-version-kind;9group: policy +kind: PodDisruptionBudget +version: v1beta1 +*² +policy_v1beta1delete a PodDisruptionBudget*0deletePolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. 
The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj +x-kubernetes-action delete +j^ +x-kubernetes-group-version-kind;9version: v1beta1 +group: policy +kind: PodDisruptionBudget +B¦ +policy_v1beta12partially update the specified PodDisruptionBudget*/patchPolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Ji +N +200G +E +OK? += +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +j^ +x-kubernetes-group-version-kind;9group: policy +kind: PodDisruptionBudget +version: v1beta1 +J@ +><":pathname of the PodDisruptionBudget"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ‹( +1/apis/storage.k8s.io/v1beta1/watch/storageclassesÕ'É +storage_v1beta1xwatch individual changes to a list of StorageClass. deprecated: use the 'watch' parameter with a list operation instead.*#watchStorageV1beta1StorageClassList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +j_ +x-kubernetes-group-version-kind<:group: storage.k8s.io +kind: StorageClass +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean û +/apis/extensions/v1beta1/ÝÚ +extensions_v1beta1get available resources* getExtensionsV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps½^ +6/apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas‚^É& +flowcontrolApiserver_v1beta1(list or watch objects of kind FlowSchema*)listFlowcontrolApiserverV1beta1FlowSchema2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ji +N +200G +E +OK? += +;#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchemaList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jk +x-kubernetes-group-version-kindHFgroup: flowcontrol.apiserver.k8s.io +kind: FlowSchema +version: v1beta1 +"Ö +flowcontrolApiserver_v1beta1create a FlowSchema*+createFlowcontrolApiserverV1beta1FlowSchema2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BM +K +Ibodybody *9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchemaBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jˆ +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema +O +201H +F +Created; +9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema +P +202I +G +Accepted; +9 +7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jk +x-kubernetes-group-version-kindHFgroup: flowcontrol.apiserver.k8s.io +kind: FlowSchema +version: v1beta1 +*‰- +flowcontrolApiserver_v1beta1delete collection of FlowSchema*5deleteFlowcontrolApiserverV1beta1CollectionFlowSchema2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jk +x-kubernetes-group-version-kindHFgroup: flowcontrol.apiserver.k8s.io +kind: FlowSchema +version: v1beta1 +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ˜ +(/apis/rbac.authorization.k8s.io/v1beta1/ëè +rbacAuthorization_v1beta1get available resources*'getRbacAuthorizationV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp + +401 + + Unauthorized +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceListRhttps( +./apis/rbac.authorization.k8s.io/v1/watch/rolesÝ'Ñ +rbacAuthorization_v1pwatch individual changes to a list of Role. deprecated: use the 'watch' parameter with a list operation instead.*0watchRbacAuthorizationV1RoleListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? 
+=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# +x-kubernetes-action  +watchlist +j] +x-kubernetes-group-version-kind:8group: rbac.authorization.k8s.io +kind: Role +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ý& +/apis/apps/v1/statefulsetsÞ&Ò +apps_v1)list or watch objects of kind StatefulSet*%listAppsV1StatefulSetForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J^ +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.apps.v1.StatefulSetList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jO +x-kubernetes-group-version-kind,*group: apps +kind: StatefulSet +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." 
fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean à +/apis/discovery.k8s.io/ÄÁ + discoveryget information of a group*getDiscoveryAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi +N +200G +E +OK? += +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup + +401 + + UnauthorizedRhttps™) +J/apis/discovery.k8s.io/v1beta1/watch/namespaces/{namespace}/endpointslicesÊ(Ü +discovery_v1beta1ywatch individual changes to a list of EndpointSlice. deprecated: use the 'watch' parameter with a list operation instead.*0watchDiscoveryV1beta1NamespacedEndpointSliceList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? 
+=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# +x-kubernetes-action  +watchlist +jb +x-kubernetes-group-version-kind?=group: discovery.k8s.io +kind: EndpointSlice +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‹* +B/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses/{name}Ä)ú + networking_v1read the specified Ingress*!readNetworkingV1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.networking.v1.Ingress + +401 + + UnauthorizedRhttpsjX +x-kubernetes-group-version-kind53group: networking.k8s.io +kind: Ingress +version: v1 +j +x-kubernetes-actionget +É + networking_v1replace the specified Ingress*$replaceNetworkingV1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD +B +@bodybody *0 +.#/definitions/io.k8s.api.networking.v1.IngressBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¤ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.networking.v1.Ingress +F +201? += +Created2 +0 +.#/definitions/io.k8s.api.networking.v1.Ingress + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jX +x-kubernetes-group-version-kind53group: networking.k8s.io +kind: Ingress +version: v1 +*“ + networking_v1delete an Ingress*#deleteNetworkingV1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +jX +x-kubernetes-group-version-kind53version: v1 +group: networking.k8s.io +kind: Ingress +Bù + networking_v1&partially update the specified Ingress*"patchNetworkingV1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.networking.v1.Ingress + +401 + + UnauthorizedRhttpsjX +x-kubernetes-group-version-kind53kind: Ingress +version: v1 +group: networking.k8s.io +j +x-kubernetes-actionpatch +J4 +20".pathname of the Ingress"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ô +/api/v1/ÇÄ +core_v1get available resources*getCoreV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps¦] +0/apis/apps/v1/namespaces/{namespace}/replicasetsñ\€& +apps_v1(list or watch objects of kind ReplicaSet*listAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J] +B +200; +9 +OK3 +1 +/#/definitions/io.k8s.api.apps.v1.ReplicaSetList + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)kind: ReplicaSet +version: v1 +group: apps +j +x-kubernetes-actionlist +"é +apps_v1create a ReplicaSet* createAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA +? +=bodybody *- ++#/definitions/io.k8s.api.apps.v1.ReplicaSetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jä +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet +C +201< +: +Created/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet +D +202= +; +Accepted/ +- ++#/definitions/io.k8s.api.apps.v1.ReplicaSet + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)version: v1 +group: apps +kind: ReplicaSet +j +x-kubernetes-actionpost +*Ì, +apps_v1delete collection of ReplicaSet**deleteAppsV1CollectionNamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)version: v1 +group: apps +kind: ReplicaSet +j* +x-kubernetes-actiondeletecollection +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string â+ +>/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}Ÿ+À +certificates_v1,read the specified CertificateSigningRequest*+readCertificatesV1CertificateSigningRequest2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jp + +401 + + Unauthorized +U +200N +L +OKF +D +B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestRhttpsj +x-kubernetes-actionget +jl +x-kubernetes-group-version-kindIGgroup: certificates.k8s.io +kind: CertificateSigningRequest +version: v1 +· +certificates_v1/replace the specified CertificateSigningRequest*.replaceCertificatesV1CertificateSigningRequest2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BX +V +Tbodybody *D +B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestBž +›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÌ +U +200N +L +OKF +D +B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequest +Z +201S +Q +CreatedF +D +B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequest + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jl +x-kubernetes-group-version-kindIGgroup: certificates.k8s.io +kind: CertificateSigningRequest +version: v1 +*Ä +certificates_v1"delete a CertificateSigningRequest*-deleteCertificatesV1CertificateSigningRequest2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +jl +x-kubernetes-group-version-kindIGgroup: certificates.k8s.io +kind: CertificateSigningRequest +version: v1 +B¿ +certificates_v18partially update the specified CertificateSigningRequest*,patchCertificatesV1CertificateSigningRequest2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jp +U +200N +L +OKF +D +B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequest + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jl +x-kubernetes-group-version-kindIGgroup: certificates.k8s.io +kind: CertificateSigningRequest +version: v1 +JF +DB"@path%name of the CertificateSigningRequest"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string  ^ +9/apis/extensions/v1beta1/namespaces/{namespace}/ingressesâ] & +extensions_v1beta1%list or watch objects of kind Ingress*&listExtensionsV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Je +J +200C +A +OK; +9 +7#/definitions/io.k8s.api.extensions.v1beta1.IngressList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jV +x-kubernetes-group-version-kind31group: extensions +kind: Ingress +version: v1beta1 +"¢ +extensions_v1beta1create an Ingress*(createExtensionsV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI +G +Ebodybody *5 +3#/definitions/io.k8s.api.extensions.v1beta1.IngressBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jü +F +200? 
+= +OK7 +5 +3#/definitions/io.k8s.api.extensions.v1beta1.Ingress +K +201D +B +Created7 +5 +3#/definitions/io.k8s.api.extensions.v1beta1.Ingress +L +202E +C +Accepted7 +5 +3#/definitions/io.k8s.api.extensions.v1beta1.Ingress + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jV +x-kubernetes-group-version-kind31group: extensions +kind: Ingress +version: v1beta1 +*ä, +extensions_v1beta1delete collection of Ingress*2deleteExtensionsV1beta1CollectionNamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jV +x-kubernetes-group-version-kind31group: extensions +kind: Ingress +version: v1beta1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Þ( +&/api/v1/watch/persistentvolumes/{name}³(è +core_v1·watch changes to an object of kind PersistentVolume. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj +x-kubernetes-actionwatch +jR +x-kubernetes-group-version-kind/-group: "" +kind: PersistentVolume +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J= +;9"7pathname of the PersistentVolume"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
+ +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‰( +$/api/v1/watch/replicationcontrollersà'Ô +core_v1watch individual changes to a list of ReplicationController. deprecated: use the 'watch' parameter with a list operation instead.*4watchCoreV1ReplicationControllerListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjW +x-kubernetes-group-version-kind42group: "" +kind: ReplicationController +version: v1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ø& +/apis/apps/v1/deploymentsÚ&Î +apps_v1(list or watch objects of kind Deployment*$listAppsV1DeploymentForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J] +B +200; +9 +OK3 +1 +/#/definitions/io.k8s.api.apps.v1.DeploymentList + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)version: v1 +group: apps +kind: Deployment +j +x-kubernetes-actionlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
[Binary content omitted: this hunk is a protobuf-serialized Kubernetes OpenAPI v2 (Swagger) document from a vendored dependency, and its binary framing cannot be reproduced as text. The readable strings embedded in the blob are the upstream descriptions of the standard list/watch query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch), of delete options (dryRun, gracePeriodSeconds, orphanDependents, propagationPolicy), and of operations on paths under /api/v1 and /apis/{batch,certificates,events,extensions,node,policy,rbac.authorization,scheduling}.k8s.io, including list/create/delete-collection verbs for CertificateSigningRequest, RoleBinding, ClusterRole, RuntimeClass, PersistentVolume, Pod, CronJob, Ingress, and service proxy endpoints.]
+ +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean JV + +401 + + Unauthorized +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.core.v1.PodListRhttpsj +x-kubernetes-actionlist +jE +x-kubernetes-group-version-kind" group: "" +kind: Pod +version: v1 +"¶ +core_v1 create a Pod*createCoreV1NamespacedPod2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B: +8 +6bodybody *& +$#/definitions/io.k8s.api.core.v1.PodBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÏ + +401 + + Unauthorized +7 +2000 +. +OK( +& +$#/definitions/io.k8s.api.core.v1.Pod +< +2015 +3 +Created( +& +$#/definitions/io.k8s.api.core.v1.Pod += +2026 +4 +Accepted( +& +$#/definitions/io.k8s.api.core.v1.PodRhttpsjE +x-kubernetes-group-version-kind" kind: Pod +version: v1 +group: "" +j +x-kubernetes-actionpost +*µ, +core_v1delete collection of Pod*#deleteCoreV1CollectionNamespacedPod2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jE +x-kubernetes-group-version-kind" group: "" +kind: Pod +version: v1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Þ\ +&/api/v1/namespaces/{namespace}/secrets³\î% +core_v1$list or watch objects of kind Secret*listCoreV1NamespacedSecret2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean JY +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.core.v1.SecretList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jH +x-kubernetes-group-version-kind%#group: "" +kind: Secret +version: v1 +"Ë +core_v1create a Secret*createCoreV1NamespacedSecret2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B= +; +9bodybody *) +'#/definitions/io.k8s.api.core.v1.SecretBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JØ +: +2003 +1 +OK+ +) +'#/definitions/io.k8s.api.core.v1.Secret +? 
+2018 +6 +Created+ +) +'#/definitions/io.k8s.api.core.v1.Secret +@ +2029 +7 +Accepted+ +) +'#/definitions/io.k8s.api.core.v1.Secret + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jH +x-kubernetes-group-version-kind%#version: v1 +group: "" +kind: Secret +*¾, +core_v1delete collection of Secret*&deleteCoreV1CollectionNamespacedSecret2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jH +x-kubernetes-group-version-kind%#group: "" +kind: Secret +version: v1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Á) +D/apis/certificates.k8s.io/v1/watch/certificatesigningrequests/{name}ø(¤ +certificates_v1Àwatch changes to an object of kind CertificateSigningRequest. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*,watchCertificatesV1CertificateSigningRequest2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jl +x-kubernetes-group-version-kindIGgroup: certificates.k8s.io +kind: CertificateSigningRequest +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JF +DB"@path%name of the CertificateSigningRequest"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ï +/apis/policy/v1beta1/ÕÒ +policy_v1beta1get available resources*getPolicyV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps–) +=/apis/apiregistration.k8s.io/v1beta1/watch/apiservices/{name}Ô( +apiregistration_v1beta1±watch changes to an object of kind APIService. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*%watchApiregistrationV1beta1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsje +x-kubernetes-group-version-kindB@version: v1beta1 +kind: APIService +group: apiregistration.k8s.io +j +x-kubernetes-actionwatch +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J7 +53"1pathname of the APIService"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ù( +:/apis/events.k8s.io/v1/watch/namespaces/{namespace}/eventsš(¬ + events_v1qwatch individual changes to a list of Event. deprecated: use the 'watch' parameter with a list operation instead.* watchEventsV1NamespacedEventList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jR +x-kubernetes-group-version-kind/-group: events.k8s.io +kind: Event +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ú( +,/apis/storage.k8s.io/v1beta1/csinodes/{name}É(ø +storage_v1beta1read the specified CSINode*readStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J^ +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINode + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jZ +x-kubernetes-group-version-kind75group: storage.k8s.io +kind: CSINode +version: v1beta1 +Ë +storage_v1beta1replace the specified CSINode*replaceStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BF +D +Bbodybody *2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINodeBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¨ +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINode +H +201A +? +Created4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINode + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jZ +x-kubernetes-group-version-kind75group: storage.k8s.io +kind: CSINode +version: v1beta1 +*ü +storage_v1beta1delete a CSINode*deleteStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J© +I +202B +@ +Accepted4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINode + +401 + + Unauthorized +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINodeRhttpsj +x-kubernetes-action delete +jZ +x-kubernetes-group-version-kind75group: storage.k8s.io +kind: CSINode +version: v1beta1 +B÷ +storage_v1beta1&partially update the specified CSINode*patchStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J^ +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINode + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jZ +x-kubernetes-group-version-kind75kind: CSINode +version: v1beta1 +group: storage.k8s.io +J4 +20".pathname of the CSINode"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ö) +6/apis/rbac.authorization.k8s.io/v1/clusterroles/{name}») +rbacAuthorization_v1read the specified ClusterRole*"readRbacAuthorizationV1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JZ +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.rbac.v1.ClusterRole + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: ClusterRole +version: v1 +Û +rbacAuthorization_v1!replace the specified ClusterRole*%replaceRbacAuthorizationV1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB +@ +>bodybody *. 
+,#/definitions/io.k8s.api.rbac.v1.ClusterRoleBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J  +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.rbac.v1.ClusterRole +D +201= +; +Created0 +. +,#/definitions/io.k8s.api.rbac.v1.ClusterRole + +401 + + UnauthorizedRhttpsjd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: ClusterRole +version: v1 +j +x-kubernetes-actionput +*ª +rbacAuthorization_v1delete a ClusterRole*$deleteRbacAuthorizationV1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +jd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: ClusterRole +version: v1 +B +rbacAuthorization_v1*partially update the specified ClusterRole*#patchRbacAuthorizationV1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
[binary protobuf blob omitted: vendored, serialized Kubernetes OpenAPI v2 specification — API path, operation, and query-parameter definitions for the core, apps, rbac.authorization.k8s.io, autoscaling, storage.k8s.io, discovery.k8s.io, scheduling.k8s.io, apiextensions.k8s.io, and authentication.k8s.io groups; not human-readable in unified diff form]
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J6 +42"0pathname of the CSIDriver"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean ÷ +7/apis/storage.k8s.io/v1/volumeattachments/{name}/status»˜ + +storage_v1-read status of the specified VolumeAttachment*#readStorageV1VolumeAttachmentStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jb +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +j^ +x-kubernetes-group-version-kind;9group: storage.k8s.io +kind: VolumeAttachment +version: v1 +ó + +storage_v10replace status of the specified VolumeAttachment*&replaceStorageV1VolumeAttachmentStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BJ +H +Fbodybody *6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachmentBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J° +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment +L +201E +C +Created8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +j^ +x-kubernetes-group-version-kind;9group: storage.k8s.io +kind: VolumeAttachment +version: v1 +B— + +storage_v19partially update status of the specified VolumeAttachment*$patchStorageV1VolumeAttachmentStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jb +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment + +401 + + UnauthorizedRhttpsj^ +x-kubernetes-group-version-kind;9kind: VolumeAttachment +version: v1 +group: storage.k8s.io +j +x-kubernetes-actionpatch +J= +;9"7pathname of the VolumeAttachment"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string í( +3/apis/storage.k8s.io/v1/watch/storageclasses/{name}µ(î + +storage_v1³watch changes to an object of kind StorageClass. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchStorageV1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjZ +x-kubernetes-group-version-kind75group: storage.k8s.io +kind: StorageClass +version: v1 +j +x-kubernetes-actionwatch +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J9 +75"3pathname of the StorageClass"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¢a +J/apis/admissionregistration.k8s.io/v1beta1/validatingwebhookconfigurationsÓ`¦' +admissionregistration_v1beta1listAdmissionregistrationV1beta1ValidatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean J‡ +l +200e +c +OK] +[ +Y#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +j +x-kubernetes-group-version-kind\Zgroup: admissionregistration.k8s.io +kind: ValidatingWebhookConfiguration +version: v1beta1 +"Œ +admissionregistration_v1beta1'create a ValidatingWebhookConfiguration*@createAdmissionregistrationV1beta1ValidatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Bk +i +gbodybody *W +U#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jâ +h +200a +_ +OKY +W +U#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration +m +201f +d +CreatedY +W +U#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration +n +202g +e +AcceptedY +W +U#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +j +x-kubernetes-group-version-kind\Zgroup: admissionregistration.k8s.io +kind: ValidatingWebhookConfiguration +version: v1beta1 +*Ç- +admissionregistration_v1beta13delete collection of ValidatingWebhookConfiguration*JdeleteAdmissionregistrationV1beta1CollectionValidatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +j +x-kubernetes-group-version-kind\Zkind: ValidatingWebhookConfiguration +version: v1beta1 +group: admissionregistration.k8s.io +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string · + ++/apis/authentication.k8s.io/v1/tokenreviews‡ +"ù +authentication_v1create a TokenReview*!createAuthenticationV1TokenReview2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BL +J +Hbodybody *8 +6#/definitions/io.k8s.api.authentication.v1.TokenReviewJ… +I +200B +@ +OK: +8 +6#/definitions/io.k8s.api.authentication.v1.TokenReview +N +201G +E +Created: +8 +6#/definitions/io.k8s.api.authentication.v1.TokenReview +O +202H +F +Accepted: +8 +6#/definitions/io.k8s.api.authentication.v1.TokenReview + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +j` +x-kubernetes-group-version-kind=;group: authentication.k8s.io +kind: TokenReview +version: v1 +Jž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JO +MKIquery-If 'true', then the output is pretty printed."pretty2string õ' +2/apis/autoscaling/v2beta2/horizontalpodautoscalers¾'² +autoscaling_v2beta25list or watch objects of kind HorizontalPodAutoscaler*=listAutoscalingV2beta2HorizontalPodAutoscalerForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jv +[ +200T +R +OKL +J +H#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jg +x-kubernetes-group-version-kindDBgroup: autoscaling +kind: HorizontalPodAutoscaler +version: v2beta2 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean –_ +@/apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgetsÑ^À& +policy_v1beta11list or watch objects of kind PodDisruptionBudget*.listPolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jm +R +200K +I +OKC +A +?#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetList + +401 + + UnauthorizedRhttpsj^ +x-kubernetes-group-version-kind;9group: policy +kind: PodDisruptionBudget +version: v1beta1 +j +x-kubernetes-actionlist +"Ù +policy_v1beta1create a PodDisruptionBudget*0createPolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BQ +O +Mbodybody *= +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J” + +401 + + Unauthorized +N +200G +E +OK? += +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget +S +201L +J +Created? += +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget +T +202M +K +Accepted? += +;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetRhttpsj^ +x-kubernetes-group-version-kind;9kind: PodDisruptionBudget +version: v1beta1 +group: policy +j +x-kubernetes-actionpost +*ü, +policy_v1beta1(delete collection of PodDisruptionBudget*:deletePolicyV1beta1CollectionNamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj^ +x-kubernetes-group-version-kind;9group: policy +kind: PodDisruptionBudget +version: v1beta1 +j* +x-kubernetes-actiondeletecollection +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ô^ +;/apis/rbac.authorization.k8s.io/v1beta1/clusterrolebindings´^Ù& +rbacAuthorization_v1beta10list or watch objects of kind ClusterRoleBinding*.listRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jj +O +200H +F +OK@ +> +<#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBindingList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jp +x-kubernetes-group-version-kindMKgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1beta1 +"é +rbacAuthorization_v1beta1create a ClusterRoleBinding*0createRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN +L +Jbodybody *: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBindingBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J‹ +K +200D +B +OK< +: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding +P +201I +G +Created< +: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding +Q +202J +H +Accepted< +: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding + +401 + + UnauthorizedRhttpsjp +x-kubernetes-group-version-kindMKkind: ClusterRoleBinding +version: v1beta1 +group: rbac.authorization.k8s.io +j +x-kubernetes-actionpost +*˜- +rbacAuthorization_v1beta1'delete collection of ClusterRoleBinding*:deleteRbacAuthorizationV1beta1CollectionClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. 
Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jp +x-kubernetes-group-version-kindMKkind: ClusterRoleBinding +version: v1beta1 +group: rbac.authorization.k8s.io +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ´ + +'/api/v1/namespaces/{namespace}/bindingsˆ +"˜ +core_v1create a Binding*createCoreV1NamespacedBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> +< +:bodybody ** +(#/definitions/io.k8s.api.core.v1.BindingJÛ +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.core.v1.Binding +@ +2019 +7 +Created, +* +(#/definitions/io.k8s.api.core.v1.Binding +A +202: +8 +Accepted, +* +(#/definitions/io.k8s.api.core.v1.Binding + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jI +x-kubernetes-group-version-kind&$version: v1 +group: "" +kind: Binding +Jž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ô) +P/apis/admissionregistration.k8s.io/v1/watch/mutatingwebhookconfigurations/{name}Ÿ)È +admissionregistration_v1Ãwatch changes to an object of kind MutatingWebhookConfiguration. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*8watchAdmissionregistrationV1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jx +x-kubernetes-group-version-kindUSgroup: admissionregistration.k8s.io +kind: MutatingWebhookConfiguration +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JI +GE"Cpath(name of the MutatingWebhookConfiguration"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¼) +O/apis/autoscaling/v2beta1/watch/namespaces/{namespace}/horizontalpodautoscalersè(ú +autoscaling_v2beta1ƒwatch individual changes to a list of HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead.* +< +:#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetList + +401 + + UnauthorizedRhttpsjY +x-kubernetes-group-version-kind64group: policy +kind: PodDisruptionBudget +version: v1 +j +x-kubernetes-actionlist +"¶ + policy_v1create a PodDisruptionBudget*+createPolicyV1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BL +J +Hbodybody *8 +6#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J… + +401 + + Unauthorized +I +200B +@ +OK: +8 +6#/definitions/io.k8s.api.policy.v1.PodDisruptionBudget +N +201G +E +Created: +8 +6#/definitions/io.k8s.api.policy.v1.PodDisruptionBudget +O +202H +F +Accepted: +8 +6#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetRhttpsj +x-kubernetes-actionpost +jY +x-kubernetes-group-version-kind64group: policy +kind: PodDisruptionBudget +version: v1 +*í, + policy_v1(delete collection of PodDisruptionBudget*5deletePolicyV1CollectionNamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjY +x-kubernetes-group-version-kind64version: v1 +group: policy +kind: PodDisruptionBudget +j* +x-kubernetes-actiondeletecollection +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ò_ +D/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers‰_Ð& +autoscaling_v15list or watch objects of kind HorizontalPodAutoscaler*2listAutoscalingV1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jq +V +200O +M +OKG +E +C#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jb +x-kubernetes-group-version-kind?=group: autoscaling +kind: HorizontalPodAutoscaler +version: v1 +"õ +autoscaling_v1 create a HorizontalPodAutoscaler*4createAutoscalingV1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BU +S +Qbodybody *A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string J  +R +200K +I +OKC +A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler +W +201P +N +CreatedC +A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler +X +202Q +O +AcceptedC +A +?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jb +x-kubernetes-group-version-kind?=kind: HorizontalPodAutoscaler +version: v1 +group: autoscaling +*ˆ- +autoscaling_v1,delete collection of HorizontalPodAutoscaler*>deleteAutoscalingV1CollectionNamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jb +x-kubernetes-group-version-kind?=group: autoscaling +kind: HorizontalPodAutoscaler +version: v1 +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ¬( +5/apis/rbac.authorization.k8s.io/v1/watch/rolebindingsò'æ +rbacAuthorization_v1wwatch individual changes to a list of RoleBinding. deprecated: use the 'watch' parameter with a list operation instead.*7watchRbacAuthorizationV1RoleBindingListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# +x-kubernetes-action  +watchlist +jd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: RoleBinding +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean É\ +"/apis/storage.k8s.io/v1/csidrivers¢\…& + +storage_v1'list or watch objects of kind CSIDriver*listStorageV1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J_ +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.storage.v1.CSIDriverList + +401 + + UnauthorizedRhttpsjW +x-kubernetes-group-version-kind42group: storage.k8s.io +kind: CSIDriver +version: v1 +j +x-kubernetes-actionlist +"ô + +storage_v1create a CSIDriver*createStorageV1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BC +A +?bodybody */ +-#/definitions/io.k8s.api.storage.v1.CSIDriverBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jê +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.storage.v1.CSIDriver +E +201> +< +Created1 +/ +-#/definitions/io.k8s.api.storage.v1.CSIDriver +F +202? 
+= +Accepted1 +/ +-#/definitions/io.k8s.api.storage.v1.CSIDriver + +401 + + UnauthorizedRhttpsjW +x-kubernetes-group-version-kind42group: storage.k8s.io +kind: CSIDriver +version: v1 +j +x-kubernetes-actionpost +*Ï, + +storage_v1delete collection of CSIDriver*"deleteStorageV1CollectionCSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj* +x-kubernetes-actiondeletecollection +jW +x-kubernetes-group-version-kind42version: v1 +group: storage.k8s.io +kind: CSIDriver +JO +MKIquery-If 'true', then the output is pretty printed."pretty2string å +"/api/v1/namespaces/{name}/finalize¾ ø +core_v1+replace finalize of the specified Namespace*replaceCoreV1NamespaceFinalize2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B@ +> +<bodybody *, +*#/definitions/io.k8s.api.core.v1.NamespaceJœ += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.core.v1.Namespace +B +201; +9 +Created. +, +*#/definitions/io.k8s.api.core.v1.Namespace + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jK +x-kubernetes-group-version-kind(&group: "" +kind: Namespace +version: v1 +Jž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J6 +42"0pathname of the Namespace"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string È +/apis/½º +apisget available API versions*getAPIVersions2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJm +R +200K +I +OKC +A +?#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList + +401 + + UnauthorizedRhttpsˆ* +U/apis/admissionregistration.k8s.io/v1beta1/watch/mutatingwebhookconfigurations/{name}®)× +admissionregistration_v1beta1Ãwatch changes to an object of kind MutatingWebhookConfiguration. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*=watchAdmissionregistrationV1beta1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj +x-kubernetes-actionwatch +j} +x-kubernetes-group-version-kindZXversion: v1beta1 +group: admissionregistration.k8s.io +kind: MutatingWebhookConfiguration +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JI +GE"Cpath(name of the MutatingWebhookConfiguration"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¯ +:/apis/authorization.k8s.io/v1beta1/selfsubjectrulesreviewsð +"â +authorization_v1beta1create a SelfSubjectRulesReview*0createAuthorizationV1beta1SelfSubjectRulesReview2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B[ +Y +Wbodybody *G +E#/definitions/io.k8s.api.authorization.v1beta1.SelfSubjectRulesReviewJ² +X +200Q +O +OKI +G +E#/definitions/io.k8s.api.authorization.v1beta1.SelfSubjectRulesReview +] +201V +T +CreatedI +G +E#/definitions/io.k8s.api.authorization.v1beta1.SelfSubjectRulesReview +^ +202W +U +AcceptedI +G +E#/definitions/io.k8s.api.authorization.v1beta1.SelfSubjectRulesReview + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jo +x-kubernetes-group-version-kindLJgroup: authorization.k8s.io +kind: SelfSubjectRulesReview +version: v1beta1 +Jž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JO +MKIquery-If 'true', then the output is pretty printed."pretty2string É( +8/apis/autoscaling/v2beta2/watch/horizontalpodautoscalersŒ(€ +autoscaling_v2beta2ƒwatch individual changes to a list of HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead.*BwatchAutoscalingV2beta2HorizontalPodAutoscalerListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# +x-kubernetes-action  +watchlist +jg +x-kubernetes-group-version-kindDBgroup: autoscaling +kind: HorizontalPodAutoscaler +version: v2beta2 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¡ ++/apis/flowcontrol.apiserver.k8s.io/v1beta1/ñî +flowcontrolApiserver_v1beta1get available resources**getFlowcontrolApiserverV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttpsñ+ +R/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/rolebindings/{name}š+® +rbacAuthorization_v1beta1read the specified RoleBinding*1readRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J_ + +401 + + Unauthorized +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBindingRhttpsji +x-kubernetes-group-version-kindFDversion: v1beta1 +group: rbac.authorization.k8s.io +kind: RoleBinding +j +x-kubernetes-actionget +ƒ +rbacAuthorization_v1beta1!replace the specified RoleBinding*4replaceRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG +E +Cbodybody *3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBindingBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jª +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding +I +201B +@ +Created5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +ji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: RoleBinding +version: v1beta1 +*à +rbacAuthorization_v1beta1delete a RoleBinding*3deleteRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +ji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: RoleBinding +version: v1beta1 +B­ +rbacAuthorization_v1beta1*partially update the specified RoleBinding*2patchRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J_ +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding + +401 + + UnauthorizedRhttpsji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: RoleBinding +version: v1beta1 +j +x-kubernetes-actionpatch +J8 +64"2pathname of the RoleBinding"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ™. +O/apis/admissionregistration.k8s.io/v1beta1/mutatingwebhookconfigurations/{name}Å-… +admissionregistration_v1beta1/read the specified MutatingWebhookConfiguration*deleteAdmissionregistrationV1beta1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +j} +x-kubernetes-group-version-kindZXversion: v1beta1 +group: admissionregistration.k8s.io +kind: MutatingWebhookConfiguration +B„ +admissionregistration_v1beta1;partially update the specified MutatingWebhookConfiguration*=patchAdmissionregistrationV1beta1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J +f +200_ +] +OKW +U +S#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration + +401 + + UnauthorizedRhttpsj} +x-kubernetes-group-version-kindZXgroup: admissionregistration.k8s.io +kind: MutatingWebhookConfiguration +version: v1beta1 +j +x-kubernetes-actionpatch +JI +GE"Cpath(name of the MutatingWebhookConfiguration"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string —+ +M/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings/{name}Å*š +rbacAuthorization_v1read the specified RoleBinding*,readRbacAuthorizationV1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JZ +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.rbac.v1.RoleBinding + +401 + + UnauthorizedRhttpsjd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: RoleBinding +version: v1 +j +x-kubernetes-actionget +å +rbacAuthorization_v1!replace the specified RoleBinding*/replaceRbacAuthorizationV1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB +@ +>bodybody *. +,#/definitions/io.k8s.api.rbac.v1.RoleBindingBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J  +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.rbac.v1.RoleBinding +D +201= +; +Created0 +. +,#/definitions/io.k8s.api.rbac.v1.RoleBinding + +401 + + UnauthorizedRhttpsjd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: RoleBinding +version: v1 +j +x-kubernetes-actionput +*´ +rbacAuthorization_v1delete a RoleBinding*.deleteRbacAuthorizationV1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsjd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: RoleBinding +version: v1 +j +x-kubernetes-action delete +B™ +rbacAuthorization_v1*partially update the specified RoleBinding*-patchRbacAuthorizationV1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JZ +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.rbac.v1.RoleBinding + +401 + + UnauthorizedRhttpsjd +x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io +kind: RoleBinding +version: v1 +j +x-kubernetes-actionpatch +J8 +64"2pathname of the RoleBinding"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ì* +K/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles/{name}œ*’ +rbacAuthorization_v1beta1read the specified Role**readRbacAuthorizationV1beta1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JX += +2006 +4 +OK. 
+, +*#/definitions/io.k8s.api.rbac.v1beta1.Role + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=kind: Role +version: v1beta1 +group: rbac.authorization.k8s.io +j +x-kubernetes-actionget +Ù +rbacAuthorization_v1beta1replace the specified Role*-replaceRbacAuthorizationV1beta1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B@ +> +<bodybody *, +*#/definitions/io.k8s.api.rbac.v1beta1.RoleBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jœ += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.rbac.v1beta1.Role +B +201; +9 +Created. +, +*#/definitions/io.k8s.api.rbac.v1beta1.Role + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jb +x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io +kind: Role +version: v1beta1 +*® +rbacAuthorization_v1beta1 delete a Role*,deleteRbacAuthorizationV1beta1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io +kind: Role +version: v1beta1 +j +x-kubernetes-action delete +B‘ +rbacAuthorization_v1beta1#partially update the specified Role*+patchRbacAuthorizationV1beta1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JX += +2006 +4 +OK. +, +*#/definitions/io.k8s.api.rbac.v1beta1.Role + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jb +x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io +kind: Role +version: v1beta1 +J1 +/-"+pathname of the Role"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string þ' +-/apis/storage.k8s.io/v1beta1/watch/csidriversÌ'À +storage_v1beta1uwatch individual changes to a list of CSIDriver. deprecated: use the 'watch' parameter with a list operation instead.* watchStorageV1beta1CSIDriverList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +j\ +x-kubernetes-group-version-kind97group: storage.k8s.io +kind: CSIDriver +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Õ+ +B/apis/rbac.authorization.k8s.io/v1beta1/clusterrolebindings/{name}Ž+À +rbacAuthorization_v1beta1%read the specified ClusterRoleBinding*.readRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jf + +401 + + Unauthorized +K +200D +B +OK< +: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBindingRhttpsjp +x-kubernetes-group-version-kindMKgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1beta1 +j +x-kubernetes-actionget +£ +rbacAuthorization_v1beta1(replace the specified ClusterRoleBinding*1replaceRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN +L +Jbodybody *: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBindingBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¸ +P +201I +G +Created< +: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding + +401 + + Unauthorized +K +200D +B +OK< +: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBindingRhttpsj +x-kubernetes-actionput +jp +x-kubernetes-group-version-kindMKversion: v1beta1 +group: rbac.authorization.k8s.io +kind: ClusterRoleBinding +*Î +rbacAuthorization_v1beta1delete a ClusterRoleBinding*0deleteRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +jp +x-kubernetes-group-version-kindMKgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1beta1 +B¿ +rbacAuthorization_v1beta11partially update the specified ClusterRoleBinding*/patchRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jf +K +200D +B +OK< +: +8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jp +x-kubernetes-group-version-kindMKgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1beta1 +J? +=;"9pathname of the ClusterRoleBinding"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ‰ +;/api/v1/namespaces/{namespace}/resourcequotas/{name}/statusÉ +core_v1*read status of the specified ResourceQuota*'readCoreV1NamespacedResourceQuotaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.core.v1.ResourceQuota + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jO +x-kubernetes-group-version-kind,*kind: ResourceQuota +version: v1 +group: "" +Ð +core_v1-replace status of the specified ResourceQuota**replaceCoreV1NamespacedResourceQuotaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD +B +@bodybody *0 +.#/definitions/io.k8s.api.core.v1.ResourceQuotaBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string J¤ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.core.v1.ResourceQuota +F +201? += +Created2 +0 +.#/definitions/io.k8s.api.core.v1.ResourceQuota + +401 + + UnauthorizedRhttpsjO +x-kubernetes-group-version-kind,*version: v1 +group: "" +kind: ResourceQuota +j +x-kubernetes-actionput +B€ +core_v16partially update status of the specified ResourceQuota*(patchCoreV1NamespacedResourceQuotaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J\ +A +200: +8 +OK2 +0 +.#/definitions/io.k8s.api.core.v1.ResourceQuota + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jO +x-kubernetes-group-version-kind,*group: "" +kind: ResourceQuota +version: v1 +J: +86"4pathname of the ResourceQuota"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string å +>/apis/apps/v1/namespaces/{namespace}/deployments/{name}/status¢÷ +apps_v1'read status of the specified Deployment*$readAppsV1NamespacedDeploymentStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JY +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.Deployment + +401 + + UnauthorizedRhttpsjN +x-kubernetes-group-version-kind+)group: apps +kind: Deployment +version: v1 +j +x-kubernetes-actionget +À +apps_v1*replace status of the specified Deployment*'replaceAppsV1NamespacedDeploymentStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA +? +=bodybody *- ++#/definitions/io.k8s.api.apps.v1.DeploymentBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jž +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.Deployment +C +201< +: +Created/ +- ++#/definitions/io.k8s.api.apps.v1.Deployment + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jN +x-kubernetes-group-version-kind+)group: apps +kind: Deployment +version: v1 +Bö +apps_v13partially update status of the specified Deployment*%patchAppsV1NamespacedDeploymentStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JY +> +2007 +5 +OK/ +- ++#/definitions/io.k8s.api.apps.v1.Deployment + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jN +x-kubernetes-group-version-kind+)kind: Deployment +version: v1 +group: apps +J7 +53"1pathname of the Deployment"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ¨) +J/apis/autoscaling/v1/watch/namespaces/{namespace}/horizontalpodautoscalersÙ(ë +autoscaling_v1ƒwatch individual changes to a list of HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead.*7watchAutoscalingV1NamespacedHorizontalPodAutoscalerList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsjb +x-kubernetes-group-version-kind?=group: autoscaling +kind: HorizontalPodAutoscaler +version: v1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean œ) +;/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}Ü(â + events_v1read the specified Event*readEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JV +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.events.v1.Event + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jR +x-kubernetes-group-version-kind/-group: events.k8s.io +kind: Event +version: v1 +¥ + events_v1replace the specified Event*replaceEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> +< +:bodybody ** +(#/definitions/io.k8s.api.events.v1.EventBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J˜ + +401 + + Unauthorized +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.events.v1.Event +@ +2019 +7 +Created, +* +(#/definitions/io.k8s.api.events.v1.EventRhttpsj +x-kubernetes-actionput +jR +x-kubernetes-group-version-kind/-group: events.k8s.io +kind: Event +version: v1 +* + events_v1delete an Event*deleteEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + Unauthorized +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj +x-kubernetes-action delete +jR +x-kubernetes-group-version-kind/-kind: Event +version: v1 +group: events.k8s.io +Bá + events_v1$partially update the specified Event*patchEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JV +; +2004 +2 +OK, +* +(#/definitions/io.k8s.api.events.v1.Event + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jR +x-kubernetes-group-version-kind/-group: events.k8s.io +kind: Event +version: v1 +J2 +0.",pathname of the Event"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ï] +:/apis/coordination.k8s.io/v1/namespaces/{namespace}/leases°]“& +coordination_v1#list or watch objects of kind Lease*!listCoordinationV1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
[Binary vendored file elided: the remainder of this hunk is a protobuf-serialized Kubernetes OpenAPI (swagger) document whose raw bytes do not survive as text. The fragments that do decode are the upstream endpoint definitions and their standard descriptions: core/v1 Pods (status, proxy) and ServiceAccounts; coordination.k8s.io/v1 Leases; policy/v1 and policy/v1beta1 PodDisruptionBudgets; storage.k8s.io VolumeAttachments and CSINodes; networking.k8s.io Ingresses and NetworkPolicies; apiregistration.k8s.io/v1 APIServices; certificates.k8s.io/v1beta1 CertificateSigningRequests; scheduling.k8s.io PriorityClasses; together with the common list/watch query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch) and their documentation.]
Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J +7 +2000 +. +OK( +& +$#/definitions/io.k8s.api.core.v1.Pod +< +2015 +3 +Created( +& +$#/definitions/io.k8s.api.core.v1.Pod + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jE +x-kubernetes-group-version-kind" version: v1 +group: "" +kind: Pod +BØ +core_v1,partially update status of the specified Pod*patchCoreV1NamespacedPodStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JR +7 +2000 +. +OK( +& +$#/definitions/io.k8s.api.core.v1.Pod + +401 + + UnauthorizedRhttpsjE +x-kubernetes-group-version-kind" kind: Pod +version: v1 +group: "" +j +x-kubernetes-actionpatch +J0 +.,"*pathname of the Pod"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ì' +/api/v1/watch/serviceaccountsÊ'¾ +core_v1zwatch individual changes to a list of ServiceAccount. deprecated: use the 'watch' parameter with a list operation instead.*-watchCoreV1ServiceAccountListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jP +x-kubernetes-group-version-kind-+kind: ServiceAccount +version: v1 +group: "" +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean µ] +1/apis/apps/v1/namespaces/{namespace}/statefulsetsÿ\„& +apps_v1)list or watch objects of kind StatefulSet*listAppsV1NamespacedStatefulSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J^ +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.apps.v1.StatefulSetList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jO +x-kubernetes-group-version-kind,*group: apps +kind: StatefulSet +version: v1 +"ð +apps_v1create a StatefulSet*!createAppsV1NamespacedStatefulSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB +@ +>bodybody *. +,#/definitions/io.k8s.api.apps.v1.StatefulSetBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jç + +401 + + Unauthorized +? +2008 +6 +OK0 +. +,#/definitions/io.k8s.api.apps.v1.StatefulSet +D +201= +; +Created0 +. +,#/definitions/io.k8s.api.apps.v1.StatefulSet +E +202> +< +Accepted0 +. +,#/definitions/io.k8s.api.apps.v1.StatefulSetRhttpsj +x-kubernetes-actionpost +jO +x-kubernetes-group-version-kind,*group: apps +kind: StatefulSet +version: v1 +*Ï, +apps_v1 delete collection of StatefulSet*+deleteAppsV1CollectionNamespacedStatefulSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsjO +x-kubernetes-group-version-kind,*kind: StatefulSet +version: v1 +group: apps +j* +x-kubernetes-actiondeletecollection +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string þ +/apis/autoscaling/v2beta2/ßÜ +autoscaling_v2beta2get available resources*!getAutoscalingV2beta2APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttps¨* +X/apis/rbac.authorization.k8s.io/v1beta1/watch/namespaces/{namespace}/rolebindings/{name}Ë)£ +rbacAuthorization_v1beta1²watch changes to an object of kind RoleBinding. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*2watchRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: RoleBinding +version: v1beta1 +j +x-kubernetes-actionwatch +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J8 +64"2pathname of the RoleBinding"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ³( +7/apis/storage.k8s.io/v1beta1/watch/csistoragecapacities÷'ë +storage_v1beta1~watch individual changes to a list of CSIStorageCapacity. deprecated: use the 'watch' parameter with a list operation instead.*9watchStorageV1beta1CSIStorageCapacityListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsje +x-kubernetes-group-version-kindB@group: storage.k8s.io +kind: CSIStorageCapacity +version: v1beta1 +j# +x-kubernetes-action  +watchlist +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean H +/logs/>< +logs*logFileListHandlerJ + +401 + + UnauthorizedRhttpsÿ+ +O/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name}«+® +storage_v1beta1%read the specified CSIStorageCapacity*.readStorageV1beta1NamespacedCSIStorageCapacity2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ji +N +200G +E +OK? += +;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacity + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +je +x-kubernetes-group-version-kindB@group: storage.k8s.io +kind: CSIStorageCapacity +version: v1beta1 +— +storage_v1beta1(replace the specified CSIStorageCapacity*1replaceStorageV1beta1NamespacedCSIStorageCapacity2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BQ +O +Mbodybody *= +;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacityBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¾ +N +200G +E +OK? 
+= +;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacity +S +201L +J +Created? += +;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacity + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +je +x-kubernetes-group-version-kindB@kind: CSIStorageCapacity +version: v1beta1 +group: storage.k8s.io +*¹ +storage_v1beta1delete a CSIStorageCapacity*0deleteStorageV1beta1NamespacedCSIStorageCapacity2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +je +x-kubernetes-group-version-kindB@version: v1beta1 +group: storage.k8s.io +kind: CSIStorageCapacity +B­ +storage_v1beta11partially update the specified CSIStorageCapacity*/patchStorageV1beta1NamespacedCSIStorageCapacity2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. 
It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Ji +N +200G +E +OK? += +;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacity + +401 + + UnauthorizedRhttpsje +x-kubernetes-group-version-kindB@version: v1beta1 +group: storage.k8s.io +kind: CSIStorageCapacity +j +x-kubernetes-actionpatch +J? +=;"9pathname of the CSIStorageCapacity"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ö] +./api/v1/namespaces/{namespace}/serviceaccounts£]Ž& +core_v1,list or watch objects of kind ServiceAccount*"listCoreV1NamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ja + +401 + + Unauthorized +F +200? += +OK7 +5 +3#/definitions/io.k8s.api.core.v1.ServiceAccountListRhttpsjP +x-kubernetes-group-version-kind-+version: v1 +group: "" +kind: ServiceAccount +j +x-kubernetes-actionlist +"ƒ +core_v1create a ServiceAccount*$createCoreV1NamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BE +C +Abodybody *1 +/#/definitions/io.k8s.api.core.v1.ServiceAccountBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jð +B +200; +9 +OK3 +1 +/#/definitions/io.k8s.api.core.v1.ServiceAccount +G +201@ +> +Created3 +1 +/#/definitions/io.k8s.api.core.v1.ServiceAccount +H +202A +? +Accepted3 +1 +/#/definitions/io.k8s.api.core.v1.ServiceAccount + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +jP +x-kubernetes-group-version-kind-+kind: ServiceAccount +version: v1 +group: "" +*Ö, +core_v1#delete collection of ServiceAccount*.deleteCoreV1CollectionNamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
[Unrenderable binary hunk elided: this span appears to be a protobuf-serialized Kubernetes OpenAPI v2 (swagger) document brought in by the vendored dependency upgrade, rendered here as mojibake. The legible fragments are only the standard upstream operation and parameter descriptions (list pagination via `continue`/`limit`, watch bookmarks, `deleteCollection` options such as `orphanDependents`/`propagationPolicy`, `dryRun`/`fieldManager` semantics) for core/v1 (Node proxy, PodTemplate, Secret, ServiceAccount, PersistentVolumeClaim), rbac.authorization.k8s.io/v1 Role, flowcontrol.apiserver.k8s.io/v1beta1 PriorityLevelConfiguration, policy/v1beta1 PodDisruptionBudget, discovery.k8s.io/v1 EndpointSlice, networking.k8s.io/v1 IngressClass, and apiregistration.k8s.io/v1beta1 APIService endpoints. No hand-written content from this patch is present in the elided span.]
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ê^ +A/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies„^«& + networking_v1+list or watch objects of kind NetworkPolicy*'listNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jf +K +200D +B +OK< +: +8#/definitions/io.k8s.api.networking.v1.NetworkPolicyList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +j^ +x-kubernetes-group-version-kind;9kind: NetworkPolicy +version: v1 +group: networking.k8s.io +"¯ + networking_v1create a NetworkPolicy*)createNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BJ +H +Fbodybody *6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicyBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jÿ +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicy +L +201E +C +Created8 +6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicy +M +202F +D +Accepted8 +6 +4#/definitions/io.k8s.api.networking.v1.NetworkPolicy + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpost +j^ +x-kubernetes-group-version-kind;9group: networking.k8s.io +kind: NetworkPolicy +version: v1 +*î, + networking_v1"delete collection of NetworkPolicy*3deleteNetworkingV1CollectionNamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj^ +x-kubernetes-group-version-kind;9group: networking.k8s.io +kind: NetworkPolicy +version: v1 +j* +x-kubernetes-actiondeletecollection +J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ¢ +'/api/v1/persistentvolumes/{name}/statusöƒ +core_v1-read status of the specified PersistentVolume* readCoreV1PersistentVolumeStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J_ +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.core.v1.PersistentVolume + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionget +jR +x-kubernetes-group-version-kind/-group: "" +kind: PersistentVolume +version: v1 +Ø +core_v10replace status of the specified PersistentVolume*#replaceCoreV1PersistentVolumeStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG +E +Cbodybody *3 +1#/definitions/io.k8s.api.core.v1.PersistentVolumeBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jª +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.core.v1.PersistentVolume +I +201B +@ +Created5 +3 +1#/definitions/io.k8s.api.core.v1.PersistentVolume + +401 + + UnauthorizedRhttpsjR +x-kubernetes-group-version-kind/-group: "" +kind: PersistentVolume +version: v1 +j +x-kubernetes-actionput +B‚ +core_v19partially update status of the specified PersistentVolume*!patchCoreV1PersistentVolumeStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean J_ +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.core.v1.PersistentVolume + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jR +x-kubernetes-group-version-kind/-kind: PersistentVolume +version: v1 +group: "" +J= +;9"7pathname of the PersistentVolume"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ð* +;/apis/rbac.authorization.k8s.io/v1beta1/clusterroles/{name}*¤ +rbacAuthorization_v1beta1read the specified ClusterRole*'readRbacAuthorizationV1beta1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J_ + +401 + + Unauthorized +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleRhttpsj +x-kubernetes-actionget +ji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: ClusterRole +version: v1beta1 +ù +rbacAuthorization_v1beta1!replace the specified ClusterRole**replaceRbacAuthorizationV1beta1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG +E +Cbodybody *3 +1#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jª +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.ClusterRole +I +201B +@ +Created5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.ClusterRole + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +ji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: ClusterRole +version: v1beta1 +*¹ +rbacAuthorization_v1beta1delete a ClusterRole*)deleteRbacAuthorizationV1beta1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» +L +200E +C +OK= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status +R +202K +I +Accepted= +; +9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +ji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: ClusterRole +version: v1beta1 +B£ +rbacAuthorization_v1beta1*partially update the specified ClusterRole*(patchRbacAuthorizationV1beta1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J_ +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.ClusterRole + +401 + + UnauthorizedRhttpsji +x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io +kind: ClusterRole +version: v1beta1 +j +x-kubernetes-actionpatch +J8 +64"2pathname of the ClusterRole"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Ó) +0/apis/storage.k8s.io/v1/volumeattachments/{name}ž)ˆ + +storage_v1#read the specified VolumeAttachment*readStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jb + +401 + + Unauthorized +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachmentRhttpsj^ +x-kubernetes-group-version-kind;9group: storage.k8s.io +kind: VolumeAttachment +version: v1 +j +x-kubernetes-actionget +ã + +storage_v1&replace the specified VolumeAttachment* replaceStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BJ +H +Fbodybody *6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachmentBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string J° +L +201E +C +Created8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment + +401 + + Unauthorized +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachmentRhttpsj +x-kubernetes-actionput +j^ +x-kubernetes-group-version-kind;9group: storage.k8s.io +kind: VolumeAttachment +version: v1 +* + +storage_v1delete a VolumeAttachment*deleteStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J± +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment +M +202F +D +Accepted8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment + +401 + + UnauthorizedRhttpsj +x-kubernetes-action delete +j^ +x-kubernetes-group-version-kind;9version: v1 +group: storage.k8s.io +kind: VolumeAttachment +B‡ + +storage_v1/partially update the specified VolumeAttachment*patchStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean Jb +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1.VolumeAttachment + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +j^ +x-kubernetes-group-version-kind;9group: storage.k8s.io +kind: VolumeAttachment +version: v1 +J= +;9"7pathname of the VolumeAttachment"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string §' +/api/v1/replicationcontrollers„'ø +core_v13list or watch objects of kind ReplicationController*/listCoreV1ReplicationControllerForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jh +M +200F +D +OK> +< +:#/definitions/io.k8s.api.core.v1.ReplicationControllerList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jW +x-kubernetes-group-version-kind42group: "" +kind: ReplicationController +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ö\ +%/apis/storage.k8s.io/v1beta1/csinodesÌ\‘& +storage_v1beta1%list or watch objects of kind CSINode*listStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jb + +401 + + Unauthorized +G +200@ +> +OK8 +6 +4#/definitions/io.k8s.api.storage.v1beta1.CSINodeListRhttpsj +x-kubernetes-actionlist +jZ +x-kubernetes-group-version-kind75group: storage.k8s.io +kind: CSINode +version: v1beta1 +"‰ +storage_v1beta1create a CSINode*createStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BF +D +Bbodybody *2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINodeBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jó + +401 + + Unauthorized +C +200< +: +OK4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINode +H +201A +? +Created4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINode +I +202B +@ +Accepted4 +2 +0#/definitions/io.k8s.api.storage.v1beta1.CSINodeRhttpsj +x-kubernetes-actionpost +jZ +x-kubernetes-group-version-kind75version: v1beta1 +group: storage.k8s.io +kind: CSINode +*Ø, +storage_v1beta1delete collection of CSINode*%deleteStorageV1beta1CollectionCSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT +R +Pbodybody*B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä +áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. 
Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ +ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ +…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string BÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Bž +›˜•querypTimeout for the list/watch call. 
[binary content not shown: protobuf-serialized Kubernetes OpenAPI v2 (swagger) specification embedded in a vendored file. It enumerates API paths and operation/parameter descriptions for the core v1 API and extension groups (apps/v1, batch/v1 and v1beta1, storage.k8s.io/v1, apiregistration.k8s.io, apiextensions.k8s.io, authorization.k8s.io, node.k8s.io, events.k8s.io/v1beta1, policy/v1, admissionregistration.k8s.io) and is not reproducible as text.]
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¢ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale +E +201> +< +Created1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale + +401 + + UnauthorizedRhttpsjP +x-kubernetes-group-version-kind-+group: autoscaling +kind: Scale +version: v1 +j +x-kubernetes-actionput +Bú +apps_v13partially update scale of the specified StatefulSet*%patchAppsV1NamespacedStatefulSetScale2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J[ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jP +x-kubernetes-group-version-kind-+version: v1 +group: autoscaling +kind: Scale +J2 +0.",pathname of the Scale"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string ' +"/apis/events.k8s.io/v1beta1/eventsé&Ý +events_v1beta1#list or watch objects of kind Event*&listEventsV1beta1EventForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J_ +D +200= +; +OK5 +3 +1#/definitions/io.k8s.api.events.v1beta1.EventList + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionlist +jW +x-kubernetes-group-version-kind42group: events.k8s.io +kind: Event +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean ‚ + /apis/networking.k8s.io/v1beta1/ÝÚ +networking_v1beta1get available resources* getNetworkingV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttpsê' +(/apis/storage.k8s.io/v1/watch/csidrivers½'± + +storage_v1uwatch individual changes to a list of CSIDriver. deprecated: use the 'watch' parameter with a list operation instead.*watchStorageV1CSIDriverList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jW +x-kubernetes-group-version-kind42group: storage.k8s.io +kind: CSIDriver +version: v1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‚) +8/apis/apiregistration.k8s.io/v1/watch/apiservices/{name}Å(€ +apiregistration_v1±watch changes to an object of kind APIService. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.* watchApiregistrationV1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +j` +x-kubernetes-group-version-kind=;group: apiregistration.k8s.io +version: v1 +kind: APIService +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J7 +53"1pathname of the APIService"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean þ +/apis/autoscaling/v2beta1/ßÜ +autoscaling_v2beta1get available resources*!getAutoscalingV2beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttpsˆ +"/apis/certificates.k8s.io/v1beta1/áÞ +certificates_v1beta1get available resources*"getCertificatesV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp +U +200N +L +OKF +D +B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList + +401 + + UnauthorizedRhttpsü) +S/apis/flowcontrol.apiserver.k8s.io/v1beta1/watch/prioritylevelconfigurations/{name}¤)Ï +flowcontrolApiserver_v1beta1Áwatch changes to an object of kind PriorityLevelConfiguration. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*:watchFlowcontrolApiserverV1beta1PriorityLevelConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk + +401 + + Unauthorized +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj{ +x-kubernetes-group-version-kindXVgroup: flowcontrol.apiserver.k8s.io +kind: PriorityLevelConfiguration +version: v1beta1 +j +x-kubernetes-actionwatch +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JG +EC"Apath&name of the PriorityLevelConfiguration"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean È) +H/apis/rbac.authorization.k8s.io/v1beta1/watch/clusterrolebindings/{name}û(® +rbacAuthorization_v1beta1¹watch changes to an object of kind ClusterRoleBinding. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*/watchRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? 
+=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jp +x-kubernetes-group-version-kindMKgroup: rbac.authorization.k8s.io +kind: ClusterRoleBinding +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J? +=;"9pathname of the ClusterRoleBinding"name*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean È) +8/api/v1/watch/namespaces/{namespace}/podtemplates/{name}‹)ã +core_v1²watch changes to an object of kind PodTemplate. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.* watchCoreV1NamespacedPodTemplate2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionwatch +jM +x-kubernetes-group-version-kind*(version: v1 +group: "" +kind: PodTemplate +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. + +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J8 +64"2pathname of the PodTemplate"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean é +=/apis/apps/v1/namespaces/{namespace}/replicasets/{name}/scale§ù +apps_v1&read scale of the specified ReplicaSet*#readAppsV1NamespacedReplicaSetScale2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J[ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale + +401 + + UnauthorizedRhttpsjP +x-kubernetes-group-version-kind-+group: autoscaling +kind: Scale +version: v1 +j +x-kubernetes-actionget +Æ +apps_v1)replace scale of the specified ReplicaSet*&replaceAppsV1NamespacedReplicaSetScale2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BC +A +?bodybody */ +-#/definitions/io.k8s.api.autoscaling.v1.ScaleBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– +“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¢ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale +E +201> +< +Created1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionput +jP +x-kubernetes-group-version-kind-+group: autoscaling +kind: Scale +version: v1 +Bø +apps_v12partially update scale of the specified ReplicaSet*$patchAppsV1NamespacedReplicaSetScale2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN +L +Jbodybody *: +8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž +›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ +¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ +ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J[ +@ +2009 +7 +OK1 +/ +-#/definitions/io.k8s.api.autoscaling.v1.Scale + +401 + + UnauthorizedRhttpsj +x-kubernetes-actionpatch +jP +x-kubernetes-group-version-kind-+group: autoscaling +kind: Scale +version: v1 +J2 +0.",pathname of the Scale"name*string˜J` +^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO +MKIquery-If 'true', then the output is pretty printed."pretty2string š( +4/apis/storage.k8s.io/v1beta1/watch/volumeattachmentsá'Õ +storage_v1beta1|watch individual changes to a list of VolumeAttachment. 
deprecated: use the 'watch' parameter with a list operation instead.*'watchStorageV1beta1VolumeAttachmentList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk +P +200I +G +OKA +? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent + +401 + + UnauthorizedRhttpsj# +x-kubernetes-action  +watchlist +jc +x-kubernetes-group-version-kind@>group: storage.k8s.io +kind: VolumeAttachment +version: v1beta1 +J‚ +ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï +ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". + +This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ +„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ +„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú + +÷ +ô +ñ +queryÔ +limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
+ +The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO +MKIquery-If 'true', then the output is pretty printed."pretty2string Jû +øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersion2string JÚ +×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. + +Defaults to unset"resourceVersionMatch2string Jž +›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± +®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J½÷0 +§ +"io.k8s.api.core.v1.NFSVolumeSource€"€Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.šserveršpath² +objectÊÞ +€ +pathx"kPath that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs² +string +à +readOnly¶"§ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs² +boolean +’ +server‡"zServer is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs² +string +È +$io.k8s.api.discovery.v1beta1.ForZoneŸ"LForZone provides information about which zones should consume this endpoint.šname² +objectÊ< +: +name2"%name represents the name of the zone.² +string +¦ +io.k8s.api.events.v1beta1.Event‚"¾Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.š eventTime² +objectÊÆ +| +reasonr"ereason is why the action was taken. It is human-readable. This field can have at most 128 characters.² +string +Ñ +relatedÅ +0#/definitions/io.k8s.api.core.v1.ObjectReference"related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object. +¹ +reportingController¡"“reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. 
[Binary payload omitted: a protobuf-encoded OpenAPI v2 (swagger) document of Kubernetes API definitions vendored with this change. Recoverable definition names include io.k8s.api.events.v1beta1.Event, io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus, io.k8s.api.core.v1.ComponentStatus, io.k8s.api.core.v1.NodeSelectorRequirement, io.k8s.api.core.v1.PersistentVolumeSpec, io.k8s.api.core.v1.Pod, io.k8s.api.core.v1.PodCondition, io.k8s.api.authentication.v1.TokenRequestStatus, io.k8s.api.autoscaling.v2beta2.MetricValueStatus, io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement, io.k8s.api.autoscaling.v2beta2.HPAScalingRules, io.k8s.api.core.v1.ScopedResourceSelectorRequirement, io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale, io.k8s.api.autoscaling.v1.CrossVersionObjectReference, io.k8s.api.apps.v1.StatefulSetList, io.k8s.api.storage.v1.CSIDriver, io.k8s.api.extensions.v1beta1.HTTPIngressPath, io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationList, io.k8s.api.core.v1.PersistentVolume, io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior, io.k8s.api.batch.v1beta1.CronJobList, io.k8s.api.core.v1.LimitRangeItem, io.k8s.api.storage.v1.StorageClass, io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService, io.k8s.api.apps.v1.StatefulSetSpec, io.k8s.api.authentication.v1.TokenReviewSpec, io.k8s.api.core.v1.ServicePort, io.k8s.api.node.v1beta1.RuntimeClass, io.k8s.api.rbac.v1.Role, io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig, io.k8s.api.certificates.v1.CertificateSigningRequest, io.k8s.api.core.v1.PodTemplateSpec, io.k8s.api.core.v1.NodeConfigStatus, io.k8s.apimachinery.pkg.apis.meta.v1.APIVersions, io.k8s.api.storage.v1beta1.VolumeError, io.k8s.api.apps.v1.DeploymentList, io.k8s.api.policy.v1.PodDisruptionBudgetStatus, io.k8s.api.core.v1.NodeAffinity, io.k8s.api.networking.v1.IngressList, io.k8s.api.core.v1.ContainerImage, io.k8s.api.core.v1.GitRepoVolumeSource, io.k8s.api.discovery.v1beta1.EndpointSliceList, and io.k8s.api.networking.v1.NetworkPolicy.]
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +stringúd +x-kubernetes-group-version-kindA?- kind: NetworkPolicy + version: v1 + group: networking.k8s.io + +à +!io.k8s.api.batch.v1.CronJobStatus"9CronJobStatus represents the current state of a cron job.² +objectÊÓ +¡ +active–"-A list of pointers to currently running jobs.² +arrayº4 +2 +0#/definitions/io.k8s.api.core.v1.ObjectReferenceú# +x-kubernetes-list-type atomic + +– +lastScheduleTime +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"FInformation when was the last time the job was successfully scheduled. +“ +lastSuccessfulTime} +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"BInformation when was the last time the job successfully completed. +Ö +io.k8s.api.batch.v1.JobList¶" JobList is a collection of jobs.šitems² +objectʨ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +[ +itemsR"items is the list of Jobs.² +arrayº) +' +%#/definitions/io.k8s.api.batch.v1.Job +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ë +metadata¾ +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataúR +x-kubernetes-group-version-kind/-- group: batch + kind: JobList + version: v1 + +ˆ + +/io.k8s.api.authorization.v1.SubjectAccessReviewÔ "PSubjectAccessReview checks whether or not a user or group can perform an action.šspec² +objectÊü +§ +statusœ +C#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewStatus"UStatus is filled in by the server and indicates whether the request is allowed or not +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +K +metadata? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta +… +spec} +A#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewSpec"8Spec holds information about the request being evaluatedúm +x-kubernetes-group-version-kindJH- group: authorization.k8s.io + kind: SubjectAccessReview + version: v1 + +‰ +1io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerÓ +"-configuration of a horizontal pod autoscaler.² +objectʪ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ð +metadataà +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +Ù +specÐ +C#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec"ˆbehaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. +| +statusr +E#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus")current information about the autoscaler.úh +x-kubernetes-group-version-kindEC- version: v1 + group: autoscaling + kind: HorizontalPodAutoscaler + +ƒ +0io.k8s.api.policy.v1beta1.FSGroupStrategyOptionsÎ"YFSGroupStrategyOptions defines the strategy type and options used to create the strategy.² +objectÊä +÷ +rangesì"©ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.² +arrayº3 +1 +/#/definitions/io.k8s.api.policy.v1beta1.IDRange +h +rule`"Srule is the strategy that will dictate what FSGroup is used in the SecurityContext.² +string +³ +'io.k8s.api.rbac.v1beta1.RoleBindingList‡ "¤RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.22.šitems² +objectÊÓ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +l +itemsc"Items is a list of RoleBindings² +arrayº5 +3 +1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +f +metadataZ +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard object's metadata.ús +x-kubernetes-group-version-kindPN- group: rbac.authorization.k8s.io + kind: RoleBindingList + version: v1beta1 + +˜ + +Wio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition¼ "KCustomResourceColumnDefinition specifies a column for server side printing.šnameštypešjsonPath² +objectÊÇ + +priority€int32"êpriority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0.² +integer +¯ +type¦"˜type is an OpenAPI type definition for this column. 
See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.² +string +W + descriptionH";description is a human readable description of this column.² +string +¸ +format­"Ÿformat is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.² +string +ª +jsonPath"jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against each custom resource to produce the value for this column.² +string +B +name:"-name is a human readable name for the column.² +string +è +=io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery¦"wGroupVersion contains the "group/version" and "version" string of a version. It is made a struct to keep extensibility.š groupVersionšversion² +objectÊ… +i + groupVersionY"LgroupVersion specifies the API group and version in the form "group/version"² +string +— +version‹"~version specifies the version in the form of "version". This is to save the clients the trouble of splitting the GroupVersion.² +string +Æ +)io.k8s.api.authentication.v1.TokenRequest˜":TokenRequest requests a token for a given service account.šspec² +objectÊÜ +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +K +metadata? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta +E +spec= +;#/definitions/io.k8s.api.authentication.v1.TokenRequestSpec +I +status? +=#/definitions/io.k8s.api.authentication.v1.TokenRequestStatus +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +stringúg +x-kubernetes-group-version-kindDB- group: authentication.k8s.io + kind: TokenRequest + version: v1 + +– +3io.k8s.api.core.v1.PhotonPersistentDiskVolumeSourceÞ"8Represents a Photon Controller persistent disk resource.špdID² +objectÊŽ +I +pdIDA"4ID that identifies Photon Controller persistent disk² +string +À +fsTypeµ"§Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.² +string +£ +:io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerListä"KHorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.šitems² +objectÊŒ +˜ +itemsŽ"7items is the list of horizontal pod autoscaler objects.² +arrayºH +F +D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +r +metadataf +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"'metadata is the standard list metadata. +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +stringúq +x-kubernetes-group-version-kindNL- group: autoscaling + kind: HorizontalPodAutoscalerList + version: v2beta2 + +ž +8io.k8s.api.certificates.v1.CertificateSigningRequestSpecá"?CertificateSigningRequestSpec contains the certificate request.šrequestš +signerName² +objectÊú +ò +requestæbyte"¬request contains an x509 certificate signing request encoded in a "CERTIFICATE REQUEST" PEM block. When serialized as JSON or YAML, the data is additionally base64-encoded.² +stringú# +x-kubernetes-list-type atomic + + + +signerName"ósignerName indicates the requested signer, and is a qualified name. + +List/watch requests for CertificateSigningRequests can filter on this field using a "spec.signerName=NAME" fieldSelector. + +Well-known Kubernetes signers are: + 1. "kubernetes.io/kube-apiserver-client": issues client certificates that can be used to authenticate to kube-apiserver. + Requests for this signer are never auto-approved by kube-controller-manager, can be issued by the "csrsigning" controller in kube-controller-manager. + 2. "kubernetes.io/kube-apiserver-client-kubelet": issues client certificates that kubelets use to authenticate to kube-apiserver. + Requests for this signer can be auto-approved by the "csrapproving" controller in kube-controller-manager, and can be issued by the "csrsigning" controller in kube-controller-manager. + 3. "kubernetes.io/kubelet-serving" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely. + Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the "csrsigning" controller in kube-controller-manager. + +More details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers + +Custom signerNames can also be specified. The signer defines: + 1. Trust distribution: how trust (CA bundles) are distributed. + 2. Permitted subjects: and behavior when a disallowed subject is requested. + 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested. + 4. Required, permitted, or forbidden key usages / extended key usages. + 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin. + 6. Whether or not requests for CA certificates are allowed.² +string +™ +uid‘"ƒuid contains the uid of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.² +string +„ +usagesù"¶usages specifies a set of key usages requested in the issued certificate. + +Requests for TLS client certificates typically request: "digital signature", "key encipherment", "client auth". + +Requests for TLS serving certificates typically request: "key encipherment", "digital signature", "server auth". 
+ +Valid values are: + "signing", "digital signature", "content commitment", + "key encipherment", "key agreement", "data encipherment", + "cert sign", "crl sign", "encipher only", "decipher only", "any", + "server auth", "client auth", + "code signing", "email protection", "s/mime", + "ipsec end system", "ipsec tunnel", "ipsec user", + "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"² +arrayº + ² +stringú# +x-kubernetes-list-type atomic + +¤ +username—"‰username contains the name of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.² +string +Å +extra»"Žextra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.ª +² +arrayº + ² +string² +object +Ý +groupsÒ"groups contains group membership of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.² +arrayº + ² +stringú# +x-kubernetes-list-type atomic + +â +'io.k8s.api.core.v1.PersistentVolumeList¶ "9PersistentVolumeList is a list of PersistentVolume items.šitems² +objectÊ… +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +³ +items©"eList of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes² +arrayº5 +3 +1#/definitions/io.k8s.api.core.v1.PersistentVolume +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ï +metadata +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsú\ +x-kubernetes-group-version-kind97- group: "" + kind: PersistentVolumeList + version: v1 + +¢ +Kio.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceConditionÒ"NAPIServiceCondition describes the state of an APIService at a particular pointštypešstatus² +objectÊã +X +messageM"@Human-readable message indicating details about last transition.² +string +^ +reasonT"GUnique, one-word, CamelCase reason for the condition's last transition.² +string +Z +statusP"CStatus is the status of the condition. Can be True, False, Unknown.² +string +7 +type/""Type is the type of the condition.² +string +‘ +lastTransitionTime{ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"@Last time the condition transitioned from one status to another. +° +1io.k8s.api.authorization.v1.NonResourceAttributesú"{NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface² +objectÊo +8 +path0"#Path is the URL path of the request² +string +3 +verb+"Verb is the standard HTTP verb² +string +Î +7io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec’"¦SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set² +objectÊÚ +ª +nonResourceAttributes +?#/definitions/io.k8s.api.authorization.v1.NonResourceAttributes"MNonResourceAttributes describes information for a non-resource access request +ª +resourceAttributes“ +<#/definitions/io.k8s.api.authorization.v1.ResourceAttributes"SResourceAuthorizationAttributes describes information for a resource access request +ÿ +!io.k8s.api.storage.v1.CSINodeListÙ"/CSINodeList is a collection of CSINode objects.šitems² +objectʯ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +c +itemsZ"items is the list of CSINode² +arrayº/ +- ++#/definitions/io.k8s.api.storage.v1.CSINode +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ê +metadata½ +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"~Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataú_ +x-kubernetes-group-version-kind<:- group: storage.k8s.io + kind: CSINodeList + version: v1 + +Á +Vio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceConversionæ +"MCustomResourceConversion describes how to convert different versions of a CR.šstrategy² +objectÊý +œ +conversionReviewVersionsÿ"âconversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail. Defaults to `["v1beta1"]`.² +arrayº + ² +string +Ñ +strategyÄ"¶strategy specifies how custom resources are converted between versions. Allowed values are: - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set.² +string +‡ +webhookClientConfigï +_#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig"‹webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. Required when `strategy` is set to `Webhook`. 
+ð +"io.k8s.api.core.v1.PodAffinityTermÉ "ßDefines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is runningš topologyKey² +objectÊÊ + +Ž + labelSelector} +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"9A label query over a set of resources, in this case pods. +õ +namespaceSelectorß +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"šA label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. +Å + +namespaces¶"™namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"² +arrayº + ² +string +ö + topologyKeyæ"ØThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.² +string +¬ +1io.k8s.api.networking.v1.NetworkPolicyIngressRuleö"´NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.² +objectÊ° +Ù +fromÐ"„List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.² +arrayº< +: +8#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer +Ñ +portsÇ"ûList of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.² +arrayº< +: +8#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort +Ý + +&io.k8s.api.core.v1.ScaleIOVolumeSource² +":ScaleIOVolumeSource represents a persistent ScaleIO volumešgatewayšsystemš secretRef² +objectÊÈ +Ð + secretRef +5#/definitions/io.k8s.api.core.v1.LocalObjectReference"ˆSecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. +’ + storageMode‚"uIndicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.² +string +¡ +fsType–"ˆFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Default is "xfs".² +string +D +gateway9",The host address of the ScaleIO API Gateway.² +string +` + +sslEnabledR"DFlag to enable/disable SSL communication with Gateway, default false² +boolean +[ + storagePoolL"?The ScaleIO Storage Pool associated with the protection domain.² +string +O +systemE"8The name of the storage system as configured in ScaleIO.² +string + + +volumeNames"fThe name of a volume already created in the ScaleIO system that is associated with this volume source.² +string +f +protectionDomainR"EThe name of the ScaleIO Protection Domain for the configured storage.² +string +x +readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² +boolean + +"io.k8s.api.core.v1.SecurityContextè"ÜSecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.² +objectÊú +¥ + +privileged–"‡Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.² +boolean +Š + procMountü"îprocMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.² +string +½ + runAsNonRoot¬"Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.² +boolean +° + runAsUser¢int64"ŒThe UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.² +integer +ô +seLinuxOptionsá +/#/definitions/io.k8s.api.core.v1.SELinuxOptions"­The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +ç +seccompProfileÔ +/#/definitions/io.k8s.api.core.v1.SeccompProfile" The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. +Õ +allowPrivilegeEscalation¸"©AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN² +boolean +Æ + capabilitiesµ +-#/definitions/io.k8s.api.core.v1.Capabilities"ƒThe capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. +Ä +windowsOptions± +>#/definitions/io.k8s.api.core.v1.WindowsSecurityContextOptions"îThe Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +q +readOnlyRootFilesystemW"IWhether this container has a read-only root filesystem. Default is false.² +boolean +“ + +runAsGroup„int64"îThe GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.² +integer +† + io.k8s.api.networking.v1.IPBlocká"ãIPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.šcidr² +objectÊå +v +cidrn"aCIDR is a string representing the IP Block Valid examples are "192.168.1.1/24" or "2001:db9::/64"² +string +ê +exceptß"ÂExcept is a slice of CIDRs that should not be included within an IP Block Valid examples are "192.168.1.1/24" or "2001:db9::/64" Except values will be rejected if they are outside the CIDR range² +arrayº + ² +string +· + io.k8s.api.coordination.v1.Lease’ "Lease defines a lease concept.² +objectÊ‚ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +µ +metadata¨ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"gMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +Ê +specÁ +2#/definitions/io.k8s.api.coordination.v1.LeaseSpec"ŠSpecification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-statusú^ +x-kubernetes-group-version-kind;9- group: coordination.k8s.io + kind: Lease + version: v1 + +“ +"io.k8s.api.core.v1.NamespaceStatusì"GNamespaceStatus is information about the current status of a Namespace.² +objectÊ” +õ + +conditionsæ"LRepresents the latest available observations of a namespace's current state.² +arrayº7 +5 +3#/definitions/io.k8s.api.core.v1.NamespaceConditionú' +x-kubernetes-patch-merge-keytype +ú' +x-kubernetes-patch-strategymerge + +™ +phase"Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/² +string +‘ +io.k8s.api.batch.v1.JobStatusï"0JobStatus represents the current state of a Job.² +objectÊ® +¶ + +conditions§"ëThe latest available observations of an object's current state. When a Job fails, one of the conditions will have type "Failed" and status true. When a Job is suspended, one of the conditions will have type "Suspended" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type "Complete" and status true. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/² +arrayº2 +0 +.#/definitions/io.k8s.api.batch.v1.JobConditionú# +x-kubernetes-list-type atomic +ú' +x-kubernetes-patch-merge-keytype +ú' +x-kubernetes-patch-strategymerge + +M +failedCint32".The number of pods which reached phase Failed.² +integer +å + startTime× +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"›Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC. +S + succeededFint32"1The number of pods which reached phase Succeeded.² +integer +C +active9int32"$The number of actively running pods.² +integer +Ü +completedIndexesÇ"¹CompletedIndexes holds the completed indexes when .spec.completionMode = "Indexed" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as "1,3-5,7".² +string + +completionTime¯ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"óRepresents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully. +à +4io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRuleŠ"·NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.šverbsšnonResourceURLs² +objectʧ +Ð +nonResourceURLs¼"ü`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example: + - "/healthz" is legal + - "/hea*" is illegal + - "/hea" is legal but matches nothing + - "/hea/*" also matches nothing + - "/healthz/*" matches all per-component health checks. +"*" matches all non-resource urls. if it is present, it must be the only entry. Required.² +arrayº + ² +stringú +x-kubernetes-list-typeset + +Ñ +verbsÇ"‡`verbs` is a list of matching verbs and may not be empty. "*" matches all verbs. If it is present, it must be the only entry. Required.² +arrayº + ² +stringú +x-kubernetes-list-typeset + +û + +[io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionNames› +"XCustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinitionšpluralškind² +objectÊ¢ +ô + +categorieså"Ècategories is a list of grouped resources this custom resource belongs to (e.g. 'all'). This is published in API discovery documents, and used by clients to support invocations like `kubectl get all`.² +arrayº + ² +string +¾ +kindµ"§kind is the serialized kind of the resource. It is normally CamelCase and singular. Custom resource instances will use this value as the `kind` attribute in API calls.² +string +q +listKinde"XlistKind is the serialized kind of the list for this resource. Defaults to "`kind`List".² +string + +plural‚"ôplural is the plural name of the resource to serve. 
The custom resources are served under `/apis///.../`. Must match the name of the CustomResourceDefinition (in the form `.`). Must be all lowercase.² +string +á + +shortNamesÒ"µshortNames are short names for the resource, exposed in API discovery documents, and used by clients to support invocations like `kubectl get `. It must be all lowercase.² +arrayº + ² +string +€ +singulart"gsingular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.² +string +Ì +%io.k8s.api.apps.v1.DaemonSetCondition¢"IDaemonSetCondition describes the state of a DaemonSet at a certain point.štypešstatus² +objectʸ +‘ +lastTransitionTime{ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"@Last time the condition transitioned from one status to another. +Y +messageN"AA human readable message indicating details about the transition.² +string +F +reason<"/The reason for the condition's last transition.² +string +L +statusB"5Status of the condition, one of True, False, Unknown.² +string +1 +type)"Type of DaemonSet condition.² +string +ž +io.k8s.api.batch.v1.Job‚ "1Job represents the configuration of a single job.² +objectÊï +Ò +metadataÅ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +Õ +specÌ +)#/definitions/io.k8s.api.batch.v1.JobSpec"žSpecification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + +status· ++#/definitions/io.k8s.api.batch.v1.JobStatus"‡Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +stringúN +x-kubernetes-group-version-kind+)- group: batch + kind: Job + version: v1 + +Òg +io.k8s.api.core.v1.ServiceSpec¯g"FServiceSpec describes the attributes that a user creates on a service.² +objectÊØf +× +loadBalancerClassÁ"³loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. 
This field will be wiped when a service is updated to a non 'LoadBalancer' type.² +string +ã +loadBalancerIPÐ"ÂOnly applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.² +string +“ +loadBalancerSourceRangesö"ÙIf specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/² +arrayº + ² +string +« +selectorž"€Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/ª + ² +string² +object +¥ +sessionAffinity‘"ƒSupports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies² +string +Õ + topologyKeysÄ"§topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value "*" may be used to mean "any topology". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version.² +arrayº + ² +string +Á +externalTrafficPolicy§"™externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. "Local" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. "Cluster" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.² +string +ë + +clusterIPsÜ "™ ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). 
Valid values are "None", empty string (""), or a valid IP address. Setting this to "None" makes a "headless service" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value. + +Unless the "IPv6DualStack" feature gate is enabled, this field is limited to one value, which must be the same as the clusterIP field. If the feature gate is enabled, this field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies² +arrayº + ² +stringú# +x-kubernetes-list-type atomic + +• +healthCheckNodePortýint32"çhealthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).² +integer +ö +internalTrafficPolicyÜ"ÎInternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. "Cluster" routes internal traffic to a Service to all endpoints. "Local" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is "Cluster".² +string +ˆ +ipFamilyPolicyõ"çIPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the "IPv6DualStack" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be "SingleStack" (a single IP family), "PreferDualStack" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or "RequireDualStack" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.² +string +¬ +publishNotReadyAddresses"€publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered "ready" even if the Pods themselves are not. 
[Non-text diff content omitted: a protobuf-serialized Kubernetes OpenAPI v2 (swagger) schema blob, covering definitions such as io.k8s.api.core.v1.ServiceSpec, io.k8s.apimachinery.pkg.api.resource.Quantity, networking, autoscaling, flowcontrol, storage, and related API groups. This binary data appears to be carried in as vendored content by the dependency upgrade in this patch and is not meaningfully reviewable as text.]
+O +driverE"8Driver is the name of the driver to use for this volume.² +string +¥ +?io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatusá"VPriorityLevelConfigurationStatus represents the current state of a "request-priority".² +objectÊú +÷ + +conditionsè"8`conditions` is the current state of "request-priority".² +arrayºT +R +P#/definitions/io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationConditionú' +x-kubernetes-list-map-keys - type +ú +x-kubernetes-list-typemap + +Œ +%io.k8s.api.rbac.v1.ClusterRoleBindingâ +"ŸClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.šroleRef² +objectʳ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +h +metadata\ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object's metadata. + +roleRef¶ +(#/definitions/io.k8s.api.rbac.v1.RoleRef"‰RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. +„ +subjectsx"=Subjects holds references to the objects the role applies to.² +arrayº, +* +(#/definitions/io.k8s.api.rbac.v1.Subjectúq +x-kubernetes-group-version-kindNL- group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + version: v1 + +ý + +io.k8s.api.rbac.v1.PolicyRuleÛ +"¡PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.šverbs² +objectÊ  +ó + apiGroupså"ÈAPIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.² +arrayº + ² +string +¼ +nonResourceURLs¨"‹NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both.² +arrayº + ² +string +¨ + resourceNames–"zResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.² +arrayº + ² +string +† + resourcesy"]Resources is a list of resources this rule applies to. ResourceAll represents all resources.² +arrayº + ² +string +´ +verbsª"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.² +arrayº + ² +string +à +7io.k8s.api.authorization.v1beta1.SelfSubjectRulesReview¤"ˆSelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. 
The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.šspec² +objectÊ‹ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +K +metadata? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + +spec† +I#/definitions/io.k8s.api.authorization.v1beta1.SelfSubjectRulesReviewSpec"9Spec holds information about the request being evaluated. +¬ +status¡ +G#/definitions/io.k8s.api.authorization.v1beta1.SubjectRulesReviewStatus"VStatus is filled in by the server and indicates the set of actions a user can perform.úu +x-kubernetes-group-version-kindRP- group: authorization.k8s.io + kind: SelfSubjectRulesReview + version: v1beta1 + + +io.k8s.api.autoscaling.v1.Scalež "2Scale represents a scaling request for a resource.² +objectÊ‚ + +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ñ +metadataÄ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"‚Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. +Ñ +specÈ +1#/definitions/io.k8s.api.autoscaling.v1.ScaleSpec"’defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. +Ú +statusÏ +3#/definitions/io.k8s.api.autoscaling.v1.ScaleStatus"—current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +stringúV +x-kubernetes-group-version-kind31- group: autoscaling + kind: Scale + version: v1 + +è +Iio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ServiceReferenceš";ServiceReference holds a reference to Service.legacy.k8s.ioš namespacešname² +objectÊ» +> +name6")name is the name of the service. Required² +string +M + namespace@"3namespace is the namespace of the service. 
Required² +string +Y +pathQ"Dpath is an optional URL path at which the webhook will be contacted.² +string +Î +portÅint32"¯port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility.² +integer +Ò +Cio.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReferenceŠ";ServiceReference holds a reference to Service.legacy.k8s.io² +objectʾ +4 +name,"Name is the name of the service² +string +C + namespace6")Namespace is the namespace of the service² +string +À +port·int32"¡If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).² +integer +¾ +-io.k8s.api.authentication.v1.TokenRequestSpecŒ "HTokenRequestSpec contains client provided parameters of a token request.š audiences² +objectʧ +• + audiences‡"êAudiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.² +arrayº + ² +string +Ž +boundObjectRefû +?#/definitions/io.k8s.api.authentication.v1.BoundObjectReference"·BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation. +û +expirationSecondsåint64"ÏExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.² +integer +æ +8io.k8s.api.networking.v1.IngressClassParametersReference©"}IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.škindšname² +objectÊ + + namespace´"¦Namespace is the namespace of the resource being referenced. This field is required when scope is set to "Namespace" and must be unset when scope is set to "Cluster".² +string +Þ +scopeÔ"ÆScope represents if this refers to a cluster or namespace scoped resource. This may be set to "Cluster" (default) or "Namespace". Field can be enabled with IngressClassNamespacedParams feature gate.² +string +Ú +apiGroupÍ"¿APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.² +string +C +kind;".Kind is the type of resource being referenced.² +string +C +name;".Name is the name of resource being referenced.² +string + +io.k8s.api.core.v1.Probeä"€Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.² +objectÊÒ +‘ +execˆ ++#/definitions/io.k8s.api.core.v1.ExecAction"YOne and only one of the following should be specified. Exec specifies the action to take. +§ +failureThreshold’int32"}Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.² +integer +k +httpGet` +.#/definitions/io.k8s.api.core.v1.HTTPGetAction".HTTPGet specifies the http request to perform. 
+} + periodSecondslint32"WHow often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.² +integer +â +timeoutSecondsÏint32"¹Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes² +integer +â +initialDelaySecondsÊint32"´Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes² +integer +Î +successThreshold¹int32"£Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.² +integer +‘ + tcpSocketƒ +0#/definitions/io.k8s.api.core.v1.TCPSocketAction"OTCPSocket specifies an action involving a TCP port. TCP hooks not yet supported +ö +terminationGracePeriodSecondsÔint64"¾Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.² +integer +Ü +,io.k8s.api.storage.v1.VolumeAttachmentStatus«"CVolumeAttachmentStatus is the status of a VolumeAttachment request.šattached² +objectÊÌ +æ + attachErrorÖ +/#/definitions/io.k8s.api.storage.v1.VolumeError"¢The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. +± +attached¤"•Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.² +boolean +à +attachmentMetadata¬"ŽUpon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.ª + ² +string² +object +æ + detachErrorÖ +/#/definitions/io.k8s.api.storage.v1.VolumeError"¢The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher. +‰ +'io.k8s.api.core.v1.HostPathVolumeSourceÝ"vRepresents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.špath² +objectÊÏ +Å +path¼"®Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath² +string +„ +type|"oType for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath² +string +Æ +$io.k8s.api.core.v1.ISCSIVolumeSource "’Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. 
ISCSI volumes support ownership management and SELinux relabeling.š targetPortalšiqnšlun² +objectÊÞ +n +iscsiInterface\"OiSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).² +string +4 +lun-int32"iSCSI Target Lun number.² +integer + + secretRefr +5#/definitions/io.k8s.api.core.v1.LocalObjectReference"9CHAP Secret for iSCSI target and initiator authentication +0 +iqn)"Target iSCSI Qualified Name.² +string +³ +portals§"ŠiSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).² +arrayº + ² +string +k +readOnly_"QReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.² +boolean +¤ + targetPortal“"…iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).² +string +V +chapAuthDiscoveryA"3whether support iSCSI Discovery CHAP authentication² +boolean +R +chapAuthSession?"1whether support iSCSI Session CHAP authentication² +boolean +µ +fsTypeª"œFilesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi² +string +Ô + initiatorNameÂ"´Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.² +string +– +'io.k8s.api.core.v1.PortworxVolumeSourceê";PortworxVolumeSource represents a Portworx volume resource.švolumeID² +objectÊ“ +Í +fsTypeÂ"´FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.² +string +x +readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² +boolean +G +volumeID;".VolumeID uniquely identifies a Portworx volume² +string +¬ +@io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationç +"‚MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.² +objectÊÒ +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ñ +metadataÄ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"‚Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. +þ +webhooksñ"IWebhooks is a list of webhooks and the affected resources and operations.² +arrayºE +C +A#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookú' +x-kubernetes-patch-strategymerge +ú' +x-kubernetes-patch-merge-keyname + +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +stringú~ +x-kubernetes-group-version-kind[Y- group: admissionregistration.k8s.io + kind: MutatingWebhookConfiguration + version: v1 + +¼ +)io.k8s.api.coordination.v1beta1.LeaseSpecŽ"(LeaseSpec is a specification of a Lease.² +objectÊÕ +e +holderIdentityS"FholderIdentity contains the identity of the holder of a current lease.² +string +È +leaseDurationSeconds¯int32"™leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.² +integer +r +leaseTransitions^int32"IleaseTransitions is the number of transitions of a lease between holders.² +integer +  + renewTime’ +<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime"RrenewTime is a time when the current holder of a lease has last updated the lease. +‰ + acquireTimez +<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime":acquireTime is a time when the current lease was acquired. +¸ +,io.k8s.api.apps.v1.StatefulSetUpdateStrategy‡"ÏStatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.² +objectʦ +¹ + rollingUpdate§ +A#/definitions/io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy"bRollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. +h +type`"SType indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.² +string +² +=io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpecð"¾This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.šrequest² +objectÊ– +¤ +usages™"ÖallowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +Valid values are: + "signing", + "digital signature", + "content commitment", + "key encipherment", + "key agreement", + "data encipherment", + "cert sign", + "crl sign", + "encipher only", + "decipher only", + "any", + "server auth", + "client auth", + "code signing", + "email protection", + "s/mime", + "ipsec end system", + "ipsec tunnel", + "ipsec user", + "timestamping", + "ocsp signing", + "microsoft sgc", + "netscape sgc"² +arrayº + ² +stringú# +x-kubernetes-list-type atomic + +d +usernameX"KInformation about the requesting user. See user.Info interface for details.² +string +† +extra}"QExtra information about the requesting user. See user.Info interface for details.ª +² +arrayº + ² +string² +object +ž +groups“"QGroup information about the requesting user. See user.Info interface for details.² +arrayº + ² +stringú# +x-kubernetes-list-type atomic + +c +requestXbyte"Base64-encoded PKCS#10 CSR data² +stringú# +x-kubernetes-list-type atomic + +’ + +signerNameƒ"õRequested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted: + 1. If it's a kubelet client certificate, it is assigned + "kubernetes.io/kube-apiserver-client-kubelet". + 2. If it's a kubelet serving certificate, it is assigned + "kubernetes.io/kubelet-serving". + 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown". +Distribution of trust for signers happens out of band. 
You can select on this field using `spec.signerName`.² +string +c +uid\"OUID information about the requesting user. See user.Info interface for details.² +string +õ +#io.k8s.api.core.v1.VolumeProjectionÍ"HProjection that may be projected along with other supported volume types² +objectÊô +’ +serviceAccountToken{ +>#/definitions/io.k8s.api.core.v1.ServiceAccountTokenProjection"9information about the serviceAccountToken data to project +t + configMapg +4#/definitions/io.k8s.api.core.v1.ConfigMapProjection"/information about the configMap data to project +z + downwardAPIk +6#/definitions/io.k8s.api.core.v1.DownwardAPIProjection"1information about the downwardAPI data to project +k +secreta +1#/definitions/io.k8s.api.core.v1.SecretProjection",information about the secret data to project +¹ +Eio.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfigurationï "ÿMutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead.² +objectÊ× +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ñ +metadataÄ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"‚Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. +ƒ +webhooksö"IWebhooks is a list of webhooks and the affected resources and operations.² +arrayºJ +H +F#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookú' +x-kubernetes-patch-merge-keyname +ú' +x-kubernetes-patch-strategymerge +úƒ +x-kubernetes-group-version-kind`^- group: admissionregistration.k8s.io + kind: MutatingWebhookConfiguration + version: v1beta1 + +È +Gio.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationü "ˆValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead.² +objectÊÙ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ñ +metadataÄ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"‚Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. +… +webhooksø"IWebhooks is a list of webhooks and the affected resources and operations.² +arrayºL +J +H#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookú' +x-kubernetes-patch-strategymerge +ú' +x-kubernetes-patch-merge-keyname +ú… +x-kubernetes-group-version-kindb`- version: v1beta1 + group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration + +© ++io.k8s.api.storage.v1beta1.StorageClassListù"4StorageClassList is a collection of storage classes.šitems² +objectÊÀ +Ê +metadata½ +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"~Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +t +itemsk"#Items is the list of StorageClasses² +arrayº9 +7 +5#/definitions/io.k8s.api.storage.v1beta1.StorageClass +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +stringúi +x-kubernetes-group-version-kindFD- group: storage.k8s.io + kind: StorageClassList + version: v1beta1 + +´ + +Bio.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceí "[APIService represents a server for a particular GroupVersion. Name must be "version.group".² +objectÊ“ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +K +metadata? 
+=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta +§ +specž +T#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceSpec"FSpec contains information for locating and communicating with a server +œ +status‘ +V#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceStatus"7Status contains derived information about an API serverúk +x-kubernetes-group-version-kindHF- group: apiregistration.k8s.io + kind: APIService + version: v1beta1 + +± +3io.k8s.api.autoscaling.v2beta2.ExternalMetricSourceù"ÐExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).šmetricštarget² +objectÊ… +ƒ +metricy +=#/definitions/io.k8s.api.autoscaling.v2beta2.MetricIdentifier"8metric identifies the target metric by name and selector +} +targets +9#/definitions/io.k8s.api.autoscaling.v2beta2.MetricTarget"6target specifies the target value for the given metric +Ž + +*io.k8s.api.networking.v1.NetworkPolicyPeerß "lNetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed² +objectÊâ +« +ipBlockŸ +.#/definitions/io.k8s.api.networking.v1.IPBlock"mIPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be. +Ú +namespaceSelectorÄ +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"ÿSelects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces. + +If PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. +Ô + podSelectorÄ +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"ÿThis is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods. + +If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. +è +(io.k8s.api.core.v1.EphemeralVolumeSource» "JRepresents an ephemeral volume that is handled by a normal storage driver.² +objectÊà +Ý +volumeClaimTemplateÅ +>#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimTemplate"‚Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). + +An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. + +This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. 
+ +Required, must not be nil. +ö +io.k8s.api.core.v1.LifecycleÕ "ªLifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.² +objectÊ™ + + postStartó +(#/definitions/io.k8s.api.core.v1.Handler"ÆPostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +’ +preStop† +(#/definitions/io.k8s.api.core.v1.Handler"ÙPreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +Ü +*io.k8s.api.policy.v1beta1.AllowedCSIDriver­"RAllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.šname² +objectÊD +B +name:"-Name is the registered name of the CSI driver² +string +Š +*io.k8s.api.storage.v1.VolumeAttachmentSpecÛ"HVolumeAttachmentSpec is the specification of a VolumeAttachment request.šattacheršsourcešnodeName² +objectÊã +— +attacherŠ"}Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().² +string +H +nodeName<"/The node that the volume should be attached to.² +string +} +sources +:#/definitions/io.k8s.api.storage.v1.VolumeAttachmentSource"5Source represents the volume that should be attached. +º +Bio.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationó +"‰ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.² +objectÊÔ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ñ +metadataÄ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"‚Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
+€ +webhooksó"IWebhooks is a list of webhooks and the affected resources and operations.² +arrayºG +E +C#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhookú' +x-kubernetes-patch-merge-keyname +ú' +x-kubernetes-patch-strategymerge +ú€ +x-kubernetes-group-version-kind][- group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration + version: v1 + +Ø +3io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus  "ÝResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.šnamešcurrentAverageValue² +objectÊ” +× +currentAverageUtilization¹int32"£currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.² +integer +ó +currentAverageValueÛ +;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"›currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type. It will always be set, regardless of the corresponding metric specification. +B +name:"-name is the name of the resource in question.² +string +‘ +%io.k8s.api.core.v1.VolumeNodeAffinityç"^VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.² +objectÊy +w +requiredk +-#/definitions/io.k8s.api.core.v1.NodeSelector":Required specifies hard node constraints that must be met. +µ + io.k8s.api.node.v1beta1.Overhead"ROverhead structure represents the resource overhead associated with running a pod.² +objectÊ­ +ª +podFixed"NPodFixed represents the fixed resource overhead associated with running a pod.ª? += +;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² +object +˜ +$io.k8s.api.apps.v1.StatefulSetStatusï "@StatefulSetStatus represents the current state of a StatefulSet.šreplicas² +objectÊ“ +ù + +conditionsê"NRepresents the latest available observations of a statefulset's current state.² +arrayº9 +7 +5#/definitions/io.k8s.api.apps.v1.StatefulSetConditionú' +x-kubernetes-patch-merge-keytype +ú' +x-kubernetes-patch-strategymerge + +¤ +currentRevision"‚currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).² +string +¯ +updatedReplicas›int32"…updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.² +integer +f +replicasZint32"Ereplicas is the number of Pods created by the StatefulSet controller.² +integer +± +updateRevisionž"updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)² +string +þ +collisionCountëint32"ÕcollisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.² +integer +° +currentReplicasœint32"†currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.² +integer +Ü +observedGenerationÅint64"¯observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.² +integer +Œ + readyReplicas{int32"freadyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.² +integer +“ +;io.k8s.api.authorization.v1beta1.SelfSubjectRulesReviewSpecT² +objectÊF +D + namespace7"*Namespace to evaluate rules for. Required.² +string +„ +=io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpecÂ"OPriorityLevelConfigurationSpec specifies the configuration of a priority level.štype² +objectÊø +ì +limitedà +N#/definitions/io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration"`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `"Limited"`. +† +typeý"ï`type` indicates whether this priority level is subject to limitation on request execution. A value of `"Exempt"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `"Limited"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.² +stringú` +x-kubernetes-unionsIG- discriminator: type + fields-to-discriminateBy: + limited: Limited + +› +2io.k8s.api.policy.v1beta1.RunAsUserStrategyOptionsä"_RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.šrule² +objectÊí +ý +rangesò"¯ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.² +arrayº3 +1 +/#/definitions/io.k8s.api.policy.v1beta1.IDRange +k +rulec"Vrule is the strategy that will dictate the allowable RunAsUser values that may be set.² +string +Ù +io.k8s.api.core.v1.HTTPHeader·">HTTPHeader describes a custom header to be used in HTTP probesšnamešvalue² +objectÊZ +* +name""The header field name² +string +, +value#"The header field value² +string +ð +&io.k8s.api.core.v1.NodeDaemonEndpointsÅ"FNodeDaemonEndpoints lists ports opened by daemons running on the Node.² +objectÊo +m +kubeletEndpointZ +/#/definitions/io.k8s.api.core.v1.DaemonEndpoint"'Endpoint on which Kubelet is listening. +˜ +!io.k8s.api.core.v1.AttachedVolumeò"4AttachedVolume describes a volume attached to a nodešnameš +devicePath² +objectÊ™ +e + +devicePathW"JDevicePath represents the device path where the volume should be available² +string +0 +name("Name of the attached volume² +string +Ö +(io.k8s.api.core.v1.DownwardAPIVolumeFile© "XDownwardAPIVolumeFile represents information to create the file containing the pod fieldšpath² +objectʹ +î +resourceFieldRefÙ +6#/definitions/io.k8s.api.core.v1.ResourceFieldSelector"žSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+¦ +fieldRef™ +4#/definitions/io.k8s.api.core.v1.ObjectFieldSelector"aRequired: Selects a field of the pod: only annotations, labels, name and namespace are supported. +» +mode²int32"œOptional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.² +integer +Þ +pathÕ"ÇRequired: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'² +string +¦ +!io.k8s.api.core.v1.ServiceAccount€"ºServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets² +objectÊÛ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +× +automountServiceAccountToken¶"§AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.² +boolean +å +imagePullSecretsÐ"‡ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod² +arrayº9 +7 +5#/definitions/io.k8s.api.core.v1.LocalObjectReference +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ò +metadataÅ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +Ä +secrets¸" Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret² +arrayº4 +2 +0#/definitions/io.k8s.api.core.v1.ObjectReferenceú' +x-kubernetes-patch-strategymerge +ú' +x-kubernetes-patch-merge-keyname +úV +x-kubernetes-group-version-kind31- kind: ServiceAccount + version: v1 + group: "" + +® +"io.k8s.api.rbac.v1.ClusterRoleList‡"/ClusterRoleList is a collection of ClusterRolesšitems² +objectÊÎ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +g +items^"Items is a list of ClusterRoles² +arrayº0 +. 
+[Binary protobuf payload of a vendored, machine-generated Kubernetes OpenAPI v2 (swagger) document — contains compiled definitions for core API types such as io.k8s.api.rbac.v1.ClusterRoleList, io.k8s.api.core.v1.PodSpec, and io.k8s.api.batch.v1.JobSpec. Content is not human-readable diff text and is omitted here.]
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +s +itemsj"$Items is the list of IngressClasses.² +arrayº7 +5 +3#/definitions/io.k8s.api.networking.v1.IngressClassúg +x-kubernetes-group-version-kindDB- group: networking.k8s.io + kind: IngressClassList + version: v1 + +î +"io.k8s.api.rbac.v1beta1.PolicyRuleÇ "¡PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.šverbs² +objectÊŒ + +ó + apiGroupså"ÈAPIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.² +arrayº + ² +string +¼ +nonResourceURLs¨"‹NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both.² +arrayº + ² +string +¨ + resourceNames–"zResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.² +arrayº + ² +string +ò + resourcesä"ÇResources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.² +arrayº + ² +string +´ +verbsª"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.² +arrayº + ² +string +Ï +!io.k8s.api.storage.v1.CSINodeSpec©"\CSINodeSpec holds information about the specification of all CSI drivers installed on a nodešdrivers² +objectʲ +¯ +drivers£"Šdrivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.² +arrayº5 +3 +1#/definitions/io.k8s.api.storage.v1.CSINodeDriverú' +x-kubernetes-patch-merge-keyname +ú' +x-kubernetes-patch-strategymerge + +í +Oio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray™"–JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes. +Æ +2io.k8s.api.core.v1.AzureFilePersistentVolumeSource"WAzureFile represents an Azure File Service mount on the host and bind mount to the pod.š +secretNameš shareName² +objectÊŽ +x +readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² +boolean +^ + +secretNameP"Cthe name of secret that contains Azure Storage Account Name and Key² +string +‹ +secretNamespacex"kthe namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod² +string +$ + shareName" +Share Name² +string +£ +io.k8s.api.core.v1.PodList„ "PodList is a list of Pods.šitems² +objectÊÿ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +­ +items£"lList of pods. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md² +arrayº( +& +$#/definitions/io.k8s.api.core.v1.Pod +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ï +metadata +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsúO +x-kubernetes-group-version-kind,*- group: "" + kind: PodList + version: v1 + +‚ +#io.k8s.api.events.v1beta1.EventListÚ"%EventList is a list of Event objects.šitems² +objectʸ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +k +itemsb""items is a list of schema objects.² +arrayº1 +/ +-#/definitions/io.k8s.api.events.v1beta1.Event +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ë +metadata¾ +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataúa +x-kubernetes-group-version-kind><- version: v1beta1 + group: events.k8s.io + kind: EventList + +à +@io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration› "èLimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues: + * How are requests for this priority level limited? + * What should be done with requests that exceed the limit?² +objectÊ¡ +ö +assuredConcurrencySharesÙint32"Ã`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level: + + ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + +bigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.² +integer +¥ + limitResponse“ +:#/definitions/io.k8s.api.flowcontrol.v1beta1.LimitResponse"U`limitResponse` indicates what to do with requests that can not be executed right now +´ +$io.k8s.api.networking.v1.IngressSpec‹";IngressSpec describes the Ingress the user wishes to exist.² +objectÊ¿ +ß +defaultBackendÌ +5#/definitions/io.k8s.api.networking.v1.IngressBackend"’DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. 
If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller. +´ +ingressClassNameŸ"‘IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.² +string +ù +rulesï"ƒA list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.² +arrayº6 +4 +2#/definitions/io.k8s.api.networking.v1.IngressRuleú# +x-kubernetes-list-type atomic + +§ +tlsŸ"´TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.² +arrayº5 +3 +1#/definitions/io.k8s.api.networking.v1.IngressTLSú# +x-kubernetes-list-type atomic + +ƒ +(io.k8s.api.storage.v1beta1.CSINodeDriverÖ "]CSINodeDriver holds information about the specification of one CSI driver installed on a nodešnamešnodeID² +objectÊØ +¨ + allocatable˜ +<#/definitions/io.k8s.api.storage.v1beta1.VolumeNodeResources"Xallocatable represents the volume resources of a node that are available for scheduling. +ª +name¡"“This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.² +string +¸ +nodeID­"ŸnodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as "node1", but the storage system may refer to the same node as "nodeA". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. "nodeA" instead of "node1". This field is required.² +string + + topologyKeys±"”topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. "company.com/zone", "company.com/region"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.² +arrayº + ² +string +§ +Uio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArrayN"LJSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. 
+– +4io.k8s.api.authorization.v1.LocalSubjectAccessReviewÝ "çLocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.šspec² +objectÊè +§ +statusœ +C#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewStatus"UStatus is filled in by the server and indicates whether the request is allowed or not +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +K +metadata? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta +ñ +specè +A#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewSpec"¢Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.úr +x-kubernetes-group-version-kindOM- group: authorization.k8s.io + kind: LocalSubjectAccessReview + version: v1 + + +3io.k8s.api.authorization.v1beta1.ResourceAttributeså"tResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface² +objectÊà +‚ +namez"mName is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.² +string +ô + namespaceæ"ØNamespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview² +string +X +resourceL"?Resource is one of the existing resource types. "*" means all.² +string +^ + subresourceO"BSubresource is one of the existing resource types. "" means none.² +string +ƒ +verb{"nVerb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.² +string +S +versionH";Version is the API Version of the Resource. "*" means all.² +string +M +groupD"7Group is the API Group of the Resource. "*" means all.² +string +ü +&io.k8s.api.networking.v1.IngressStatusÑ"8IngressStatus describe the current state of the Ingress.² +objectʈ +… + loadBalanceru +3#/definitions/io.k8s.api.core.v1.LoadBalancerStatus">LoadBalancer contains the current status of the load-balancer. +Û +3io.k8s.api.policy.v1beta1.PodDisruptionBudgetStatus£"ŠPodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.šdisruptionsAllowedšcurrentHealthyšdesiredHealthyš expectedPods² +objectÊÁ +ô + +conditionså"ôConditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. 
The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute + the number of allowed disruptions. Therefore no disruptions are + allowed and the status of the condition will be False. +- InsufficientPods: The number of pods are either at or below the number + required by the PodDisruptionBudget. No disruptions are + allowed and the status of the condition will be False. +- SufficientPods: There are more pods than required by the PodDisruptionBudget. + The condition will be True, and the number of allowed + disruptions are provided by the disruptionsAllowed property.² +arrayº@ +> +<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Conditionú' +x-kubernetes-list-map-keys - type +ú +x-kubernetes-list-typemap +ú' +x-kubernetes-patch-merge-keytype +ú' +x-kubernetes-patch-strategymerge + +E +currentHealthy3int32"current number of healthy pods² +integer +M +desiredHealthy;int32"&minimum desired number of healthy pods² +integer + + disruptedPodsû"¯DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.ª; +9 +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time² +object +` +disruptionsAllowedJint32"5Number of pod disruptions that are currently allowed.² +integer +[ + expectedPodsKint32"6total number of pods counted by this disruption budget² +integer +â +observedGenerationËint64"µMost recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.² +integer +Ý +$io.k8s.api.storage.v1beta1.CSIDriver´"¹CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.šspec² +objectÊý +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +™ +metadataŒ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ÊStandard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +b +specZ +6#/definitions/io.k8s.api.storage.v1beta1.CSIDriverSpec" Specification of the CSI Driver.úb +x-kubernetes-group-version-kind?=- group: storage.k8s.io + kind: CSIDriver + version: v1beta1 + +… +9io.k8s.api.authorization.v1beta1.SubjectRulesReviewStatusÇ "çSubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.š resourceRulesšnonResourceRulesš +incomplete² +objectÊž +Œ +evaluationErrorø"êEvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.² +string +Ý + +incompleteÎ"¿Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.² +boolean +› +nonResourceRules†"´NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.² +arrayºB +@ +>#/definitions/io.k8s.api.authorization.v1beta1.NonResourceRule +Ž + resourceRulesü"­ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.² +arrayº? += +;#/definitions/io.k8s.api.authorization.v1beta1.ResourceRule + +(io.k8s.api.networking.v1.HTTPIngressPathâ "oHTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.šbackend² +objectÊØ + +Ÿ +backend“ +5#/definitions/io.k8s.api.networking.v1.IngressBackend"ZBackend defines the referenced service endpoint to which the traffic will be forwarded to. +ž +path•"‡Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.² +string +’ +pathType…"÷PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is + done on a path element by element basis. A path element refers is the + list of labels in the path split by the '/' separator. A request is a + match for path p if every p is an element-wise prefix of p of the + request path. 
Note that if the last element of the path is a substring + of the last element in request path, it is not a match (e.g. /foo/bar + matches /foo/bar/baz, but does not match /foo/barbaz). +* ImplementationSpecific: Interpretation of the Path matching is up to + the IngressClass. Implementations can treat this as a separate PathType + or treat it identically to Prefix or Exact path types. +Implementations are required to support all path types.² +string +Þ + io.k8s.api.events.v1.EventSeries¹"èEventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows how this struct is updated on heartbeats and can guide customized reporter implementations.šcountšlastObservedTime² +objectʤ +n +counteint32"Pcount is the number of occurrences in this series up to the last heartbeat time.² +integer +± +lastObservedTimeœ +<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime"\lastObservedTime is the time when last Event from the series was seen before last heartbeat. +‹ +#io.k8s.api.storage.v1.CSIDriverListã"3CSIDriverList is a collection of CSIDriver objects.šitems² +objectʳ +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ê +metadata½ +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"~Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +g +items^"items is the list of CSIDriver² +arrayº1 +/ +-#/definitions/io.k8s.api.storage.v1.CSIDriverúa +x-kubernetes-group-version-kind><- group: storage.k8s.io + kind: CSIDriverList + version: v1 + +ñ +Sio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation™"YExternalDocumentation allows referencing an external resource for extended documentation.² +objectÊ0 + + description ² +string + +url ² +string +ÿ +5io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpecÅ"-specification of a horizontal pod autoscaler.šscaleTargetRefš maxReplicas² +objectÊè +× +targetCPUUtilizationPercentage´int32"žtarget average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.² +integer + + maxReplicas~int32"iupper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.² +integer +ò + minReplicasâint32"ÌminReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available.² +integer +† +scaleTargetRefó +C#/definitions/io.k8s.api.autoscaling.v1.CrossVersionObjectReference"«reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource. +º + +#/definitions/io.k8s.api.autoscaling.v2beta2.MetricValueStatus"7current contains the current value for the given metric +] +describedObjectJ +H#/definitions/io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference +ƒ +metricy +=#/definitions/io.k8s.api.autoscaling.v2beta2.MetricIdentifier"8metric identifies the target metric by name and selector +™ +io.k8s.api.core.v1.EnvVarû"AEnvVar represents an environment variable present in a Container.šname² +objectÊ¢ +N +nameF"9Name of the environment variable. Must be a C_IDENTIFIER.² +string +» +value±"£Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".² +string +‘ + valueFromƒ +-#/definitions/io.k8s.api.core.v1.EnvVarSource"RSource for the environment variable's value. Cannot be used if value is not empty. +ä + io.k8s.api.core.v1.HTTPGetAction¿"=HTTPGetAction describes an action based on HTTP Get requests.šport² +objectÊê +7 +path/""Path to access on the HTTP server.² +string +Ç +port¾ +=#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"}Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. +R +schemeH";Scheme to use for connecting to the host. Defaults to HTTP.² +string +} +hostu"hHost name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.² +string +‘ + httpHeaders"CCustom headers to set in the request. HTTP allows repeated headers.² +arrayº/ +- ++#/definitions/io.k8s.api.core.v1.HTTPHeader +ž +.io.k8s.api.core.v1.PersistentVolumeClaimStatusë"OPersistentVolumeClaimStatus is the current status of a persistent volume claim.² +objectÊ‹ +Ò + accessModesÂ"¥AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1² +arrayº + ² +string +• +capacityˆ"9Represents the actual resources of the underlying volume.ª? += +;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² +object +Ç + +conditions¸"‘Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.² +arrayºC +A +?#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimConditionú' +x-kubernetes-patch-merge-keytype +ú' +x-kubernetes-patch-strategymerge + +R +phaseI""1Group to map volume access to Default is no group² +string +‚ +readOnlyv"hReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false.² +boolean +á +registryÔ"ÆRegistry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes² +string +œ +tenant‘"ƒTenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin² +string +ê? +Hio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps?"[JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).² +objectʱ> +$ + minPropertiesint64² +integer +p +anyOfg² +arrayºZ +X +V#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps + + description ² +string +X +exampleM +K#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON +h +items_ +]#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray + +maxItemsint64² +integer + + maxLengthint64² +integer +} +patternPropertieshªZ +X +V#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps² +object +n + externalDocs^ +\#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation +$ + maxPropertiesint64² +integer + +minItemsint64² +integer + +minimumdouble² +number +! + +multipleOfdouble² +number + +pattern ² +string + +$ref ² +string +d +enum\² +arrayºO +M +K#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON +& +required² +arrayº + ² +string + +maximumdouble² +number + + minLengthint64² +integer + +type ² +string +‡ +x-kubernetes-int-or-stringè"Ùx-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns: + +1) anyOf: + - type: integer + - type: string +2) allOf: + - anyOf: + - type: integer + - type: string + - ... zero or more² +boolean +Þ +x-kubernetes-list-typeÃ"µx-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values: + +1) `atomic`: the list is treated as a single entity, like a scalar. + Atomic lists will be entirely replaced when updated. This extension + may be used on any type of list (struct, scalar, ...). +2) `set`: + Sets are lists that must not have multiple items with the same value. Each + value must be a scalar, an object with x-kubernetes-map-type `atomic` or an + array with x-kubernetes-list-type `atomic`. +3) `map`: + These lists are like maps in that their elements have a non-index key + used to identify them. Order is preserved upon merge. The map tag + must only be used on a list with elements of type object. +Defaults to atomic for arrays.² +string +w + definitionshªZ +X +V#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps² +object +v + +propertieshªZ +X +V#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps² +object + +title ² +string +« +x-kubernetes-list-map-keysŒ"ïx-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map. + +This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported). 
+ +The properties specified must either be required or have a default value, to ensure those properties are present for all list items.² +arrayº + ² +string +… + dependenciesuªg +e +c#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray² +object + +exclusiveMinimum ² +boolean +p +oneOfg² +arrayºZ +X +V#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps +˜ +$x-kubernetes-preserve-unknown-fieldsï"àx-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.² +boolean +q +additionalItems^ +\#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool +p +allOfg² +arrayºZ +X +V#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps + +default‘ +K#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON"Ádefault is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false. + +id ² +string + +nullable ² +boolean +Ó +x-kubernetes-embedded-resource°"¡x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).² +boolean +  +x-kubernetes-map-type†"øx-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values: + +1) `granular`: + These maps are actual maps (key-value pairs) and each fields are independent + from each other (they can each be manipulated by separate actors). This is + the default behaviour for all maps. +2) `atomic`: the list is treated as a single entity, like a scalar. + Atomic maps will be entirely replaced when updated.² +string + +$schema ² +string +v +additionalProperties^ +\#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool + +exclusiveMaximum ² +boolean +¶ +format«"format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: + +- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. 
- ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\d{3})\d{11})$ with any non digit characters mixed in - ssn: a U.S. social security number following the regex ^\d{3}[- ]?\d{2}[- ]?\d{4}$ - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.² +string +_ +notX +V#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps + + uniqueItems ² +boolean +Œ +7io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatusÐ"-current status of a horizontal pod autoscaleršcurrentReplicasšdesiredReplicas² +objectÊî +ß +currentCPUUtilizationPercentage»int32"¥current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.² +integer +f +currentReplicasSint32">current number of replicas of pods managed by this autoscaler.² +integer +f +desiredReplicasSint32">desired number of replicas of pods managed by this autoscaler.² +integer +Ù + lastScaleTimeÇ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"‹last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed. 
+^ +observedGenerationHint64"3most recent generation observed by this autoscaler.² +integer +à +?io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerConditionÿ"eHorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.štypešstatus² +objectÊù +« +lastTransitionTime” +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"YlastTransitionTime is the last time the condition transitioned from one status to another +g +message\"Omessage is a human-readable explanation containing details about the transition² +string +P +reasonF"9reason is the reason for the condition's last transition.² +string +S +statusI"#/definitions/io.k8s.api.autoscaling.v2beta2.MetricValueStatus"7current contains the current value for the given metric +B +name:"-Name is the name of the resource in question.² +string +b + containerU"HContainer is the name of the container in the pods of the scaling target² +string +ä +'io.k8s.api.networking.v1.IngressBackend¸"DIngressBackend describes all endpoints for a given service and port.² +objectÊã +± +resource¤ +:#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference"åResource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with "Service". +¬ +service  +<#/definitions/io.k8s.api.networking.v1.IngressServiceBackend"`Service references a Service as a Backend. This is a mutually exclusive setting with "Resource". +Ù +io.k8s.api.core.v1.Affinity¹"1Affinity is a group of affinity scheduling rules.² +objectÊ÷ +´ + podAffinity¤ +,#/definitions/io.k8s.api.core.v1.PodAffinity"tDescribes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). +Å +podAntiAffinity± +0#/definitions/io.k8s.api.core.v1.PodAntiAffinity"}Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). +v + nodeAffinityf +-#/definitions/io.k8s.api.core.v1.NodeAffinity"5Describes node affinity scheduling rules for the pod. +æ +(io.k8s.api.core.v1.GlusterfsVolumeSource¹"‹Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.š endpointsšpath² +objectʉ +« + endpoints"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod² +string +ƒ +path{"nPath is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod² +string +Ò +readOnlyÅ"¶ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod² +boolean +ë +%io.k8s.api.core.v1.ServiceAccountListÁ "6ServiceAccountList is a list of ServiceAccount objectsšitems² +objectÊ• +à +items¹"wList of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/² +arrayº3 +1 +/#/definitions/io.k8s.api.core.v1.ServiceAccount +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ï +metadata +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +stringúZ +x-kubernetes-group-version-kind75- version: v1 + group: "" + kind: ServiceAccountList + +Ò +)io.k8s.api.networking.v1beta1.IngressSpec¤ ";IngressSpec describes the Ingress the user wishes to exist.² +objectÊØ +´ +ingressClassNameŸ"‘IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.² +string +Ø +rulesÎ"ƒA list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.² +arrayº; +9 +7#/definitions/io.k8s.api.networking.v1beta1.IngressRule +† +tlsþ"´TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.² +arrayº: +8 +6#/definitions/io.k8s.api.networking.v1beta1.IngressTLS +º +backend® +:#/definitions/io.k8s.api.networking.v1beta1.IngressBackend"ïA default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default. +˜ +.io.k8s.api.authorization.v1.ResourceAttributeså"tResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface² +objectÊà +ƒ +verb{"nVerb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.² +string +S +versionH";Version is the API Version of the Resource. "*" means all.² +string +M +groupD"7Group is the API Group of the Resource. "*" means all.² +string +‚ +namez"mName is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.² +string +ô + namespaceæ"ØNamespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview² +string +X +resourceL"?Resource is one of the existing resource types. 
"*" means all.² +string +^ + subresourceO"BSubresource is one of the existing resource types. "" means none.² +string +” +)io.k8s.api.coordination.v1beta1.LeaseListæ"%LeaseList is a list of Lease objects.šitems² +objectʾ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +q +itemsh""Items is a list of schema objects.² +arrayº7 +5 +3#/definitions/io.k8s.api.coordination.v1beta1.Lease +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ë +metadata¾ +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataúg +x-kubernetes-group-version-kindDB- group: coordination.k8s.io + kind: LeaseList + version: v1beta1 + +¨ +%io.k8s.api.core.v1.PodDNSConfigOption"9PodDNSConfigOption defines DNS resolver options of a pod.² +objectÊ6 + +name" Required.² +string + +value ² +string +Ö + io.k8s.api.core.v1.ResourceQuota± "FResourceQuota sets aggregate quota restrictions enforced per namespace² +objectÊ‚ + +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ò +metadataÅ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +à +specº +2#/definitions/io.k8s.api.core.v1.ResourceQuotaSpec"ƒSpec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +ç +statusÜ +4#/definitions/io.k8s.api.core.v1.ResourceQuotaStatus"£Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-statusúU +x-kubernetes-group-version-kind20- group: "" + kind: ResourceQuota + version: v1 + +¥ +%io.k8s.api.core.v1.SecretVolumeSourceû "íAdapts a Secret into a volume. + +The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.² +objectÊü +ê + defaultModeÚint32"ÄOptional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. 
[Binary content omitted: a protobuf wire-format serialization of the Kubernetes OpenAPI v2 schema, vendored as part of this change. The blob embeds the standard schema definitions together with their documentation strings, including io.k8s.api.core.v1.Service, io.k8s.api.core.v1.Endpoints, io.k8s.api.core.v1.EndpointSubset, io.k8s.api.storage.v1.VolumeAttachment, io.k8s.api.admissionregistration.v1.MutatingWebhook, and the apiextensions.k8s.io CustomResourceDefinition types. The raw bytes are not representable as text and are not reproduced here.]
Each entry in Values is ORed.² +arrayº + ² +string +ƒ +2io.k8s.api.extensions.v1beta1.HTTPIngressRuleValueÌ"£HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.špaths² +objectÊ +Œ +paths‚"4A collection of paths that map requests to backends.² +arrayº? += +;#/definitions/io.k8s.api.extensions.v1beta1.HTTPIngressPath +¦ + io.k8s.api.networking.v1.Ingress "€Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.² +objectÊŽ + +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ò +metadataÅ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +Ø +specÏ +2#/definitions/io.k8s.api.networking.v1.IngressSpec"˜Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +Þ +statusÓ +4#/definitions/io.k8s.api.networking.v1.IngressStatus"šStatus is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-statusú^ +x-kubernetes-group-version-kind;9- group: networking.k8s.io + kind: Ingress + version: v1 + +Ö +&io.k8s.api.apps.v1.ReplicaSetCondition«"LReplicaSetCondition describes the state of a replica set at a certain point.štypešstatus² +objectʾ +• +lastTransitionTime +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"DThe last time the condition transitioned from one status to another. +Y +messageN"AA human readable message indicating details about the transition.² +string +F +reason<"/The reason for the condition's last transition.² +string +L +statusB"5Status of the condition, one of True, False, Unknown.² +string +3 +type+"Type of replica set condition.² +string +¼ +(io.k8s.api.authorization.v1.ResourceRule"¬ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.šverbs² +objectÊÉ +¹ + resourceNames§"ŠResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all.² +arrayº + ² +string +ä + resourcesÖ"¹Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.² +arrayº + ² +string + +verbs“"wVerb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. 
"*" means all.² +arrayº + ² +string +ƒ + apiGroupsõ"ØAPIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. "*" means all.² +arrayº + ² +string +ð +8io.k8s.api.authorization.v1beta1.SelfSubjectAccessReview³ "öSelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means "in all namespaces". Self is a special case, because users should always be able to check whether they can perform an actionšspec² +objectÊ« +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +K +metadata? +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta +¯ +spec¦ +J#/definitions/io.k8s.api.authorization.v1beta1.SelfSubjectAccessReviewSpec"XSpec holds information about the request being evaluated. user and groups must be empty +¬ +status¡ +H#/definitions/io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus"UStatus is filled in by the server and indicates whether the request is allowed or notúv +x-kubernetes-group-version-kindSQ- group: authorization.k8s.io + kind: SelfSubjectAccessReview + version: v1beta1 + + +:io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReferenceÂ"bCrossVersionObjectReference contains enough information to let you identify the referred resource.škindšname² +objectÊÁ +˜ +kind"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"² +string +l +named"WName of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names² +string +6 + +apiVersion("API version of the referent² +string +Ç +0io.k8s.api.core.v1.GCEPersistentDiskVolumeSource’ "ÁRepresents a Persistent Disk resource in Google Compute Engine. + +A GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.špdName² +objectʶ +² +pdName§"™Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk² +string +¾ +readOnly±"¢ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk² +boolean +Á +fsType¶"¨Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk² +string +ù + partitionëint32"ÕThe partition in the volume that you want to mount. If omitted, the default is to mount by volume name. 
Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk² +integer +ä +io.k8s.api.core.v1.PodDNSConfigÀ"_PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.² +objectÊÐ +Ä + nameservers´"—A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.² +arrayº + ² +string +µ +options©"âA list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.² +arrayº7 +5 +3#/definitions/io.k8s.api.core.v1.PodDNSConfigOption +Î +searchesÁ"¤A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.² +arrayº + ² +string +ë + +0io.k8s.api.core.v1.ScaleIOPersistentVolumeSource¶ +"DScaleIOPersistentVolumeSource represents a persistent ScaleIO volumešgatewayšsystemš secretRef² +objectÊ +[ + storagePoolL"?The ScaleIO Storage Pool associated with the protection domain.² +string + + +volumeNames"fThe name of a volume already created in the ScaleIO system that is associated with this volume source.² +string +f +protectionDomainR"EThe name of the ScaleIO Protection Domain for the configured storage.² +string +x +readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² +boolean +` + +sslEnabledR"DFlag to enable/disable SSL communication with Gateway, default false² +boolean +’ + storageMode‚"uIndicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.² +string +O +systemE"8The name of the storage system as configured in ScaleIO.² +string +  +fsType•"‡Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs"² +string +D +gateway9",The host address of the ScaleIO API Gateway.² +string +Ë + secretRef½ +0#/definitions/io.k8s.api.core.v1.SecretReference"ˆSecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. +³ +,io.k8s.api.core.v1.TypedLocalObjectReference‚"~TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.škindšname² +objectÊå +Ú +apiGroupÍ"¿APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.² +string +B +kind:"-Kind is the type of resource being referenced² +string +B +name:"-Name is the name of resource being referenced² +string +Û +9io.k8s.api.admissionregistration.v1beta1.ServiceReference";ServiceReference holds a reference to Service.legacy.k8s.ioš namespacešname² +objectʾ +@ +name8"+`name` is the name of the service. Required² +string +O + namespaceB"5`namespace` is the namespace of the service. Required² +string +f +path^"Q`path` is an optional URL path which will be sent in any request to this service.² +string +À +port·int32"¡If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. 
`port` should be a valid port number (1-65535, inclusive).² +integer +“ +%io.k8s.api.apps.v1.DeploymentStrategyé"HDeploymentStrategy describes how to replace existing pods with new ones.² +objectÊ +h +type`"SType of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.² +string +£ + rollingUpdate‘ +8#/definitions/io.k8s.api.apps.v1.RollingUpdateDeployment"URolling update config params. Present only if DeploymentStrategyType = RollingUpdate. +“ +*io.k8s.apimachinery.pkg.apis.meta.v1.Patche"XPatch is provided to give a concrete name and type to the Kubernetes PATCH request body.² +object +Ú +io.k8s.api.core.v1.NodeStatus¸"=NodeStatus is information about the current status of a node.² +objectÊê +q +imagesg"%List of container images on this node² +arrayº3 +1 +/#/definitions/io.k8s.api.core.v1.ContainerImage +¯ +nodeInfo¢ +/#/definitions/io.k8s.api.core.v1.NodeSystemInfo"oSet of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info +d + volumesInUseT"8List of attachable volumes in use (mounted) by the node.² +arrayº + ² +string +¥ + +conditions–"€Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition² +arrayº2 +0 +.#/definitions/io.k8s.api.core.v1.NodeConditionú' +x-kubernetes-patch-merge-keytype +ú' +x-kubernetes-patch-strategymerge + +t +daemonEndpointsa +4#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints")Endpoints of daemons running on the Node. +â +capacityÕ"…Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacityª? += +;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² +object +‘ +config† +1#/definitions/io.k8s.api.core.v1.NodeConfigStatus"QStatus of the config assigned to the node via the dynamic Kubelet config feature. +Í +phaseÃ"µNodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.² +string +ƒ +volumesAttachedp".List of volumes that are attached to the node.² +arrayº3 +1 +/#/definitions/io.k8s.api.core.v1.AttachedVolume +¦ + addresses˜"„List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.² +arrayº0 +. +,#/definitions/io.k8s.api.core.v1.NodeAddressú' +x-kubernetes-patch-strategymerge +ú' +x-kubernetes-patch-merge-keytype + +Æ + allocatable¶"gAllocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.ª? += +;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² +object +º +"io.k8s.api.rbac.v1.AggregationRule“"VAggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole² +objectʬ +© +clusterRoleSelectors"¼ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added² +arrayºD +B +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +à +"io.k8s.api.core.v1.SecretReference¹"lSecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace² +objectʼ +V +nameN"AName is unique within a namespace to reference a secret resource.² +string +b + namespaceU"HNamespace defines the space within which the secret name must be unique.² +string +¤ + +Iio.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfigurationListÖ "KMutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.šitems² +objectÊç +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +• +items‹"%List of MutatingWebhookConfiguration.² +arrayºW +U +S#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ï +metadata +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsú‡ +x-kubernetes-group-version-kinddb- version: v1beta1 + group: admissionregistration.k8s.io + kind: MutatingWebhookConfigurationList + +Ø +Cio.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus"AAPIServiceStatus contains derived information about an API server² +objectʾ +» + +conditions¬"$Current service state of apiService.² +arrayºX +V +T#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceConditionú' +x-kubernetes-list-map-keys - type +ú +x-kubernetes-list-typemap +ú' +x-kubernetes-patch-merge-keytype +ú' +x-kubernetes-patch-strategymerge + +ê +%io.k8s.api.core.v1.PodSecurityContextÀ"ôPodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.² +objectʺ +È +fsGroup¼int64"¦A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: + +1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- + +If unset, the Kubelet will not modify the ownership and permissions of any volume.² +integer +° +fsGroupChangePolicy˜"ŠfsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.² +string +À + runAsUser²int64"œThe UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.² +integer +… +seLinuxOptionsò +/#/definitions/io.k8s.api.core.v1.SELinuxOptions"¾The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. +Í +windowsOptionsº +>#/definitions/io.k8s.api.core.v1.WindowsSecurityContextOptions"÷The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +£ + +runAsGroup”int64"þThe GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.² +integer +º + runAsNonRoot©"šIndicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.² +boolean +~ +seccompProfilel +/#/definitions/io.k8s.api.core.v1.SeccompProfile"9The seccomp options to use by the containers in this pod. +ç +supplementalGroupsÐ"«A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.² +arrayº +int64² +integer +Ñ +sysctlsÅ"ŠSysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.² +arrayº+ +) +'#/definitions/io.k8s.api.core.v1.Sysctl +œ +$io.k8s.api.core.v1.SecretKeySelectoró",SecretKeySelector selects a key of a Secret.škey² +objectÊ° +„ +name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² +string +O +optionalC"5Specify whether the Secret or its key must be defined² +boolean +V +keyO"BThe key of the secret to select from. Must be a valid secret key.² +string +‘ +(io.k8s.api.core.v1.SessionAffinityConfigä"HSessionAffinityConfig represents the configurations of session affinity.² +objectÊ‹ +ˆ +clientIP| +/#/definitions/io.k8s.api.core.v1.ClientIPConfig"IclientIP contains the configurations of Client IP based session affinity. +  +)io.k8s.api.storage.v1.VolumeNodeResourcesò"JVolumeNodeResources is a set of resource limits for scheduling of volumes.² +objectÊ— +” +countŠint32"ôMaximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded.² +integer +ç +3io.k8s.api.autoscaling.v2beta2.ResourceMetricSource¯"ÊResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. Only one "target" type should be set.šnameštarget² +objectÊà +B +name:"-name is the name of the resource in question.² +string +} +targets +9#/definitions/io.k8s.api.autoscaling.v2beta2.MetricTarget"6target specifies the target value for the given metric +È +&io.k8s.api.batch.v1beta1.CronJobStatus"9CronJobStatus represents the current state of a cron job.² +objectÊÓ +“ +lastSuccessfulTime} +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"BInformation when was the last time the job successfully completed. +¡ +active–"-A list of pointers to currently running jobs.² +arrayº4 +2 +0#/definitions/io.k8s.api.core.v1.ObjectReferenceú# +x-kubernetes-list-type atomic + +– +lastScheduleTime +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"FInformation when was the last time the job was successfully scheduled. + +-io.k8s.api.core.v1.FlexPersistentVolumeSourceÝ"ƒFlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.šdriver² +objectÊ¿ +O +driverE"8Driver is the name of the driver to use for this volume.² +string +Ä +fsType¹"«Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.² +string +O +optionsD"'Optional: Extra command options if any.ª + ² +string² +object +‚ +readOnlyv"hOptional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² +boolean +Î + secretRefÀ +0#/definitions/io.k8s.api.core.v1.SecretReference"‹Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts. +„ +io.k8s.api.rbac.v1.RoleListä"!RoleList is a collection of Rolesšitems² +objectÊÀ +f +metadataZ +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard object's metadata. +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +Y +itemsP"Items is a list of Roles² +arrayº) +' +%#/definitions/io.k8s.api.rbac.v1.Role +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +stringúg +x-kubernetes-group-version-kindDB- group: rbac.authorization.k8s.io + kind: RoleList + version: v1 + + +:io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReferenceÂ"bCrossVersionObjectReference contains enough information to let you identify the referred resource.škindšname² +objectÊÁ +l +named"WName of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names² +string +6 + +apiVersion("API version of the referent² +string +˜ +kind"Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"² +string +ƒ +io.k8s.api.core.v1.EndpointPortß"5EndpointPort is a tuple that describes a single port.šport² +objectÊ’ +b +protocolV"IThe IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.² +string +½ + appProtocol­"ŸThe application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.² +string +¬ +name£"•The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.² +string += +port5int32" The port number of the endpoint.² +integer +º + +'.² +string +z +imageq"dThe image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images² +string +¿ + restartCount®int32"˜The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.² +integer +9 +imageID."!ImageID of the container's image.² +string +y + lastStatel +/#/definitions/io.k8s.api.core.v1.ContainerState"9Details about the container's last termination condition. +r +namej"]This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.² +string +V +readyM"?Specifies whether the container has passed its readiness probe.² +boolean +° +started¤"•Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.² +boolean +l +statec +/#/definitions/io.k8s.api.core.v1.ContainerState"0Details about the container's current condition. 
+© +&io.k8s.api.core.v1.LoadBalancerIngressþ"LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.² +objectÊÜ +{ +hostnameo"bHostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)² +string +{ +ipu"hIP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)² +string +ß +portsÕ"qPorts is a list of records of service ports If used, every port defined in the service should have an entry in it² +arrayº/ +- ++#/definitions/io.k8s.api.core.v1.PortStatusú# +x-kubernetes-list-type atomic + +‚ +*io.k8s.api.core.v1.PreferredSchedulingTermÓ"®An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).šweightš +preference² +objectÊý +‚ + +preferencet +1#/definitions/io.k8s.api.core.v1.NodeSelectorTerm"?A node selector term, associated with the corresponding weight. +v +weightlint32"WWeight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.² +integer +› +.io.k8s.api.storage.v1beta1.VolumeNodeResourcesè"JVolumeNodeResources is a set of resource limits for scheduling of volumes.² +objectÊ +Š +count€int32"êMaximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.² +integer +•4 +8io.k8s.api.admissionregistration.v1beta1.MutatingWebhookØ3"^MutatingWebhook describes an admission webhook and the resources and operations it applies to.šnameš clientConfig² +objectÊÓ2 +° +admissionReviewVersions”"÷AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.² +arrayº + ² +string +ž + clientConfig +J#/definitions/io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig"?ClientConfig defines how to communicate with the hook. Required +ç +nameÞ"ÐThe name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where "imagepolicy" is the name of the webhook, and kubernetes.io is the name of the organization. Required.² +string +Ý +namespaceSelectorÇ +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"‚ NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook. 
+ +For example, to run the webhook on any objects whose namespace is not associated with "runlevel" of "0" or "1"; you will set the selector as follows: "namespaceSelector": { + "matchExpressions": [ + { + "key": "runlevel", + "operator": "NotIn", + "values": [ + "0", + "1" + ] + } + ] +} + +If instead you want to only run the webhook on any objects whose namespace is associated with the "environment" of "prod" or "staging"; you will set the selector as follows: "namespaceSelector": { + "matchExpressions": [ + { + "key": "environment", + "operator": "In", + "values": [ + "prod", + "staging" + ] + } + ] +} + +See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors. + +Default to the empty LabelSelector, which matches everything. +é +objectSelectorÖ +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"‘ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything. + +reinvocationPolicyù"ëreinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are "Never" and "IfNeeded". + +Never: the webhook will not be called more than once in a single admission evaluation. + +IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + +Defaults to "Never".² +string +î +rulesä"‡Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.² +arrayºM +K +I#/definitions/io.k8s.api.admissionregistration.v1beta1.RuleWithOperations +œ +timeoutSeconds‰int32"óTimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 30 seconds.² +integer +® + failurePolicyœ"ŽFailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.² +string +— + matchPolicy‡"ùmatchPolicy defines how the "rules" list is used to match incoming requests. Allowed values are "Exact" or "Equivalent". + +- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + +- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + +Defaults to "Exact"² +string +Ø + sideEffectsÈ"ºSideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.² +string +… +6io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerÊ "ÎHorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.² +objectÊú +„ +specû +H#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec"®spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + +status… +J#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus"7status is the current information about the autoscaler. +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +à +metadataÓ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"‘metadata is the standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataúm +x-kubernetes-group-version-kindJH- group: autoscaling + kind: HorizontalPodAutoscaler + version: v2beta1 + +Ó +7io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry— "sManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.² +objectÊ“ + +fieldsV1 +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"QFieldsV1 holds the first JSON version format as described in the "FieldsV1" type. +W +managerL"?Manager is an identifier of the workflow managing these fields.² +string +² + operation¤"–Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.² +string +¦ +time +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"bTime is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' +– + +apiVersion‡"ùAPIVersion defines the version of this resource that this field set applies to. The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.² +string +  + +fieldsType‘"ƒFieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1"² +string +¨A +/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMetaô@"lObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.² +objectÊ÷? +¢ + namespace”"†Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + +Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces² +string +³ +uid«"UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. + +Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids² +string +À + annotations°"’Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotationsª + ² +string² +object + + clusterName"ÿThe name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.² +string +î +creationTimestampØ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"œCreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + +Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +Þ + +finalizersÏ"ˆMust be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.² +arrayº + ² +stringú' +x-kubernetes-patch-strategymerge + +¶ + generateName¥"—GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + +If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). + +Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency² +string +‚ +labels÷"ÙMap of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labelsª + ² +string² +object +ê +selfLinkÝ"ÏSelfLink is a URL representing this object. Populated by the system. Read-only. + +DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.² +string +ð +deletionGracePeriodSecondsÑint64"»Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.² +integer +¤ +deletionTimestampŽ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"Ò +DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. 
After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. + +Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +’ + +generationƒint64"nA sequence number representing a specific generation of the desired state. Populated by the system. Read-only.² +integer +ø +ownerReferencesä"¼List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.² +arrayºE +C +A#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReferenceú' +x-kubernetes-patch-strategymerge +ú& +x-kubernetes-patch-merge-keyuid + +ƒ + managedFieldsñ"˜ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object.² +arrayºI +G +E#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry +õ +nameì"ÞName must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names² +string +‘ +resourceVersioný"ïAn opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. + +Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency² +string +Ê +#io.k8s.api.apps.v1.ReplicaSetStatus¢"?ReplicaSetStatus represents the current status of a ReplicaSet.šreplicas² +objectÊÇ +Ø +replicasËint32"µReplicas is the most recently oberved number of replicas. 
[binary content omitted: protobuf-serialized Kubernetes OpenAPI v2 (Swagger) document. The blob is not human-readable as text; the recoverable portion shows it carries schema definitions for Kubernetes API types across the core, apps, batch, events, discovery, rbac, policy, networking, autoscaling, certificates, flowcontrol, storage, admissionregistration, apiextensions, and apiregistration groups (e.g. io.k8s.api.apps.v1.ReplicaSet, io.k8s.api.core.v1.Volume, io.k8s.api.policy.v1beta1.PodSecurityPolicySpec, io.k8s.apimachinery.pkg.apis.meta.v1.Condition).]
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.² +string +ß +argsÖ"¹Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell² +arrayº + ² +string +Ê + io.k8s.api.core.v1.NamespaceSpec¥"6NamespaceSpec describes the attributes on a Namespace.² +objectÊÞ +Û + +finalizersÌ"¯Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/² +arrayº + ² +string +Ì +%io.k8s.api.networking.v1.IngressClass¢ "óIngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.² +objectÊ· +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ò +metadataÅ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +â +specÙ +7#/definitions/io.k8s.api.networking.v1.IngressClassSpec"Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-statusúc +x-kubernetes-group-version-kind@>- version: v1 + group: networking.k8s.io + kind: IngressClass + +Ò +io.k8s.api.rbac.v1beta1.Role± "ÒRole is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.22.² +objectÊâ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +h +metadata\ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object's metadata. +y +rulesp"-Rules holds all the PolicyRules for this Role² +arrayº4 +2 +0#/definitions/io.k8s.api.rbac.v1beta1.PolicyRuleúh +x-kubernetes-group-version-kindEC- kind: Role + version: v1beta1 + group: rbac.authorization.k8s.io + +Ï +$io.k8s.api.batch.v1beta1.CronJobSpec¦ "YCronJobSpec describes how the job execution will look like and when it will actually run.šscheduleš jobTemplate² +objectÊ£ + +¿ +successfulJobsHistoryLimit int32"ŠThe number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.² +integer +  +suspend”"…This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.² +boolean +Ì +concurrencyPolicy¶"¨Specifies how to treat concurrent executions of a Job. Valid values are: - "Allow" (default): allows CronJobs to run concurrently; - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - "Replace": cancels currently running job and replaces it with a new one² +string +· +failedJobsHistoryLimitœint32"†The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.² +integer +‰ + jobTemplatez +6#/definitions/io.k8s.api.batch.v1beta1.JobTemplateSpec"@Specifies the job that will be created when executing a CronJob. +] +scheduleQ"DThe schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.² +string +Ç +startingDeadlineSeconds«int64"•Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.² +integer +Î +%io.k8s.api.core.v1.ConfigMapEnvSource¤"ÂConfigMapEnvSource selects a ConfigMap to populate the environment variables with. + +The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.² +objectÊÐ +„ +name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² +string +G +optional;"-Specify whether the ConfigMap must be defined² +boolean +Ì +io.k8s.api.core.v1.VolumeMount©"@VolumeMount describes a mounting of a Volume within a container.šnameš mountPath² +objectÊÅ +: +name2"%This must match the Name of a Volume.² +string +t +readOnlyh"ZMounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.² +boolean +ƒ +subPathx"kPath within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).² +string +¿ + subPathExpr¯"¡Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.² +string +q + mountPathd"WPath within the container at which the volume should be mounted. 
Must not contain ':'.² +string +Õ +mountPropagationÀ"²mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.² +string +´ +.io.k8s.api.networking.v1.IngressServiceBackend"CIngressServiceBackend references a Kubernetes Service as a Backend.šname² +objectʦ +x +namep"cName is the referenced service. The service must exist in the same namespace as the Ingress object.² +string +© +port  +9#/definitions/io.k8s.api.networking.v1.ServiceBackendPort"cPort of the referenced service. A port name or port number is required for a IngressServiceBackend. +· +1io.k8s.api.storage.v1beta1.VolumeAttachmentSource"ÝVolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.² +objectÊ’ +À +inlineVolumeSpec« +5#/definitions/io.k8s.api.core.v1.PersistentVolumeSpec"ñinlineVolumeSpec contains all the information necessary to attach a persistent volume defined by a pod's inline VolumeSource. This field is populated only for the CSIMigration feature. It contains translated fields from a pod's inline VolumeSource to a PersistentVolumeSpec. This field is beta-level and is only honored by servers that enabled the CSIMigration feature. +M +persistentVolumeName5"(Name of the persistent volume to attach.² +string +£ +Zio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionConditionÄ"YCustomResourceDefinitionCondition contains details for the current condition of this pod.štypešstatus² +objectÊÊ +e +messageZ"Mmessage is a human-readable message indicating details about last transition.² +string +j +reason`"Sreason is a unique, one-word, CamelCase reason for the condition's last transition.² +string +Z +statusP"Cstatus is the status of the condition. Can be True, False, Unknown.² +string +q +typei"\type is the type of the condition. Types include Established, NamesAccepted and Terminating.² +string +¥ +lastTransitionTimeŽ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"SlastTransitionTime last time the condition transitioned from one status to another. +ö6 +%io.k8s.api.core.v1.EphemeralContainerÌ6"ƒAn EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.šname² +objectÊ°1 +Ö +envFromÊ"ˆList of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.² +arrayº2 +0 +.#/definitions/io.k8s.api.core.v1.EnvFromSource +‡ +imagePullPolicyó"åImage pull policy. One of Always, Never, IfNotPresent. 
Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images² +string +© +name "’Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.² +string + +securityContextl +0#/definitions/io.k8s.api.core.v1.SecurityContext"8SecurityContext is not allowed for ephemeral containers. +í +targetContainerNameÕ"ÇIf set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.² +string +ó + volumeDevicesá"GvolumeDevices is the list of block devices to be used by the container.² +arrayº1 +/ +-#/definitions/io.k8s.api.core.v1.VolumeDeviceú- +x-kubernetes-patch-merge-key  devicePath +ú' +x-kubernetes-patch-strategymerge + +ß +argsÖ"¹Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell² +arrayº + ² +string +û +commandï"ÒEntrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell² +arrayº + ² +string +À + +workingDir±"£Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.² +string +Á + resources³ +5#/definitions/io.k8s.api.core.v1.ResourceRequirements"zResources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. +j + startupProbeZ +&#/definitions/io.k8s.api.core.v1.Probe"0Probes are not allowed for ephemeral containers. +ñ + volumeMountsà"HPod volumes to mount into the container's filesystem. Cannot be updated.² +arrayº0 +. +,#/definitions/io.k8s.api.core.v1.VolumeMountú, +x-kubernetes-patch-merge-key  +mountPath +ú' +x-kubernetes-patch-strategymerge + +ß +env×"IList of environment variables to set in the container. Cannot be updated.² +arrayº+ +) +'#/definitions/io.k8s.api.core.v1.EnvVarú' +x-kubernetes-patch-merge-keyname +ú' +x-kubernetes-patch-strategymerge + +l +readinessProbeZ +&#/definitions/io.k8s.api.core.v1.Probe"0Probes are not allowed for ephemeral containers. +Ï +stdinÅ"¶Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false.² +boolean +× + stdinOnceÉ"ºWhether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false² +boolean +æ +terminationMessagePolicyÉ"»Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.² +string + +ttyz"lWhether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.² +boolean +m + lifecycle` +*#/definitions/io.k8s.api.core.v1.Lifecycle"2Lifecycle is not allowed for ephemeral containers. +k + livenessProbeZ +&#/definitions/io.k8s.api.core.v1.Probe"0Probes are not allowed for ephemeral containers. +Å +terminationMessagePathª"œOptional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.² +string +i +image`"SDocker image name. More info: https://kubernetes.io/docs/concepts/containers/images² +string +y +portsp"/Ports are not allowed for ephemeral containers.² +arrayº2 +0 +.#/definitions/io.k8s.api.core.v1.ContainerPort +· +'io.k8s.api.core.v1.LocalObjectReference‹"sLocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.² +objectʇ +„ +name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² +string +° +Bio.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReferenceé"jPriorityLevelConfigurationReference contains information that points to the "request-priority" being used.šname² +objectÊh +f +name^"Q`name` is the name of the priority level configuration being referenced Required.² +string +¹ +#io.k8s.api.rbac.v1beta1.RoleBinding‘ "¨RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22.šroleRef² +objectÊÛ +‰ +subjects}"=Subjects holds references to the objects the role applies to.² +arrayº1 +/ +-#/definitions/io.k8s.api.rbac.v1beta1.Subject +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +h +metadata\ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object's metadata. +å +roleRefÙ +-#/definitions/io.k8s.api.rbac.v1beta1.RoleRef"§RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.úo +x-kubernetes-group-version-kindLJ- group: rbac.authorization.k8s.io + kind: RoleBinding + version: v1beta1 + +– +2io.k8s.apimachinery.pkg.apis.meta.v1.Preconditionsß"ZPreconditions must be fulfilled before an operation (update, delete, etc.) is carried out.² +objectÊu +- +uid&"Specifies the target UID.² +string +D +resourceVersion1"$Specifies the target ResourceVersion² +string +ç +io.k8s.api.core.v1.ConfigMapÆ"7ConfigMap holds configuration data for pods to consume.² +objectʪ +˜ + +binaryData‰"åBinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.ª +byte² +string² +object +Ï +dataÆ"¨Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.ª + ² +string² +object +è + immutableÚ"ËImmutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.² +boolean +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ò +metadataÅ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +stringúQ +x-kubernetes-group-version-kind.,- group: "" + kind: ConfigMap + version: v1 + +Ó +1io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"%Represents a vSphere volume resource.š +volumePath² +objectÊÚ +À +fsTypeµ"§Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified.² +string +x +storagePolicyIDe"XStorage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.² +string +V +storagePolicyNameA"4Storage Policy Based Management (SPBM) profile name.² +string +C + +volumePath5"(Path that identifies vSphere volume vmdk² +string +´ +4io.k8s.api.core.v1.PersistentVolumeClaimVolumeSourceû"žPersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).š claimName² +objectÊ¿ +á + claimNameÓ"ÅClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims² +string +Y +readOnlyM"?Will force the ReadOnly setting in VolumeMounts. Default false.² +boolean +° +)io.k8s.api.networking.v1.IngressClassSpec‚"DIngressClassSpec provides information about the class of an Ingress.² +objectÊ­ +¦ + +controller—"‰Controller refers to the name of the controller that should handle this class. This allows for different "flavors" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. "acme.io/ingress-controller". This field is immutable.² +string + + +parametersò +F#/definitions/io.k8s.api.networking.v1.IngressClassParametersReference"§Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. +¬ +;io.k8s.api.admissionregistration.v1beta1.RuleWithOperationsì "‚RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.² +objectÊØ +´ + apiGroups¦"‰APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.² +arrayº + ² +string +¼ + apiVersions¬"APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.² +arrayº + ² +string +  + +operations‘"ôOperations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.² +arrayº + ² +string +– + resourcesˆ"ëResources is a list of resources this rule applies to. + +For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. + +If wildcard is present, the validation rule will ensure resources do not overlap with each other. + +Depending on the enclosing object, subresources might not be allowed. Required.² +arrayº + ² +string +£ +scope™"‹scope specifies the scope of this rule. Valid values are "Cluster", "Namespaced", and "*" "Cluster" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. "Namespaced" means that only namespaced resources will match this rule. "*" means that there are no scope restrictions. 
Subresources match the scope of their parent resource. Default is "*".² +string +² + io.k8s.api.core.v1.NodeCondition"8NodeCondition contains condition information for a node.štypešstatus² +objectÊ´ +, +type$"Type of node condition.² +string +€ +lastHeartbeatTimek +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"0Last time we got an update on a given condition. +Œ +lastTransitionTimev +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time";Last time the condition transit from one status to another. +X +messageM"@Human readable message indicating details about last transition.² +string +J +reason@"3(brief) reason for the condition's last transition.² +string +L +statusB"5Status of the condition, one of True, False, Unknown.² +string +Ú +8io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec"¢SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set² +objectÊé +Æ +extra¼"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.ª +² +arrayº + ² +string² +object +M +groupD"(Groups is the groups you're testing for.² +arrayº + ² +string +¯ +nonResourceAttributes• +D#/definitions/io.k8s.api.authorization.v1beta1.NonResourceAttributes"MNonResourceAttributes describes information for a non-resource access request +¯ +resourceAttributes˜ +A#/definitions/io.k8s.api.authorization.v1beta1.ResourceAttributes"SResourceAuthorizationAttributes describes information for a resource access request +> +uid7"*UID information about the requesting user.² +string +ª +user¡"“User is the user you're testing for. If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups² +string +â +io.k8s.api.rbac.v1.RoleRefÃ"?RoleRef contains information that points to the role being usedšapiGroupškindšname² +objectÊÚ +P +apiGroupD"7APIGroup is the group for the resource being referenced² +string +B +kind:"-Kind is the type of resource being referenced² +string +B +name:"-Name is the name of resource being referenced² +string +›, +#io.k8s.api.storage.v1.CSIDriverSpecó+"2CSIDriverSpec is the specification of a CSIDriver.² +objectÊ°+ +€ +storageCapacityì"ÝIf set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information. + +The check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object. + +Alternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published. + +This field is immutable. + +This is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.² +boolean +å + tokenRequestsÓ"éTokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: "csi.storage.k8s.io/serviceAccount.tokens": { + "": { + "token": , + "expirationTimestamp": , + }, + ... 
+} + +Note: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically. + +This is a beta feature and only available when the CSIServiceAccountToken feature is enabled.² +arrayº4 +2 +0#/definitions/io.k8s.api.storage.v1.TokenRequestú# +x-kubernetes-list-type atomic + +ê +volumeLifecycleModesÑ"‘volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is "Persistent", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta. + +This field is immutable.² +arrayº + ² +stringú +x-kubernetes-list-typeset + +É +attachRequired¶"§attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called. + +This field is immutable.² +boolean +Û + fsGroupPolicyÉ"»Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate. + +This field is immutable.² +string +© +podInfoOnMount– "‡ If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. "csi.storage.k8s.io/pod.name": pod.Name "csi.storage.k8s.io/pod.namespace": pod.Namespace "csi.storage.k8s.io/pod.uid": string(pod.UID) "csi.storage.k8s.io/ephemeral": "true" if the volume is an ephemeral inline volume + defined by a CSIVolumeSource, otherwise "false" + +"csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver. + +This field is immutable.² +boolean +ÿ +requiresRepublishé"ÚRequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false. + +Note: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container. + +This is a beta feature and only available when the CSIServiceAccountToken feature is enabled.² +boolean +ì +Nio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation™"YExternalDocumentation allows referencing an external resource for extended documentation.² +objectÊ0 + +url ² +string + + description ² +string +ƒ +Jio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookConversion´""1metricName is the name of the metric in question.² +string +§ +metricSelector” +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"PmetricSelector is used to identify a specific time series within a given metric. +Ë +targetAverageValue´ +;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"utargetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue. +¹ + targetValue© +;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"jtargetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue. +Ï +*io.k8s.api.networking.v1.NetworkPolicyPort "6NetworkPolicyPort describes a port to allow traffic on² +objectÊÙ +™ +endPortint32"÷If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate "NetworkPolicyEndPort".² +integer +¶ +port­ +=#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"ëThe port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched. + +protocolu"hThe protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.² +string +ý +io.k8s.api.core.v1.Taintà"`The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint.škeyšeffect² +objectÊà +  +effect•"‡Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.² +string +D +key="0Required. The taint key to be applied to a node.² +string +­ + timeAddedŸ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"dTimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. +E +value<"/The taint value corresponding to the taint key.² +string +° +)io.k8s.api.discovery.v1.EndpointSliceList‚"6EndpointSliceList represents a list of endpoint slicesšitems² +objectÊÉ +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +f +items]"List of endpoint slices² +arrayº7 +5 +3#/definitions/io.k8s.api.discovery.v1.EndpointSlice +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +b +metadataV +;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard list metadata.úg +x-kubernetes-group-version-kindDB- group: discovery.k8s.io + kind: EndpointSliceList + version: v1 + + ++io.k8s.api.extensions.v1beta1.IngressStatusÑ"8IngressStatus describe the current state of the Ingress.² +objectʈ +… + loadBalanceru +3#/definitions/io.k8s.api.core.v1.LoadBalancerStatus">LoadBalancer contains the current status of the load-balancer. +˜ +5io.k8s.api.authorization.v1.SubjectAccessReviewStatusÞ"SubjectAccessReviewStatusšallowed² +objectʪ +c +allowedX"JAllowed is required. True if the action would be allowed, false otherwise.² +boolean +‚ +denied÷"èDenied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.² +boolean +Þ +evaluationErrorÊ"¼EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.² +string +] +reasonS"FReason is optional. It indicates why a request was allowed or denied.² +string +— +"io.k8s.api.core.v1.TCPSocketActionð"=TCPSocketAction describes an action based on opening a socketšport² +objectÊ› +Ç +port¾ +=#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"}Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. +O +hostG":Optional: Host name to connect to, defaults to the pod IP.² +string +³ +]io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionVersionÑ"//...` if `served` is true.² +string +é ++io.k8s.api.flowcontrol.v1beta1.GroupSubject¹"?GroupSubject holds detailed information for group-kind subject.šname² +objectÊâ +ß +nameÖ"Èname is the user group that matches, or "*" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.² +string +Ú ++io.k8s.api.policy.v1beta1.AllowedFlexVolumeª"LAllowedFlexVolume represents a single Flexvolume that is allowed to be used.šdriver² +objectÊE +C +driver9",driver is the name of the Flexvolume driver.² +string + +1io.k8s.api.core.v1.PersistentVolumeClaimConditionÙ"BPersistentVolumeClaimCondition contails details about state of pvcštypešstatus² +objectÊö +n + lastProbeTime] +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time""Last time we probed the condition. +‘ +lastTransitionTime{ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"@Last time the condition transitioned from one status to another. 
+X +messageM"@Human-readable message indicating details about last transition.² +string +é +reasonÞ"ÐUnique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.² +string + +status ² +string + +type ² +string +Ú + +io.k8s.api.core.v1.Toleration¸ +"The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .² +objectÊ™ +¸ +effect­"ŸEffect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.² +string +Ó +keyË"½Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.² +string +ö +operatoré"ÛOperator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.² +string +ç +tolerationSecondsÑint64"»TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.² +integer +¢ +value˜"ŠValue is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.² +string +± +,io.k8s.api.core.v1.ReplicationControllerSpec€ "KReplicationControllerSpec is the specification of a replication controller.² +objectʤ + +ƒ +minReadySecondsïint32"ÙMinimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)² +integer +  +replicas“int32"ýReplicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller² +integer +¸ +selector«"Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectorsª + ² +string² +object +½ +template° +0#/definitions/io.k8s.api.core.v1.PodTemplateSpec"ûTemplate is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template +Ê +io.k8s.api.core.v1.Sysctl¬"+Sysctl defines a kernel parameter to be setšnamešvalue² +objectÊb +0 +value'"Value of a property to set² +string +. 
+name&"Name of a property to set² +string +Ž ++io.k8s.api.core.v1.TopologySpreadConstraintÞ"XTopologySpreadConstraint specifies how to spread matching pods among the given topology.šmaxSkewš topologyKeyšwhenUnsatisfiable² +objectÊÉ +Ì +maxSkewÀint32"ªMaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.² +integer +§ + topologyKey—"‰TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.² +string +Ï +whenUnsatisfiable¹"«WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. +A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.² +string +û + labelSelectoré +@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"¤LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. +ˆ +.io.k8s.apimachinery.pkg.apis.meta.v1.MicroTimeV date-time">MicroTime is version of Time with microsecond level precision.² +string +„ +io.k8s.api.core.v1.KeyToPathã",Maps a string key to a path within a volume.škeyšpath² +objectÊ™ +' +key "The key to project.² +string +» +mode²int32"œOptional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.² +integer +¯ +path¦"˜The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
[... binary data omitted: continuation of a protobuf-serialized OpenAPI v2 (swagger) schema shipped with a vendored Kubernetes dependency. The blob holds machine-generated definitions and embedded field documentation for core API types (e.g. PersistentVolumeClaim, ComponentStatusList, IngressTLS, PodDisruptionBudgetList, Lease, TokenReview, CronJobSpec, EndpointSlice, ControllerRevision, DeleteOptions, WatchEvent, Event) and is not meaningfully reviewable as text ...]
It is machine-readable. This field cannot be empty for new Events.² +string +Á +deprecatedFirstTimestamp¤ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"ideprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. +¿ +deprecatedLastTimestamp£ +7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"hdeprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.úX +x-kubernetes-group-version-kind53- kind: Event + version: v1 + group: events.k8s.io + +² +,io.k8s.api.storage.v1.VolumeAttachmentSource"ÝVolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.² +objectÊ’ +À +inlineVolumeSpec« +5#/definitions/io.k8s.api.core.v1.PersistentVolumeSpec"ñinlineVolumeSpec contains all the information necessary to attach a persistent volume defined by a pod's inline VolumeSource. This field is populated only for the CSIMigration feature. It contains translated fields from a pod's inline VolumeSource to a PersistentVolumeSpec. This field is beta-level and is only honored by servers that enabled the CSIMigration feature. +M +persistentVolumeName5"(Name of the persistent volume to attach.² +string +œ +io.k8s.api.core.v1.EnvVarSourceø"']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. +¤ +resourceFieldRef +6#/definitions/io.k8s.api.core.v1.ResourceFieldSelector"ÔSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. +v + secretKeyReff +2#/definitions/io.k8s.api.core.v1.SecretKeySelector"0Selects a key of a secret in the pod's namespace +ã +io.k8s.api.core.v1.Namespace "MNamespace provides a scope for Names. Use of multiple namespaces is optional.² +objectÊ + +â +status× +0#/definitions/io.k8s.api.core.v1.NamespaceStatus"¢Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +¾ + +apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² +string +¹ +kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² +string +Ò +metadataÅ +=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +Ö +specÍ +.#/definitions/io.k8s.api.core.v1.NamespaceSpec"šSpec defines the behavior of the Namespace. 
\ No newline at end of file
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go
new file mode 100644
index 000000000..49647be4b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go
@@ -0,0 +1,248 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated for package kustomizationapi by go-bindata DO NOT EDIT. (@generated)
+// sources:
+// kustomizationapi/swagger.json
+package kustomizationapi
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, gz)
+	clErr := gz.Close()
+
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+	if clErr != nil {
+		return nil, clErr
+	}
+
+	return buf.Bytes(), nil
+}
+
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+// Name returns the file name
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+
+// Size returns the file size
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+
+// Mode returns the file mode
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+
+// ModTime returns the file modification time
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+
+// IsDir reports whether the file is a directory
+func (fi bindataFileInfo) IsDir() bool {
+	return fi.mode&os.ModeDir != 0
+}
+
+// Sys returns the underlying data source (always nil here)
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _kustomizationapiSwaggerJson =
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x56\xc1\x6e\xdb\x30\x0c\xbd\xe7\x2b\x04\x6d\xc7\xd8\x45\x6e\x43\x6e\xc3\x0e\x3b\x14\x05\x0a\x74\xb7\xa1\x07\xc6\xa1\x5d\xce\x8e\xa4\x51\xb4\xb1\x6c\xc8\xbf\x0f\xd6\x62\xd7\x4a\xec\x75\x0b\x1a\xac\x4b\x0f\x06\x0c\x99\x7c\x4f\xe4\x7b\x24\xfc\x63\xa6\x94\x5e\x63\x4e\x86\x84\xac\xf1\x7a\xa9\xda\x23\xa5\x34\xd9\xb4\x7c\xe7\x53\x70\x94\x82\x73\x3e\x6d\x16\xe9\x07\x6b\x72\x2a\x6e\xc0\xbd\xe7\xe2\x31\x52\x29\xed\xd8\x3a\x64\x21\x1c\x9e\x2a\xa5\x3f\xa2\x41\x06\xb1\x7c\x90\x10\x3e\xbe\x65\xcc\xf5\x52\xe9\x37\x57\x03\xfe\xab\x11\xda\x18\xa5\x87\xd8\xed\xdf\x76\xf3\xee\x1a\xb0\x5e\x07\x14\xa8\x6e\x87\x17\xca\xa1\xf2\xd8\x07\xc9\xd6\x61\x4b\x6b\x57\x5f\x30\x13\xdd\x9f\x7f\x4b\xca\x7a\x85\x6c\x50\xd0\x27\x05\xdb\xda\x25\x0d\xb2\x27\x6b\x92\x92\xcc\x5a\x2f\xd5\xe7\x9e\x3a\xaa\x23\xc4\xb6\x88\x65\xed\xc5\x6e\xe8\x3b\xa6\x59\x68\x54\x28\x84\x6c\x4f\x11\xa2\xf7\x58\x3a\xee\x65\x14\xb2\xa7\x6d\xa3\x9a\xc5\x0a\x05\x16\xc7\x45\xdf\xcf\x06\xa5\x8f\x69\x75\x87\x19\xa3\xbc\x0c\xa1\x1e\xab\xeb\xba\x1f\xe1\x77\x8a\x78\x61\x32\xc5\xa5\x08\x3c\x10\xe0\xf9\xd5\x9d\xd2\x6b\x52\x60\x03\x1b\xf4\x0e\xb2\x3f\x6f\xfe\x3c\x4e\x3e\x25\x6f\x85\x0f\xd0\x90\xe5\x53\x72\xaf\x9b\x5b\x20\xbe\xb3\x35\x67\x78\xba\x23\x63\x94\x0b\x71\x56\x2c\xfe\xf3\x9b\xeb\x7a\x7f\x19\x90\x5f\x50\xbd\xb9\x18\xbf\xd6\xc4\x18\x17\xa4\x3f\x6d\x1d\xde\xa0\x40\xc7\x74\x3f\x7f\xca\x8c\x59\xb7\xfb\xfa\x4a\x0e\x05\x26\xc1\xcd\xa1\xea\x7f\xa3\x7b\xbc\x5d\x07\x20\xbb\xf9\x98\x11\x81\x19\xb6\x71\x27\x23\x4d\x1d\x48\xf6\x90\x6c\x90\x0b\x4c\x4a\xdc\xb6\x29\x61\x26\x9e\xca\xf0\xc2\x20\x58\x84\x84\x90\x3d\xee\x75\x1f\x56\xc5\xd9\x9a\x31\xd8\x44\x2f\xb2\x13\xff\xf5\x30\xc6\xc3\x72\x86\x61\x9c\xd8\x83\x93\xc3\x55\x91\x20\x43\x75\xb4\x33\x27\x5c\x34\xb5\x8b\x7f\x6f\x90\x51\x1b\xe7\x54\x1d\xaf\xea\xf3\xd3\xa2\x69\xfe\x0d\xeb\xeb\xf8\x8f\x89\x0d\x78\xaa\xc1\x67\xed\xb3\xfb\x19\x00\x00\xff\xff\x2f\x39\x79\xd0\x6e\x0c\x00\x00") + +func kustomizationapiSwaggerJsonBytes() ([]byte, error) { + return bindataRead( + _kustomizationapiSwaggerJson, + "kustomizationapi/swagger.json", + ) +} + +func kustomizationapiSwaggerJson() (*asset, error) { + bytes, err := kustomizationapiSwaggerJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "kustomizationapi/swagger.json", size: 3182, mode: os.FileMode(420), modTime: time.Unix(1615228558, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "kustomizationapi/swagger.json": kustomizationapiSwaggerJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "kustomizationapi": &bintree{nil, map[string]*bintree{ + "swagger.json": &bintree{kustomizationapiSwaggerJson, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = os.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.json b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.json new file mode 100644 index 000000000..7441a5ee6 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.json @@ -0,0 +1,130 @@ +{ + "definitions": { + "io.k8s.api.apps.v1.ConfigMapArgs": { + "properties": { + "GeneratorArgs": { + "$ref": "#/definitions/io.k8s.api.apps.v1.GeneratorArgs" + } + }, + "additionalProperties": false, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "kustomize.config.k8s.io", + "kind": "ConfigMapArgs", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.apps.v1.SecretArgs": { + "properties": { + "GeneratorArgs": { + "$ref": "#/definitions/io.k8s.api.apps.v1.GeneratorArgs" + }, + "type": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "kustomize.config.k8s.io", + "kind": "SecretArgs", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.apps.v1.GeneratorArgs": { + "properties": { + "namespace": { + "type": "string" + }, + "name": { + "type": "string" + }, + "behavior": { + "type": "string" + }, + "KvPairSources": { + "$ref": "#/definitions/io.k8s.api.apps.v1.KvPairSources" + } + }, + "additionalProperties": false, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "kustomize.config.k8s.io", + "kind": "GeneratorArgs", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.apps.v1.Kustomization": { + "required": [ + "TypeMeta" + ], + "properties": { + "configMapGenerator": { + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ConfigMapArgs" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "secretGenerator": { + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.SecretArgs" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "additionalProperties": false, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "kustomize.config.k8s.io", + "kind": "Kustomization", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.apps.v1.KvPairSources": { + "properties": { + "literals": { + "items": { + "type": "string" + }, + "type": "array" + }, + "files": { + "items": { + "type": "string" + }, + "type": "array" + }, + "envs": { + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "kustomize.config.k8s.io", + "kind": "KvPairSources", + "version": "v1beta1" + } + ] + } + } +} \ No newline at end of file diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go new file mode 100644 index 000000000..eb5396d85 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go @@ -0,0 +1,776 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+package openapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"sync"
+
+	openapi_v2 "github.com/google/gnostic-models/openapiv2"
+	"google.golang.org/protobuf/proto"
+	"k8s.io/kube-openapi/pkg/validation/spec"
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi"
+	"sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+	k8syaml "sigs.k8s.io/yaml"
+)
+
+var (
+	// schemaLock is the lock for schema related globals.
+	//
+	// NOTE: This lock helps prevent the panics that the data race on these
+	// variables might cause, but it doesn't fully fix the issue described in
+	// https://github.com/kubernetes-sigs/kustomize/issues/4824.
+	// For instance, concurrently running goroutines that each call SetSchema()
+	// and/or GetSchemaVersion() might both receive nil errors (success), while
+	// the second one overwrites the global variable written by the first.
+	schemaLock sync.RWMutex //nolint:gochecknoglobals
+
+	// kubernetesOpenAPIVersion specifies which builtin kubernetes schema to use.
+	kubernetesOpenAPIVersion string //nolint:gochecknoglobals
+
+	// globalSchema contains global state information about the openapi
+	globalSchema openapiData //nolint:gochecknoglobals
+
+	// customSchema stores the custom OpenAPI schema if one is provided
+	customSchema []byte //nolint:gochecknoglobals
+)
+
+// openapiData contains the parsed openapi state. this is in a struct rather than
+// a list of vars so that it can be reset from tests.
+type openapiData struct {
+	// schema holds the OpenAPI schema data
+	schema spec.Schema
+
+	// schemaByResourceType is a map of Resource types to their schemas
+	schemaByResourceType map[yaml.TypeMeta]*spec.Schema
+
+	// namespaceabilityByResourceType stores whether a given Resource type
+	// is namespaceable or not
+	namespaceabilityByResourceType map[yaml.TypeMeta]bool
+
+	// noUseBuiltInSchema stores whether we want to prevent using the built-in
+	// Kubernetes schema as part of the global schema
+	noUseBuiltInSchema bool
+
+	// schemaInit stores whether we've parsed the schema already, so that we
+	// only reparse it when necessary (to speed up performance)
+	schemaInit bool
+}
+
+type format string
+
+const (
+	JsonOrYaml format = "jsonOrYaml"
+	Proto      format = "proto"
+)
+
+// precomputedIsNamespaceScoped precomputes IsNamespaceScoped for known types.
+// This avoids Schema creation, which is expensive.
+// The test output from TestIsNamespaceScopedPrecompute shows the expected map
+// in Go syntax, and can be copied and pasted from the failure if it changes.
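+// For example (illustrative sketch; IsNamespaceScoped is defined later in this file):
+//
+//	scoped, known := IsNamespaceScoped(yaml.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"})
+//	// scoped == true, known == true -- answered from this map without parsing any schema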
+var precomputedIsNamespaceScoped = map[yaml.TypeMeta]bool{
+	{APIVersion: "admissionregistration.k8s.io/v1", Kind: "MutatingWebhookConfiguration"}: false,
+	{APIVersion: "admissionregistration.k8s.io/v1", Kind: "ValidatingWebhookConfiguration"}: false,
+	{APIVersion: "admissionregistration.k8s.io/v1beta1", Kind: "MutatingWebhookConfiguration"}: false,
+	{APIVersion: "admissionregistration.k8s.io/v1beta1", Kind: "ValidatingWebhookConfiguration"}: false,
+	{APIVersion: "apiextensions.k8s.io/v1", Kind: "CustomResourceDefinition"}: false,
+	{APIVersion: "apiextensions.k8s.io/v1beta1", Kind: "CustomResourceDefinition"}: false,
+	{APIVersion: "apiregistration.k8s.io/v1", Kind: "APIService"}: false,
+	{APIVersion: "apiregistration.k8s.io/v1beta1", Kind: "APIService"}: false,
+	{APIVersion: "apps/v1", Kind: "ControllerRevision"}: true,
+	{APIVersion: "apps/v1", Kind: "DaemonSet"}: true,
+	{APIVersion: "apps/v1", Kind: "Deployment"}: true,
+	{APIVersion: "apps/v1", Kind: "ReplicaSet"}: true,
+	{APIVersion: "apps/v1", Kind: "StatefulSet"}: true,
+	{APIVersion: "autoscaling/v1", Kind: "HorizontalPodAutoscaler"}: true,
+	{APIVersion: "autoscaling/v1", Kind: "Scale"}: true,
+	{APIVersion: "autoscaling/v2beta1", Kind: "HorizontalPodAutoscaler"}: true,
+	{APIVersion: "autoscaling/v2beta2", Kind: "HorizontalPodAutoscaler"}: true,
+	{APIVersion: "batch/v1", Kind: "CronJob"}: true,
+	{APIVersion: "batch/v1", Kind: "Job"}: true,
+	{APIVersion: "batch/v1beta1", Kind: "CronJob"}: true,
+	{APIVersion: "certificates.k8s.io/v1", Kind: "CertificateSigningRequest"}: false,
+	{APIVersion: "certificates.k8s.io/v1beta1", Kind: "CertificateSigningRequest"}: false,
+	{APIVersion: "coordination.k8s.io/v1", Kind: "Lease"}: true,
+	{APIVersion: "coordination.k8s.io/v1beta1", Kind: "Lease"}: true,
+	{APIVersion: "discovery.k8s.io/v1", Kind: "EndpointSlice"}: true,
+	{APIVersion: "discovery.k8s.io/v1beta1", Kind: "EndpointSlice"}: true,
+	{APIVersion: "events.k8s.io/v1", Kind: "Event"}: true,
+	{APIVersion: "events.k8s.io/v1beta1", Kind: "Event"}: true,
+	{APIVersion: "extensions/v1beta1", Kind: "Ingress"}: true,
+	{APIVersion: "flowcontrol.apiserver.k8s.io/v1beta1", Kind: "FlowSchema"}: false,
+	{APIVersion: "flowcontrol.apiserver.k8s.io/v1beta1", Kind: "PriorityLevelConfiguration"}: false,
+	{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}: true,
+	{APIVersion: "networking.k8s.io/v1", Kind: "IngressClass"}: false,
+	{APIVersion: "networking.k8s.io/v1", Kind: "NetworkPolicy"}: true,
+	{APIVersion: "networking.k8s.io/v1beta1", Kind: "Ingress"}: true,
+	{APIVersion: "networking.k8s.io/v1beta1", Kind: "IngressClass"}: false,
+	{APIVersion: "node.k8s.io/v1", Kind: "RuntimeClass"}: false,
+	{APIVersion: "node.k8s.io/v1alpha1", Kind: "RuntimeClass"}: false,
+	{APIVersion: "node.k8s.io/v1beta1", Kind: "RuntimeClass"}: false,
+	{APIVersion: "policy/v1", Kind: "PodDisruptionBudget"}: true,
+	{APIVersion: "policy/v1beta1", Kind: "PodDisruptionBudget"}: true,
+	{APIVersion: "policy/v1beta1", Kind: "PodSecurityPolicy"}: false, // remove after openapi upgrades to v1.25.
+ {APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"}: false, + {APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRoleBinding"}: false, + {APIVersion: "rbac.authorization.k8s.io/v1", Kind: "Role"}: true, + {APIVersion: "rbac.authorization.k8s.io/v1", Kind: "RoleBinding"}: true, + {APIVersion: "rbac.authorization.k8s.io/v1beta1", Kind: "ClusterRole"}: false, + {APIVersion: "rbac.authorization.k8s.io/v1beta1", Kind: "ClusterRoleBinding"}: false, + {APIVersion: "rbac.authorization.k8s.io/v1beta1", Kind: "Role"}: true, + {APIVersion: "rbac.authorization.k8s.io/v1beta1", Kind: "RoleBinding"}: true, + {APIVersion: "scheduling.k8s.io/v1", Kind: "PriorityClass"}: false, + {APIVersion: "scheduling.k8s.io/v1beta1", Kind: "PriorityClass"}: false, + {APIVersion: "storage.k8s.io/v1", Kind: "CSIDriver"}: false, + {APIVersion: "storage.k8s.io/v1", Kind: "CSINode"}: false, + {APIVersion: "storage.k8s.io/v1", Kind: "StorageClass"}: false, + {APIVersion: "storage.k8s.io/v1", Kind: "VolumeAttachment"}: false, + {APIVersion: "storage.k8s.io/v1beta1", Kind: "CSIDriver"}: false, + {APIVersion: "storage.k8s.io/v1beta1", Kind: "CSINode"}: false, + {APIVersion: "storage.k8s.io/v1beta1", Kind: "CSIStorageCapacity"}: true, + {APIVersion: "storage.k8s.io/v1beta1", Kind: "StorageClass"}: false, + {APIVersion: "storage.k8s.io/v1beta1", Kind: "VolumeAttachment"}: false, + {APIVersion: "v1", Kind: "ComponentStatus"}: false, + {APIVersion: "v1", Kind: "ConfigMap"}: true, + {APIVersion: "v1", Kind: "Endpoints"}: true, + {APIVersion: "v1", Kind: "Event"}: true, + {APIVersion: "v1", Kind: "LimitRange"}: true, + {APIVersion: "v1", Kind: "Namespace"}: false, + {APIVersion: "v1", Kind: "Node"}: false, + {APIVersion: "v1", Kind: "NodeProxyOptions"}: false, + {APIVersion: "v1", Kind: "PersistentVolume"}: false, + {APIVersion: "v1", Kind: "PersistentVolumeClaim"}: true, + {APIVersion: "v1", Kind: "Pod"}: true, + {APIVersion: "v1", Kind: "PodAttachOptions"}: true, + {APIVersion: "v1", Kind: "PodExecOptions"}: true, + {APIVersion: "v1", Kind: "PodPortForwardOptions"}: true, + {APIVersion: "v1", Kind: "PodProxyOptions"}: true, + {APIVersion: "v1", Kind: "PodTemplate"}: true, + {APIVersion: "v1", Kind: "ReplicationController"}: true, + {APIVersion: "v1", Kind: "ResourceQuota"}: true, + {APIVersion: "v1", Kind: "Secret"}: true, + {APIVersion: "v1", Kind: "Service"}: true, + {APIVersion: "v1", Kind: "ServiceAccount"}: true, + {APIVersion: "v1", Kind: "ServiceProxyOptions"}: true, +} + +// ResourceSchema wraps the OpenAPI Schema. +type ResourceSchema struct { + // Schema is the OpenAPI schema for a Resource or field + Schema *spec.Schema +} + +// IsEmpty returns true if the ResourceSchema is empty +func (rs *ResourceSchema) IsMissingOrNull() bool { + if rs == nil || rs.Schema == nil { + return true + } + return reflect.DeepEqual(*rs.Schema, spec.Schema{}) +} + +// SchemaForResourceType returns the Schema for the given Resource +// TODO(pwittrock): create a version of this function that will return a schema +// which can be used for duck-typed Resources -- e.g. contains common fields such +// as metadata, replicas and spec.template.spec +func SchemaForResourceType(t yaml.TypeMeta) *ResourceSchema { + initSchema() + rs, found := globalSchema.schemaByResourceType[t] + if !found { + return nil + } + return &ResourceSchema{Schema: rs} +} + +// SupplementaryOpenAPIFieldName is the conventional field name (JSON/YAML) containing +// supplementary OpenAPI definitions. 
+const SupplementaryOpenAPIFieldName = "openAPI"
+
+const Definitions = "definitions"
+
+// SchemaFromFile reads the file at path and parses the OpenAPI definitions
+// from the field "openAPI".
+func SchemaFromFile(path string) (*spec.Schema, error) {
+	object, err := parseOpenAPI(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return schemaUsingField(object, SupplementaryOpenAPIFieldName)
+}
+
+// DefinitionRefs returns the list of openAPI definition references present in the
+// input openAPIPath
+func DefinitionRefs(openAPIPath string) ([]string, error) {
+	object, err := parseOpenAPI(openAPIPath)
+	if err != nil {
+		return nil, err
+	}
+	return definitionRefsFromRNode(object)
+}
+
+// definitionRefsFromRNode returns the list of openAPI definitions keys from input
+// yaml RNode
+func definitionRefsFromRNode(object *yaml.RNode) ([]string, error) {
+	definitions, err := object.Pipe(yaml.Lookup(SupplementaryOpenAPIFieldName, Definitions))
+	if err != nil {
+		return nil, err
+	}
+	if definitions == nil {
+		return nil, nil
+	}
+	return definitions.Fields()
+}
+
+// parseOpenAPI reads openAPIPath yaml and converts it to RNode
+func parseOpenAPI(openAPIPath string) (*yaml.RNode, error) {
+	b, err := os.ReadFile(openAPIPath)
+	if err != nil {
+		return nil, err
+	}
+
+	object, err := yaml.Parse(string(b))
+	if err != nil {
+		return nil, errors.Errorf("invalid file %q: %v", openAPIPath, err)
+	}
+	return object, nil
+}
+
+// schemaUsingField parses the OpenAPI definitions from the specified field.
+// If field is the empty string, use the whole document as OpenAPI.
+func schemaUsingField(object *yaml.RNode, field string) (*spec.Schema, error) {
+	if field != "" {
+		// get the field containing the openAPI
+		m := object.Field(field)
+		if m.IsNilOrEmpty() {
+			// doesn't contain openAPI definitions
+			return nil, nil
+		}
+		object = m.Value
+	}
+
+	oAPI, err := object.String()
+	if err != nil {
+		return nil, err
+	}
+
+	// convert the yaml openAPI to a JSON string by unmarshalling it to an
+	// interface{} and then marshalling it to a string
+	var o interface{}
+	err = yaml.Unmarshal([]byte(oAPI), &o)
+	if err != nil {
+		return nil, err
+	}
+	j, err := json.Marshal(o)
+	if err != nil {
+		return nil, err
+	}
+
+	var sc spec.Schema
+	err = sc.UnmarshalJSON(j)
+	if err != nil {
+		return nil, err
+	}
+
+	return &sc, nil
+}
+
+// AddSchema parses s, and adds definitions from s to the global schema.
+func AddSchema(s []byte) error {
+	return parse(s, JsonOrYaml)
+}
+
+// ResetOpenAPI resets the openapi data to empty
+func ResetOpenAPI() {
+	schemaLock.Lock()
+	defer schemaLock.Unlock()
+
+	globalSchema = openapiData{}
+	customSchema = nil
+	kubernetesOpenAPIVersion = ""
+}
+
+// AddDefinitions adds the definitions to the global schema.
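+// Definitions carrying the x-kubernetes-group-version-kind extension are also
+// indexed by GVK; the JSON shape consumed here looks like (sketch):
+//
+//	"io.k8s.api.apps.v1.Deployment": {
+//	  "x-kubernetes-group-version-kind": [
+//	    {"group": "apps", "kind": "Deployment", "version": "v1"}
+//	  ]
+//	}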
+func AddDefinitions(definitions spec.Definitions) {
+	// initialize values if they have not yet been set
+	if globalSchema.schemaByResourceType == nil {
+		globalSchema.schemaByResourceType = map[yaml.TypeMeta]*spec.Schema{}
+	}
+	if globalSchema.schema.Definitions == nil {
+		globalSchema.schema.Definitions = spec.Definitions{}
+	}
+
+	// index the schema definitions so we can look them up for Resources
+	for k := range definitions {
+		// index by GVK; if no GVK is found then it is the schema for a subfield
+		// of a Resource
+		d := definitions[k]
+
+		// copy definitions to the schema
+		globalSchema.schema.Definitions[k] = d
+		gvk, found := d.VendorExtensible.Extensions[kubernetesGVKExtensionKey]
+		if !found {
+			continue
+		}
+		// cast the extension to a []interface{} of GVK entries
+		exts, ok := gvk.([]interface{})
+		if !ok {
+			continue
+		}
+
+		for i := range exts {
+			typeMeta, ok := toTypeMeta(exts[i])
+			if !ok {
+				continue
+			}
+			globalSchema.schemaByResourceType[typeMeta] = &d
+		}
+	}
+}
+
+func toTypeMeta(ext interface{}) (yaml.TypeMeta, bool) {
+	m, ok := ext.(map[string]interface{})
+	if !ok {
+		return yaml.TypeMeta{}, false
+	}
+
+	apiVersion := m[versionKey].(string)
+	if g, ok := m[groupKey].(string); ok && g != "" {
+		apiVersion = g + "/" + apiVersion
+	}
+	return yaml.TypeMeta{Kind: m[kindKey].(string), APIVersion: apiVersion}, true
+}
+
+// Resolve resolves the reference against the global schema
+func Resolve(ref *spec.Ref, schema *spec.Schema) (*spec.Schema, error) {
+	return resolve(schema, ref)
+}
+
+// Schema returns the global schema
+func Schema() *spec.Schema {
+	return rootSchema()
+}
+
+// GetSchema parses s into a ResourceSchema, resolving references within the
+// global schema.
+func GetSchema(s string, schema *spec.Schema) (*ResourceSchema, error) {
+	var sc spec.Schema
+	if err := sc.UnmarshalJSON([]byte(s)); err != nil {
+		return nil, errors.Wrap(err)
+	}
+	if sc.Ref.String() != "" {
+		r, err := Resolve(&sc.Ref, schema)
+		if err != nil {
+			return nil, errors.Wrap(err)
+		}
+		sc = *r
+	}
+
+	return &ResourceSchema{Schema: &sc}, nil
+}
+
+// IsNamespaceScoped determines whether a resource is namespace- or
+// cluster-scoped by looking at the information in the openapi schema.
+// The second return value tells whether the provided type could be found
+// in the openapi schema. If the value is false here, the scope of the
+// resource is not known. If the type is found, the first return value will
+// be true if the resource is namespace-scoped, and false if the type is
+// cluster-scoped.
+func IsNamespaceScoped(typeMeta yaml.TypeMeta) (bool, bool) {
+	if res, f := precomputedIsNamespaceScoped[typeMeta]; f {
+		return res, true
+	}
+	return isNamespaceScopedFromSchema(typeMeta)
+}
+
+func isNamespaceScopedFromSchema(typeMeta yaml.TypeMeta) (bool, bool) {
+	initSchema()
+	isNamespaceScoped, found := globalSchema.namespaceabilityByResourceType[typeMeta]
+	return isNamespaceScoped, found
+}
+
+// IsCertainlyClusterScoped returns true for Node, Namespace, etc. and
+// false for Pod, Deployment, etc. and kinds that aren't recognized in the
+// openapi data. See:
+// https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces
+func IsCertainlyClusterScoped(typeMeta yaml.TypeMeta) bool {
+	nsScoped, found := IsNamespaceScoped(typeMeta)
+	return found && !nsScoped
+}
+
+// SuppressBuiltInSchemaUse can be called to prevent using the built-in Kubernetes
+// schema as part of the global schema.
+// Must be called before the schema is used.
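+// Sketch (customBytes is a hypothetical user-supplied OpenAPI document):
+//
+//	SuppressBuiltInSchemaUse()
+//	err := AddSchema(customBytes) // only the supplied definitions are indexed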
+func SuppressBuiltInSchemaUse() {
+	globalSchema.noUseBuiltInSchema = true
+}
+
+// Elements returns the Schema for the elements of an array.
+func (rs *ResourceSchema) Elements() *ResourceSchema {
+	// load the schema from swagger files
+	initSchema()
+
+	if rs == nil || rs.Schema == nil || rs.Schema.Items == nil {
+		// no schema for the items
+		return nil
+	}
+	if len(rs.Schema.Type) != 1 || rs.Schema.Type[0] != "array" {
+		// either not an array, or array has multiple types
+		return nil
+	}
+	s := *rs.Schema.Items.Schema
+	for s.Ref.String() != "" {
+		sc, e := Resolve(&s.Ref, Schema())
+		if e != nil {
+			return nil
+		}
+		s = *sc
+	}
+	return &ResourceSchema{Schema: &s}
+}
+
+const Elements = "[]"
+
+// Lookup calls either Field or Elements for each item in the path.
+// If the path item is "[]", then Elements is called, otherwise
+// Field is called.
+// If any Field or Elements call returns nil, then Lookup returns
+// nil immediately.
+func (rs *ResourceSchema) Lookup(path ...string) *ResourceSchema {
+	s := rs
+	for _, p := range path {
+		if s == nil {
+			break
+		}
+		if p == Elements {
+			s = s.Elements()
+			continue
+		}
+		s = s.Field(p)
+	}
+	return s
+}
+
+// Field returns the Schema for a field.
+func (rs *ResourceSchema) Field(field string) *ResourceSchema {
+	// load the schema from swagger files
+	initSchema()
+
+	// locate the Schema
+	s, found := rs.Schema.Properties[field]
+	switch {
+	case found:
+		// no-op, continue with s as the schema
+	case rs.Schema.AdditionalProperties != nil && rs.Schema.AdditionalProperties.Schema != nil:
+		// map field type -- use Schema of the value
+		// (the key doesn't matter, they all have the same value type)
+		s = *rs.Schema.AdditionalProperties.Schema
+	default:
+		// no Schema found from either swagger files or line comments
+		return nil
+	}
+
+	// resolve the reference to the Schema if the Schema has one
+	for s.Ref.String() != "" {
+		sc, e := Resolve(&s.Ref, Schema())
+		if e != nil {
+			return nil
+		}
+		s = *sc
+	}
+
+	// return the merged Schema
+	return &ResourceSchema{Schema: &s}
+}
+
+// PatchStrategyAndKeyList returns the patch strategy and complete merge key list
+func (rs *ResourceSchema) PatchStrategyAndKeyList() (string, []string) {
+	ps, found := rs.Schema.Extensions[kubernetesPatchStrategyExtensionKey]
+	if !found {
+		// empty patch strategy
+		return "", []string{}
+	}
+	mkList, found := rs.Schema.Extensions[kubernetesMergeKeyMapList]
+	if found {
+		// mkList is []interface{}, convert to []string
+		mkListStr := make([]string, len(mkList.([]interface{})))
+		for i, v := range mkList.([]interface{}) {
+			mkListStr[i] = v.(string)
+		}
+		return ps.(string), mkListStr
+	}
+	mk, found := rs.Schema.Extensions[kubernetesMergeKeyExtensionKey]
+	if !found {
+		// no mergeKey -- may be a primitive associative list (e.g. finalizers)
+		return ps.(string), []string{}
+	}
+	return ps.(string), []string{mk.(string)}
+}
+
+// PatchStrategyAndKey returns the patch strategy and merge key extensions
+func (rs *ResourceSchema) PatchStrategyAndKey() (string, string) {
+	ps, found := rs.Schema.Extensions[kubernetesPatchStrategyExtensionKey]
+	if !found {
+		// empty patch strategy
+		return "", ""
+	}
+
+	mk, found := rs.Schema.Extensions[kubernetesMergeKeyExtensionKey]
+	if !found {
+		// no mergeKey -- may be a primitive associative list (e.g. finalizers)
+		mk = ""
+	}
+	return ps.(string), mk.(string)
+}
+
+const (
+	// kubernetesOpenAPIDefaultVersion is the latest version number of the statically compiled in
+	// OpenAPI schema for kubernetes built-in types
+	kubernetesOpenAPIDefaultVersion = kubernetesapi.DefaultOpenAPI
+
+	// kustomizationAPIAssetName is the name of the asset containing the statically compiled in
+	// OpenAPI definitions for Kustomization built-in types
+	kustomizationAPIAssetName = "kustomizationapi/swagger.json"
+
+	// kubernetesGVKExtensionKey is the key to lookup the kubernetes group version kind extension
+	// -- the extension is an array of objects containing a gvk
+	kubernetesGVKExtensionKey = "x-kubernetes-group-version-kind"
+
+	// kubernetesMergeKeyExtensionKey is the key to lookup the kubernetes merge key extension
+	// -- the extension is a string
+	kubernetesMergeKeyExtensionKey = "x-kubernetes-patch-merge-key"
+
+	// kubernetesPatchStrategyExtensionKey is the key to lookup the kubernetes patch strategy
+	// extension -- the extension is a string
+	kubernetesPatchStrategyExtensionKey = "x-kubernetes-patch-strategy"
+
+	// kubernetesMergeKeyMapList is the list of merge keys when there needs to be multiple
+	// -- the extension is an array of strings
+	kubernetesMergeKeyMapList = "x-kubernetes-list-map-keys"
+
+	// groupKey is the key to lookup the group from the GVK extension
+	groupKey = "group"
+	// versionKey is the key to lookup the version from the GVK extension
+	versionKey = "version"
+	// kindKey is the key to lookup the kind from the GVK extension
+	kindKey = "kind"
+)
+
+// SetSchema sets the kubernetes OpenAPI schema version to use
+func SetSchema(openAPIField map[string]string, schema []byte, reset bool) error {
+	schemaLock.Lock()
+	defer schemaLock.Unlock()
+
+	// this should only be set once
+	schemaIsSet := (kubernetesOpenAPIVersion != "") || customSchema != nil
+	if schemaIsSet && !reset {
+		return nil
+	}
+
+	version, versionProvided := openAPIField["version"]
+
+	// use custom schema
+	if schema != nil {
+		if versionProvided {
+			return fmt.Errorf("builtin version and custom schema provided, cannot use both")
+		}
+		customSchema = schema
+		kubernetesOpenAPIVersion = "custom"
+		// if the schema is changed, initSchema should parse the new schema
+		globalSchema.schemaInit = false
+		return nil
+	}
+
+	// use builtin version
+	kubernetesOpenAPIVersion = version
+	if kubernetesOpenAPIVersion == "" {
+		return nil
+	}
+	if _, ok := kubernetesapi.OpenAPIMustAsset[kubernetesOpenAPIVersion]; !ok {
+		return fmt.Errorf("the specified OpenAPI version is not built in")
+	}
+
+	customSchema = nil
+	// if the schema is changed, initSchema should parse the new schema
+	globalSchema.schemaInit = false
+	return nil
+}
+
+// GetSchemaVersion returns what kubernetes OpenAPI version is being used
+func GetSchemaVersion() string {
+	schemaLock.RLock()
+	defer schemaLock.RUnlock()
+
+	switch {
+	case kubernetesOpenAPIVersion == "" && customSchema == nil:
+		return kubernetesOpenAPIDefaultVersion
+	case customSchema != nil:
+		return "using custom schema from file provided"
+	default:
+		return kubernetesOpenAPIVersion
+	}
+}
+
+// initSchema parses the json schema
+func initSchema() {
+	schemaLock.Lock()
+	defer schemaLock.Unlock()
+
+	if globalSchema.schemaInit {
+		return
+	}
+	globalSchema.schemaInit = true
+
+	// TODO(natasha41575): Accept proto-formatted schema files
+	if customSchema != nil {
+		err := parse(customSchema, JsonOrYaml)
+		if err != nil {
+			panic(fmt.Errorf("invalid schema file: %w", err))
+		}
+	} else {
+		if kubernetesOpenAPIVersion == "" {
+			parseBuiltinSchema(kubernetesOpenAPIDefaultVersion)
+		} else {
+			parseBuiltinSchema(kubernetesOpenAPIVersion)
+		}
+	}
+
+	if err := parse(kustomizationapi.MustAsset(kustomizationAPIAssetName), JsonOrYaml); err != nil {
+		// this should never happen
+		panic(err)
+	}
+}
+
+// parseBuiltinSchema calls parse to parse the json or proto schemas
+func parseBuiltinSchema(version string) {
+	if globalSchema.noUseBuiltInSchema {
+		// don't parse the built in schema
+		return
+	}
+	// parse the swagger; this should never fail
+	assetName := filepath.Join(
+		"kubernetesapi",
+		strings.ReplaceAll(version, ".", "_"),
+		"swagger.pb")
+
+	if err := parse(kubernetesapi.OpenAPIMustAsset[version](assetName), Proto); err != nil {
+		// this should never happen
+		panic(err)
+	}
+}
+
+// parse parses and indexes a single json or proto schema
+func parse(b []byte, format format) error {
+	var swagger spec.Swagger
+	switch {
+	case format == Proto:
+		doc := &openapi_v2.Document{}
+		// We parse protobuf and get an openapi_v2.Document here.
+		if err := proto.Unmarshal(b, doc); err != nil {
+			return fmt.Errorf("openapi proto unmarshalling failed: %w", err)
+		}
+		// convert the openapi_v2.Document back to Swagger
+		_, err := swagger.FromGnostic(doc)
+		if err != nil {
+			return errors.Wrap(err)
+		}
+
+	case format == JsonOrYaml:
+		if len(b) > 0 && b[0] != byte('{') {
+			var err error
+			b, err = k8syaml.YAMLToJSON(b)
+			if err != nil {
+				return errors.Wrap(err)
+			}
+		}
+		if err := swagger.UnmarshalJSON(b); err != nil {
+			return errors.Wrap(err)
+		}
+	}
+
+	AddDefinitions(swagger.Definitions)
+	findNamespaceability(swagger.Paths)
+	return nil
+}
+
+// findNamespaceability looks at the api paths for the resource to determine
+// if it is cluster-scoped or namespace-scoped. The gvk of the resource
+// for each path is found by looking at the x-kubernetes-group-version-kind
+// extension. If a path exists for the resource that contains a namespace path
+// parameter, the resource is namespace-scoped.
+func findNamespaceability(paths *spec.Paths) {
+	if globalSchema.namespaceabilityByResourceType == nil {
+		globalSchema.namespaceabilityByResourceType = make(map[yaml.TypeMeta]bool)
+	}
+
+	if paths == nil {
+		return
+	}
+
+	for path, pathInfo := range paths.Paths {
+		if pathInfo.Get == nil {
+			continue
+		}
+		gvk, found := pathInfo.Get.VendorExtensible.Extensions[kubernetesGVKExtensionKey]
+		if !found {
+			continue
+		}
+		typeMeta, found := toTypeMeta(gvk)
+		if !found {
+			continue
+		}
+
+		if strings.Contains(path, "namespaces/{namespace}") {
+			// if we find a namespace path parameter, we just update the map
+			// directly
+			globalSchema.namespaceabilityByResourceType[typeMeta] = true
+		} else if _, found := globalSchema.namespaceabilityByResourceType[typeMeta]; !found {
+			// if the resource doesn't have the namespace path parameter, we
+			// only add it to the map if it doesn't already exist.
+			globalSchema.namespaceabilityByResourceType[typeMeta] = false
+		}
+	}
+}
+
+func resolve(root interface{}, ref *spec.Ref) (*spec.Schema, error) {
+	if s, ok := root.(*spec.Schema); ok && s == nil {
+		return nil, nil
+	}
+	res, _, err := ref.GetPointer().Get(root)
+	if err != nil {
+		return nil, errors.Wrap(err)
+	}
+	switch sch := res.(type) {
+	case spec.Schema:
+		return &sch, nil
+	case *spec.Schema:
+		return sch, nil
+	case map[string]interface{}:
+		b, err := json.Marshal(sch)
+		if err != nil {
+			return nil, err
+		}
+		newSch := new(spec.Schema)
+		if err = json.Unmarshal(b, newSch); err != nil {
+			return nil, err
+		}
+		return newSch, nil
+	default:
+		return nil, errors.Wrap(fmt.Errorf("unknown type for the resolved reference"))
+	}
+}
+
+func rootSchema() *spec.Schema {
+	initSchema()
+	return &globalSchema.schema
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/order/syncorder.go b/vendor/sigs.k8s.io/kustomize/kyaml/order/syncorder.go
new file mode 100644
index 000000000..57bc86503
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/order/syncorder.go
@@ -0,0 +1,121 @@
+// Copyright 2021 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package order
+
+import (
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// SyncOrder recursively sorts the map node keys in the 'to' node to match the
+// order of the map node keys in the 'from' node at the same tree depth;
+// additional keys are moved to the end.
+// Field order might be altered due to round-tripping in arbitrary functions.
+// This functionality helps to retain the original order of fields to avoid
+// unnecessary diffs.
+func SyncOrder(from, to *yaml.RNode) error {
+	// from node should not be modified, it should be just used as a reference
+	fromCopy := from.Copy()
+	if err := syncOrder(fromCopy, to); err != nil {
+		return errors.Errorf("failed to sync field order: %q", err.Error())
+	}
+	rearrangeHeadCommentOfSeqNode(to.YNode())
+	return nil
+}
+
+func syncOrder(from, to *yaml.RNode) error {
+	if from.IsNilOrEmpty() || to.IsNilOrEmpty() {
+		return nil
+	}
+	switch from.YNode().Kind {
+	case yaml.DocumentNode:
+		// Traverse the child of the documents
+		return syncOrder(yaml.NewRNode(from.YNode()), yaml.NewRNode(to.YNode()))
+	case yaml.MappingNode:
+		return VisitFields(from, to, func(fNode, tNode *yaml.MapNode) error {
+			// Traverse each field value
+			if fNode == nil || tNode == nil {
+				return nil
+			}
+			return syncOrder(fNode.Value, tNode.Value)
+		})
+	case yaml.SequenceNode:
+		return VisitElements(from, to, syncOrder) // Traverse each list element
+	}
+	return nil
+}
+
+// VisitElements calls fn for each element in a SequenceNode.
+// Returns an error for non-SequenceNodes.
+func VisitElements(from, to *yaml.RNode, fn func(fNode, tNode *yaml.RNode) error) error {
+	fElements, err := from.Elements()
+	if err != nil {
+		return errors.Wrap(err)
+	}
+
+	tElements, err := to.Elements()
+	if err != nil {
+		return errors.Wrap(err)
+	}
+	for i := range fElements {
+		if i >= len(tElements) {
+			return nil
+		}
+		if err := fn(fElements[i], tElements[i]); err != nil {
+			return errors.Wrap(err)
+		}
+	}
+	return nil
+}
+
+// VisitFields calls fn for each field in the RNode.
+// Silently returns nil (without visiting) for non-MappingNodes.
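+// Sketch ('from' and 'to' stand for two hypothetical mapping RNodes parsed elsewhere):
+//
+//	err := VisitFields(from, to, func(f, t *yaml.MapNode) error {
+//		return nil // called once per field pair, in 'from' key order
+//	})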
+func VisitFields(from, to *yaml.RNode, fn func(fNode, tNode *yaml.MapNode) error) error {
+	srcFieldNames, err := from.Fields()
+	if err != nil {
+		return nil
+	}
+	yaml.SyncMapNodesOrder(from, to)
+	// visit each field
+	for _, fieldName := range srcFieldNames {
+		if err := fn(from.Field(fieldName), to.Field(fieldName)); err != nil {
+			return errors.Wrap(err)
+		}
+	}
+	return nil
+}
+
+// rearrangeHeadCommentOfSeqNode addresses a rare corner case due to moving a
+// map node in a sequence node with a head comment to the top
+func rearrangeHeadCommentOfSeqNode(node *yaml.Node) {
+	if node == nil {
+		return
+	}
+	switch node.Kind {
+	case yaml.DocumentNode:
+		for _, node := range node.Content {
+			rearrangeHeadCommentOfSeqNode(node)
+		}
+
+	case yaml.MappingNode:
+		for _, node := range node.Content {
+			rearrangeHeadCommentOfSeqNode(node)
+		}
+
+	case yaml.SequenceNode:
+		for _, node := range node.Content {
+			// for each child mapping node, transfer the head comment of its
+			// first child scalar node to the head comment of itself
+			if len(node.Content) > 0 && node.Content[0].Kind == yaml.ScalarNode {
+				if node.HeadComment == "" {
+					node.HeadComment = node.Content[0].HeadComment
+					continue
+				}
+
+				if node.Content[0].HeadComment != "" {
+					node.HeadComment += "\n" + node.Content[0].HeadComment
+					node.Content[0].HeadComment = ""
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/resid/gvk.go b/vendor/sigs.k8s.io/kustomize/kyaml/resid/gvk.go
new file mode 100644
index 000000000..0289cadd7
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/resid/gvk.go
@@ -0,0 +1,255 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package resid
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Gvk identifies a Kubernetes API type.
+// https://git.k8s.io/design-proposals-archive/api-machinery/api-group.md
+type Gvk struct {
+	Group   string `json:"group,omitempty" yaml:"group,omitempty"`
+	Version string `json:"version,omitempty" yaml:"version,omitempty"`
+	Kind    string `json:"kind,omitempty" yaml:"kind,omitempty"`
+	// isClusterScoped is true if the object is known, per the openapi
+	// data in use, to be cluster scoped, and false otherwise.
+	isClusterScoped bool
+}
+
+func NewGvk(g, v, k string) Gvk {
+	result := Gvk{Group: g, Version: v, Kind: k}
+	result.isClusterScoped =
+		openapi.IsCertainlyClusterScoped(result.AsTypeMeta())
+	return result
+}
+
+func GvkFromNode(r *yaml.RNode) Gvk {
+	g, v := ParseGroupVersion(r.GetApiVersion())
+	return NewGvk(g, v, r.GetKind())
+}
+
+// FromKind makes a Gvk with only the kind specified.
+func FromKind(k string) Gvk {
+	return NewGvk("", "", k)
+}
+
+// ParseGroupVersion parses a KRM metadata apiVersion field.
+func ParseGroupVersion(apiVersion string) (group, version string) {
+	if i := strings.Index(apiVersion, "/"); i > -1 {
+		return apiVersion[:i], apiVersion[i+1:]
+	}
+	return "", apiVersion
+}
+
+// GvkFromString makes a Gvk from the output of Gvk.String().
+func GvkFromString(s string) Gvk {
+	values := strings.Split(s, fieldSep)
+	if len(values) < 3 {
+		// ...then the string didn't come from Gvk.String().
+		return Gvk{
+			Group:   noGroup,
+			Version: noVersion,
+			Kind:    noKind,
+		}
+	}
+	k := values[0]
+	if k == noKind {
+		k = ""
+	}
+	v := values[1]
+	if v == noVersion {
+		v = ""
+	}
+	g := strings.Join(values[2:], fieldSep)
+	if g == noGroup {
+		g = ""
+	}
+	return NewGvk(g, v, k)
+}
+
+// Values that are brief but meaningful in logs.
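+// e.g. a round trip through these placeholders (sketch):
+//
+//	g := NewGvk("apps", "v1", "Deployment")
+//	s := g.String()        // "Deployment.v1.apps"
+//	g2 := GvkFromString(s) // recovers g; placeholder fields map back to ""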
+const (
+	noGroup   = "[noGrp]"
+	noVersion = "[noVer]"
+	noKind    = "[noKind]"
+	fieldSep  = "."
+)
+
+// String returns a string representation of the GVK.
+func (x Gvk) String() string {
+	g := x.Group
+	if g == "" {
+		g = noGroup
+	}
+	v := x.Version
+	if v == "" {
+		v = noVersion
+	}
+	k := x.Kind
+	if k == "" {
+		k = noKind
+	}
+	return strings.Join([]string{k, v, g}, fieldSep)
+}
+
+// stableSortString returns a GVK representation that ensures determinism and
+// backwards-compatibility in testing, logging, ...
+func (x Gvk) stableSortString() string {
+	stableNoGroup := "~G"
+	stableNoVersion := "~V"
+	stableNoKind := "~K"
+	stableFieldSeparator := "_"
+
+	g := x.Group
+	if g == "" {
+		g = stableNoGroup
+	}
+	v := x.Version
+	if v == "" {
+		v = stableNoVersion
+	}
+	k := x.Kind
+	if k == "" {
+		k = stableNoKind
+	}
+	return strings.Join([]string{g, v, k}, stableFieldSeparator)
+}
+
+// ApiVersion returns the combination of Group and Version
+func (x Gvk) ApiVersion() string {
+	if x.Group != "" {
+		return x.Group + "/" + x.Version
+	}
+	return x.Version
+}
+
+// StringWoEmptyField returns a string representation of the GVK. Empty
+// fields are omitted. This is called when generating a filename for the
+// resource.
+func (x Gvk) StringWoEmptyField() string {
+	var s []string
+	if x.Group != "" {
+		s = append(s, x.Group)
+	}
+	if x.Version != "" {
+		s = append(s, x.Version)
+	}
+	if x.Kind != "" {
+		s = append(s, x.Kind)
+	}
+	return strings.Join(s, "_")
+}
+
+// Equals returns true if the Gvk's have equal fields.
+func (x Gvk) Equals(o Gvk) bool {
+	return x.Group == o.Group && x.Version == o.Version && x.Kind == o.Kind
+}
+
+// An attempt to order things to help k8s, e.g.
+// a Service should come before things that refer to it.
+// Namespace should be first.
+// In some cases the order is specified just to provide determinism.
+var orderFirst = []string{
+	"Namespace",
+	"ResourceQuota",
+	"StorageClass",
+	"CustomResourceDefinition",
+	"ServiceAccount",
+	"PodSecurityPolicy",
+	"Role",
+	"ClusterRole",
+	"RoleBinding",
+	"ClusterRoleBinding",
+	"ConfigMap",
+	"Secret",
+	"Endpoints",
+	"Service",
+	"LimitRange",
+	"PriorityClass",
+	"PersistentVolume",
+	"PersistentVolumeClaim",
+	"Deployment",
+	"StatefulSet",
+	"CronJob",
+	"PodDisruptionBudget",
+}
+var orderLast = []string{
+	"MutatingWebhookConfiguration",
+	"ValidatingWebhookConfiguration",
+}
+var typeOrders = func() map[string]int {
+	m := map[string]int{}
+	for i, n := range orderFirst {
+		m[n] = -len(orderFirst) + i
+	}
+	for i, n := range orderLast {
+		m[n] = 1 + i
+	}
+	return m
+}()
+
+// IsLessThan returns true if self is less than the argument.
+func (x Gvk) IsLessThan(o Gvk) bool {
+	indexI := typeOrders[x.Kind]
+	indexJ := typeOrders[o.Kind]
+	if indexI != indexJ {
+		return indexI < indexJ
+	}
+	return x.stableSortString() < o.stableSortString()
+}
+
+// IsSelected returns true if `selector` selects `x`; otherwise, false.
+// If `selector` and `x` are the same, return true.
+// If `selector` is nil, it is considered a wildcard match, returning true.
+// If selector fields are empty, they are considered wildcards matching
+// anything in the corresponding fields, e.g.
+//
+// this item:
+//       <Group: "extensions", Version: "v1beta1", Kind: "Deployment">
+//
+// is selected by
+//       <Group: "", Version: "", Kind: "Deployment">
+//
+// but rejected by
+//       <Group: "apps", Version: "", Kind: "Deployment">
+//
+func (x Gvk) IsSelected(selector *Gvk) bool {
+	if selector == nil {
+		return true
+	}
+	if len(selector.Group) > 0 {
+		if x.Group != selector.Group {
+			return false
+		}
+	}
+	if len(selector.Version) > 0 {
+		if x.Version != selector.Version {
+			return false
+		}
+	}
+	if len(selector.Kind) > 0 {
+		if x.Kind != selector.Kind {
+			return false
+		}
+	}
+	return true
+}
+
+// AsTypeMeta returns a yaml.TypeMeta from x's information.
+func (x Gvk) AsTypeMeta() yaml.TypeMeta {
+	return yaml.TypeMeta{
+		APIVersion: x.ApiVersion(),
+		Kind:       x.Kind,
+	}
+}
+
+// IsClusterScoped returns true if the Gvk is certainly cluster scoped
+// with respect to the available openapi data.
+func (x Gvk) IsClusterScoped() bool {
+	return x.isClusterScoped
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/resid/resid.go b/vendor/sigs.k8s.io/kustomize/kyaml/resid/resid.go
new file mode 100644
index 000000000..cddbcdde6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/resid/resid.go
@@ -0,0 +1,145 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package resid
+
+import (
+	"reflect"
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// ResId is an identifier of a k8s resource object.
+type ResId struct {
+	// Gvk of the resource.
+	Gvk `json:",inline,omitempty" yaml:",inline,omitempty"`
+
+	// Name of the resource.
+	Name string `json:"name,omitempty" yaml:"name,omitempty"`
+
+	// Namespace the resource belongs to, if it can belong to a namespace.
+	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+}
+
+// NewResIdWithNamespace creates a new ResId
+// in a given namespace.
+func NewResIdWithNamespace(k Gvk, n, ns string) ResId {
+	return ResId{Gvk: k, Name: n, Namespace: ns}
+}
+
+// NewResId creates a new ResId.
+func NewResId(k Gvk, n string) ResId {
+	return NewResIdWithNamespace(k, n, "")
+}
+
+// NewResIdKindOnly creates a new ResId.
+func NewResIdKindOnly(k string, n string) ResId {
+	return NewResId(FromKind(k), n)
+}
+
+const (
+	noNamespace          = "[noNs]"
+	noName               = "[noName]"
+	separator            = "/"
+	TotallyNotANamespace = "_non_namespaceable_"
+	DefaultNamespace     = "default"
+)
+
+// String returns a string representation of the ResId
+// based on its GVK, name and namespace.
+func (id ResId) String() string {
+	ns := id.Namespace
+	if ns == "" {
+		ns = noNamespace
+	}
+	nm := id.Name
+	if nm == "" {
+		nm = noName
+	}
+	return strings.Join(
+		[]string{id.Gvk.String(), strings.Join([]string{nm, ns}, fieldSep)}, separator)
+}
+
+// FromString is the inverse of String.
+func FromString(s string) ResId {
+	values := strings.Split(s, separator)
+	gvk := GvkFromString(values[0])
+
+	values = strings.Split(values[1], fieldSep)
+	last := len(values) - 1
+
+	ns := values[last]
+	if ns == noNamespace {
+		ns = ""
+	}
+	nm := strings.Join(values[:last], fieldSep)
+	if nm == noName {
+		nm = ""
+	}
+	return ResId{
+		Gvk:       gvk,
+		Namespace: ns,
+		Name:      nm,
+	}
+}
+
+// FromRNode returns the ResId for the RNode
+func FromRNode(rn *yaml.RNode) ResId {
+	group, version := ParseGroupVersion(rn.GetApiVersion())
+	return NewResIdWithNamespace(
+		Gvk{Group: group, Version: version, Kind: rn.GetKind()}, rn.GetName(), rn.GetNamespace())
+}
+
+// GvknEquals returns true if the other id matches
+// Group/Version/Kind/name.
+func (id ResId) GvknEquals(o ResId) bool {
+	return id.Name == o.Name && id.Gvk.Equals(o.Gvk)
+}
+
+// IsSelectedBy returns true if self is selected by the argument.
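+//
+// For example (editorial illustration, not upstream text):
+//
+//	id := FromString("Deployment.v1.apps/nginx.default")
+//	sel := NewResId(FromKind("Deployment"), "")
+//	_ = id.IsSelectedBy(sel) // true: empty selector fields act as wildcards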
+func (id ResId) IsSelectedBy(selector ResId) bool { + return (selector.Name == "" || selector.Name == id.Name) && + (selector.Namespace == "" || selector.IsNsEquals(id)) && + id.Gvk.IsSelected(&selector.Gvk) +} + +// Equals returns true if the other id matches +// namespace/Group/Version/Kind/name. +func (id ResId) Equals(o ResId) bool { + return id.IsNsEquals(o) && id.GvknEquals(o) +} + +// IsNsEquals returns true if the id is in +// the same effective namespace. +func (id ResId) IsNsEquals(o ResId) bool { + return id.EffectiveNamespace() == o.EffectiveNamespace() +} + +// IsInDefaultNs returns true if id is a namespaceable +// ResId and the Namespace is either not set or set +// to DefaultNamespace. +func (id ResId) IsInDefaultNs() bool { + return !id.IsClusterScoped() && id.isPutativelyDefaultNs() +} + +func (id ResId) isPutativelyDefaultNs() bool { + return id.Namespace == "" || id.Namespace == DefaultNamespace +} + +// EffectiveNamespace returns a non-ambiguous, non-empty +// namespace for use in reporting and equality tests. +func (id ResId) EffectiveNamespace() string { + // The order of these checks matters. + if id.IsClusterScoped() { + return TotallyNotANamespace + } + if id.isPutativelyDefaultNs() { + return DefaultNamespace + } + return id.Namespace +} + +// IsEmpty returns true of all of the id's fields are +// empty strings +func (id ResId) IsEmpty() bool { + return reflect.DeepEqual(id, ResId{}) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/runfn/runfn.go b/vendor/sigs.k8s.io/kustomize/kyaml/runfn/runfn.go new file mode 100644 index 000000000..6b36d7d3a --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/runfn/runfn.go @@ -0,0 +1,549 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package runfn + +import ( + "fmt" + "io" + "os" + "os/user" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "sync/atomic" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/fn/runtime/container" + "sigs.k8s.io/kustomize/kyaml/fn/runtime/exec" + "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" + "sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/kustomize/kyaml/kio/kioutil" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// RunFns runs the set of configuration functions in a local directory against +// the Resources in that directory +type RunFns struct { + StorageMounts []runtimeutil.StorageMount + + // Path is the path to the directory containing functions + Path string + + // FunctionPaths Paths allows functions to be specified outside the configuration + // directory. + // Functions provided on FunctionPaths are globally scoped. + // If FunctionPaths length is > 0, then NoFunctionsFromInput defaults to true + FunctionPaths []string + + // Functions is an explicit list of functions to run against the input. + // Functions provided on Functions are globally scoped. + // If Functions length is > 0, then NoFunctionsFromInput defaults to true + Functions []*yaml.RNode + + // GlobalScope if true, functions read from input will be scoped globally rather + // than only to Resources under their subdirs. 
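+	//
+	// A hypothetical sketch (not upstream text): run every function found in
+	// a package against all resources in it, regardless of subdirectory:
+	//
+	//	r := runfn.RunFns{Path: "./pkg", GlobalScope: true}
+	//	err := r.Execute()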
+ GlobalScope bool + + // Input can be set to read the Resources from Input rather than from a directory + Input io.Reader + + // Network enables network access for functions that declare it + Network bool + + // Output can be set to write the result to Output rather than back to the directory + Output io.Writer + + // NoFunctionsFromInput if set to true will not read any functions from the input, + // and only use explicit sources + NoFunctionsFromInput *bool + + // EnableStarlark will enable functions run as starlark scripts + EnableStarlark bool + + // EnableExec will enable exec functions + EnableExec bool + + // DisableContainers will disable functions run as containers + DisableContainers bool + + // ResultsDir is where to write each functions results + ResultsDir string + + // LogSteps enables logging the function that is running. + LogSteps bool + + // LogWriter can be set to write the logs to LogWriter rather than stderr if LogSteps is enabled. + LogWriter io.Writer + + // resultsCount is used to generate the results filename for each container + resultsCount uint32 + + // functionFilterProvider provides a filter to perform the function. + // this is a variable so it can be mocked in tests + functionFilterProvider func( + filter runtimeutil.FunctionSpec, api *yaml.RNode, currentUser currentUserFunc) (kio.Filter, error) + + // AsCurrentUser is a boolean to indicate whether docker container should use + // the uid and gid that run the command + AsCurrentUser bool + + // Env contains environment variables that will be exported to container + Env []string + + // ContinueOnEmptyResult configures what happens when the underlying pipeline + // returns an empty result. + // If it is false (default), subsequent functions will be skipped and the + // result will be returned immediately. + // If it is true, the empty result will be provided as input to the next + // function in the list. + ContinueOnEmptyResult bool + + // WorkingDir specifies which working directory an exec function should run in. + WorkingDir string +} + +// Execute runs the command +func (r RunFns) Execute() error { + // make the path absolute so it works on mac + var err error + r.Path, err = filepath.Abs(r.Path) + if err != nil { + return errors.Wrap(err) + } + + // default the containerFilterProvider if it hasn't been override. Split out for testing. 
+ (&r).init() + nodes, fltrs, output, err := r.getNodesAndFilters() + if err != nil { + return err + } + return r.runFunctions(nodes, output, fltrs) +} + +func (r RunFns) getNodesAndFilters() ( + *kio.PackageBuffer, []kio.Filter, *kio.LocalPackageReadWriter, error) { + // Read Resources from Directory or Input + buff := &kio.PackageBuffer{} + p := kio.Pipeline{Outputs: []kio.Writer{buff}} + // save the output dir because we will need it to write back + // the same one for reading must be used for writing if deleting Resources + var outputPkg *kio.LocalPackageReadWriter + if r.Path != "" { + outputPkg = &kio.LocalPackageReadWriter{PackagePath: r.Path, MatchFilesGlob: kio.MatchAll} + } + + if r.Input == nil { + p.Inputs = []kio.Reader{outputPkg} + } else { + p.Inputs = []kio.Reader{&kio.ByteReader{Reader: r.Input}} + } + if err := p.Execute(); err != nil { + return nil, nil, outputPkg, err + } + + fltrs, err := r.getFilters(buff.Nodes) + if err != nil { + return nil, nil, outputPkg, err + } + return buff, fltrs, outputPkg, nil +} + +func (r RunFns) getFilters(nodes []*yaml.RNode) ([]kio.Filter, error) { + var fltrs []kio.Filter + + // fns from annotations on the input resources + f, err := r.getFunctionsFromInput(nodes) + if err != nil { + return nil, err + } + fltrs = append(fltrs, f...) + + // fns from directories specified on the struct + f, err = r.getFunctionsFromFunctionPaths() + if err != nil { + return nil, err + } + fltrs = append(fltrs, f...) + + // explicit fns specified on the struct + f, err = r.getFunctionsFromFunctions() + if err != nil { + return nil, err + } + fltrs = append(fltrs, f...) + + return fltrs, nil +} + +// runFunctions runs the fltrs against the input and writes to either r.Output or output +func (r RunFns) runFunctions( + input kio.Reader, output kio.Writer, fltrs []kio.Filter) error { + // use the previously read Resources as input + var outputs []kio.Writer + if r.Output == nil { + // write back to the package + outputs = append(outputs, output) + } else { + // write to the output instead of the directory if r.Output is specified or + // the output is nil (reading from Input) + outputs = append(outputs, kio.ByteWriter{Writer: r.Output}) + } + + var err error + pipeline := kio.Pipeline{ + Inputs: []kio.Reader{input}, + Filters: fltrs, + Outputs: outputs, + ContinueOnEmptyResult: r.ContinueOnEmptyResult, + } + if r.LogSteps { + err = pipeline.ExecuteWithCallback(func(op kio.Filter) { + var identifier string + + switch filter := op.(type) { + case *container.Filter: + identifier = filter.Image + case *exec.Filter: + identifier = filter.Path + case *starlark.Filter: + identifier = filter.String() + default: + identifier = "unknown-type function" + } + + _, _ = fmt.Fprintf(r.LogWriter, "Running %s\n", identifier) + }) + } else { + err = pipeline.Execute() + } + if err != nil { + return err + } + + // check for deferred function errors + var errs []string + for i := range fltrs { + cf, ok := fltrs[i].(runtimeutil.DeferFailureFunction) + if !ok { + continue + } + if cf.GetExit() != nil { + errs = append(errs, cf.GetExit().Error()) + } + } + if len(errs) > 0 { + return fmt.Errorf(strings.Join(errs, "\n---\n")) + } + return nil +} + +// getFunctionsFromInput scans the input for functions and runs them +func (r RunFns) getFunctionsFromInput(nodes []*yaml.RNode) ([]kio.Filter, error) { + if *r.NoFunctionsFromInput { + return nil, nil + } + + buff := &kio.PackageBuffer{} + err := kio.Pipeline{ + Inputs: []kio.Reader{&kio.PackageBuffer{Nodes: nodes}}, + Filters: 
[]kio.Filter{&runtimeutil.IsReconcilerFilter{}}, + Outputs: []kio.Writer{buff}, + }.Execute() + if err != nil { + return nil, err + } + err = sortFns(buff) + if err != nil { + return nil, err + } + return r.getFunctionFilters(false, buff.Nodes...) +} + +// getFunctionsFromFunctionPaths returns the set of functions read from r.FunctionPaths +// as a slice of Filters +func (r RunFns) getFunctionsFromFunctionPaths() ([]kio.Filter, error) { + buff := &kio.PackageBuffer{} + for i := range r.FunctionPaths { + err := kio.Pipeline{ + Inputs: []kio.Reader{ + kio.LocalPackageReader{PackagePath: r.FunctionPaths[i]}, + }, + Outputs: []kio.Writer{buff}, + }.Execute() + if err != nil { + return nil, err + } + } + return r.getFunctionFilters(true, buff.Nodes...) +} + +// getFunctionsFromFunctions returns the set of explicitly provided functions as +// Filters +func (r RunFns) getFunctionsFromFunctions() ([]kio.Filter, error) { + return r.getFunctionFilters(true, r.Functions...) +} + +// mergeContainerEnv will merge the envs specified by command line (imperative) and config +// file (declarative). If they have same key, the imperative value will be respected. +func (r RunFns) mergeContainerEnv(envs []string) []string { + imperative := runtimeutil.NewContainerEnvFromStringSlice(r.Env) + declarative := runtimeutil.NewContainerEnvFromStringSlice(envs) + for key, value := range imperative.EnvVars { + declarative.AddKeyValue(key, value) + } + + for _, key := range imperative.VarsToExport { + declarative.AddKey(key) + } + + return declarative.Raw() +} + +func (r RunFns) getFunctionFilters(global bool, fns ...*yaml.RNode) ( + []kio.Filter, error) { + var fltrs []kio.Filter + for i := range fns { + api := fns[i] + spec, err := runtimeutil.GetFunctionSpec(api) + if err != nil { + return nil, fmt.Errorf("failed to get FunctionSpec: %w", err) + } + if spec == nil { + // resource doesn't have function spec + continue + } + if spec.Container.Network && !r.Network { + // TODO(eddiezane): Provide error info about which function needs the network + return fltrs, errors.Errorf("network required but not enabled with --network") + } + // merge envs from imperative and declarative + spec.Container.Env = r.mergeContainerEnv(spec.Container.Env) + + c, err := r.functionFilterProvider(*spec, api, user.Current) + if err != nil { + return nil, err + } + + if c == nil { + continue + } + cf, ok := c.(*container.Filter) + if ok { + if global { + cf.Exec.GlobalScope = true + } + cf.Exec.WorkingDir = r.WorkingDir + } + fltrs = append(fltrs, c) + } + return fltrs, nil +} + +// sortFns sorts functions so that functions with the longest paths come first +func sortFns(buff *kio.PackageBuffer) error { + var outerErr error + // sort the nodes so that we traverse them depth first + // functions deeper in the file system tree should be run first + sort.Slice(buff.Nodes, func(i, j int) bool { + if err := kioutil.CopyLegacyAnnotations(buff.Nodes[i]); err != nil { + return false + } + if err := kioutil.CopyLegacyAnnotations(buff.Nodes[j]); err != nil { + return false + } + mi, _ := buff.Nodes[i].GetMeta() + pi := filepath.ToSlash(mi.Annotations[kioutil.PathAnnotation]) + + mj, _ := buff.Nodes[j].GetMeta() + pj := filepath.ToSlash(mj.Annotations[kioutil.PathAnnotation]) + + // If the path is the same, we decide the ordering based on the + // index annotation. 
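+		// e.g. (editorial note, not upstream text): "a/b/fn.yaml" runs
+		// before "a/fn.yaml", which runs before "fn.yaml"; only equal
+		// paths fall back to the index annotation comparison below.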
+ if pi == pj { + iIndex, err := strconv.Atoi(mi.Annotations[kioutil.IndexAnnotation]) + if err != nil { + outerErr = err + return false + } + jIndex, err := strconv.Atoi(mj.Annotations[kioutil.IndexAnnotation]) + if err != nil { + outerErr = err + return false + } + return iIndex < jIndex + } + + if filepath.Base(path.Dir(pi)) == "functions" { + // don't count the functions dir, the functions are scoped 1 level above + pi = filepath.Dir(path.Dir(pi)) + } else { + pi = filepath.Dir(pi) + } + + if filepath.Base(path.Dir(pj)) == "functions" { + // don't count the functions dir, the functions are scoped 1 level above + pj = filepath.Dir(path.Dir(pj)) + } else { + pj = filepath.Dir(pj) + } + + // i is "less" than j (comes earlier) if its depth is greater -- e.g. run + // i before j if it is deeper in the directory structure + li := len(strings.Split(pi, "/")) + if pi == "." { + // local dir should have 0 path elements instead of 1 + li = 0 + } + lj := len(strings.Split(pj, "/")) + if pj == "." { + // local dir should have 0 path elements instead of 1 + lj = 0 + } + if li != lj { + // use greater-than because we want to sort with the longest + // paths FIRST rather than last + return li > lj + } + + // sort by path names if depths are equal + return pi < pj + }) + return outerErr +} + +// init initializes the RunFns with a containerFilterProvider. +func (r *RunFns) init() { + if r.NoFunctionsFromInput == nil { + // default no functions from input if any function sources are explicitly provided + nfn := len(r.FunctionPaths) > 0 || len(r.Functions) > 0 + r.NoFunctionsFromInput = &nfn + } + + // if no path is specified, default reading from stdin and writing to stdout + if r.Path == "" { + if r.Output == nil { + r.Output = os.Stdout + } + if r.Input == nil { + r.Input = os.Stdin + } + } + + // functionFilterProvider set the filter provider + if r.functionFilterProvider == nil { + r.functionFilterProvider = r.ffp + } + + // if LogSteps is enabled and LogWriter is not specified, use stderr + if r.LogSteps && r.LogWriter == nil { + r.LogWriter = os.Stderr + } +} + +type currentUserFunc func() (*user.User, error) + +// getUIDGID will return "nobody" if asCurrentUser is false. Otherwise +// return "uid:gid" according to the return from currentUser function. +func getUIDGID(asCurrentUser bool, currentUser currentUserFunc) (string, error) { + if !asCurrentUser { + return "nobody", nil + } + + u, err := currentUser() + if err != nil { + return "", err + } + return fmt.Sprintf("%s:%s", u.Uid, u.Gid), nil +} + +// ffp provides function filters +func (r *RunFns) ffp(spec runtimeutil.FunctionSpec, api *yaml.RNode, currentUser currentUserFunc) (kio.Filter, error) { + var resultsFile string + if r.ResultsDir != "" { + resultsFile = filepath.Join(r.ResultsDir, fmt.Sprintf( + "results-%v.yaml", r.resultsCount)) + atomic.AddUint32(&r.resultsCount, 1) + } + if !r.DisableContainers && spec.Container.Image != "" { + // TODO: Add a test for this behavior + uidgid, err := getUIDGID(r.AsCurrentUser, currentUser) + if err != nil { + return nil, err + } + + // Storage mounts can either come from kustomize fn run --mounts, + // or from the declarative function mounts field. + storageMounts := spec.Container.StorageMounts + storageMounts = append(storageMounts, r.StorageMounts...) 
+ + c := container.NewContainer( + runtimeutil.ContainerSpec{ + Image: spec.Container.Image, + Network: spec.Container.Network, + StorageMounts: storageMounts, + Env: spec.Container.Env, + }, + uidgid, + ) + cf := &c + cf.Exec.FunctionConfig = api + cf.Exec.GlobalScope = r.GlobalScope + cf.Exec.ResultsFile = resultsFile + cf.Exec.DeferFailure = spec.DeferFailure + return cf, nil + } + if r.EnableStarlark && (spec.Starlark.Path != "" || spec.Starlark.URL != "") { + // the script path is relative to the function config file + m, err := api.GetMeta() + if err != nil { + return nil, errors.Wrap(err) + } + + var p string + if spec.Starlark.Path != "" { + pathAnno := m.Annotations[kioutil.PathAnnotation] + if pathAnno == "" { + pathAnno = m.Annotations[kioutil.LegacyPathAnnotation] + } + p = filepath.ToSlash(path.Clean(pathAnno)) + + spec.Starlark.Path = filepath.ToSlash(path.Clean(spec.Starlark.Path)) + if filepath.IsAbs(spec.Starlark.Path) || path.IsAbs(spec.Starlark.Path) { + return nil, errors.Errorf( + "absolute function path %s not allowed", spec.Starlark.Path) + } + if strings.HasPrefix(spec.Starlark.Path, "..") { + return nil, errors.Errorf( + "function path %s not allowed to start with ../", spec.Starlark.Path) + } + p = filepath.ToSlash(filepath.Join(r.Path, filepath.Dir(p), spec.Starlark.Path)) + } + + sf := &starlark.Filter{Name: spec.Starlark.Name, Path: p, URL: spec.Starlark.URL} + + sf.FunctionConfig = api + sf.GlobalScope = r.GlobalScope + sf.ResultsFile = resultsFile + sf.DeferFailure = spec.DeferFailure + return sf, nil + } + + if r.EnableExec && spec.Exec.Path != "" { + ef := &exec.Filter{ + Path: spec.Exec.Path, + WorkingDir: r.WorkingDir, + } + + ef.FunctionConfig = api + ef.GlobalScope = r.GlobalScope + ef.ResultsFile = resultsFile + ef.DeferFailure = spec.DeferFailure + return ef, nil + } + + return nil, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/sets/string.go b/vendor/sigs.k8s.io/kustomize/kyaml/sets/string.go new file mode 100644 index 000000000..d590e5328 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/sets/string.go @@ -0,0 +1,64 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package sets + +type String map[string]interface{} + +func (s String) Len() int { + return len(s) +} + +func (s String) List() []string { + val := make([]string, 0, len(s)) + for k := range s { + val = append(val, k) + } + return val +} + +func (s String) Has(val string) bool { + _, found := s[val] + return found +} + +func (s String) Insert(vals ...string) { + for _, val := range vals { + s[val] = nil + } +} + +func (s String) Difference(s2 String) String { + s3 := String{} + for k := range s { + if _, found := s2[k]; !found { + s3.Insert(k) + } + } + return s3 +} + +func (s String) SymmetricDifference(s2 String) String { + s3 := String{} + for k := range s { + if _, found := s2[k]; !found { + s3.Insert(k) + } + } + for k := range s2 { + if _, found := s[k]; !found { + s3.Insert(k) + } + } + return s3 +} + +func (s String) Intersection(s2 String) String { + s3 := String{} + for k := range s { + if _, found := s2[k]; found { + s3.Insert(k) + } + } + return s3 +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/sets/stringlist.go b/vendor/sigs.k8s.io/kustomize/kyaml/sets/stringlist.go new file mode 100644 index 000000000..2d75978fc --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/sets/stringlist.go @@ -0,0 +1,44 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package sets + +// StringList is a set, where each element of +// the set is a string slice. +type StringList [][]string + +func (s StringList) Len() int { + return len(s) +} + +func (s StringList) Insert(val []string) StringList { + if !s.Has(val) { + return append(s, val) + } + return s +} + +func (s StringList) Has(val []string) bool { + if len(s) == 0 { + return false + } + + for i := range s { + if isStringSliceEqual(s[i], val) { + return true + } + } + return false +} + +func isStringSliceEqual(s []string, t []string) bool { + if len(s) != len(t) { + return false + } + for i := range s { + if s[i] != t[i] { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/sliceutil/slice.go b/vendor/sigs.k8s.io/kustomize/kyaml/sliceutil/slice.go new file mode 100644 index 000000000..23e3ad7c2 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/sliceutil/slice.go @@ -0,0 +1,25 @@ +// Copyright 2021 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package sliceutil + +// Contains return true if string e is present in slice s +func Contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +// Remove removes the first occurrence of r in slice s +// and returns remaining slice +func Remove(s []string, r string) []string { + for i, v := range s { + if v == r { + return append(s[:i], s[i+1:]...) + } + } + return s +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/utils/pathsplitter.go b/vendor/sigs.k8s.io/kustomize/kyaml/utils/pathsplitter.go new file mode 100644 index 000000000..6ce1f899d --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/utils/pathsplitter.go @@ -0,0 +1,71 @@ +// Copyright 2021 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import "strings" + +// TODO: Move these to kyaml + +// PathSplitter splits a delimited string, permitting escaped delimiters. +func PathSplitter(path string, delimiter string) []string { + ps := strings.Split(path, delimiter) + var res []string + + // allow path to start with forward slash + // i.e. /a/b/c + if len(ps) > 1 && ps[0] == "" { + ps = ps[1:] + } + + res = append(res, ps[0]) + for i := 1; i < len(ps); i++ { + last := len(res) - 1 + if strings.HasSuffix(res[last], `\`) { + res[last] = strings.TrimSuffix(res[last], `\`) + delimiter + ps[i] + } else { + res = append(res, ps[i]) + } + } + return res +} + +// SmarterPathSplitter splits a path, retaining bracketed elements. +// If the element is a list entry identifier (defined by the '='), +// it will retain the brackets. +// E.g. "[name=com.foo.someapp]" survives as one thing after splitting +// "spec.template.spec.containers.[name=com.foo.someapp].image" +// See kyaml/yaml/match.go for use of list entry identifiers. +// If the element is a mapping entry identifier, it will remove the +// brackets. +// E.g. "a.b.c" survives as one thing after splitting +// "metadata.annotations.[a.b.c] +// This function uses `PathSplitter`, so it also respects escaped delimiters. 
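+//
+// For example (editorial illustration, not upstream text):
+//
+//	SmarterPathSplitter("spec.containers.[name=foo.bar].image", ".")
+//	// -> ["spec", "containers", "[name=foo.bar]", "image"]
+//	SmarterPathSplitter("metadata.annotations.[a.b.c]", ".")
+//	// -> ["metadata", "annotations", "a.b.c"]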
+func SmarterPathSplitter(path string, delimiter string) []string { + var result []string + split := PathSplitter(path, delimiter) + + for i := 0; i < len(split); i++ { + elem := split[i] + if strings.HasPrefix(elem, "[") && !strings.HasSuffix(elem, "]") { + // continue until we find the matching "]" + bracketed := []string{elem} + for i < len(split)-1 { + i++ + bracketed = append(bracketed, split[i]) + if strings.HasSuffix(split[i], "]") { + break + } + } + bracketedStr := strings.Join(bracketed, delimiter) + if strings.Contains(bracketedStr, "=") { + result = append(result, bracketedStr) + } else { + result = append(result, strings.Trim(bracketedStr, "[]")) + } + } else { + result = append(result, elem) + } + } + return result +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go new file mode 100644 index 000000000..5f45424d9 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go @@ -0,0 +1,109 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +import ( + "bytes" + "io" + + "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" +) + +const ( + WideSequenceStyle SequenceIndentStyle = "wide" + CompactSequenceStyle SequenceIndentStyle = "compact" + DefaultIndent = 2 + // BareSeqNodeWrappingKey kyaml uses reader annotations to track resources, it is not possible to + // add them to bare sequence nodes, this key is used to wrap such bare + // sequence nodes into map node, byteio_writer unwraps it while writing back + BareSeqNodeWrappingKey = "bareSeqNodeWrappingKey" +) + +// SeqIndentType holds the indentation style for sequence nodes +type SequenceIndentStyle string + +// EncoderOptions are options that can be used to configure the encoder, +// do not expose new options without considerable justification +type EncoderOptions struct { + // SeqIndent is the indentation style for YAML Sequence nodes + SeqIndent SequenceIndentStyle +} + +// Expose the yaml.v3 functions so this package can be used as a replacement + +type Decoder = yaml.Decoder +type Encoder = yaml.Encoder +type IsZeroer = yaml.IsZeroer +type Kind = yaml.Kind +type Marshaler = yaml.Marshaler +type Node = yaml.Node +type Style = yaml.Style +type TypeError = yaml.TypeError +type Unmarshaler = yaml.Unmarshaler + +var Marshal = func(in interface{}) ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(in) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +var Unmarshal = yaml.Unmarshal +var NewDecoder = yaml.NewDecoder +var NewEncoder = func(w io.Writer) *yaml.Encoder { + e := yaml.NewEncoder(w) + e.SetIndent(DefaultIndent) + e.CompactSeqIndent() + return e +} + +// MarshalWithOptions marshals the input interface with provided options +func MarshalWithOptions(in interface{}, opts *EncoderOptions) ([]byte, error) { + var buf bytes.Buffer + err := NewEncoderWithOptions(&buf, opts).Encode(in) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// NewEncoderWithOptions returns the encoder with provided options +func NewEncoderWithOptions(w io.Writer, opts *EncoderOptions) *yaml.Encoder { + encoder := NewEncoder(w) + encoder.SetIndent(DefaultIndent) + if opts.SeqIndent == WideSequenceStyle { + encoder.DefaultSeqIndent() + } else { + encoder.CompactSeqIndent() + } + return encoder +} + +var AliasNode yaml.Kind = yaml.AliasNode +var DocumentNode yaml.Kind = yaml.DocumentNode +var MappingNode yaml.Kind = yaml.MappingNode +var ScalarNode 
yaml.Kind = yaml.ScalarNode +var SequenceNode yaml.Kind = yaml.SequenceNode + +func nodeKindString(k yaml.Kind) string { + return map[yaml.Kind]string{ + yaml.SequenceNode: "SequenceNode", + yaml.MappingNode: "MappingNode", + yaml.ScalarNode: "ScalarNode", + yaml.DocumentNode: "DocumentNode", + yaml.AliasNode: "AliasNode", + }[k] +} + +var DoubleQuotedStyle yaml.Style = yaml.DoubleQuotedStyle +var FlowStyle yaml.Style = yaml.FlowStyle +var FoldedStyle yaml.Style = yaml.FoldedStyle +var LiteralStyle yaml.Style = yaml.LiteralStyle +var SingleQuotedStyle yaml.Style = yaml.SingleQuotedStyle +var TaggedStyle yaml.Style = yaml.TaggedStyle + +const ( + MergeTag = "!!merge" +) diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go new file mode 100644 index 000000000..a47114000 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +import ( + "reflect" + "strings" + + y1_1 "gopkg.in/yaml.v2" + "k8s.io/kube-openapi/pkg/validation/spec" + y1_2 "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" +) + +// typeToTag maps OpenAPI schema types to yaml 1.2 tags +var typeToTag = map[string]string{ + "string": NodeTagString, + "integer": NodeTagInt, + "boolean": NodeTagBool, + "number": NodeTagFloat, +} + +// FormatNonStringStyle makes sure that values which parse as non-string values in yaml 1.1 +// are correctly formatted given the Schema type. +func FormatNonStringStyle(node *Node, schema spec.Schema) { + if len(schema.Type) != 1 { + return + } + t := schema.Type[0] + + if !IsYaml1_1NonString(node) { + return + } + switch { + case t == "string" && schema.Format != "int-or-string": + if (node.Style&DoubleQuotedStyle == 0) && (node.Style&SingleQuotedStyle == 0) { + // must quote values so they are parsed as strings + node.Style = DoubleQuotedStyle + } + case t == "boolean" || t == "integer" || t == "number": + if (node.Style&DoubleQuotedStyle != 0) || (node.Style&SingleQuotedStyle != 0) { + // must NOT quote the values so they aren't parsed as strings + node.Style = 0 + } + default: + return + } + + // if the node tag is null, make sure we don't add any non-null tags + // https://github.com/GoogleContainerTools/kpt/issues/2321 + if node.Tag == NodeTagNull { + // must NOT quote null values + node.Style = 0 + return + } + if tag, found := typeToTag[t]; found { + // make sure the right tag is set + node.Tag = tag + } +} + +// IsYaml1_1NonString returns true if the value parses as a non-string value in yaml 1.1 +// when unquoted. +// +// Note: yaml 1.2 uses different keywords than yaml 1.1. Example: yaml 1.2 interprets +// `field: on` and `field: "on"` as equivalent (both strings). However Yaml 1.1 interprets +// `field: on` as on being a bool and `field: "on"` as on being a string. +// If an input is read with `field: "on"`, and the style is changed from DoubleQuote to 0, +// it will change the type of the field from a string to a bool. For this reason, fields +// which are keywords in yaml 1.1 should never have their style changed, as it would break +// backwards compatibility with yaml 1.1 -- which is what is used by the Kubernetes apiserver. 
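+//
+// For example (editorial illustration, not upstream text): "on", "yes" and
+// "1.0" all parse as non-strings under yaml 1.1 (bool, bool, float), while
+// "name" does not.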
+func IsYaml1_1NonString(node *Node) bool { + if node.Kind != y1_2.ScalarNode { + // not a keyword + return false + } + return IsValueNonString(node.Value) +} + +func IsValueNonString(value string) bool { + if value == "" { + return false + } + if strings.Contains(value, "\n") { + // multi-line strings will fail to unmarshal + return false + } + // check if the value will unmarshal into a non-string value using a yaml 1.1 parser + var i1 interface{} + if err := y1_1.Unmarshal([]byte(value), &i1); err != nil { + return false + } + if reflect.TypeOf(i1) != stringType { + return true + } + + return false +} + +var stringType = reflect.TypeOf("string") diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/const.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/const.go new file mode 100644 index 000000000..6a2cc4516 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/const.go @@ -0,0 +1,30 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +const ( + // NodeTagNull is the tag set for a yaml.Document that contains no data; + // e.g. it isn't a Map, Slice, Document, etc + NodeTagNull = "!!null" + NodeTagFloat = "!!float" + NodeTagString = "!!str" + NodeTagBool = "!!bool" + NodeTagInt = "!!int" + NodeTagMap = "!!map" + NodeTagSeq = "!!seq" + NodeTagEmpty = "" +) + +// Field names +const ( + AnnotationsField = "annotations" + APIVersionField = "apiVersion" + KindField = "kind" + MetadataField = "metadata" + DataField = "data" + BinaryDataField = "binaryData" + NameField = "name" + NamespaceField = "namespace" + LabelsField = "labels" +) diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/datamap.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/datamap.go new file mode 100644 index 000000000..f4b7e6664 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/datamap.go @@ -0,0 +1,121 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +import ( + "encoding/base64" + "sort" + "strings" + "unicode/utf8" +) + +// SortedMapKeys returns a sorted slice of keys to the given map. +// Writing this function never gets old. 
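+//
+// For example (editorial illustration, not upstream text):
+//
+//	SortedMapKeys(map[string]string{"b": "2", "a": "1"}) // ["a", "b"]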
+func SortedMapKeys(m map[string]string) []string { + keys := make([]string, len(m)) + i := 0 + for k := range m { + keys[i] = k + i++ + } + sort.Strings(keys) + return keys +} + +func (rn *RNode) LoadMapIntoConfigMapData(m map[string]string) error { + for _, k := range SortedMapKeys(m) { + fldName, vrN := makeConfigMapValueRNode(m[k]) + if _, err := rn.Pipe( + LookupCreate(MappingNode, fldName), + SetField(k, vrN)); err != nil { + return err + } + } + return nil +} + +func (rn *RNode) LoadMapIntoConfigMapBinaryData(m map[string]string) error { + for _, k := range SortedMapKeys(m) { + _, vrN := makeConfigMapValueRNode(m[k]) + // we know this is binary data + fldName := BinaryDataField + if _, err := rn.Pipe( + LookupCreate(MappingNode, fldName), + SetField(k, vrN)); err != nil { + return err + } + } + return nil +} + +func makeConfigMapValueRNode(s string) (field string, rN *RNode) { + yN := &Node{Kind: ScalarNode} + yN.Tag = NodeTagString + if utf8.ValidString(s) { + field = DataField + yN.Value = s + } else { + field = BinaryDataField + yN.Value = encodeBase64(s) + } + if strings.Contains(yN.Value, "\n") { + yN.Style = LiteralStyle + } + return field, NewRNode(yN) +} + +func (rn *RNode) LoadMapIntoSecretData(m map[string]string) error { + mapNode, err := rn.Pipe(LookupCreate(MappingNode, DataField)) + if err != nil { + return err + } + for _, k := range SortedMapKeys(m) { + vrN := makeSecretValueRNode(m[k]) + if _, err := mapNode.Pipe(SetField(k, vrN)); err != nil { + return err + } + } + return nil +} + +// In a secret, all data is base64 encoded, regardless of its conformance +// or lack thereof to UTF-8. +func makeSecretValueRNode(s string) *RNode { + yN := &Node{Kind: ScalarNode} + // Purposely don't use YAML tags to identify the data as being plain text or + // binary. It kubernetes Secrets the values in the `data` map are expected + // to be base64 encoded, and in ConfigMaps that same can be said for the + // values in the `binaryData` field. + yN.Tag = NodeTagString + yN.Value = encodeBase64(s) + if strings.Contains(yN.Value, "\n") { + yN.Style = LiteralStyle + } + return NewRNode(yN) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/doc.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/doc.go new file mode 100644 index 000000000..b58811cf8 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/doc.go @@ -0,0 +1,49 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package yaml contains libraries for manipulating individual Kubernetes Resource +// Configuration as yaml, keeping yaml structure and comments. +// +// Parsing Resources +// +// Typically Resources will be initialized as collections through the kio package libraries. +// However it is possible to directly initialize Resources using Parse. 
+// resource, err := yaml.Parse("apiVersion: apps/v1\nkind: Deployment") +// +// Processing Resources +// +// Individual Resources are manipulated using the Pipe and PipeE to apply Filter functions +// to transform the Resource data. +// err := resource.PipeE(yaml.SetAnnotation("key", "value")) +// +// If multiple Filter functions are provided to Pipe or PipeE, each function is applied to +// the result of the last function -- e.g. yaml.Lookup(...), yaml.SetField(...) +// +// Field values may also be retrieved using Pipe. +// annotationValue, err := resource.Pipe(yaml.GetAnnotation("key")) +// +// See http://www.linfo.org/filters.html for a definition of filters. +// +// Common Filters +// +// There are a number of standard filter functions provided by the yaml package. +// +// Working with annotations: +// [AnnotationSetter{}, AnnotationGetter{}, AnnotationClearer{}] +// +// Working with fields by path: +// [PathMatcher{}, PathGetter{}] +// +// Working with individual fields on Maps and Objects: +// [FieldMatcher{}, FieldSetter{}, FieldGetter{}] +// +// Working with individual elements in Sequences: +// [ElementAppender{}, ElementSetter{}, ElementMatcher{}] +// +// Writing Filters +// +// Users may implement their own filter functions. When doing so, can be necessary to work with +// the RNode directly rather than through Pipe. RNode provides a number of functions for doing +// so. See: +// [GetMeta(), Fields(), Elements(), String()] +package yaml diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/filters.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/filters.go new file mode 100644 index 000000000..e7d4b5f7b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/filters.go @@ -0,0 +1,146 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +// Filters is the list of serializable Pipeline Filters +var Filters = map[string]func() Filter{ + "AnnotationClearer": func() Filter { return &AnnotationClearer{} }, + "AnnotationGetter": func() Filter { return &AnnotationGetter{} }, + "AnnotationSetter": func() Filter { return &AnnotationSetter{} }, + "LabelSetter": func() Filter { return &LabelSetter{} }, + "ElementAppender": func() Filter { return &ElementAppender{} }, + "ElementMatcher": func() Filter { return &ElementMatcher{} }, + "FieldClearer": func() Filter { return &FieldClearer{} }, + "FilterMatcher": func() Filter { return &FilterMatcher{} }, + "FieldMatcher": func() Filter { return &FieldMatcher{} }, + "FieldSetter": func() Filter { return &FieldSetter{} }, + "PathGetter": func() Filter { return &PathGetter{} }, + "PathMatcher": func() Filter { return &PathMatcher{} }, + "Parser": func() Filter { return &Parser{} }, + "PrefixSetter": func() Filter { return &PrefixSetter{} }, + "ValueReplacer": func() Filter { return &ValueReplacer{} }, + "SuffixSetter": func() Filter { return &SuffixSetter{} }, + "TeePiper": func() Filter { return &TeePiper{} }, +} + +// YFilter wraps the Filter interface so the filter can be represented as +// data and can be unmarshalled into a struct from a yaml config file. +// This allows Pipelines to be expressed as data rather than code. 
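+//
+// For example (editorial illustration, not upstream text), a filter
+// serialized as
+//
+//	kind: AnnotationSetter
+//	key: foo
+//	value: bar
+//
+// unmarshals into the registered AnnotationSetter filter.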
+type YFilter struct { + Filter +} + +func (y YFilter) MarshalYAML() (interface{}, error) { + return y.Filter, nil +} + +func (y *YFilter) UnmarshalYAML(unmarshal func(interface{}) error) error { + meta := &ResourceMeta{} + if err := unmarshal(meta); err != nil { + return err + } + filter, found := Filters[meta.Kind] + if !found { + var knownFilters []string + for k := range Filters { + knownFilters = append(knownFilters, k) + } + sort.Strings(knownFilters) + return fmt.Errorf("unsupported Filter Kind %s: may be one of: [%s]", + meta.Kind, strings.Join(knownFilters, ",")) + } + y.Filter = filter() + + if err := unmarshal(y.Filter); err != nil { + return err + } + return nil +} + +type YFilters []YFilter + +func (y YFilters) Filters() []Filter { + f := make([]Filter, 0, len(y)) + for i := range y { + f = append(f, y[i].Filter) + } + return f +} + +type FilterMatcher struct { + Kind string `yaml:"kind"` + + // Filters are the set of Filters run by TeePiper. + Filters YFilters `yaml:"pipeline,omitempty"` +} + +func (t FilterMatcher) Filter(rn *RNode) (*RNode, error) { + v, err := rn.Pipe(t.Filters.Filters()...) + if v == nil || err != nil { + return nil, err + } + // return the original input if the pipeline resolves to true + return rn, err +} + +type ValueReplacer struct { + Kind string `yaml:"kind"` + + StringMatch string `yaml:"stringMatch"` + RegexMatch string `yaml:"regexMatch"` + Replace string `yaml:"replace"` + Count int `yaml:"count"` +} + +func (s ValueReplacer) Filter(object *RNode) (*RNode, error) { + if s.Count == 0 { + s.Count = -1 + } + switch { + case s.StringMatch != "": + object.value.Value = strings.Replace(object.value.Value, s.StringMatch, s.Replace, s.Count) + case s.RegexMatch != "": + r, err := regexp.Compile(s.RegexMatch) + if err != nil { + return nil, fmt.Errorf("ValueReplacer RegexMatch does not compile: %v", err) + } + object.value.Value = r.ReplaceAllString(object.value.Value, s.Replace) + default: + return nil, fmt.Errorf("ValueReplacer missing StringMatch and RegexMatch") + } + return object, nil +} + +type PrefixSetter struct { + Kind string `yaml:"kind"` + + Value string `yaml:"value"` +} + +func (s PrefixSetter) Filter(object *RNode) (*RNode, error) { + if !strings.HasPrefix(object.value.Value, s.Value) { + object.value.Value = s.Value + object.value.Value + } + return object, nil +} + +type SuffixSetter struct { + Kind string `yaml:"kind"` + + Value string `yaml:"value"` +} + +func (s SuffixSetter) Filter(object *RNode) (*RNode, error) { + if !strings.HasSuffix(object.value.Value, s.Value) { + object.value.Value += s.Value + } + return object, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go new file mode 100644 index 000000000..ae63d258b --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go @@ -0,0 +1,878 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/davecgh/go-spew/spew" + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" +) + +// Append creates an ElementAppender +func Append(elements ...*yaml.Node) ElementAppender { + return ElementAppender{Elements: elements} +} + +// ElementAppender adds all element to a SequenceNode's Content. +// Returns Elements[0] if len(Elements) == 1, otherwise returns nil. 
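+//
+// For example (editorial illustration, not upstream text):
+//
+//	seq, _ := Parse("- a\n- b\n")
+//	_, _ = seq.Pipe(Append(NewScalarRNode("c").YNode())) // seq becomes [a, b, c]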
+type ElementAppender struct { + Kind string `yaml:"kind,omitempty"` + + // Elem is the value to append. + Elements []*yaml.Node `yaml:"elements,omitempty"` +} + +func (a ElementAppender) Filter(rn *RNode) (*RNode, error) { + if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil { + return nil, err + } + for i := range a.Elements { + rn.YNode().Content = append(rn.Content(), a.Elements[i]) + } + if len(a.Elements) == 1 { + return NewRNode(a.Elements[0]), nil + } + return nil, nil +} + +// ElementSetter sets the value for an Element in an associative list. +// ElementSetter will append, replace or delete an element in an associative list. +// To append, user a key-value pair that doesn't exist in the sequence. this +// behavior is intended to handle the case that not matching element found. It's +// not designed for this purpose. To append an element, please use ElementAppender. +// To replace, set the key-value pair and a non-nil Element. +// To delete, set the key-value pair and leave the Element as nil. +// Every key must have a corresponding value. +type ElementSetter struct { + Kind string `yaml:"kind,omitempty"` + + // Element is the new value to set -- remove the existing element if nil + Element *Node + + // Key is a list of fields on the elements. It is used to find matching elements to + // update / delete + Keys []string + + // Value is a list of field values on the elements corresponding to the keys. It is + // used to find matching elements to update / delete. + Values []string +} + +// isMappingNode returns whether node is a mapping node +func (e ElementSetter) isMappingNode(node *RNode) bool { + return ErrorIfInvalid(node, yaml.MappingNode) == nil +} + +// isMappingSetter returns is this setter intended to set a mapping node +func (e ElementSetter) isMappingSetter() bool { + return len(e.Keys) > 0 && e.Keys[0] != "" && + len(e.Values) > 0 && e.Values[0] != "" +} + +func (e ElementSetter) Filter(rn *RNode) (*RNode, error) { + if len(e.Keys) == 0 { + e.Keys = append(e.Keys, "") + } + + if err := ErrorIfInvalid(rn, SequenceNode); err != nil { + return nil, err + } + + // build the new Content slice + var newContent []*yaml.Node + matchingElementFound := false + for i := range rn.YNode().Content { + elem := rn.Content()[i] + newNode := NewRNode(elem) + + // empty elements are not valid -- they at least need an associative key + if IsMissingOrNull(newNode) || IsEmptyMap(newNode) { + continue + } + // keep non-mapping node in the Content when we want to match a mapping. 
+ if !e.isMappingNode(newNode) && e.isMappingSetter() { + newContent = append(newContent, elem) + continue + } + + // check if this is the element we are matching + var val *RNode + var err error + found := true + for j := range e.Keys { + if j < len(e.Values) { + val, err = newNode.Pipe(FieldMatcher{Name: e.Keys[j], StringValue: e.Values[j]}) + } + if err != nil { + return nil, err + } + if val == nil { + found = false + break + } + } + if !found { + // not the element we are looking for, keep it in the Content + if len(e.Values) > 0 { + newContent = append(newContent, elem) + } + continue + } + matchingElementFound = true + + // deletion operation -- remove the element from the new Content + if e.Element == nil { + continue + } + // replace operation -- replace the element in the Content + newContent = append(newContent, e.Element) + } + rn.YNode().Content = newContent + + // deletion operation -- return nil + if IsMissingOrNull(NewRNode(e.Element)) { + return nil, nil + } + + // append operation -- add the element to the Content + if !matchingElementFound { + rn.YNode().Content = append(rn.YNode().Content, e.Element) + } + + return NewRNode(e.Element), nil +} + +// GetElementByIndex will return a Filter which can be applied to a sequence +// node to get the element specified by the index +func GetElementByIndex(index int) ElementIndexer { + return ElementIndexer{Index: index} +} + +// ElementIndexer picks the element with a specified index. Index starts from +// 0 to len(list) - 1. a hyphen ("-") means the last index. +type ElementIndexer struct { + Index int +} + +// Filter implements Filter +func (i ElementIndexer) Filter(rn *RNode) (*RNode, error) { + // rn.Elements will return error if rn is not a sequence node. + elems, err := rn.Elements() + if err != nil { + return nil, err + } + if i.Index < 0 { + return elems[len(elems)-1], nil + } + if i.Index >= len(elems) { + return nil, nil + } + return elems[i.Index], nil +} + +// Clear returns a FieldClearer +func Clear(name string) FieldClearer { + return FieldClearer{Name: name} +} + +// FieldClearer removes the field or map key. +// Returns a RNode with the removed field or map entry. +type FieldClearer struct { + Kind string `yaml:"kind,omitempty"` + + // Name is the name of the field or key in the map. + Name string `yaml:"name,omitempty"` + + IfEmpty bool `yaml:"ifEmpty,omitempty"` +} + +func (c FieldClearer) Filter(rn *RNode) (*RNode, error) { + if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { + return nil, err + } + + var removed *RNode + visitFieldsWhileTrue(rn.Content(), func(key, value *yaml.Node, keyIndex int) bool { + if key.Value != c.Name { + return true + } + + // the name matches: remove these 2 elements from the list because + // they are treated as a fieldName/fieldValue pair. + if c.IfEmpty { + if len(value.Content) > 0 { + return true + } + } + + // save the item we are about to remove + removed = NewRNode(value) + if len(rn.YNode().Content) > keyIndex+2 { + l := len(rn.YNode().Content) + // remove from the middle of the list + rn.YNode().Content = rn.Content()[:keyIndex] + rn.YNode().Content = append( + rn.YNode().Content, + rn.Content()[keyIndex+2:l]...) 
+ } else { + // remove from the end of the list + rn.YNode().Content = rn.Content()[:keyIndex] + } + return false + }) + + return removed, nil +} + +func MatchElement(field, value string) ElementMatcher { + return ElementMatcher{Keys: []string{field}, Values: []string{value}} +} + +func MatchElementList(keys []string, values []string) ElementMatcher { + return ElementMatcher{Keys: keys, Values: values} +} + +func GetElementByKey(key string) ElementMatcher { + return ElementMatcher{Keys: []string{key}, MatchAnyValue: true} +} + +// ElementMatcher returns the first element from a Sequence matching the +// specified key-value pairs. If there's no match, and no configuration error, +// the matcher returns nil, nil. +type ElementMatcher struct { + Kind string `yaml:"kind,omitempty"` + + // Keys are the list of fields upon which to match this element. + Keys []string + + // Values are the list of values upon which to match this element. + Values []string + + // Create will create the Element if it is not found + Create *RNode `yaml:"create,omitempty"` + + // MatchAnyValue indicates that matcher should only consider the key and ignore + // the actual value in the list. Values must be empty when MatchAnyValue is + // set to true. + MatchAnyValue bool `yaml:"noValue,omitempty"` +} + +func (e ElementMatcher) Filter(rn *RNode) (*RNode, error) { + if len(e.Keys) == 0 { + e.Keys = append(e.Keys, "") + } + if len(e.Values) == 0 { + e.Values = append(e.Values, "") + } + + if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil { + return nil, err + } + if e.MatchAnyValue && len(e.Values) != 0 && e.Values[0] != "" { + return nil, fmt.Errorf("Values must be empty when MatchAnyValue is set to true") + } + + // SequenceNode Content is a slice of ScalarNodes. Each ScalarNode has a + // YNode containing the primitive data. + if len(e.Keys) == 0 || len(e.Keys[0]) == 0 { + for i := range rn.Content() { + if rn.Content()[i].Value == e.Values[0] { + return &RNode{value: rn.Content()[i]}, nil + } + } + if e.Create != nil { + return rn.Pipe(Append(e.Create.YNode())) + } + return nil, nil + } + + // SequenceNode Content is a slice of MappingNodes. Each MappingNode has Content + // with a slice of key-value pairs containing the fields. + for i := range rn.Content() { + // cast the entry to a RNode so we can operate on it + elem := NewRNode(rn.Content()[i]) + var field *RNode + var err error + + // only check mapping node + if err = ErrorIfInvalid(elem, yaml.MappingNode); err != nil { + continue + } + + if !e.MatchAnyValue && len(e.Keys) != len(e.Values) { + return nil, fmt.Errorf("length of keys must equal length of values when MatchAnyValue is false") + } + + matchesElement := true + for i := range e.Keys { + if e.MatchAnyValue { + field, err = elem.Pipe(Get(e.Keys[i])) + } else { + field, err = elem.Pipe(MatchField(e.Keys[i], e.Values[i])) + } + if !IsFoundOrError(field, err) { + // this is not the element we are looking for + matchesElement = false + break + } + } + if matchesElement { + return elem, err + } + } + + // create the element + if e.Create != nil { + return rn.Pipe(Append(e.Create.YNode())) + } + + return nil, nil +} + +func Get(name string) FieldMatcher { + return FieldMatcher{Name: name} +} + +func MatchField(name, value string) FieldMatcher { + return FieldMatcher{Name: name, Value: NewScalarRNode(value)} +} + +func Match(value string) FieldMatcher { + return FieldMatcher{Value: NewScalarRNode(value)} +} + +// FieldMatcher returns the value of a named field or map entry. 
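+//
+// For example (editorial illustration, not upstream text):
+//
+//	rn, _ := Parse("kind: Deployment\n")
+//	v, _ := rn.Pipe(Get("kind")) // v.YNode().Value == "Deployment"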
+type FieldMatcher struct { + Kind string `yaml:"kind,omitempty"` + + // Name of the field to return + Name string `yaml:"name,omitempty"` + + // YNode of the field to return. + // Optional. Will only need to match field name if unset. + Value *RNode `yaml:"value,omitempty"` + + StringValue string `yaml:"stringValue,omitempty"` + + StringRegexValue string `yaml:"stringRegexValue,omitempty"` + + // Create will cause the field to be created with this value + // if it is set. + Create *RNode `yaml:"create,omitempty"` +} + +func (f FieldMatcher) Filter(rn *RNode) (*RNode, error) { + if f.StringValue != "" && f.Value == nil { + f.Value = NewScalarRNode(f.StringValue) + } + + // never match nil or null fields + if IsMissingOrNull(rn) { + return nil, nil + } + + if f.Name == "" { + if err := ErrorIfInvalid(rn, yaml.ScalarNode); err != nil { + return nil, err + } + switch { + case f.StringRegexValue != "": + // TODO(pwittrock): pre-compile this when unmarshalling and cache to a field + rg, err := regexp.Compile(f.StringRegexValue) + if err != nil { + return nil, err + } + if match := rg.MatchString(rn.value.Value); match { + return rn, nil + } + return nil, nil + case GetValue(rn) == GetValue(f.Value): + return rn, nil + default: + return nil, nil + } + } + + if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { + return nil, err + } + + var returnNode *RNode + requireMatchFieldValue := f.Value != nil + visitMappingNodeFields(rn.Content(), func(key, value *yaml.Node) { + if !requireMatchFieldValue || value.Value == f.Value.YNode().Value { + returnNode = NewRNode(value) + } + }, f.Name) + if returnNode != nil { + return returnNode, nil + } + + if f.Create != nil { + return rn.Pipe(SetField(f.Name, f.Create)) + } + + return nil, nil +} + +// Lookup returns a PathGetter to lookup a field by its path. +func Lookup(path ...string) PathGetter { + return PathGetter{Path: path} +} + +// LookupCreate returns a PathGetter to lookup a field by its path and create it if it doesn't already +// exist. +func LookupCreate(kind yaml.Kind, path ...string) PathGetter { + return PathGetter{Path: path, Create: kind} +} + +// ConventionalContainerPaths is a list of paths at which containers typically appear in workload APIs. +// It is intended for use with LookupFirstMatch. +var ConventionalContainerPaths = [][]string{ + // e.g. Deployment, ReplicaSet, DaemonSet, Job, StatefulSet + {"spec", "template", "spec", "containers"}, + // e.g. CronJob + {"spec", "jobTemplate", "spec", "template", "spec", "containers"}, + // e.g. Pod + {"spec", "containers"}, + // e.g. PodTemplate + {"template", "spec", "containers"}, +} + +// LookupFirstMatch returns a Filter for locating a value that may exist at one of several possible paths. +// For example, it can be used with ConventionalContainerPaths to find the containers field in a standard workload resource. +// If more than one of the paths exists in the resource, the first will be returned. If none exist, +// nil will be returned. If an error is encountered during lookup, it will be returned. +func LookupFirstMatch(paths [][]string) Filter { + return FilterFunc(func(object *RNode) (*RNode, error) { + var result *RNode + var err error + for _, path := range paths { + result, err = object.Pipe(PathGetter{Path: path}) + if err != nil { + return nil, errors.Wrap(err) + } + if result != nil { + return result, nil + } + } + return nil, nil + }) +} + +// PathGetter returns the RNode under Path. 
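+//
+// For example (editorial illustration, not upstream text), assuming a
+// workload resource rn with a container named "app":
+//
+//	img, _ := rn.Pipe(Lookup("spec", "template", "spec", "containers", "[name=app]", "image"))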
+type PathGetter struct {
+	Kind string `yaml:"kind,omitempty"`
+
+	// Path is a slice of parts leading to the RNode to lookup.
+	// Each path part may be one of:
+	// * FieldMatcher -- e.g. "spec"
+	// * Map Key -- e.g. "app.k8s.io/version"
+	// * List Entry -- e.g. "[name=nginx]" or "[=-jar]" or "0" or "-"
+	//
+	// Map Keys and Fields are equivalent.
+	// See FieldMatcher for more on Fields and Map Keys.
+	//
+	// List Entries can be specified as a map entry to match, [fieldName=fieldValue],
+	// or as a positional index, like 0, to get the element. - (unquoted hyphen) is
+	// special and means the last element.
+	//
+	// See Elem for more on List Entries.
+	//
+	// Examples:
+	// * spec.template.spec.container with matching name: [name=nginx]
+	// * spec.template.spec.container.argument matching a value: [=-jar]
+	Path []string `yaml:"path,omitempty"`
+
+	// Create will cause missing path parts to be created as they are walked.
+	//
+	// * The leaf Node (final path) will be created with a Kind matching Create
+	// * Intermediary Nodes will be created as either MappingNodes or
+	//   SequenceNodes, as appropriate for their location in the Path.
+	// * If a list item is specified by an index (an offset or "-"), this item
+	//   will not be created even if Create is set.
+	Create yaml.Kind `yaml:"create,omitempty"`
+
+	// Style is the style to apply to created value Nodes.
+	// Created key Nodes keep an unspecified Style.
+	Style yaml.Style `yaml:"style,omitempty"`
+}
+
+func (l PathGetter) Filter(rn *RNode) (*RNode, error) {
+	var err error
+	fieldPath := append([]string{}, rn.FieldPath()...)
+	match := rn
+
+	// iterate over the path until encountering an error or missing value
+	l.Path = cleanPath(l.Path)
+	for i := range l.Path {
+		var part, nextPart string
+		part = l.Path[i]
+		if len(l.Path) > i+1 {
+			nextPart = l.Path[i+1]
+		}
+		var fltr Filter
+		fltr, err = l.getFilter(part, nextPart, &fieldPath)
+		if err != nil {
+			return nil, err
+		}
+		match, err = match.Pipe(fltr)
+		if IsMissingOrError(match, err) {
+			return nil, err
+		}
+		match.AppendToFieldPath(fieldPath...)
+ } + return match, nil +} + +func (l PathGetter) getFilter(part, nextPart string, fieldPath *[]string) (Filter, error) { + idx, err := strconv.Atoi(part) + switch { + case err == nil: + // part is a number + if idx < 0 { + return nil, fmt.Errorf("array index %d cannot be negative", idx) + } + return GetElementByIndex(idx), nil + case part == "-": + // part is a hyphen + return GetElementByIndex(-1), nil + case part == "*": + // PathGetter is not support for wildcard matching + return nil, errors.Errorf("wildcard is not supported in PathGetter") + case IsListIndex(part): + // part is surrounded by brackets + return l.elemFilter(part) + default: + // mapping node + *fieldPath = append(*fieldPath, part) + return l.fieldFilter(part, getPathPartKind(nextPart, l.Create)) + } +} + +func (l PathGetter) elemFilter(part string) (Filter, error) { + name, value, err := SplitIndexNameValue(part) + if err != nil { + return nil, errors.Wrap(err) + } + if !IsCreate(l.Create) { + return MatchElement(name, value), nil + } + + var elem *RNode + primitiveElement := len(name) == 0 + if primitiveElement { + // append a ScalarNode + elem = NewScalarRNode(value) + elem.YNode().Style = l.Style + } else { + // append a MappingNode + match := NewRNode(&yaml.Node{Kind: yaml.ScalarNode, Value: value, Style: l.Style}) + elem = NewRNode(&yaml.Node{ + Kind: yaml.MappingNode, + Content: []*yaml.Node{{Kind: yaml.ScalarNode, Value: name}, match.YNode()}, + Style: l.Style, + }) + } + // Append the Node + return ElementMatcher{Keys: []string{name}, Values: []string{value}, Create: elem}, nil +} + +func (l PathGetter) fieldFilter( + name string, kind yaml.Kind) (Filter, error) { + if !IsCreate(l.Create) { + return Get(name), nil + } + return FieldMatcher{Name: name, Create: &RNode{value: &yaml.Node{Kind: kind, Style: l.Style}}}, nil +} + +func getPathPartKind(nextPart string, defaultKind yaml.Kind) yaml.Kind { + if IsListIndex(nextPart) { + // if nextPart is of the form [a=b], then it is an index into a Sequence + // so the current part must be a SequenceNode + return yaml.SequenceNode + } + if IsIdxNumber(nextPart) { + return yaml.SequenceNode + } + if nextPart == "" { + // final name in the path, use the default kind provided + return defaultKind + } + + // non-sequence intermediate Node + return yaml.MappingNode +} + +func SetField(name string, value *RNode) FieldSetter { + return FieldSetter{Name: name, Value: value} +} + +func Set(value *RNode) FieldSetter { + return FieldSetter{Value: value} +} + +// MapEntrySetter sets a map entry to a value. If it finds a key with the same +// value, it will override both Key and Value RNodes, including style and any +// other metadata. If it doesn't find the key, it will insert a new map entry. +// It will set the field, even if it's empty or nil, unlike the FieldSetter. +// This is useful for rebuilding some pre-existing RNode structure. +type MapEntrySetter struct { + // Name is the name of the field or key to lookup in a MappingNode. + // If Name is unspecified, it will use the Key's Value + Name string `yaml:"name,omitempty"` + + // Value is the value to set. + Value *RNode `yaml:"value,omitempty"` + + // Key is the map key to set. 
+ Key *RNode `yaml:"key,omitempty"` +} + +func (s MapEntrySetter) Filter(rn *RNode) (*RNode, error) { + if rn == nil { + return nil, errors.Errorf("Can't set map entry on a nil RNode") + } + if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { + return nil, err + } + if s.Name == "" { + s.Name = GetValue(s.Key) + } + + content := rn.Content() + fieldStillNotFound := true + visitFieldsWhileTrue(content, func(key, value *yaml.Node, keyIndex int) bool { + if key.Value == s.Name { + content[keyIndex] = s.Key.YNode() + content[keyIndex+1] = s.Value.YNode() + fieldStillNotFound = false + } + return fieldStillNotFound + }) + if !fieldStillNotFound { + return rn, nil + } + + // create the field + rn.YNode().Content = append( + rn.YNode().Content, + s.Key.YNode(), + s.Value.YNode()) + return rn, nil +} + +// FieldSetter sets a field or map entry to a value. +type FieldSetter struct { + Kind string `yaml:"kind,omitempty"` + + // Name is the name of the field or key to lookup in a MappingNode. + // If Name is unspecified, and the input is a ScalarNode, FieldSetter will set the + // value on the ScalarNode. + Name string `yaml:"name,omitempty"` + + // Comments for the field + Comments Comments `yaml:"comments,omitempty"` + + // Value is the value to set. + // Optional if Kind is set. + Value *RNode `yaml:"value,omitempty"` + + StringValue string `yaml:"stringValue,omitempty"` + + // OverrideStyle can be set to override the style of the existing node + // when setting it. Otherwise, if an existing node is found, the style is + // retained. + OverrideStyle bool `yaml:"overrideStyle,omitempty"` +} + +func (s FieldSetter) Filter(rn *RNode) (*RNode, error) { + if s.StringValue != "" && s.Value == nil { + s.Value = NewScalarRNode(s.StringValue) + } + + // need to set style for strings not recognized by yaml 1.1 to quoted if not previously set + // TODO: fix in upstream yaml library so this can be handled with yaml SetString + if s.Value.IsStringValue() && !s.OverrideStyle && s.Value.YNode().Style == 0 && IsYaml1_1NonString(s.Value.YNode()) { + s.Value.YNode().Style = yaml.DoubleQuotedStyle + } + + if s.Name == "" { + if err := ErrorIfInvalid(rn, yaml.ScalarNode); err != nil { + return rn, err + } + if IsMissingOrNull(s.Value) { + return rn, nil + } + // only apply the style if there is not an existing style + // or we want to override it + if !s.OverrideStyle || s.Value.YNode().Style == 0 { + // keep the original style if it exists + s.Value.YNode().Style = rn.YNode().Style + } + rn.SetYNode(s.Value.YNode()) + return rn, nil + } + + // Clear the field if it is empty, or explicitly null + if s.Value == nil || s.Value.IsTaggedNull() { + return rn.Pipe(Clear(s.Name)) + } + + field, err := rn.Pipe(FieldMatcher{Name: s.Name}) + if err != nil { + return nil, err + } + if field != nil { + // only apply the style if there is not an existing style + // or we want to override it + if !s.OverrideStyle || field.YNode().Style == 0 { + // keep the original style if it exists + s.Value.YNode().Style = field.YNode().Style + } + // need to def ref the Node since field is ephemeral + field.SetYNode(s.Value.YNode()) + return field, nil + } + + // create the field + rn.YNode().Content = append( + rn.YNode().Content, + &yaml.Node{ + Kind: yaml.ScalarNode, + Value: s.Name, + HeadComment: s.Comments.HeadComment, + LineComment: s.Comments.LineComment, + FootComment: s.Comments.FootComment, + }, + s.Value.YNode()) + return s.Value, nil +} + +// Tee calls the provided Filters, and returns its argument rather than the result 
+// of the filters. +// May be used to fork sub-filters from a call. +// e.g. locate field, set value; locate another field, set another value +func Tee(filters ...Filter) Filter { + return TeePiper{Filters: filters} +} + +// TeePiper Calls a slice of Filters and returns its input. +// May be used to fork sub-filters from a call. +// e.g. locate field, set value; locate another field, set another value +type TeePiper struct { + Kind string `yaml:"kind,omitempty"` + + // Filters are the set of Filters run by TeePiper. + Filters []Filter `yaml:"filters,omitempty"` +} + +func (t TeePiper) Filter(rn *RNode) (*RNode, error) { + _, err := rn.Pipe(t.Filters...) + return rn, err +} + +// IsCreate returns true if kind is specified +func IsCreate(kind yaml.Kind) bool { + return kind != 0 +} + +// IsMissingOrError returns true if rn is NOT found or err is non-nil +func IsMissingOrError(rn *RNode, err error) bool { + return rn == nil || err != nil +} + +// IsFoundOrError returns true if rn is found or err is non-nil +func IsFoundOrError(rn *RNode, err error) bool { + return rn != nil || err != nil +} + +func ErrorIfAnyInvalidAndNonNull(kind yaml.Kind, rn ...*RNode) error { + for i := range rn { + if IsMissingOrNull(rn[i]) { + continue + } + if err := ErrorIfInvalid(rn[i], kind); err != nil { + return err + } + } + return nil +} + +type InvalidNodeKindError struct { + expectedKind yaml.Kind + node *RNode +} + +func (e *InvalidNodeKindError) Error() string { + msg := fmt.Sprintf("wrong node kind: expected %s but got %s", + nodeKindString(e.expectedKind), nodeKindString(e.node.YNode().Kind)) + if content, err := e.node.String(); err == nil { + msg += fmt.Sprintf(": node contents:\n%s", content) + } + return msg +} + +func (e *InvalidNodeKindError) ActualNodeKind() Kind { + return e.node.YNode().Kind +} + +func ErrorIfInvalid(rn *RNode, kind yaml.Kind) error { + if IsMissingOrNull(rn) { + // node has no type, pass validation + return nil + } + + if rn.YNode().Kind != kind { + return &InvalidNodeKindError{node: rn, expectedKind: kind} + } + + if kind == yaml.MappingNode { + if len(rn.YNode().Content)%2 != 0 { + return errors.Errorf( + "yaml MappingNodes must have even length contents: %v", spew.Sdump(rn)) + } + } + + return nil +} + +// IsListIndex returns true if p is an index into a Val. +// e.g. [fieldName=fieldValue] +// e.g. [=primitiveValue] +func IsListIndex(p string) bool { + return strings.HasPrefix(p, "[") && strings.HasSuffix(p, "]") +} + +// IsIdxNumber returns true if p is an index number. +// e.g. 1 +func IsIdxNumber(p string) bool { + idx, err := strconv.Atoi(p) + return err == nil && idx >= 0 +} + +// IsWildcard returns true if p is matching every elements. +// e.g. "*" +func IsWildcard(p string) bool { + return p == "*" +} + +// SplitIndexNameValue splits a lookup part Val index into the field name +// and field value to match. +// e.g. splits [name=nginx] into (name, nginx) +// e.g. 
splits [=-jar] into ("", -jar) +func SplitIndexNameValue(p string) (string, string, error) { + elem := strings.TrimSuffix(p, "]") + elem = strings.TrimPrefix(elem, "[") + parts := strings.SplitN(elem, "=", 2) + if len(parts) == 1 { + return "", "", fmt.Errorf("list path element must contain fieldName=fieldValue for element to match") + } + return parts[0], parts[1], nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/copied.deepcopy.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/copied.deepcopy.go new file mode 100644 index 000000000..52f32be88 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/copied.deepcopy.go @@ -0,0 +1,44 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/labels/zz_generated.deepcopy.go + +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Requirement) DeepCopyInto(out *Requirement) { + *out = *in + if in.strValues != nil { + in, out := &in.strValues, &out.strValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement. +func (in *Requirement) DeepCopy() *Requirement { + if in == nil { + return nil + } + out := new(Requirement) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/labels.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/labels.go new file mode 100644 index 000000000..300014eac --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/labels.go @@ -0,0 +1,192 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/labels/labels.go + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "fmt" + "sort" + "strings" +) + +// Labels allows you to present labels independently from their storage. +type Labels interface { + // Has returns whether the provided label exists. + Has(label string) (exists bool) + + // Get returns the value for the provided label. 
+	Get(label string) (value string)
+}
+
+// Set is a map of label:value. It implements Labels.
+type Set map[string]string
+
+// String returns all labels listed as a human readable string.
+// Conveniently, exactly the format that ParseSelector takes.
+func (ls Set) String() string {
+	selector := make([]string, 0, len(ls))
+	for key, value := range ls {
+		selector = append(selector, key+"="+value)
+	}
+	// Sort for determinism.
+	sort.StringSlice(selector).Sort()
+	return strings.Join(selector, ",")
+}
+
+// Has returns whether the provided label exists in the map.
+func (ls Set) Has(label string) bool {
+	_, exists := ls[label]
+	return exists
+}
+
+// Get returns the value in the map for the provided label.
+func (ls Set) Get(label string) string {
+	return ls[label]
+}
+
+// AsSelector converts labels into a selector. It does not
+// perform any validation, which means the server will reject
+// the request if the Set contains invalid values.
+func (ls Set) AsSelector() Selector {
+	return SelectorFromSet(ls)
+}
+
+// AsValidatedSelector converts labels into a selector.
+// The Set is validated client-side, which allows errors to be caught early.
+func (ls Set) AsValidatedSelector() (Selector, error) {
+	return ValidatedSelectorFromSet(ls)
+}
+
+// AsSelectorPreValidated converts labels into a selector, but
+// assumes that labels are already validated and thus doesn't
+// perform any validation.
+// According to our measurements this is significantly faster
+// in codepaths that matter at high scale.
+func (ls Set) AsSelectorPreValidated() Selector {
+	return SelectorFromValidatedSet(ls)
+}
+
+// FormatLabels converts a label map into a plain string.
+func FormatLabels(labelMap map[string]string) string {
+	l := Set(labelMap).String()
+	if l == "" {
+		l = "<none>"
+	}
+	return l
+}
+
+// Conflicts takes 2 maps and returns true if there is a key match between
+// the maps but the value doesn't match, and returns false in other cases
+func Conflicts(labels1, labels2 Set) bool {
+	small := labels1
+	big := labels2
+	if len(labels2) < len(labels1) {
+		small = labels2
+		big = labels1
+	}
+
+	for k, v := range small {
+		if val, match := big[k]; match {
+			if val != v {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// Merge combines given maps, and does not check for any conflicts
+// between the maps. In case of conflicts, the second map (labels2) wins
+func Merge(labels1, labels2 Set) Set {
+	mergedMap := Set{}
+
+	for k, v := range labels1 {
+		mergedMap[k] = v
+	}
+	for k, v := range labels2 {
+		mergedMap[k] = v
+	}
+	return mergedMap
+}
+
+// Equals returns true if the given maps are equal
+func Equals(labels1, labels2 Set) bool {
+	if len(labels1) != len(labels2) {
+		return false
+	}
+
+	for k, v := range labels1 {
+		value, ok := labels2[k]
+		if !ok {
+			return false
+		}
+		if value != v {
+			return false
+		}
+	}
+	return true
+}
+
+// AreLabelsInWhiteList verifies if the provided label list
+// is in the provided whitelist and returns true, otherwise false.
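+//
+// Example (editor's illustrative sketch, not upstream documentation):
+//
+//	labels := Set{"app": "etcd"}
+//	AreLabelsInWhiteList(labels, Set{})                            // true: an empty whitelist allows everything
+//	AreLabelsInWhiteList(labels, Set{"app": "etcd", "tier": "db"}) // true: key and value both match
+//	AreLabelsInWhiteList(Set{"app": "web"}, Set{"app": "etcd"})    // false: value differs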
+func AreLabelsInWhiteList(labels, whitelist Set) bool { + if len(whitelist) == 0 { + return true + } + + for k, v := range labels { + value, ok := whitelist[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} + +// ConvertSelectorToLabelsMap converts selector string to labels map +// and validates keys and values +func ConvertSelectorToLabelsMap(selector string) (Set, error) { + labelsMap := Set{} + + if len(selector) == 0 { + return labelsMap, nil + } + + labels := strings.Split(selector, ",") + for _, label := range labels { + l := strings.Split(label, "=") + if len(l) != 2 { + return labelsMap, fmt.Errorf("invalid selector: %s", l) + } + key := strings.TrimSpace(l[0]) + if err := validateLabelKey(key); err != nil { + return labelsMap, err + } + value := strings.TrimSpace(l[1]) + if err := validateLabelValue(key, value); err != nil { + return labelsMap, err + } + labelsMap[key] = value + } + return labelsMap, nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/selector.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/selector.go new file mode 100644 index 000000000..73c5ae6a6 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/selector.go @@ -0,0 +1,925 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/labels/selector.go + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + + "log" + "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection" + "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets" + "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation" +) + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Selector represents a label selector. +type Selector interface { + // Matches returns true if this selector matches the given set of labels. + Matches(Labels) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // String returns a human readable string that represents this selector. + String() string + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector + + // Requirements converts this interface into Requirements to expose + // more detailed selection information. + // If there are querying parameters, it will return converted requirements and selectable=true. + // If this selector doesn't want to select anything, it will return selectable=false. + Requirements() (requirements Requirements, selectable bool) + + // Make a deep copy of the selector. + DeepCopySelector() Selector + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific label to be set, and if so returns the value it + // requires. 
+ RequiresExactMatch(label string) (value string, found bool) +} + +// Everything returns a selector that matches all labels. +func Everything() Selector { + return internalSelector{} +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { + return "", false +} + +// Nothing returns a selector that matches no labels +func Nothing() Selector { + return nothingSelector{} +} + +// NewSelector returns a nil selector +func NewSelector() Selector { + return internalSelector(nil) +} + +type internalSelector []Requirement + +func (s internalSelector) DeepCopy() internalSelector { + if s == nil { + return nil + } + result := make([]Requirement, len(s)) + for i := range s { + s[i].DeepCopyInto(&result[i]) + } + return result +} + +func (s internalSelector) DeepCopySelector() Selector { + return s.DeepCopy() +} + +// ByKey sorts requirements by key to obtain deterministic parser +type ByKey []Requirement + +func (a ByKey) Len() int { return len(a) } + +func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } + +// Requirement contains values, a key, and an operator that relates the key and values. +// The zero value of Requirement is invalid. +// Requirement implements both set based match and exact match +// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +type Requirement struct { + key string + operator selection.Operator + // In huge majority of cases we have at most one value here. + // It is generally faster to operate on a single-element slice + // than on a single-element map, so we have a slice here. + strValues []string +} + +// NewRequirement is the constructor for a Requirement. +// If any of these rules is violated, an error is returned: +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. +// (2) If the operator is In or NotIn, the values set must be non-empty. +// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// (4) If the operator is Exists or DoesNotExist, the value set must be empty. +// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// (6) The key is invalid due to its length, or sequence +// of characters. See validateLabelKey for more details. +// +// The empty string is a valid value in the input values set. 
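+//
+// Example (editor's illustrative sketch, not upstream documentation):
+//
+//	r, err := NewRequirement("tier", selection.In, []string{"db", "cache"})
+//	// err == nil; r matches any label set whose "tier" value is "db" or "cache"
+//	_, err = NewRequirement("tier", selection.In, nil)
+//	// err != nil: rule (2), the values set may not be empty for In/NotIn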
+func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { + if err := validateLabelKey(key); err != nil { + return nil, err + } + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + return nil, fmt.Errorf("exact-match compatibility requires one single value") + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + return nil, fmt.Errorf("values set must be empty for exists and does not exist") + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer") + } + } + default: + return nil, fmt.Errorf("operator '%v' is not recognized", op) + } + + for i := range vals { + if err := validateLabelValue(key, vals[i]); err != nil { + return nil, err + } + } + return &Requirement{key: key, operator: op, strValues: vals}, nil +} + +func (r *Requirement) hasValue(value string) bool { + for i := range r.strValues { + if r.strValues[i] == value { + return true + } + } + return false +} + +// Matches returns true if the Requirement matches the input Labels. +// There is a match in the following cases: +// (1) The operator is Exists and Labels has the Requirement's key. +// (2) The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// (3) The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// (4) The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. +func (r *Requirement) Matches(ls Labels) bool { + switch r.operator { + case selection.In, selection.Equals, selection.DoubleEquals: + if !ls.Has(r.key) { + return false + } + return r.hasValue(ls.Get(r.key)) + case selection.NotIn, selection.NotEquals: + if !ls.Has(r.key) { + return true + } + return !r.hasValue(ls.Get(r.key)) + case selection.Exists: + return ls.Has(r.key) + case selection.DoesNotExist: + return !ls.Has(r.key) + case selection.GreaterThan, selection.LessThan: + if !ls.Has(r.key) { + return false + } + lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) + if err != nil { + log.Printf("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + return false + } + + // There should be only one strValue in r.strValues, and can be converted to an integer. 
+ if len(r.strValues) != 1 { + log.Printf("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + return false + } + + var rValue int64 + for i := range r.strValues { + rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) + if err != nil { + log.Printf("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + return false + } + } + return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) + default: + return false + } +} + +// Key returns requirement key +func (r *Requirement) Key() string { + return r.key +} + +// Operator returns requirement operator +func (r *Requirement) Operator() selection.Operator { + return r.operator +} + +// Values returns requirement values +func (r *Requirement) Values() sets.String { + ret := sets.String{} + for i := range r.strValues { + ret.Insert(r.strValues[i]) + } + return ret +} + +// Empty returns true if the internalSelector doesn't restrict selection space +func (lsel internalSelector) Empty() bool { + if lsel == nil { + return true + } + return len(lsel) == 0 +} + +// String returns a human-readable string that represents this +// Requirement. If called on an invalid Requirement, an error is +// returned. See NewRequirement for creating a valid Requirement. +func (r *Requirement) String() string { + var buffer bytes.Buffer + if r.operator == selection.DoesNotExist { + buffer.WriteString("!") + } + buffer.WriteString(r.key) + + switch r.operator { + case selection.Equals: + buffer.WriteString("=") + case selection.DoubleEquals: + buffer.WriteString("==") + case selection.NotEquals: + buffer.WriteString("!=") + case selection.In: + buffer.WriteString(" in ") + case selection.NotIn: + buffer.WriteString(" notin ") + case selection.GreaterThan: + buffer.WriteString(">") + case selection.LessThan: + buffer.WriteString("<") + case selection.Exists, selection.DoesNotExist: + return buffer.String() + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString("(") + } + if len(r.strValues) == 1 { + buffer.WriteString(r.strValues[0]) + } else { // only > 1 since == 0 prohibited by NewRequirement + // normalizes value order on output, without mutating the in-memory selector representation + // also avoids normalization when it is not required, and ensures we do not mutate shared data + buffer.WriteString(strings.Join(safeSort(r.strValues), ",")) + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString(")") + } + return buffer.String() +} + +// safeSort sort input strings without modification +func safeSort(in []string) []string { + if sort.StringsAreSorted(in) { + return in + } + out := make([]string, len(in)) + copy(out, in) + sort.Strings(out) + return out +} + +// Add adds requirements to the selector. It copies the current selector returning a new one +func (lsel internalSelector) Add(reqs ...Requirement) Selector { + var sel internalSelector + for ix := range lsel { + sel = append(sel, lsel[ix]) + } + for _, r := range reqs { + sel = append(sel, r) + } + sort.Sort(ByKey(sel)) + return sel +} + +// Matches for a internalSelector returns true if all +// its Requirements match the input Labels. If any +// Requirement does not match, false is returned. 
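+//
+// Example (editor's illustrative sketch, not upstream documentation):
+//
+//	sel, _ := Parse("app=etcd,tier in (db,cache)")
+//	sel.Matches(Set{"app": "etcd", "tier": "db"}) // true: every requirement holds
+//	sel.Matches(Set{"app": "etcd"})               // false: the tier requirement fails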
+func (lsel internalSelector) Matches(l Labels) bool { + for ix := range lsel { + if matches := lsel[ix].Matches(l); !matches { + return false + } + } + return true +} + +func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } + +// String returns a comma-separated string of all +// the internalSelector Requirements' human-readable strings. +func (lsel internalSelector) String() string { + var reqs []string + for ix := range lsel { + reqs = append(reqs, lsel[ix].String()) + } + return strings.Join(reqs, ",") +} + +// RequiresExactMatch introspect whether a given selector requires a single specific field +// to be set, and if so returns the value it requires. +func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range lsel { + if lsel[ix].key == label { + switch lsel[ix].operator { + case selection.Equals, selection.DoubleEquals, selection.In: + if len(lsel[ix].strValues) == 1 { + return lsel[ix].strValues[0], true + } + } + return "", false + } + } + return "", false +} + +// Token represents constant definition for lexer token +type Token int + +const ( + // ErrorToken represents scan error + ErrorToken Token = iota + // EndOfStringToken represents end of string + EndOfStringToken + // ClosedParToken represents close parenthesis + ClosedParToken + // CommaToken represents the comma + CommaToken + // DoesNotExistToken represents logic not + DoesNotExistToken + // DoubleEqualsToken represents double equals + DoubleEqualsToken + // EqualsToken represents equal + EqualsToken + // GreaterThanToken represents greater than + GreaterThanToken + // IdentifierToken represents identifier, e.g. keys and values + IdentifierToken + // InToken represents in + InToken + // LessThanToken represents less than + LessThanToken + // NotEqualsToken represents not equal + NotEqualsToken + // NotInToken represents not in + NotInToken + // OpenParToken represents open parenthesis + OpenParToken +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]Token{ + ")": ClosedParToken, + ",": CommaToken, + "!": DoesNotExistToken, + "==": DoubleEqualsToken, + "=": EqualsToken, + ">": GreaterThanToken, + "in": InToken, + "<": LessThanToken, + "!=": NotEqualsToken, + "notin": NotInToken, + "(": OpenParToken, +} + +// ScannedItem contains the Token and the literal produced by the lexer. +type ScannedItem struct { + tok Token + literal string +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detect if the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', '!', '(', ')', ',', '>', '<': + return true + } + return false +} + +// Lexer represents the Lexer struct for label selector. 
+// It contains necessary informationt to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read return the character currently lexed +// increment the position and check the buffer overflow +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token? + return val, s + } + return IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans string starting with special symbol. +// special symbol identify non literal operators. "!=", "==", "=" +func (l *Lexer) scanSpecialSymbol() (Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters +// returning the first non blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and the literal +// literal is meaningfull only for IdentifierToken token +func (l *Lexer) Lex() (tok Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIDOrKeyword() + } +} + +// Parser data structure contains the label selector parser data structure +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// ParserContext represents context during parsing: +// some literal for example 'in' and 'notin' can be +// recognized as operator for example 'x in (a)' but +// it can be recognized as value for example 'value in (in)' +type ParserContext int + +const ( + // KeyAndOperator represents key and operator + KeyAndOperator ParserContext = iota + // Values represents values + Values +) + +// lookahead func returns the current token and string. No increment of current position +func (p *Parser) lookahead(context ParserContext) (Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// consume returns current token and string. 
Increments the position +func (p *Parser) consume(context ParserContext) (Token, string) { + p.position++ + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// scan runs through the input string and stores the ScannedItem in an array +// Parser can now lookahead and consume the tokens +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == EndOfStringToken { + break + } + } +} + +// parse runs the left recursive descending algorithm +// on input string. It returns a list of Requirement objects. +func (p *Parser) parse() (internalSelector, error) { + p.scan() // init scannedItems + + var requirements internalSelector + for { + tok, lit := p.lookahead(Values) + switch tok { + case IdentifierToken, DoesNotExistToken: + r, err := p.parseRequirement() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + requirements = append(requirements, *r) + t, l := p.consume(Values) + switch t { + case EndOfStringToken: + return requirements, nil + case CommaToken: + t2, l2 := p.lookahead(Values) + if t2 != IdentifierToken && t2 != DoesNotExistToken { + return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) + } + case EndOfStringToken: + return requirements, nil + default: + return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) + } + } +} + +func (p *Parser) parseRequirement() (*Requirement, error) { + key, operator, err := p.parseKeyAndInferOperator() + if err != nil { + return nil, err + } + if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked + return NewRequirement(key, operator, []string{}) + } + operator, err = p.parseOperator() + if err != nil { + return nil, err + } + var values sets.String + switch operator { + case selection.In, selection.NotIn: + values, err = p.parseValues() + case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: + values, err = p.parseExactValue() + } + if err != nil { + return nil, err + } + return NewRequirement(key, operator, values.List()) + +} + +// parseKeyAndInferOperator parse literals. 
+// in case of no operator '!, in, notin, ==, =, !=' are found +// the 'exists' operator is inferred +func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { + var operator selection.Operator + tok, literal := p.consume(Values) + if tok == DoesNotExistToken { + operator = selection.DoesNotExist + tok, literal = p.consume(Values) + } + if tok != IdentifierToken { + err := fmt.Errorf("found '%s', expected: identifier", literal) + return "", "", err + } + if err := validateLabelKey(literal); err != nil { + return "", "", err + } + if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { + if operator != selection.DoesNotExist { + operator = selection.Exists + } + } + return literal, operator, nil +} + +// parseOperator return operator and eventually matchType +// matchType can be exact +func (p *Parser) parseOperator() (op selection.Operator, err error) { + tok, lit := p.consume(KeyAndOperator) + switch tok { + // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator + case InToken: + op = selection.In + case EqualsToken: + op = selection.Equals + case DoubleEqualsToken: + op = selection.DoubleEquals + case GreaterThanToken: + op = selection.GreaterThan + case LessThanToken: + op = selection.LessThan + case NotInToken: + op = selection.NotIn + case NotEqualsToken: + op = selection.NotEquals + default: + return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) + } + return op, nil +} + +// parseValues parses the values for set based matching (x,y,z) +func (p *Parser) parseValues() (sets.String, error) { + tok, lit := p.consume(Values) + if tok != OpenParToken { + return nil, fmt.Errorf("found '%s' expected: '('", lit) + } + tok, lit = p.lookahead(Values) + switch tok { + case IdentifierToken, CommaToken: + s, err := p.parseIdentifiersList() // handles general cases + if err != nil { + return s, err + } + if tok, _ = p.consume(Values); tok != ClosedParToken { + return nil, fmt.Errorf("found '%s', expected: ')'", lit) + } + return s, nil + case ClosedParToken: // handles "()" + p.consume(Values) + return sets.NewString(""), nil + default: + return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) + } +} + +// parseIdentifiersList parses a (possibly empty) list of +// of comma separated (possibly empty) identifiers +func (p *Parser) parseIdentifiersList() (sets.String, error) { + s := sets.NewString() + for { + tok, lit := p.consume(Values) + switch tok { + case IdentifierToken: + s.Insert(lit) + tok2, lit2 := p.lookahead(Values) + switch tok2 { + case CommaToken: + continue + case ClosedParToken: + return s, nil + default: + return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2) + } + case CommaToken: // handled here since we can have "(," + if s.Len() == 0 { + s.Insert("") // to handle (, + } + tok2, _ := p.lookahead(Values) + if tok2 == ClosedParToken { + s.Insert("") // to handle ,) Double "" removed by StringSet + return s, nil + } + if tok2 == CommaToken { + p.consume(Values) + s.Insert("") // to handle ,, Double "" removed by StringSet + } + default: // it can be operator + return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit) + } + } +} + +// parseExactValue parses the only value for exact match style +func (p *Parser) parseExactValue() (sets.String, error) { + s := sets.NewString() + tok, lit := p.lookahead(Values) + if tok == EndOfStringToken || tok == CommaToken { + s.Insert("") + return s, nil + } + tok, lit = p.consume(Values) + if tok 
== IdentifierToken {
+		s.Insert(lit)
+		return s, nil
+	}
+	return nil, fmt.Errorf("found '%s', expected: identifier", lit)
+}
+
+// Parse takes a string representing a selector and returns a selector
+// object, or an error. This parsing function differs from ParseSelector
+// as they parse different selectors with different syntaxes.
+// The input will cause an error if it does not follow this form:
+//
+//	<selector-syntax>         ::= <requirement> | <requirement> "," <selector-syntax>
+//	<requirement>             ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
+//	<set-based-restriction>   ::= "" | <inclusion-exclusion> <value-set>
+//	<inclusion-exclusion>     ::= <inclusion> | <exclusion>
+//	<exclusion>               ::= "notin"
+//	<inclusion>               ::= "in"
+//	<value-set>               ::= "(" <values> ")"
+//	<values>                  ::= VALUE | VALUE "," <values>
+//	<exact-match-restriction> ::= ["="|"=="|"!="] VALUE
+//
+// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
+// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
+// Delimiter is white space: (' ', '\t')
+// Example of valid syntax:
+//	"x in (foo,,baz),y,z notin ()"
+//
+// Note:
+//	(1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the
+//	    VALUEs in its requirement
+//	(2) Exclusion - " notin " - denotes that the KEY is not equal to any
+//	    of the VALUEs in its requirement or does not exist
+//	(3) The empty string is a valid VALUE
+//	(4) A requirement with just a KEY - as in "y" above - denotes that
+//	    the KEY exists and can be any VALUE.
+//	(5) A requirement with just !KEY requires that the KEY not exist.
+//
+func Parse(selector string) (Selector, error) {
+	parsedSelector, err := parse(selector)
+	if err == nil {
+		return parsedSelector, nil
+	}
+	return nil, err
+}
+
+// parse parses the string representation of the selector and returns the internalSelector struct.
+// The callers of this method can then decide how to return the internalSelector struct to their
+// callers. This function has two callers now, one returns a Selector interface and the other
+// returns a list of requirements.
+func parse(selector string) (internalSelector, error) {
+	p := &Parser{l: &Lexer{s: selector, pos: 0}}
+	items, err := p.parse()
+	if err != nil {
+		return nil, err
+	}
+	sort.Sort(ByKey(items)) // sort to grant deterministic parsing
+	return internalSelector(items), err
+}
+
+func validateLabelKey(k string) error {
+	if errs := validation.IsQualifiedName(k); len(errs) != 0 {
+		return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; "))
+	}
+	return nil
+}
+
+func validateLabelValue(k, v string) error {
+	if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+		return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; "))
+	}
+	return nil
+}
+
+// SelectorFromSet returns a Selector which will match exactly the given Set.
+// Nil and empty Sets are considered equivalent to Everything().
+// It does not perform any validation, which means the server will reject
+// the request if the Set contains invalid values.
+func SelectorFromSet(ls Set) Selector {
+	return SelectorFromValidatedSet(ls)
+}
+
+// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set.
+// Nil and empty Sets are considered equivalent to Everything().
+// The Set is validated client-side, which allows errors to be caught early.
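+//
+// Example (editor's illustrative sketch, not upstream documentation):
+//
+//	sel, err := ValidatedSelectorFromSet(Set{"app": "etcd"})
+//	// equivalent to Parse("app=etcd"), but invalid keys or values are
+//	// reported in err rather than rejected later by the server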
+func ValidatedSelectorFromSet(ls Set) (Selector, error) { + if ls == nil || len(ls) == 0 { + return internalSelector{}, nil + } + requirements := make([]Requirement, 0, len(ls)) + for label, value := range ls { + r, err := NewRequirement(label, selection.Equals, []string{value}) + if err != nil { + return nil, err + } + requirements = append(requirements, *r) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return internalSelector(requirements), nil +} + +// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. +// A nil and empty Sets are considered equivalent to Everything(). +// It assumes that Set is already validated and doesn't do any validation. +func SelectorFromValidatedSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + requirements := make([]Requirement, 0, len(ls)) + for label, value := range ls { + requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return internalSelector(requirements) +} + +// ParseToRequirements takes a string representing a selector and returns a list of +// requirements. This function is suitable for those callers that perform additional +// processing on selector requirements. +// See the documentation for Parse() function for more details. +// TODO: Consider exporting the internalSelector type instead. +func ParseToRequirements(selector string) ([]Requirement, error) { + return parse(selector) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection/operator.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection/operator.go new file mode 100644 index 000000000..29c443df4 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection/operator.go @@ -0,0 +1,36 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/selection/operator.go + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selection + +// Operator represents a key/field's relationship to value(s). +// See labels.Requirement and fields.Requirement for more details. +type Operator string + +const ( + DoesNotExist Operator = "!" 
+ Equals Operator = "=" + DoubleEquals Operator = "==" + In Operator = "in" + NotEquals Operator = "!=" + NotIn Operator = "notin" + Exists Operator = "exists" + GreaterThan Operator = "gt" + LessThan Operator = "lt" +) diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors/errors.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors/errors.go new file mode 100644 index 000000000..24d040e06 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors/errors.go @@ -0,0 +1,252 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/errors/errors.go + +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "fmt" + + "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets" +) + +// MessageCountMap contains occurrence for each error message. +type MessageCountMap map[string]int + +// Aggregate represents an object that contains multiple errors, but does not +// necessarily have singular semantic meaning. +// The aggregate can be used with `errors.Is()` to check for the occurrence of +// a specific error type. +// Errors.As() is not supported, because the caller presumably cares about a +// specific error of potentially multiple that match the given type. +type Aggregate interface { + error + Errors() []error + Is(error) bool +} + +// NewAggregate converts a slice of errors into an Aggregate interface, which +// is itself an implementation of the error interface. If the slice is empty, +// this returns nil. +// It will check if any of the element of input error list is nil, to avoid +// nil pointer panic when call Error(). +func NewAggregate(errlist []error) Aggregate { + if len(errlist) == 0 { + return nil + } + // In case of input error list contains nil + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregate []error + +// Error is part of the error interface. +func (agg aggregate) Error() string { + if len(agg) == 0 { + // This should never happen, really. 
+ return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + seenerrs := sets.NewString() + result := "" + agg.visit(func(err error) bool { + msg := err.Error() + if seenerrs.Has(msg) { + return false + } + seenerrs.Insert(msg) + if len(seenerrs) > 1 { + result += ", " + } + result += msg + return false + }) + if len(seenerrs) == 1 { + return result + } + return "[" + result + "]" +} + +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + if match := err.visit(f); match { + return match + } + case Aggregate: + for _, nestedErr := range err.Errors() { + if match := f(nestedErr); match { + return match + } + } + default: + if match := f(err); match { + return match + } + } + } + + return false +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Matcher is used to match errors. Returns true if the error matches. +type Matcher func(error) bool + +// FilterOut removes all errors that match any of the matchers from the input +// error. If the input is a singular error, only that error is tested. If the +// input implements the Aggregate interface, the list of errors will be +// processed recursively. +// +// This can be used, for example, to remove known-OK errors (such as io.EOF or +// os.PathNotFound) from a list of errors. +func FilterOut(err error, fns ...Matcher) error { + if err == nil { + return nil + } + if agg, ok := err.(Aggregate); ok { + return NewAggregate(filterErrors(agg.Errors(), fns...)) + } + if !matchesError(err, fns...) { + return err + } + return nil +} + +// matchesError returns true if any Matcher returns true +func matchesError(err error, fns ...Matcher) bool { + for _, fn := range fns { + if fn(err) { + return true + } + } + return false +} + +// filterErrors returns any errors (or nested errors, if the list contains +// nested Errors) for which all fns return false. If no errors +// remain a nil list is returned. The resulting silec will have all +// nested slices flattened as a side effect. +func filterErrors(list []error, fns ...Matcher) []error { + result := []error{} + for _, err := range list { + r := FilterOut(err, fns...) + if r != nil { + result = append(result, r) + } + } + return result +} + +// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary +// nesting, and flattens them all into a single Aggregate, recursively. +func Flatten(agg Aggregate) Aggregate { + result := []error{} + if agg == nil { + return nil + } + for _, err := range agg.Errors() { + if a, ok := err.(Aggregate); ok { + r := Flatten(a) + if r != nil { + result = append(result, r.Errors()...) + } + } else { + if err != nil { + result = append(result, err) + } + } + } + return NewAggregate(result) +} + +// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate +func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { + if m == nil { + return nil + } + result := make([]error, 0, len(m)) + for errStr, count := range m { + var countStr string + if count > 1 { + countStr = fmt.Sprintf(" (repeated %v times)", count) + } + result = append(result, fmt.Errorf("%v%v", errStr, countStr)) + } + return NewAggregate(result) +} + +// Reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate. 
+func Reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + +// AggregateGoroutines runs the provided functions in parallel, stuffing all +// non-nil errors into the returned Aggregate. +// Returns nil if all the functions complete successfully. +func AggregateGoroutines(funcs ...func() error) Aggregate { + errChan := make(chan error, len(funcs)) + for _, f := range funcs { + go func(f func() error) { errChan <- f() }(f) + } + errs := make([]error, 0) + for i := 0; i < cap(errChan); i++ { + if err := <-errChan; err != nil { + errs = append(errs, err) + } + } + return NewAggregate(errs) +} + +// ErrPreconditionViolated is returned when the precondition is violated +var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/empty.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/empty.go new file mode 100644 index 000000000..ef404add1 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/empty.go @@ -0,0 +1,24 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/sets/empty.go + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/string.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/string.go new file mode 100644 index 000000000..8af1bac2a --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/string.go @@ -0,0 +1,206 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/sets/string.go + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. 
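+//
+// Example (editor's illustrative sketch, not upstream documentation):
+//
+//	s := NewString("b", "a", "a")
+//	s.Len()    // 2: duplicates collapse
+//	s.Has("a") // true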
+func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from the keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) String { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes the given items from the set. +func (s String) Delete(items ...string) String { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the items in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. 
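`List`, whose body follows, rounds out the set API above; a short sketch with hypothetical members:

```go
// Hypothetical demo of sets.String; assumes this package's scope plus fmt.
s := NewString("b", "a").Insert("c").Delete("b")        // {a, c}
fmt.Println(s.Has("a"))                                 // true
fmt.Println(s.Intersection(NewString("a", "z")).List()) // [a]
fmt.Println(s.List())                                   // [a c] (sorted)
```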
+func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/errors.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/errors.go new file mode 100644 index 000000000..20229a5b6 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/errors.go @@ -0,0 +1,275 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/validation/field/errors.go + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + utilerrors "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors" + "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets" +) + +// Error is an implementation of the 'error' interface, which represents a +// field-level validation error. +type Error struct { + Type ErrorType + Field string + BadValue interface{} + Detail string +} + +var _ error = &Error{} + +// Error implements the error interface. +func (v *Error) Error() string { + return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) +} + +// ErrorBody returns the error message without the field name. This is useful +// for building nice-looking higher-level error reporting. 
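Before `ErrorBody` itself (next), a sketch of how a populated `*Error` renders; the field path and value are hypothetical:

```go
// Hypothetical *field.Error; ErrorTypeInvalid is defined later in this file.
e := &Error{
	Type:     ErrorTypeInvalid,
	Field:    "spec.replicas",
	BadValue: -1,
	Detail:   "must be greater than or equal to 0",
}
fmt.Println(e.Error())
// Output: spec.replicas: Invalid value: -1: must be greater than or equal to 0
```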
+func (v *Error) ErrorBody() string { + var s string + switch v.Type { + case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: + s = v.Type.String() + default: + value := v.BadValue + valueType := reflect.TypeOf(value) + if value == nil || valueType == nil { + value = "null" + } else if valueType.Kind() == reflect.Ptr { + if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { + value = "null" + } else { + value = reflectValue.Elem().Interface() + } + } + switch t := value.(type) { + case int64, int32, float64, float32, bool: + // use simple printer for simple types + s = fmt.Sprintf("%s: %v", v.Type, value) + case string: + s = fmt.Sprintf("%s: %q", v.Type, t) + case fmt.Stringer: + // anything that defines String() is better than raw struct + s = fmt.Sprintf("%s: %s", v.Type, t.String()) + default: + // fallback to raw struct + // TODO: internal types have panic guards against json.Marshalling to prevent + // accidental use of internal types in external serialized form. For now, use + // %#v, although it would be better to show a more expressive output in the future + s = fmt.Sprintf("%s: %#v", v.Type, value) + } + } + if len(v.Detail) != 0 { + s += fmt.Sprintf(": %s", v.Detail) + } + return s +} + +// ErrorType is a machine readable value providing more detail about why +// a field is invalid. These values are expected to match 1-1 with +// CauseType in api/types.go. +type ErrorType string + +// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. +const ( + // ErrorTypeNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). See NotFound(). + ErrorTypeNotFound ErrorType = "FieldValueNotFound" + // ErrorTypeRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). See + // Required(). + ErrorTypeRequired ErrorType = "FieldValueRequired" + // ErrorTypeDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). See Duplicate(). + ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" + // ErrorTypeInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). See Invalid(). + ErrorTypeInvalid ErrorType = "FieldValueInvalid" + // ErrorTypeNotSupported is used to report unknown values for enumerated + // fields (e.g. a list of valid values). See NotSupported(). + ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" + // ErrorTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + ErrorTypeForbidden ErrorType = "FieldValueForbidden" + // ErrorTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + ErrorTypeTooMany ErrorType = "FieldValueTooMany" + // ErrorTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + ErrorTypeInternal ErrorType = "InternalError" +) + +// String converts a ErrorType into its corresponding canonical error message. 
+func (t ErrorType) String() string { + switch t { + case ErrorTypeNotFound: + return "Not found" + case ErrorTypeRequired: + return "Required value" + case ErrorTypeDuplicate: + return "Duplicate value" + case ErrorTypeInvalid: + return "Invalid value" + case ErrorTypeNotSupported: + return "Unsupported value" + case ErrorTypeForbidden: + return "Forbidden" + case ErrorTypeTooLong: + return "Too long" + case ErrorTypeTooMany: + return "Too many" + case ErrorTypeInternal: + return "Internal error" + default: + panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) + } +} + +// NotFound returns a *Error indicating "value not found". This is +// used to report failure to find a requested value (e.g. looking up an ID). +func NotFound(field *Path, value interface{}) *Error { + return &Error{ErrorTypeNotFound, field.String(), value, ""} +} + +// Required returns a *Error indicating "value required". This is used +// to report required values that are not provided (e.g. empty strings, null +// values, or empty arrays). +func Required(field *Path, detail string) *Error { + return &Error{ErrorTypeRequired, field.String(), "", detail} +} + +// Duplicate returns a *Error indicating "duplicate value". This is +// used to report collisions of values that must be unique (e.g. names or IDs). +func Duplicate(field *Path, value interface{}) *Error { + return &Error{ErrorTypeDuplicate, field.String(), value, ""} +} + +// Invalid returns a *Error indicating "invalid value". This is used +// to report malformed values (e.g. failed regex match, too long, out of bounds). +func Invalid(field *Path, value interface{}, detail string) *Error { + return &Error{ErrorTypeInvalid, field.String(), value, detail} +} + +// NotSupported returns a *Error indicating "unsupported value". +// This is used to report unknown values for enumerated fields (e.g. a list of +// valid values). +func NotSupported(field *Path, value interface{}, validValues []string) *Error { + detail := "" + if validValues != nil && len(validValues) > 0 { + quotedValues := make([]string, len(validValues)) + for i, v := range validValues { + quotedValues[i] = strconv.Quote(v) + } + detail = "supported values: " + strings.Join(quotedValues, ", ") + } + return &Error{ErrorTypeNotSupported, field.String(), value, detail} +} + +// Forbidden returns a *Error indicating "forbidden". This is used to +// report valid (as per formatting rules) values which would be accepted under +// some conditions, but which are not permitted by current conditions (e.g. +// security policy). +func Forbidden(field *Path, detail string) *Error { + return &Error{ErrorTypeForbidden, field.String(), "", detail} +} + +// TooLong returns a *Error indicating "too long". This is used to +// report that the given value is too long. This is similar to +// Invalid, but the returned error will not include the too-long +// value. +func TooLong(field *Path, value interface{}, maxLength int) *Error { + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} +} + +// TooMany returns a *Error indicating "too many". This is used to +// report that a given list has too many items. This is similar to TooLong, +// but the returned error indicates quantity instead of length. +func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { + return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} +} + +// InternalError returns a *Error indicating "internal error". 
This is used +// to signal that an error was found that was not directly related to user +// input. The err argument must be non-nil. +func InternalError(field *Path, err error) *Error { + return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} +} + +// ErrorList holds a set of Errors. It is plausible that we might one day have +// non-field errors in this same umbrella package, but for now we don't, so +// we can keep it simple and leave ErrorList here. +type ErrorList []*Error + +// NewErrorTypeMatcher returns an errors.Matcher that returns true +// if the provided error is a Error and has the provided ErrorType. +func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { + return func(err error) bool { + if e, ok := err.(*Error); ok { + return e.Type == t + } + return false + } +} + +// ToAggregate converts the ErrorList into an errors.Aggregate. +func (list ErrorList) ToAggregate() utilerrors.Aggregate { + errs := make([]error, 0, len(list)) + errorMsgs := sets.NewString() + for _, err := range list { + msg := fmt.Sprintf("%v", err) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, err) + } + return utilerrors.NewAggregate(errs) +} + +func fromAggregate(agg utilerrors.Aggregate) ErrorList { + errs := agg.Errors() + list := make(ErrorList, len(errs)) + for i := range errs { + list[i] = errs[i].(*Error) + } + return list +} + +// Filter removes items from the ErrorList that match the provided fns. +func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { + err := utilerrors.FilterOut(list.ToAggregate(), fns...) + if err == nil { + return nil + } + // FilterOut takes an Aggregate and returns an Aggregate + return fromAggregate(err.(utilerrors.Aggregate)) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/path.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/path.go new file mode 100644 index 000000000..44cdf997a --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/path.go @@ -0,0 +1,94 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/validation/field/path.go + +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "bytes" + "fmt" + "strconv" +) + +// Path represents the path from some root to a particular field. +type Path struct { + name string // the name of this field or "" if this is an index + index string // if name == "", this is a subscript (index or map key) of the previous element + parent *Path // nil if this is the root element +} + +// NewPath creates a root Path object. +func NewPath(name string, moreNames ...string) *Path { + r := &Path{name: name, parent: nil} + for _, anotherName := range moreNames { + r = &Path{name: anotherName, parent: r} + } + return r +} + +// Root returns the root element of this Path. 
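`Root` (next) and the chainable builders after it make structured field paths cheap to build and print; a sketch with hypothetical field names:

```go
// Hypothetical path built with the constructors in this file.
p := NewPath("spec").Child("template", "spec", "containers").Index(0).Child("name")
fmt.Println(p.String()) // spec.template.spec.containers[0].name
```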
+func (p *Path) Root() *Path { + for ; p.parent != nil; p = p.parent { + // Do nothing. + } + return p +} + +// Child creates a new Path that is a child of the method receiver. +func (p *Path) Child(name string, moreNames ...string) *Path { + r := NewPath(name, moreNames...) + r.Root().parent = p + return r +} + +// Index indicates that the previous Path is to be subscripted by an int. +// This sets the same underlying value as Key. +func (p *Path) Index(index int) *Path { + return &Path{index: strconv.Itoa(index), parent: p} +} + +// Key indicates that the previous Path is to be subscripted by a string. +// This sets the same underlying value as Index. +func (p *Path) Key(key string) *Path { + return &Path{index: key, parent: p} +} + +// String produces a string representation of the Path. +func (p *Path) String() string { + // make a slice to iterate + elems := []*Path{} + for ; p != nil; p = p.parent { + elems = append(elems, p) + } + + // iterate, but it has to be backwards + buf := bytes.NewBuffer(nil) + for i := range elems { + p := elems[len(elems)-1-i] + if p.parent != nil && len(p.name) > 0 { + // This is either the root or it is a subscript. + buf.WriteString(".") + } + if len(p.name) > 0 { + buf.WriteString(p.name) + } else { + fmt.Fprintf(buf, "[%s]", p.index) + } + } + return buf.String() +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/validation.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/validation.go new file mode 100644 index 000000000..5e1ddbc46 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/validation.go @@ -0,0 +1,506 @@ +// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. +// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/validation/validation.go + +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "math" + "net" + "regexp" + "strconv" + "strings" + + "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field" +) + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// IsQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. 
+func IsQualifiedName(value string) []string { + var errs []string + parts := strings.Split(value, "/") + var name string + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + errs = append(errs, "prefix part "+EmptyError()) + } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { + errs = append(errs, prefixEach(msgs, "prefix part ")...) + } + default: + return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") + } + + if len(name) == 0 { + errs = append(errs, "name part "+EmptyError()) + } else if len(name) > qualifiedNameMaxLength { + errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) + } + if !qualifiedNameRegexp.MatchString(name) { + errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) + } + return errs +} + +// IsFullyQualifiedName checks if the name is fully qualified. This is similar +// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of +// 2 and does not accept a trailing . as valid. +// TODO: This function is deprecated and preserved until all callers migrate to +// IsFullyQualifiedDomainName; please don't add new callers. +func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 3 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots")) + } + return allErrors +} + +// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This +// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments +// instead of 3 and accepts a trailing . as valid. +func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if strings.HasSuffix(name, ".") { + name = name[:len(name)-1] + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 2 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) + } + for _, label := range strings.Split(name, ".") { + if errs := IsDNS1123Label(label); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, label, strings.Join(errs, ","))) + } + } + return allErrors +} + +// Allowed characters in an HTTP Path as defined by RFC 3986. A HTTP path may +// contain: +// * unreserved characters (alphanumeric, '-', '.', '_', '~') +// * percent-encoded octets +// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=") +// * a colon character (":") +const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+` + +var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$") + +// IsDomainPrefixedPath checks if the given string is a domain-prefixed path +// (e.g. acme.io/foo). All characters before the first "/" must be a valid +// subdomain as defined by RFC 1123. 
All characters trailing the first "/" must +// be valid HTTP Path characters as defined by RFC 3986. +func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList { + var allErrs field.ErrorList + if len(dpPath) == 0 { + return append(allErrs, field.Required(fldPath, "")) + } + + segments := strings.SplitN(dpPath, "/", 2) + if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 { + return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")")) + } + + host := segments[0] + for _, err := range IsDNS1123Subdomain(host) { + allErrs = append(allErrs, field.Invalid(fldPath, host, err)) + } + + path := segments[1] + if !httpPathRegexp.MatchString(path) { + return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt))) + } + + return allErrs +} + +const labelValueFmt string = "(" + qualifiedNameFmt + ")?" +const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" + +// LabelValueMaxLength is a label's max length +const LabelValueMaxLength int = 63 + +var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") + +// IsValidLabelValue tests whether the value passed is a valid label value. If +// the value is not valid, a list of error strings is returned. Otherwise an +// empty list (or nil) is returned. +func IsValidLabelValue(value string) []string { + var errs []string + if len(value) > LabelValueMaxLength { + errs = append(errs, MaxLenError(LabelValueMaxLength)) + } + if !labelValueRegexp.MatchString(value) { + errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) + } + return errs +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" + +// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) +const DNS1123LabelMaxLength int = 63 + +var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") + +// IsDNS1123Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1123). +func IsDNS1123Label(value string) []string { + var errs []string + if len(value) > DNS1123LabelMaxLength { + errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) + } + if !dns1123LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } + return errs +} + +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// IsDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). 
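`IsDNS1123Subdomain` (next) and the label validator above share the package-wide convention of returning an empty message slice on success; a sketch with hypothetical names:

```go
// Hypothetical inputs for the DNS-1123 validators.
fmt.Println(IsDNS1123Label("Etcd-Main"))                   // one message: uppercase is rejected
fmt.Println(len(IsDNS1123Subdomain("backup.example.com"))) // 0, i.e. valid
```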
+func IsDNS1123Subdomain(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + return errs +} + +const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" + +// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) +const DNS1035LabelMaxLength int = 63 + +var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") + +// IsDNS1035Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1035). +func IsDNS1035Label(value string) []string { + var errs []string + if len(value) > DNS1035LabelMaxLength { + errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) + } + if !dns1035LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) + } + return errs +} + +// wildcard definition - RFC 1034 section 4.3.3. +// examples: +// - valid: *.bar.com, *.foo.bar.com +// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * +const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt +const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" + +// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a +// wildcard subdomain in DNS (RFC 1034 section 4.3.3). +func IsWildcardDNS1123Subdomain(value string) []string { + wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") + + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !wildcardDNS1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) + } + return errs +} + +const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" +const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" + +var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") + +// IsCIdentifier tests for a string that conforms the definition of an identifier +// in C. This checks the format, but not the length. +func IsCIdentifier(value string) []string { + if !cIdentifierRegexp.MatchString(value) { + return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} + } + return nil +} + +// IsValidPortNum tests that the argument is a valid, non-zero port number. +func IsValidPortNum(port int) []string { + if 1 <= port && port <= 65535 { + return nil + } + return []string{InclusiveRangeError(1, 65535)} +} + +// IsInRange tests that the argument is in an inclusive range. +func IsInRange(value int, min int, max int) []string { + if value >= min && value <= max { + return nil + } + return []string{InclusiveRangeError(min, max)} +} + +// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 +// TODO: once we have a type for UID/GID we should make these that type. 
+const ( + minUserID = 0 + maxUserID = math.MaxInt32 + minGroupID = 0 + maxGroupID = math.MaxInt32 +) + +// IsValidGroupID tests that the argument is a valid Unix GID. +func IsValidGroupID(gid int64) []string { + if minGroupID <= gid && gid <= maxGroupID { + return nil + } + return []string{InclusiveRangeError(minGroupID, maxGroupID)} +} + +// IsValidUserID tests that the argument is a valid Unix UID. +func IsValidUserID(uid int64) []string { + if minUserID <= uid && uid <= maxUserID { + return nil + } + return []string{InclusiveRangeError(minUserID, maxUserID)} +} + +var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") +var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") + +// IsValidPortName check that the argument is valid syntax. It must be +// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] +// and must contain at least one letter [a-z]. It must not start or end with a +// hyphen, nor contain adjacent hyphens. +// +// Note: We only allow lower-case characters, even though RFC 6335 is case +// insensitive. +func IsValidPortName(port string) []string { + var errs []string + if len(port) > 15 { + errs = append(errs, MaxLenError(15)) + } + if !portNameCharsetRegex.MatchString(port) { + errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") + } + if !portNameOneLetterRegexp.MatchString(port) { + errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + } + if strings.Contains(port, "--") { + errs = append(errs, "must not contain consecutive hyphens") + } + if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { + errs = append(errs, "must not begin or end with a hyphen") + } + return errs +} + +// IsValidIP tests that the argument is a valid IP address. +func IsValidIP(value string) []string { + if net.ParseIP(value) == nil { + return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + } + return nil +} + +// IsValidIPv4Address tests that the argument is a valid IPv4 address. +func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() == nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) + } + return allErrors +} + +// IsValidIPv6Address tests that the argument is a valid IPv6 address. +func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) + } + return allErrors +} + +const percentFmt string = "[0-9]+%" +const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" + +var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") + +// IsValidPercent checks that string is in the form of a percentage +func IsValidPercent(percent string) []string { + if !percentRegexp.MatchString(percent) { + return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} + } + return nil +} + +const httpHeaderNameFmt string = "[-A-Za-z0-9]+" +const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" + +var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") + +// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's +// definition of a valid header field name (a stricter subset than RFC7230). 
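Before `IsHTTPHeaderName` (next): the numeric, port, and address validators above follow the same nil-on-success shape, so call sites simply append the messages, as in this hypothetical sketch:

```go
// Hypothetical inputs; each validator returns nil for a valid value.
var msgs []string
msgs = append(msgs, IsValidPortNum(0)...)     // "must be between 1 and 65535, inclusive"
msgs = append(msgs, IsValidIP("10.9.8.7")...) // valid: appends nothing
msgs = append(msgs, IsValidPercent("93%")...) // valid: appends nothing
fmt.Println(msgs)
```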
+func IsHTTPHeaderName(value string) []string { + if !httpHeaderNameRegexp.MatchString(value) { + return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} + } + return nil +} + +const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" +const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" + +var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") + +// IsEnvVarName tests if a string is a valid environment variable name. +func IsEnvVarName(value string) []string { + var errs []string + if !envVarNameRegexp.MatchString(value) { + errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1")) + } + + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + +const configMapKeyFmt = `[-._a-zA-Z0-9]+` +const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" + +var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") + +// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret +func IsConfigMapKey(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !configMapKeyRegexp.MatchString(value) { + errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) + } + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + +// MaxLenError returns a string explanation of a "string too long" validation +// failure. +func MaxLenError(length int) string { + return fmt.Sprintf("must be no more than %d characters", length) +} + +// RegexError returns a string explanation of a regex validation failure. +func RegexError(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +// EmptyError returns a string explanation of a "must not be empty" validation +// failure. +func EmptyError() string { + return "must be non-empty" +} + +func prefixEach(msgs []string, prefix string) []string { + for i := range msgs { + msgs[i] = prefix + msgs[i] + } + return msgs +} + +// InclusiveRangeError returns a string explanation of a numeric "must be +// between" validation failure. +func InclusiveRangeError(lo, hi int) string { + return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) +} + +func hasChDirPrefix(value string) []string { + var errs []string + switch { + case value == ".": + errs = append(errs, `must not be '.'`) + case value == "..": + errs = append(errs, `must not be '..'`) + case strings.HasPrefix(value, ".."): + errs = append(errs, `must not start with '..'`) + } + return errs +} + +// IsValidSocketAddr checks that string represents a valid socket address +// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) +func IsValidSocketAddr(value string) []string { + var errs []string + ip, port, err := net.SplitHostPort(value) + if err != nil { + errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") + return errs + } + portInt, _ := strconv.Atoi(port) + errs = append(errs, IsValidPortNum(portInt)...) 
+ errs = append(errs, IsValidIP(ip)...) + return errs +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go new file mode 100644 index 000000000..2ae8c1665 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go @@ -0,0 +1,137 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +import ( + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" +) + +// AnnotationClearer removes an annotation at metadata.annotations. +// Returns nil if the annotation or field does not exist. +type AnnotationClearer struct { + Kind string `yaml:"kind,omitempty"` + Key string `yaml:"key,omitempty"` +} + +func (c AnnotationClearer) Filter(rn *RNode) (*RNode, error) { + return rn.Pipe( + PathGetter{Path: []string{MetadataField, AnnotationsField}}, + FieldClearer{Name: c.Key}) +} + +func ClearAnnotation(key string) AnnotationClearer { + return AnnotationClearer{Key: key} +} + +// ClearEmptyAnnotations clears the keys, annotations +// and metadata if they are empty/null +func ClearEmptyAnnotations(rn *RNode) error { + _, err := rn.Pipe(Lookup(MetadataField), FieldClearer{ + Name: AnnotationsField, IfEmpty: true}) + if err != nil { + return errors.Wrap(err) + } + _, err = rn.Pipe(FieldClearer{Name: MetadataField, IfEmpty: true}) + if err != nil { + return errors.Wrap(err) + } + return nil +} + +// k8sMetaSetter sets a name at metadata.{key}. +// Creates metadata if does not exist. +type k8sMetaSetter struct { + Key string `yaml:"key,omitempty"` + Value string `yaml:"value,omitempty"` +} + +func (s k8sMetaSetter) Filter(rn *RNode) (*RNode, error) { + _, err := rn.Pipe( + PathGetter{Path: []string{MetadataField}, Create: yaml.MappingNode}, + FieldSetter{Name: s.Key, Value: NewStringRNode(s.Value)}) + return rn, err +} + +func SetK8sName(value string) k8sMetaSetter { + return k8sMetaSetter{Key: NameField, Value: value} +} + +func SetK8sNamespace(value string) k8sMetaSetter { + return k8sMetaSetter{Key: NamespaceField, Value: value} +} + +// AnnotationSetter sets an annotation at metadata.annotations. +// Creates metadata.annotations if does not exist. +type AnnotationSetter struct { + Kind string `yaml:"kind,omitempty"` + Key string `yaml:"key,omitempty"` + Value string `yaml:"value,omitempty"` +} + +func (s AnnotationSetter) Filter(rn *RNode) (*RNode, error) { + v := NewStringRNode(s.Value) + // some tools get confused about the type if annotations are not quoted + v.YNode().Style = yaml.SingleQuotedStyle + if err := ClearEmptyAnnotations(rn); err != nil { + return nil, err + } + return addMetadataNode(rn, AnnotationsField, s.Key, v) +} + +func SetAnnotation(key, value string) AnnotationSetter { + return AnnotationSetter{Key: key, Value: value} +} + +// AnnotationGetter gets an annotation at metadata.annotations. +// Returns nil if metadata.annotations does not exist. +type AnnotationGetter struct { + Kind string `yaml:"kind,omitempty"` + Key string `yaml:"key,omitempty"` + Value string `yaml:"value,omitempty"` +} + +// AnnotationGetter returns the annotation value. +// Returns "", nil if the annotation does not exist. 
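`AnnotationGetter.Filter` (next) is the read side of `SetAnnotation` above; a hedged round-trip sketch against a hypothetical manifest, using the public `sigs.k8s.io/kustomize/kyaml/yaml` package:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	// Hypothetical manifest; the annotation key/value are demo values.
	rn := yaml.MustParse("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n")
	if err := rn.PipeE(yaml.SetAnnotation("backup/owner", "e2e")); err != nil {
		panic(err) // demo only
	}
	v, _ := rn.Pipe(yaml.GetAnnotation("backup/owner"))
	fmt.Println(v.YNode().Value) // e2e
}
```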
+func (g AnnotationGetter) Filter(rn *RNode) (*RNode, error) { + v, err := rn.Pipe( + PathGetter{Path: []string{MetadataField, AnnotationsField, g.Key}}) + if v == nil || err != nil { + return v, err + } + if g.Value == "" || v.value.Value == g.Value { + return v, err + } + return nil, err +} + +func GetAnnotation(key string) AnnotationGetter { + return AnnotationGetter{Key: key} +} + +// LabelSetter sets a label at metadata.labels. +// Creates metadata.labels if does not exist. +type LabelSetter struct { + Kind string `yaml:"kind,omitempty"` + Key string `yaml:"key,omitempty"` + Value string `yaml:"value,omitempty"` +} + +func (s LabelSetter) Filter(rn *RNode) (*RNode, error) { + v := NewStringRNode(s.Value) + // some tools get confused about the type if labels are not quoted + v.YNode().Style = yaml.SingleQuotedStyle + return addMetadataNode(rn, LabelsField, s.Key, v) +} + +func addMetadataNode(rn *RNode, field, key string, v *RNode) (*RNode, error) { + return rn.Pipe( + PathGetter{ + Path: []string{MetadataField, field}, Create: yaml.MappingNode}, + FieldSetter{Name: key, Value: v}) +} + +func SetLabel(key, value string) LabelSetter { + return LabelSetter{Key: key, Value: value} +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/mapnode.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/mapnode.go new file mode 100644 index 000000000..31b41b40f --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/mapnode.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +// MapNode wraps a field key and value. +type MapNode struct { + Key *RNode + Value *RNode +} + +// IsNilOrEmpty returns true if the MapNode is nil, +// has no value, or has a value that appears empty. +func (mn *MapNode) IsNilOrEmpty() bool { + return mn == nil || mn.Value.IsNilOrEmpty() +} + +type MapNodeSlice []*MapNode + +func (m MapNodeSlice) Keys() []*RNode { + var keys []*RNode + for i := range m { + if m[i] != nil { + keys = append(keys, m[i].Key) + } + } + return keys +} + +func (m MapNodeSlice) Values() []*RNode { + var values []*RNode + for i := range m { + if m[i] != nil { + values = append(values, m[i].Value) + } else { + values = append(values, nil) + } + } + return values +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go new file mode 100644 index 000000000..1749d3872 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go @@ -0,0 +1,333 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "sigs.k8s.io/kustomize/kyaml/errors" + "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" +) + +// PathMatcher returns all RNodes matching the path wrapped in a SequenceNode. +// Lists may have multiple elements matching the path, and each matching element +// is added to the return result. +// If Path points to a SequenceNode, the SequenceNode is wrapped in another SequenceNode +// If Path does not contain any lists, the result is still wrapped in a SequenceNode of len == 1 +type PathMatcher struct { + Kind string `yaml:"kind,omitempty"` + + // Path is a slice of parts leading to the RNode to lookup. + // Each path part may be one of: + // * FieldMatcher -- e.g. "spec" + // * Map Key -- e.g. "app.k8s.io/version" + // * List Entry -- e.g. "[name=nginx]" or "[=-jar]" or "0" + // + // Map Keys and Fields are equivalent. 
+ // See FieldMatcher for more on Fields and Map Keys. + // + // List Entries are specified as map entry to match [fieldName=fieldValue]. + // See Elem for more on List Entries. + // + // Examples: + // * spec.template.spec.container with matching name: [name=nginx] -- match 'name': 'nginx' + // * spec.template.spec.container.argument matching a value: [=-jar] -- match '-jar' + Path []string `yaml:"path,omitempty"` + + // Matches is set by PathMatch to publish the matched element values for each node. + // After running PathMatcher.Filter, each node from the SequenceNode result may be + // looked up in Matches to find the field values that were matched. + Matches map[*Node][]string + + // StripComments may be set to remove the comments on the matching Nodes. + // This is useful for if the nodes are to be printed in FlowStyle. + StripComments bool + + // Create will cause missing path parts to be created as they are walked. + // + // * The leaf Node (final path) will be created with a Kind matching Create + // * Intermediary Nodes will be created as either a MappingNodes or + // SequenceNodes as appropriate for each's Path location. + // * Nodes identified by an index will only be created if the index indicates + // an append operation (i.e. index=len(list)) + Create yaml.Kind `yaml:"create,omitempty"` + + val *RNode + field string + matchRegex string +} + +func (p *PathMatcher) stripComments(n *Node) { + if n == nil { + return + } + if p.StripComments { + n.LineComment = "" + n.HeadComment = "" + n.FootComment = "" + for i := range n.Content { + p.stripComments(n.Content[i]) + } + } +} + +func (p *PathMatcher) Filter(rn *RNode) (*RNode, error) { + val, err := p.filter(rn) + if err != nil { + return nil, err + } + p.stripComments(val.YNode()) + return val, err +} + +func (p *PathMatcher) filter(rn *RNode) (*RNode, error) { + p.Matches = map[*Node][]string{} + + if len(p.Path) == 0 { + // return the element wrapped in a SequenceNode + p.appendRNode("", rn) + return p.val, nil + } + + if IsIdxNumber(p.Path[0]) { + return p.doIndexSeq(rn) + } + + if IsListIndex(p.Path[0]) { + // match seq elements + return p.doSeq(rn) + } + + if IsWildcard(p.Path[0]) { + // match every elements (*) + return p.doMatchEvery(rn) + } + // match a field + return p.doField(rn) +} + +func (p *PathMatcher) doMatchEvery(rn *RNode) (*RNode, error) { + if err := rn.VisitElements(p.visitEveryElem); err != nil { + return nil, err + } + + return p.val, nil +} + +func (p *PathMatcher) visitEveryElem(elem *RNode) error { + fieldName := p.Path[0] + // recurse on the matching element + pm := &PathMatcher{Path: p.Path[1:], Create: p.Create} + add, err := pm.filter(elem) + for k, v := range pm.Matches { + p.Matches[k] = v + } + if err != nil || add == nil { + return err + } + p.append(fieldName, add.Content()...) 
+ + return nil +} + +func (p *PathMatcher) doField(rn *RNode) (*RNode, error) { + // lookup the field + field, err := rn.Pipe(Get(p.Path[0])) + if err != nil || (!IsCreate(p.Create) && field == nil) { + return nil, err + } + + if IsCreate(p.Create) && field == nil { + var nextPart string + if len(p.Path) > 1 { + nextPart = p.Path[1] + } + nextPartKind := getPathPartKind(nextPart, p.Create) + field = &RNode{value: &yaml.Node{Kind: nextPartKind}} + err := rn.PipeE(SetField(p.Path[0], field)) + if err != nil { + return nil, err + } + } + + // recurse on the field, removing the first element of the path + pm := &PathMatcher{Path: p.Path[1:], Create: p.Create} + p.val, err = pm.filter(field) + p.Matches = pm.Matches + return p.val, err +} + +// doIndexSeq iterates over a sequence and appends elements matching the index p.Val +func (p *PathMatcher) doIndexSeq(rn *RNode) (*RNode, error) { + // parse to index number + idx, err := strconv.Atoi(p.Path[0]) + if err != nil { + return nil, err + } + + elements, err := rn.Elements() + if err != nil { + return nil, err + } + + if len(elements) == idx && IsCreate(p.Create) { + var nextPart string + if len(p.Path) > 1 { + nextPart = p.Path[1] + } + elem := &yaml.Node{Kind: getPathPartKind(nextPart, p.Create)} + err = rn.PipeE(Append(elem)) + if err != nil { + return nil, errors.WrapPrefixf(err, "failed to append element for %q", p.Path[0]) + } + elements = append(elements, NewRNode(elem)) + } + + if len(elements) < idx+1 { + return nil, fmt.Errorf("index %d specified but only %d elements found", idx, len(elements)) + } + // get target element + element := elements[idx] + + // recurse on the matching element + pm := &PathMatcher{Path: p.Path[1:], Create: p.Create} + add, err := pm.filter(element) + for k, v := range pm.Matches { + p.Matches[k] = v + } + if err != nil || add == nil { + return nil, err + } + p.append("", add.Content()...) 
+ return p.val, nil +} + +// doSeq iterates over a sequence and appends elements matching the path regex to p.Val +func (p *PathMatcher) doSeq(rn *RNode) (*RNode, error) { + // parse the field + match pair + var err error + p.field, p.matchRegex, err = SplitIndexNameValue(p.Path[0]) + if err != nil { + return nil, err + } + + primitiveElement := len(p.field) == 0 + if primitiveElement { + err = rn.VisitElements(p.visitPrimitiveElem) + } else { + err = rn.VisitElements(p.visitElem) + } + if err != nil { + return nil, err + } + if !p.val.IsNil() && len(p.val.YNode().Content) == 0 { + p.val = nil + } + + if !IsCreate(p.Create) || p.val != nil { + return p.val, nil + } + + var elem *yaml.Node + valueNode := NewScalarRNode(p.matchRegex).YNode() + if primitiveElement { + elem = valueNode + } else { + elem = &yaml.Node{ + Kind: yaml.MappingNode, + Content: []*yaml.Node{{Kind: yaml.ScalarNode, Value: p.field}, valueNode}, + } + } + err = rn.PipeE(Append(elem)) + if err != nil { + return nil, errors.WrapPrefixf(err, "failed to create element for %q", p.Path[0]) + } + // re-do the sequence search; this time we'll find the element we just created + return p.doSeq(rn) +} + +func (p *PathMatcher) visitPrimitiveElem(elem *RNode) error { + r, err := regexp.Compile(p.matchRegex) + if err != nil { + return err + } + + str, err := elem.String() + if err != nil { + return err + } + str = strings.TrimSpace(str) + if !r.MatchString(str) { + return nil + } + + p.appendRNode("", elem) + return nil +} + +func (p *PathMatcher) visitElem(elem *RNode) error { + r, err := regexp.Compile(p.matchRegex) + if err != nil { + return err + } + + // check if this elements field matches the regex + val := elem.Field(p.field) + if val == nil || val.Value == nil { + return nil + } + str, err := val.Value.String() + if err != nil { + return err + } + str = strings.TrimSpace(str) + if !r.MatchString(str) { + return nil + } + + // recurse on the matching element + pm := &PathMatcher{Path: p.Path[1:], Create: p.Create} + add, err := pm.filter(elem) + for k, v := range pm.Matches { + p.Matches[k] = v + } + if err != nil || add == nil { + return err + } + p.append(str, add.Content()...) + return nil +} + +func (p *PathMatcher) appendRNode(path string, node *RNode) { + p.append(path, node.YNode()) +} + +func (p *PathMatcher) append(path string, nodes ...*Node) { + if p.val == nil { + p.val = NewRNode(&Node{Kind: SequenceNode}) + } + for i := range nodes { + node := nodes[i] + p.val.YNode().Content = append(p.val.YNode().Content, node) + // record the path if specified + if path != "" { + p.Matches[node] = append(p.Matches[node], path) + } + } +} + +func cleanPath(path []string) []string { + var p []string + for _, elem := range path { + elem = strings.TrimSpace(elem) + if len(elem) == 0 { + continue + } + p = append(p, elem) + } + return p +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go new file mode 100644 index 000000000..8b328ab9d --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go @@ -0,0 +1,188 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package merge2 contains libraries for merging fields from one RNode to another +// RNode +package merge2 + +import ( + "sigs.k8s.io/kustomize/kyaml/openapi" + "sigs.k8s.io/kustomize/kyaml/yaml" + "sigs.k8s.io/kustomize/kyaml/yaml/walk" +) + +// Merge merges fields from src into dest. 
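`Merge` follows; `MergeStrings` right after it is the string convenience wrapper, sketched here with hypothetical YAML documents and a zero-value `yaml.MergeOptions`:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/yaml"
	"sigs.k8s.io/kustomize/kyaml/yaml/merge2"
)

func main() {
	// Hypothetical two-way merge: fields from patch are merged into base.
	base := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n"
	patch := "metadata:\n  labels:\n    app: etcd\n"
	merged, err := merge2.MergeStrings(patch, base, false, yaml.MergeOptions{})
	if err != nil {
		panic(err) // demo only
	}
	fmt.Print(merged) // base document with metadata.labels.app added
}
```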
+func Merge(src, dest *yaml.RNode, mergeOptions yaml.MergeOptions) (*yaml.RNode, error) { + return walk.Walker{ + Sources: []*yaml.RNode{dest, src}, + Visitor: Merger{}, + MergeOptions: mergeOptions, + }.Walk() +} + +// MergeStrings parses the arguments, and merges fields from srcStr into destStr. +func MergeStrings(srcStr, destStr string, infer bool, mergeOptions yaml.MergeOptions) (string, error) { + src, err := yaml.Parse(srcStr) + if err != nil { + return "", err + } + dest, err := yaml.Parse(destStr) + if err != nil { + return "", err + } + + result, err := walk.Walker{ + Sources: []*yaml.RNode{dest, src}, + Visitor: Merger{}, + InferAssociativeLists: infer, + MergeOptions: mergeOptions, + }.Walk() + if err != nil { + return "", err + } + + return result.String() +} + +type Merger struct { + // for forwards compatibility when new functions are added to the interface +} + +var _ walk.Visitor = Merger{} + +func (m Merger) VisitMap(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) { + if err := m.SetComments(nodes); err != nil { + return nil, err + } + if err := m.SetStyle(nodes); err != nil { + return nil, err + } + if yaml.IsMissingOrNull(nodes.Dest()) { + // Add + ps, _ := determineSmpDirective(nodes.Origin()) + if ps == smpDelete { + return walk.ClearNode, nil + } + + // If Origin is missing, preserve explicitly set null in Dest ("null", "~", etc) + if nodes.Origin().IsNil() && !nodes.Dest().IsNil() && len(nodes.Dest().YNode().Value) > 0 { + // Return a new node so that it won't have a "!!null" tag and therefore won't be cleared. + return yaml.NewScalarRNode(nodes.Dest().YNode().Value), nil + } + + return nodes.Origin(), nil + } + if nodes.Origin().IsTaggedNull() { + // clear the value + return walk.ClearNode, nil + } + + ps, err := determineSmpDirective(nodes.Origin()) + if err != nil { + return nil, err + } + + switch ps { + case smpDelete: + return walk.ClearNode, nil + case smpReplace: + return nodes.Origin(), nil + default: + return nodes.Dest(), nil + } +} + +func (m Merger) VisitScalar(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) { + if err := m.SetComments(nodes); err != nil { + return nil, err + } + if err := m.SetStyle(nodes); err != nil { + return nil, err + } + // Override value + if nodes.Origin() != nil { + return nodes.Origin(), nil + } + // Keep + return nodes.Dest(), nil +} + +func (m Merger) VisitList(nodes walk.Sources, s *openapi.ResourceSchema, kind walk.ListKind) (*yaml.RNode, error) { + if err := m.SetComments(nodes); err != nil { + return nil, err + } + if err := m.SetStyle(nodes); err != nil { + return nil, err + } + if kind == walk.NonAssociateList { + // Override value + if nodes.Origin() != nil { + return nodes.Origin(), nil + } + // Keep + return nodes.Dest(), nil + } + + // Add + if yaml.IsMissingOrNull(nodes.Dest()) { + return nodes.Origin(), nil + } + // Clear + if nodes.Origin().IsTaggedNull() { + return walk.ClearNode, nil + } + + ps, err := determineSmpDirective(nodes.Origin()) + if err != nil { + return nil, err + } + + switch ps { + case smpDelete: + return walk.ClearNode, nil + case smpReplace: + return nodes.Origin(), nil + default: + return nodes.Dest(), nil + } +} + +func (m Merger) SetStyle(sources walk.Sources) error { + source := sources.Origin() + dest := sources.Dest() + if dest == nil || dest.YNode() == nil || source == nil || source.YNode() == nil { + // avoid panic + return nil + } + + // copy the style from the source. 
+ // special case: if the dest was an empty map or seq, then it probably had + // folded style applied, but we actually want to keep the style of the origin + // in this case (even if it was the default). otherwise the merged elements + // will get folded even though this probably isn't what is desired. + if dest.YNode().Kind != yaml.ScalarNode && len(dest.YNode().Content) == 0 { + dest.YNode().Style = source.YNode().Style + } + return nil +} + +// SetComments copies the dest comments to the source comments if they are present +// on the source. +func (m Merger) SetComments(sources walk.Sources) error { + source := sources.Origin() + dest := sources.Dest() + if dest == nil || dest.YNode() == nil || source == nil || source.YNode() == nil { + // avoid panic + return nil + } + if source.YNode().FootComment != "" { + dest.YNode().FootComment = source.YNode().FootComment + } + if source.YNode().HeadComment != "" { + dest.YNode().HeadComment = source.YNode().HeadComment + } + if source.YNode().LineComment != "" { + dest.YNode().LineComment = source.YNode().LineComment + } + return nil +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective.go new file mode 100644 index 000000000..f38b188ea --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective.go @@ -0,0 +1,101 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package merge2 + +import ( + "fmt" + + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// A strategic merge patch directive. +// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md +// +//go:generate stringer -type=smpDirective -linecomment +type smpDirective int + +const ( + smpUnknown smpDirective = iota // unknown + smpReplace // replace + smpDelete // delete + smpMerge // merge +) + +const strategicMergePatchDirectiveKey = "$patch" + +// Examine patch for a strategic merge patch directive. +// If found, return it, and remove the directive from the patch. 
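`determineSmpDirective` (next) is unexported, so the sketch below only type-checks inside this package; the patch fragment is hypothetical:

```go
// Hypothetical strategic-merge-patch fragment carrying a "$patch" directive.
patch := yaml.MustParse("metadata:\n  name: demo\n$patch: replace\n")
d, err := determineSmpDirective(patch)
// d == smpReplace and err == nil; the "$patch" key has been elided from patch.
_, _ = d, err
```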
+func determineSmpDirective(patch *yaml.RNode) (smpDirective, error) { + if patch == nil { + return smpMerge, nil + } + switch patch.YNode().Kind { + case yaml.SequenceNode: + return determineSequenceNodePatchStrategy(patch) + case yaml.MappingNode: + return determineMappingNodePatchStrategy(patch) + default: + return smpUnknown, fmt.Errorf( + "no implemented strategic merge patch strategy for '%s' ('%s')", + patch.YNode().ShortTag(), patch.MustString()) + } +} + +func determineSequenceNodePatchStrategy(patch *yaml.RNode) (smpDirective, error) { + // get the $patch element + node, err := patch.Pipe(yaml.GetElementByKey(strategicMergePatchDirectiveKey)) + // if there are more than 1 key/value pair in the map, then this $patch + // is not for the sequence + if err != nil || node == nil || node.YNode() == nil || len(node.Content()) > 2 { + return smpMerge, nil + } + // get the value + value, err := node.Pipe(yaml.Get(strategicMergePatchDirectiveKey)) + if err != nil || value == nil || value.YNode() == nil { + return smpMerge, nil + } + v := value.YNode().Value + if v == smpDelete.String() { + return smpDelete, elideSequencePatchDirective(patch, v) + } + if v == smpReplace.String() { + return smpReplace, elideSequencePatchDirective(patch, v) + } + if v == smpMerge.String() { + return smpMerge, elideSequencePatchDirective(patch, v) + } + return smpUnknown, fmt.Errorf( + "unknown patch strategy '%s'", v) +} + +func determineMappingNodePatchStrategy(patch *yaml.RNode) (smpDirective, error) { + node, err := patch.Pipe(yaml.Get(strategicMergePatchDirectiveKey)) + if err != nil || node == nil || node.YNode() == nil { + return smpMerge, nil + } + v := node.YNode().Value + if v == smpDelete.String() { + return smpDelete, elideMappingPatchDirective(patch) + } + if v == smpReplace.String() { + return smpReplace, elideMappingPatchDirective(patch) + } + if v == smpMerge.String() { + return smpMerge, elideMappingPatchDirective(patch) + } + return smpUnknown, fmt.Errorf( + "unknown patch strategy '%s'", v) +} + +func elideMappingPatchDirective(patch *yaml.RNode) error { + return patch.PipeE(yaml.Clear(strategicMergePatchDirectiveKey)) +} + +func elideSequencePatchDirective(patch *yaml.RNode, value string) error { + return patch.PipeE(yaml.ElementSetter{ + Element: nil, + Keys: []string{strategicMergePatchDirectiveKey}, + Values: []string{value}, + }) +} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective_string.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective_string.go new file mode 100644 index 000000000..b4f937f0e --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=smpDirective -linecomment"; DO NOT EDIT. + +package merge2 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+	var x [1]struct{}
+	_ = x[smpUnknown-0]
+	_ = x[smpReplace-1]
+	_ = x[smpDelete-2]
+	_ = x[smpMerge-3]
+}
+
+const _smpDirective_name = "unknownreplacedeletemerge"
+
+var _smpDirective_index = [...]uint8{0, 7, 14, 20, 25}
+
+func (i smpDirective) String() string {
+	if i < 0 || i >= smpDirective(len(_smpDirective_index)-1) {
+		return "smpDirective(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _smpDirective_name[_smpDirective_index[i]:_smpDirective_index[i+1]]
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/merge3.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/merge3.go
new file mode 100644
index 000000000..664270e5b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/merge3.go
@@ -0,0 +1,45 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package merge3 contains libraries for merging fields from one RNode to
+// another RNode
+package merge3
+
+import (
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+	"sigs.k8s.io/kustomize/kyaml/yaml/walk"
+)
+
+// Merge performs a 3-way merge of original, update, and dest.
+func Merge(dest, original, update *yaml.RNode) (*yaml.RNode, error) {
+	// if update == nil && original != nil => declarative deletion
+
+	return walk.Walker{
+		Visitor:            Visitor{},
+		VisitKeysAsScalars: true,
+		Sources:            []*yaml.RNode{dest, original, update}}.Walk()
+}
+
+// MergeStrings parses the arguments and performs a 3-way merge on the
+// parsed nodes.
+func MergeStrings(dest, original, update string, infer bool) (string, error) {
+	srcOriginal, err := yaml.Parse(original)
+	if err != nil {
+		return "", err
+	}
+	srcUpdated, err := yaml.Parse(update)
+	if err != nil {
+		return "", err
+	}
+	d, err := yaml.Parse(dest)
+	if err != nil {
+		return "", err
+	}
+
+	result, err := walk.Walker{
+		InferAssociativeLists: infer,
+		Visitor:               Visitor{},
+		VisitKeysAsScalars:    true,
+		Sources:               []*yaml.RNode{d, srcOriginal, srcUpdated}}.Walk()
+	if err != nil {
+		return "", err
+	}
+	return result.String()
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/visitor.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/visitor.go
new file mode 100644
index 000000000..978deff0c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/visitor.go
@@ -0,0 +1,172 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package merge3
+
+import (
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+	"sigs.k8s.io/kustomize/kyaml/yaml/walk"
+)
+
+type ConflictStrategy uint
+
+const (
+	// TODO: Support more strategies
+	TakeUpdate ConflictStrategy = 1 + iota
+)
+
+type Visitor struct{}
+
+func (m Visitor) VisitMap(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) {
+	if nodes.Updated().IsTaggedNull() || nodes.Dest().IsTaggedNull() {
+		// explicitly cleared from either dest or update
+		return walk.ClearNode, nil
+	}
+	if nodes.Dest() == nil && nodes.Updated() == nil {
+		// implicitly cleared -- missing from both dest and update
+		return walk.ClearNode, nil
+	}
+
+	if nodes.Dest() == nil {
+		// not cleared, but missing from the dest
+		// initialize a new value that can be recursively merged
+		return yaml.NewRNode(&yaml.Node{Kind: yaml.MappingNode}), nil
+	}
+
+	// recursively merge the dest with the original and updated
+	return nodes.Dest(), nil
+}
+
+func (m Visitor) visitAList(nodes walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) {
+	if yaml.IsMissingOrNull(nodes.Updated()) && !yaml.IsMissingOrNull(nodes.Origin()) {
+		// implicitly cleared from update -- element was deleted
+		return walk.ClearNode, nil
+	}
+	if yaml.IsMissingOrNull(nodes.Dest()) {
+		// not cleared, but missing from the dest
+		// initialize a new value that can be recursively merged
+		return yaml.NewRNode(&yaml.Node{Kind: yaml.SequenceNode}), nil
+	}
+
+	// recursively merge the dest with the original and updated
+	return nodes.Dest(), nil
+}
+
+func (m Visitor) VisitScalar(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) {
+	if nodes.Updated().IsTaggedNull() || nodes.Dest().IsTaggedNull() {
+		// explicitly cleared from either dest or update
+		return nil, nil
+	}
+	if yaml.IsMissingOrNull(nodes.Updated()) != yaml.IsMissingOrNull(nodes.Origin()) {
+		// value added or removed in update
+		return nodes.Updated(), nil
+	}
+	if yaml.IsMissingOrNull(nodes.Updated()) && yaml.IsMissingOrNull(nodes.Origin()) {
+		// value missing from both origin and update; keep the dest
+		return nodes.Dest(), nil
+	}
+
+	values, err := m.getStrValues(nodes)
+	if err != nil {
+		return nil, err
+	}
+
+	if (values.Dest == "" || values.Dest == values.Origin) && values.Origin != values.Update {
+		// dest is unset or unchanged from origin, and the update changed the value
+		return nodes.Updated(), nil
+	}
+
+	if nodes.Updated().YNode().Value != nodes.Origin().YNode().Value {
+		// value changed in update
+		return nodes.Updated(), nil
+	}
+
+	// unchanged between origin and update, keep the dest
+	return nodes.Dest(), nil
+}
+
+func (m Visitor) visitNAList(nodes walk.Sources) (*yaml.RNode, error) {
+	if nodes.Updated().IsTaggedNull() || nodes.Dest().IsTaggedNull() {
+		// explicitly cleared from either dest or update
+		return walk.ClearNode, nil
+	}
+
+	if yaml.IsMissingOrNull(nodes.Updated()) != yaml.IsMissingOrNull(nodes.Origin()) {
+		// value added or removed in update
+		return nodes.Updated(), nil
+	}
+	if yaml.IsMissingOrNull(nodes.Updated()) && yaml.IsMissingOrNull(nodes.Origin()) {
+		// value not present in source or dest
+		return nodes.Dest(), nil
+	}
+
+	// compare origin and update values to see if they have changed
+	values, err := m.getStrValues(nodes)
+	if err != nil {
+		return nil, err
+	}
+	if values.Update != values.Origin {
+		// value changed in update
+		return nodes.Updated(), nil
+	}
+
+	// unchanged between origin and update, keep the dest
+	return nodes.Dest(), nil
+}
+
+func (m
Visitor) VisitList(nodes walk.Sources, s *openapi.ResourceSchema, kind walk.ListKind) (*yaml.RNode, error) { + if kind == walk.AssociativeList { + return m.visitAList(nodes, s) + } + // non-associative list + return m.visitNAList(nodes) +} + +func (m Visitor) getStrValues(nodes walk.Sources) (strValues, error) { + var uStr, oStr, dStr string + var err error + if nodes.Updated() != nil && nodes.Updated().YNode() != nil { + s := nodes.Updated().YNode().Style + defer func() { + nodes.Updated().YNode().Style = s + }() + nodes.Updated().YNode().Style = yaml.FlowStyle | yaml.SingleQuotedStyle + uStr, err = nodes.Updated().String() + if err != nil { + return strValues{}, err + } + } + if nodes.Origin() != nil && nodes.Origin().YNode() != nil { + s := nodes.Origin().YNode().Style + defer func() { + nodes.Origin().YNode().Style = s + }() + nodes.Origin().YNode().Style = yaml.FlowStyle | yaml.SingleQuotedStyle + oStr, err = nodes.Origin().String() + if err != nil { + return strValues{}, err + } + } + if nodes.Dest() != nil && nodes.Dest().YNode() != nil { + s := nodes.Dest().YNode().Style + defer func() { + nodes.Dest().YNode().Style = s + }() + nodes.Dest().YNode().Style = yaml.FlowStyle | yaml.SingleQuotedStyle + dStr, err = nodes.Dest().String() + if err != nil { + return strValues{}, err + } + } + + return strValues{Origin: oStr, Update: uStr, Dest: dStr}, nil +} + +type strValues struct { + Origin string + Update string + Dest string +} + +var _ walk.Visitor = Visitor{} diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/order.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/order.go new file mode 100644 index 000000000..4e01c6489 --- /dev/null +++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/order.go @@ -0,0 +1,107 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package yaml + +// fieldSortOrder contains the relative ordering of fields when formatting an +// object. 
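+// Illustrative note (not upstream text): because "apiVersion", "kind", and
+// "metadata" appear in that order below, a formatter consulting FieldOrder
+// emits apiVersion before kind, and kind before metadata, regardless of the
+// order those fields had in the input object.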
+var fieldSortOrder = []string{ + // top-level metadata + "name", "generateName", "namespace", "clusterName", + "apiVersion", "kind", "metadata", "type", + "labels", "annotations", + "spec", "status", + + // secret and configmap + "stringData", "data", "binaryData", + + // cronjobspec, daemonsetspec, deploymentspec, statefulsetspec, + // jobspec fields + "parallelism", "completions", "activeDeadlineSeconds", "backoffLimit", + "replicas", "selector", "manualSelector", "template", + "ttlSecondsAfterFinished", "volumeClaimTemplates", "service", "serviceName", + "podManagementPolicy", "updateStrategy", "strategy", "minReadySeconds", + "revision", "revisionHistoryLimit", "paused", "progressDeadlineSeconds", + + // podspec + // podspec scalars + "restartPolicy", "terminationGracePeriodSeconds", + "activeDeadlineSeconds", "dnsPolicy", "serviceAccountName", + "serviceAccount", "automountServiceAccountToken", "nodeName", + "hostNetwork", "hostPID", "hostIPC", "shareProcessNamespace", "hostname", + "subdomain", "schedulerName", "priorityClassName", "priority", + "runtimeClassName", "enableServiceLinks", + + // podspec lists and maps + "nodeSelector", "hostAliases", + + // podspec objects + "initContainers", "containers", "volumes", "securityContext", + "imagePullSecrets", "affinity", "tolerations", "dnsConfig", + "readinessGates", + + // containers + "image", "command", "args", "workingDir", "ports", "envFrom", "env", + "resources", "volumeMounts", "volumeDevices", "livenessProbe", + "readinessProbe", "lifecycle", "terminationMessagePath", + "terminationMessagePolicy", "imagePullPolicy", "securityContext", + "stdin", "stdinOnce", "tty", + + // service + "clusterIP", "externalIPs", "loadBalancerIP", "loadBalancerSourceRanges", + "externalName", "externalTrafficPolicy", "sessionAffinity", + + // ports + "protocol", "port", "targetPort", "hostPort", "containerPort", "hostIP", + + // volumemount + "readOnly", "mountPath", "subPath", "subPathExpr", "mountPropagation", + + // envvar + envvarsource + "value", "valueFrom", "fieldRef", "resourceFieldRef", "configMapKeyRef", + "secretKeyRef", "prefix", "configMapRef", "secretRef", +} + +type set map[string]interface{} + +func newSet(values ...string) set { + m := map[string]interface{}{} + for _, value := range values { + m[value] = nil + } + return m +} + +func (s set) Has(key string) bool { + _, found := s[key] + return found +} + +// WhitelistedListSortKinds contains the set of kinds that are whitelisted +// for sorting list field elements +var WhitelistedListSortKinds = newSet( + "CronJob", "DaemonSet", "Deployment", "Job", "ReplicaSet", "StatefulSet", + "ValidatingWebhookConfiguration") + +// WhitelistedListSortApis contains the set of apis that are whitelisted for +// sorting list field elements +var WhitelistedListSortApis = newSet( + "apps/v1", "apps/v1beta1", "apps/v1beta2", "batch/v1", "batch/v1beta1", + "extensions/v1beta1", "v1", "admissionregistration.k8s.io/v1") + +// WhitelistedListSortFields contains json paths to list fields that should +// be sorted, and the field they should be sorted by +var WhitelistedListSortFields = map[string]string{ + ".spec.template.spec.containers": "name", + ".webhooks.rules.operations": "", +} + +// FieldOrder indexes fields and maps them to relative precedence +var FieldOrder = func() map[string]int { + // create an index of field orderings + fo := map[string]int{} + for i, f := range fieldSortOrder { + fo[f] = i + 1 + } + return fo +}() diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go 
b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go
new file mode 100644
index 000000000..7406c525e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go
@@ -0,0 +1,1368 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package yaml
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml"
+	"sigs.k8s.io/kustomize/kyaml/sliceutil"
+	"sigs.k8s.io/kustomize/kyaml/utils"
+	"sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels"
+)
+
+// MakeNullNode returns an RNode that represents an empty document.
+func MakeNullNode() *RNode {
+	return NewRNode(&Node{Tag: NodeTagNull})
+}
+
+// IsMissingOrNull is true if the RNode is nil or explicitly tagged null.
+// TODO: make this a method on RNode.
+func IsMissingOrNull(node *RNode) bool {
+	return node.IsNil() || node.YNode().Tag == NodeTagNull
+}
+
+// IsEmptyMap returns true if the RNode is an empty node or an empty map.
+// TODO: make this a method on RNode.
+func IsEmptyMap(node *RNode) bool {
+	return IsMissingOrNull(node) || IsYNodeEmptyMap(node.YNode())
+}
+
+// GetValue returns the underlying yaml.Node Value field.
+func GetValue(node *RNode) string {
+	if IsMissingOrNull(node) {
+		return ""
+	}
+	return node.YNode().Value
+}
+
+// Parse parses a yaml string into an *RNode.
+// To parse multiple resources, consider a kio.ByteReader
+func Parse(value string) (*RNode, error) {
+	return Parser{Value: value}.Filter(nil)
+}
+
+// ReadFile parses a single Resource from a yaml file.
+// To parse multiple resources, consider a kio.ByteReader
+func ReadFile(path string) (*RNode, error) {
+	b, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return Parse(string(b))
+}
+
+// WriteFile writes a single Resource to a yaml file
+func WriteFile(node *RNode, path string) error {
+	out, err := node.String()
+	if err != nil {
+		return err
+	}
+	return errors.WrapPrefixf(os.WriteFile(path, []byte(out), 0600), "writing RNode to file")
+}
+
+// UpdateFile reads the file at path, applies the filter to it, and writes the
+// result back. path must contain exactly one resource (YAML).
+func UpdateFile(filter Filter, path string) error {
+	// Read the yaml
+	y, err := ReadFile(path)
+	if err != nil {
+		return err
+	}
+
+	// Update the yaml
+	if err := y.PipeE(filter); err != nil {
+		return err
+	}
+
+	// Write the yaml
+	return WriteFile(y, path)
+}
+
+// MustParse parses a yaml string into an *RNode and panics if there is an error
+func MustParse(value string) *RNode {
+	v, err := Parser{Value: value}.Filter(nil)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// NewScalarRNode returns a new Scalar *RNode containing the provided scalar value.
+func NewScalarRNode(value string) *RNode {
+	return &RNode{
+		value: &yaml.Node{
+			Kind:  yaml.ScalarNode,
+			Value: value,
+		}}
+}
+
+// NewStringRNode returns a new Scalar *RNode containing the provided string.
+// If the string is non-utf8, it will be base64 encoded, and the tag
+// will indicate binary data.
+func NewStringRNode(value string) *RNode {
+	n := yaml.Node{Kind: yaml.ScalarNode}
+	n.SetString(value)
+	return NewRNode(&n)
+}
+
+// NewListRNode returns a new List *RNode containing the provided scalar values.
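+// Example (illustrative, not upstream text):
+//
+//	NewListRNode("a", "b")
+//
+// returns an RNode that renders as the YAML sequence:
+//
+//	- a
+//	- b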
+func NewListRNode(values ...string) *RNode {
+	seq := &RNode{value: &yaml.Node{Kind: yaml.SequenceNode}}
+	for _, v := range values {
+		seq.value.Content = append(seq.value.Content, &yaml.Node{
+			Kind:  yaml.ScalarNode,
+			Value: v,
+		})
+	}
+	return seq
+}
+
+// NewMapRNode returns a new Map *RNode containing the provided values
+func NewMapRNode(values *map[string]string) *RNode {
+	m := &RNode{value: &yaml.Node{
+		Kind: yaml.MappingNode,
+	}}
+	if values == nil {
+		return m
+	}
+
+	for k, v := range *values {
+		m.value.Content = append(m.value.Content, &yaml.Node{
+			Kind:  yaml.ScalarNode,
+			Value: k,
+		}, &yaml.Node{
+			Kind:  yaml.ScalarNode,
+			Value: v,
+		})
+	}
+
+	return m
+}
+
+// SyncMapNodesOrder sorts the map node keys in 'to' node to match the order of
+// map node keys in 'from' node; additional keys are moved to the end
+func SyncMapNodesOrder(from, to *RNode) {
+	to.Copy()
+	res := &RNode{value: &yaml.Node{
+		Kind:        to.YNode().Kind,
+		Style:       to.YNode().Style,
+		Tag:         to.YNode().Tag,
+		Anchor:      to.YNode().Anchor,
+		Alias:       to.YNode().Alias,
+		HeadComment: to.YNode().HeadComment,
+		LineComment: to.YNode().LineComment,
+		FootComment: to.YNode().FootComment,
+		Line:        to.YNode().Line,
+		Column:      to.YNode().Column,
+	}}
+
+	fromFieldNames, err := from.Fields()
+	if err != nil {
+		return
+	}
+
+	toFieldNames, err := to.Fields()
+	if err != nil {
+		return
+	}
+
+	for _, fieldName := range fromFieldNames {
+		if !sliceutil.Contains(toFieldNames, fieldName) {
+			continue
+		}
+		// append the common nodes in the order defined in 'from' node
+		res.value.Content = append(res.value.Content, to.Field(fieldName).Key.YNode(), to.Field(fieldName).Value.YNode())
+		toFieldNames = sliceutil.Remove(toFieldNames, fieldName)
+	}
+
+	for _, fieldName := range toFieldNames {
+		// append the residual nodes which are not present in 'from' node
+		res.value.Content = append(res.value.Content, to.Field(fieldName).Key.YNode(), to.Field(fieldName).Value.YNode())
+	}
+
+	to.SetYNode(res.YNode())
+}
+
+// NewRNode returns a new RNode pointer containing the provided Node.
+func NewRNode(value *yaml.Node) *RNode {
+	return &RNode{value: value}
+}
+
+// RNode provides functions for manipulating Kubernetes Resources
+// Objects unmarshalled into *yaml.Nodes
+type RNode struct {
+	// fieldPath contains the path from the root of the KubernetesObject to
+	// this field.
+	// Only field names are captured in the path.
+	// e.g. an image field in a Deployment would be
+	// 'spec.template.spec.containers.image'
+	fieldPath []string
+
+	// FieldValue contains the value.
+	// FieldValue is always set:
+	//   field: field value
+	//   list entry: list entry value
+	//   object root: object root
+	value *yaml.Node
+
+	Match []string
+}
+
+// Copy returns a distinct copy.
+func (rn *RNode) Copy() *RNode {
+	if rn == nil {
+		return nil
+	}
+	result := *rn
+	result.value = CopyYNode(rn.value)
+	return &result
+}
+
+var ErrMissingMetadata = fmt.Errorf("missing Resource metadata")
+
+// IsNil is true if the node is nil, or its underlying YNode is nil.
+func (rn *RNode) IsNil() bool {
+	return rn == nil || rn.YNode() == nil
+}
+
+// IsTaggedNull is true if a non-nil node is explicitly tagged Null.
+func (rn *RNode) IsTaggedNull() bool {
+	return !rn.IsNil() && IsYNodeTaggedNull(rn.YNode())
+}
+
+// IsNilOrEmpty is true if the node is nil,
+// has no YNode, or has a YNode that appears empty.
+func (rn *RNode) IsNilOrEmpty() bool {
+	return rn.IsNil() || IsYNodeNilOrEmpty(rn.YNode())
+}
+
+// IsStringValue is true if the RNode is not nil and is a scalar string node
+func (rn *RNode) IsStringValue() bool {
+	return !rn.IsNil() && IsYNodeString(rn.YNode())
+}
+
+// GetMeta returns the ResourceMeta for an RNode
+func (rn *RNode) GetMeta() (ResourceMeta, error) {
+	if IsMissingOrNull(rn) {
+		return ResourceMeta{}, nil
+	}
+	missingMeta := true
+	n := rn
+	if n.YNode().Kind == DocumentNode {
+		// get the content if this is the document node
+		n = NewRNode(n.Content()[0])
+	}
+
+	// don't decode into the struct directly or it will fail on UTF-8 issues
+	// which appear in comments
+	m := ResourceMeta{}
+
+	// TODO: consider optimizing this parsing
+	if f := n.Field(APIVersionField); !f.IsNilOrEmpty() {
+		m.APIVersion = GetValue(f.Value)
+		missingMeta = false
+	}
+	if f := n.Field(KindField); !f.IsNilOrEmpty() {
+		m.Kind = GetValue(f.Value)
+		missingMeta = false
+	}
+
+	mf := n.Field(MetadataField)
+	if mf.IsNilOrEmpty() {
+		if missingMeta {
+			return m, ErrMissingMetadata
+		}
+		return m, nil
+	}
+	meta := mf.Value
+
+	if f := meta.Field(NameField); !f.IsNilOrEmpty() {
+		m.Name = f.Value.YNode().Value
+		missingMeta = false
+	}
+	if f := meta.Field(NamespaceField); !f.IsNilOrEmpty() {
+		m.Namespace = GetValue(f.Value)
+		missingMeta = false
+	}
+
+	if f := meta.Field(LabelsField); !f.IsNilOrEmpty() {
+		m.Labels = map[string]string{}
+		_ = f.Value.VisitFields(func(node *MapNode) error {
+			m.Labels[GetValue(node.Key)] = GetValue(node.Value)
+			return nil
+		})
+		missingMeta = false
+	}
+	if f := meta.Field(AnnotationsField); !f.IsNilOrEmpty() {
+		m.Annotations = map[string]string{}
+		_ = f.Value.VisitFields(func(node *MapNode) error {
+			m.Annotations[GetValue(node.Key)] = GetValue(node.Value)
+			return nil
+		})
+		missingMeta = false
+	}
+
+	if missingMeta {
+		return m, ErrMissingMetadata
+	}
+	return m, nil
+}
+
+// Pipe sequentially invokes each Filter, and passes the result to the next
+// Filter.
+//
+// Analogous to http://www.linfo.org/pipes.html
+//
+// * rn is provided as input to the first Filter.
+// * if any Filter returns an error, immediately return the error
+// * if any Filter returns a nil RNode, immediately return nil, nil
+// * if all Filters succeed with non-empty results, return the final result
+func (rn *RNode) Pipe(functions ...Filter) (*RNode, error) {
+	// check if rn is nil to make chaining Pipe calls easier
+	if rn == nil {
+		return nil, nil
+	}
+
+	var v *RNode
+	var err error
+	if rn.value != nil && rn.value.Kind == yaml.DocumentNode {
+		// the first node may be a DocumentNode containing a single MappingNode
+		v = &RNode{value: rn.value.Content[0]}
+	} else {
+		v = rn
+	}
+
+	// return each fn in sequence until encountering an error or missing value
+	for _, c := range functions {
+		v, err = c.Filter(v)
+		if err != nil || v == nil {
+			return v, errors.Wrap(err)
+		}
+	}
+	return v, err
+}
+
+// PipeE runs Pipe, dropping the *RNode return value.
+// Useful for directly returning the Pipe error value from functions.
+func (rn *RNode) PipeE(functions ...Filter) error {
+	_, err := rn.Pipe(functions...)
+	return errors.Wrap(err)
+}
+
+// Document returns the Node for the value.
+func (rn *RNode) Document() *yaml.Node {
+	return rn.value
+}
+
+// YNode returns the yaml.Node value. If the yaml.Node value is a DocumentNode,
+// YNode will return the DocumentNode Content entry instead of the DocumentNode.
+func (rn *RNode) YNode() *yaml.Node { + if rn == nil || rn.value == nil { + return nil + } + if rn.value.Kind == yaml.DocumentNode { + return rn.value.Content[0] + } + return rn.value +} + +// SetYNode sets the yaml.Node value on an RNode. +func (rn *RNode) SetYNode(node *yaml.Node) { + if rn.value == nil || node == nil { + rn.value = node + return + } + *rn.value = *node +} + +// GetKind returns the kind, if it exists, else empty string. +func (rn *RNode) GetKind() string { + if node := rn.getMapFieldValue(KindField); node != nil { + return node.Value + } + return "" +} + +// SetKind sets the kind. +func (rn *RNode) SetKind(k string) { + rn.SetMapField(NewScalarRNode(k), KindField) +} + +// GetApiVersion returns the apiversion, if it exists, else empty string. +func (rn *RNode) GetApiVersion() string { + if node := rn.getMapFieldValue(APIVersionField); node != nil { + return node.Value + } + return "" +} + +// SetApiVersion sets the apiVersion. +func (rn *RNode) SetApiVersion(av string) { + rn.SetMapField(NewScalarRNode(av), APIVersionField) +} + +// getMapFieldValue returns the value (*yaml.Node) of a mapping field. +// The value might be nil. Also, the function returns nil, not an error, +// if this node is not a mapping node, or if this node does not have the +// given field, so this function cannot be used to make distinctions +// between these cases. +func (rn *RNode) getMapFieldValue(field string) *yaml.Node { + var result *yaml.Node + visitMappingNodeFields(rn.Content(), func(key, value *yaml.Node) { + result = value + }, field) + return result +} + +// GetName returns the name, or empty string if +// field not found. The setter is more restrictive. +func (rn *RNode) GetName() string { + return rn.getMetaStringField(NameField) +} + +// getMetaStringField returns the value of a string field in metadata. +func (rn *RNode) getMetaStringField(fName string) string { + md := rn.getMetaData() + if md == nil { + return "" + } + var result string + visitMappingNodeFields(md.Content, func(key, value *yaml.Node) { + if !IsYNodeNilOrEmpty(value) { + result = value.Value + } + }, fName) + return result +} + +// getMetaData returns the *yaml.Node of the metadata field. +// Return nil if field not found (no error). +func (rn *RNode) getMetaData() *yaml.Node { + if IsMissingOrNull(rn) { + return nil + } + content := rn.Content() + if rn.YNode().Kind == DocumentNode { + // get the content if this is the document node + content = content[0].Content + } + var mf *yaml.Node + visitMappingNodeFields(content, func(key, value *yaml.Node) { + if !IsYNodeNilOrEmpty(value) { + mf = value + } + }, MetadataField) + return mf +} + +// SetName sets the metadata name field. +func (rn *RNode) SetName(name string) error { + return rn.SetMapField(NewScalarRNode(name), MetadataField, NameField) +} + +// GetNamespace gets the metadata namespace field, or empty string if +// field not found. The setter is more restrictive. +func (rn *RNode) GetNamespace() string { + return rn.getMetaStringField(NamespaceField) +} + +// SetNamespace tries to set the metadata namespace field. If the argument +// is empty, the field is dropped. +func (rn *RNode) SetNamespace(ns string) error { + meta, err := rn.Pipe(Lookup(MetadataField)) + if err != nil { + return err + } + if ns == "" { + if rn == nil { + return nil + } + return meta.PipeE(Clear(NamespaceField)) + } + return rn.SetMapField( + NewScalarRNode(ns), MetadataField, NamespaceField) +} + +// GetAnnotations gets the metadata annotations field. 
+// If the annotations field is missing, returns an empty map.
+// Use another method to check for missing metadata.
+// If specific annotations are provided, then the map is
+// restricted to only those entries with keys that match
+// one of the specific annotations. If no annotations are
+// provided, then the map will contain all entries.
+func (rn *RNode) GetAnnotations(annotations ...string) map[string]string {
+	return rn.getMapFromMeta(AnnotationsField, annotations...)
+}
+
+// SetAnnotations tries to set the metadata annotations field.
+func (rn *RNode) SetAnnotations(m map[string]string) error {
+	return rn.setMapInMetadata(m, AnnotationsField)
+}
+
+// GetLabels gets the metadata labels field.
+// If the labels field is missing, returns an empty map.
+// Use another method to check for missing metadata.
+// If specific labels are provided, then the map is
+// restricted to only those entries with keys that match
+// one of the specific labels. If no labels are
+// provided, then the map will contain all entries.
+func (rn *RNode) GetLabels(labels ...string) map[string]string {
+	return rn.getMapFromMeta(LabelsField, labels...)
+}
+
+// getMapFromMeta returns a map, sometimes empty, from the fName
+// field in the node's metadata field.
+// If specific fields are provided, then the map is
+// restricted to only those entries with keys that match
+// one of the specific fields. If no fields are
+// provided, then the map will contain all entries.
+func (rn *RNode) getMapFromMeta(fName string, fields ...string) map[string]string {
+	meta := rn.getMetaData()
+	if meta == nil {
+		return make(map[string]string)
+	}
+
+	var result map[string]string
+
+	visitMappingNodeFields(meta.Content, func(_, fNameValue *yaml.Node) {
+		// fName is found in metadata; create the map from its content
+		expectedSize := len(fields)
+		if expectedSize == 0 {
+			expectedSize = len(fNameValue.Content) / 2 //nolint: gomnd
+		}
+		result = make(map[string]string, expectedSize)
+
+		visitMappingNodeFields(fNameValue.Content, func(key, value *yaml.Node) {
+			result[key.Value] = value.Value
+		}, fields...)
+	}, fName)
+
+	if result == nil {
+		return make(map[string]string)
+	}
+	return result
+}
+
+// SetLabels sets the metadata labels field.
+func (rn *RNode) SetLabels(m map[string]string) error {
+	return rn.setMapInMetadata(m, LabelsField)
+}
+
+// This establishes proper quoting on string values, and sorts by key.
+func (rn *RNode) setMapInMetadata(m map[string]string, field string) error { + meta, err := rn.Pipe(LookupCreate(MappingNode, MetadataField)) + if err != nil { + return err + } + if err = meta.PipeE(Clear(field)); err != nil { + return err + } + if len(m) == 0 { + return nil + } + mapNode, err := meta.Pipe(LookupCreate(MappingNode, field)) + if err != nil { + return err + } + for _, k := range SortedMapKeys(m) { + if _, err := mapNode.Pipe( + SetField(k, NewStringRNode(m[k]))); err != nil { + return err + } + } + return nil +} + +func (rn *RNode) SetMapField(value *RNode, path ...string) error { + return rn.PipeE( + LookupCreate(yaml.MappingNode, path[0:len(path)-1]...), + SetField(path[len(path)-1], value), + ) +} + +func (rn *RNode) GetDataMap() map[string]string { + n, err := rn.Pipe(Lookup(DataField)) + if err != nil { + return nil + } + result := map[string]string{} + _ = n.VisitFields(func(node *MapNode) error { + result[GetValue(node.Key)] = GetValue(node.Value) + return nil + }) + return result +} + +func (rn *RNode) GetBinaryDataMap() map[string]string { + n, err := rn.Pipe(Lookup(BinaryDataField)) + if err != nil { + return nil + } + result := map[string]string{} + _ = n.VisitFields(func(node *MapNode) error { + result[GetValue(node.Key)] = GetValue(node.Value) + return nil + }) + return result +} + +// GetValidatedDataMap retrieves the data map and returns an error if the data +// map contains entries which are not included in the expectedKeys set. +func (rn *RNode) GetValidatedDataMap(expectedKeys []string) (map[string]string, error) { + dataMap := rn.GetDataMap() + err := rn.validateDataMap(dataMap, expectedKeys) + return dataMap, err +} + +func (rn *RNode) validateDataMap(dataMap map[string]string, expectedKeys []string) error { + if dataMap == nil { + return fmt.Errorf("The datamap is unassigned") + } + for key := range dataMap { + found := false + for _, expected := range expectedKeys { + if expected == key { + found = true + } + } + if !found { + return fmt.Errorf("an unexpected key (%v) was found", key) + } + } + return nil +} + +func (rn *RNode) SetDataMap(m map[string]string) { + if rn == nil { + log.Fatal("cannot set data map on nil Rnode") + } + if err := rn.PipeE(Clear(DataField)); err != nil { + log.Fatal(err) + } + if len(m) == 0 { + return + } + if err := rn.LoadMapIntoConfigMapData(m); err != nil { + log.Fatal(err) + } +} + +func (rn *RNode) SetBinaryDataMap(m map[string]string) { + if rn == nil { + log.Fatal("cannot set binaryData map on nil Rnode") + } + if err := rn.PipeE(Clear(BinaryDataField)); err != nil { + log.Fatal(err) + } + if len(m) == 0 { + return + } + if err := rn.LoadMapIntoConfigMapBinaryData(m); err != nil { + log.Fatal(err) + } +} + +// AppendToFieldPath appends a field name to the FieldPath. +func (rn *RNode) AppendToFieldPath(parts ...string) { + rn.fieldPath = append(rn.fieldPath, parts...) +} + +// FieldPath returns the field path from the Resource root node, to rn. +// Does not include list indexes. +func (rn *RNode) FieldPath() []string { + return rn.fieldPath +} + +// String returns string representation of the RNode +func (rn *RNode) String() (string, error) { + if rn == nil { + return "", nil + } + return String(rn.value) +} + +// MustString returns string representation of the RNode or panics if there is an error +func (rn *RNode) MustString() string { + s, err := rn.String() + if err != nil { + panic(err) + } + return s +} + +// Content returns Node Content field. 
+func (rn *RNode) Content() []*yaml.Node { + if rn == nil { + return nil + } + return rn.YNode().Content +} + +// Fields returns the list of field names for a MappingNode. +// Returns an error for non-MappingNodes. +func (rn *RNode) Fields() ([]string, error) { + if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { + return nil, errors.Wrap(err) + } + var fields []string + visitMappingNodeFields(rn.Content(), func(key, value *yaml.Node) { + fields = append(fields, key.Value) + }) + return fields, nil +} + +// FieldRNodes returns the list of field key RNodes for a MappingNode. +// Returns an error for non-MappingNodes. +func (rn *RNode) FieldRNodes() ([]*RNode, error) { + if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { + return nil, errors.Wrap(err) + } + var fields []*RNode + visitMappingNodeFields(rn.Content(), func(key, value *yaml.Node) { + // for each key node in the input mapping node contents create equivalent rNode + rNode := &RNode{} + rNode.SetYNode(key) + fields = append(fields, rNode) + }) + return fields, nil +} + +// Field returns a fieldName, fieldValue pair for MappingNodes. +// Returns nil for non-MappingNodes. +func (rn *RNode) Field(field string) *MapNode { + if rn.YNode().Kind != yaml.MappingNode { + return nil + } + var result *MapNode + visitMappingNodeFields(rn.Content(), func(key, value *yaml.Node) { + result = &MapNode{Key: NewRNode(key), Value: NewRNode(value)} + }, field) + return result +} + +// VisitFields calls fn for each field in the RNode. +// Returns an error for non-MappingNodes. +func (rn *RNode) VisitFields(fn func(node *MapNode) error) error { + // get the list of srcFieldNames + srcFieldNames, err := rn.Fields() + if err != nil { + return errors.Wrap(err) + } + + // visit each field + for _, fieldName := range srcFieldNames { + if err := fn(rn.Field(fieldName)); err != nil { + return errors.Wrap(err) + } + } + return nil +} + +// visitMappingNodeFields calls fn for fields in the content, in content order. +// The caller is responsible to ensure the node is a mapping node. If fieldNames +// are specified, then fn is called only for the fields that match the given +// fieldNames. +func visitMappingNodeFields(content []*yaml.Node, fn func(key, value *yaml.Node), fieldNames ...string) { + switch len(fieldNames) { + case 0: // visit all fields + visitFieldsWhileTrue(content, func(key, value *yaml.Node, _ int) bool { + fn(key, value) + return true + }) + case 1: // visit single field + visitFieldsWhileTrue(content, func(key, value *yaml.Node, _ int) bool { + if key == nil { + return true + } + if fieldNames[0] == key.Value { + fn(key, value) + return false + } + return true + }) + default: // visit specified fields + fieldsStillToVisit := make(map[string]bool, len(fieldNames)) + for _, fieldName := range fieldNames { + fieldsStillToVisit[fieldName] = true + } + visitFieldsWhileTrue(content, func(key, value *yaml.Node, _ int) bool { + if key == nil { + return true + } + if fieldsStillToVisit[key.Value] { + fn(key, value) + delete(fieldsStillToVisit, key.Value) + } + return len(fieldsStillToVisit) > 0 + }) + } +} + +// visitFieldsWhileTrue calls fn for the fields in content, in content order, +// until either fn returns false or all fields have been visited. The caller +// should ensure that content is from a mapping node, or fits the same expected +// pattern (consecutive key/value entries in the slice). 
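+// Illustrative note (not upstream text): for a mapping node holding
+// {a: 1, b: 2}, content is [keyA, valA, keyB, valB], so fn is invoked as
+// fn(keyA, valA, 0) and then fn(keyB, valB, 2).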
+func visitFieldsWhileTrue(content []*yaml.Node, fn func(key, value *yaml.Node, keyIndex int) bool) {
+	for i := 0; i < len(content); i += 2 {
+		continueVisiting := fn(content[i], content[i+1], i)
+		if !continueVisiting {
+			return
+		}
+	}
+}
+
+// Elements returns the list of elements in the RNode.
+// Returns an error for non-SequenceNodes.
+func (rn *RNode) Elements() ([]*RNode, error) {
+	if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil {
+		return nil, errors.Wrap(err)
+	}
+	var elements []*RNode
+	for i := 0; i < len(rn.Content()); i++ {
+		elements = append(elements, NewRNode(rn.Content()[i]))
+	}
+	return elements, nil
+}
+
+// ElementValues returns a list of all observed values for a given field name
+// in a list of elements.
+// Returns an error for non-SequenceNodes.
+func (rn *RNode) ElementValues(key string) ([]string, error) {
+	if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil {
+		return nil, errors.Wrap(err)
+	}
+	var elements []string
+	for i := 0; i < len(rn.Content()); i++ {
+		field := NewRNode(rn.Content()[i]).Field(key)
+		if !field.IsNilOrEmpty() {
+			elements = append(elements, field.Value.YNode().Value)
+		}
+	}
+	return elements, nil
+}
+
+// ElementValuesList returns a list of lists, where each list is a set of
+// values corresponding to each key in keys.
+// Returns an error for non-SequenceNodes.
+func (rn *RNode) ElementValuesList(keys []string) ([][]string, error) {
+	if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil {
+		return nil, errors.Wrap(err)
+	}
+	elements := make([][]string, len(rn.Content()))
+
+	for i := 0; i < len(rn.Content()); i++ {
+		for _, key := range keys {
+			field := NewRNode(rn.Content()[i]).Field(key)
+			if field.IsNilOrEmpty() {
+				elements[i] = append(elements[i], "")
+			} else {
+				elements[i] = append(elements[i], field.Value.YNode().Value)
+			}
+		}
+	}
+	return elements, nil
+}
+
+// Element returns the element in the list which contains the field matching the value.
+// Returns nil for non-SequenceNodes or if no Element matches.
+func (rn *RNode) Element(key, value string) *RNode {
+	if rn.YNode().Kind != yaml.SequenceNode {
+		return nil
+	}
+	elem, err := rn.Pipe(MatchElement(key, value))
+	if err != nil {
+		return nil
+	}
+	return elem
+}
+
+// ElementList returns the element in the list in which all fields keys[i] match all
+// corresponding values[i].
+// Returns nil for non-SequenceNodes or if no Element matches.
+func (rn *RNode) ElementList(keys []string, values []string) *RNode {
+	if rn.YNode().Kind != yaml.SequenceNode {
+		return nil
+	}
+	elem, err := rn.Pipe(MatchElementList(keys, values))
+	if err != nil {
+		return nil
+	}
+	return elem
+}
+
+// VisitElements calls fn for each element in a SequenceNode.
+// Returns an error for non-SequenceNodes
+func (rn *RNode) VisitElements(fn func(node *RNode) error) error {
+	elements, err := rn.Elements()
+	if err != nil {
+		return errors.Wrap(err)
+	}
+
+	for i := range elements {
+		if err := fn(elements[i]); err != nil {
+			return errors.Wrap(err)
+		}
+	}
+	return nil
+}
+
+// AssociativeSequenceKeys lists the field names treated as associative (merge)
+// keys for sequences. The order sets the precedence of the merge keys -- if
+// multiple keys are present in Resources in a list, then the FIRST key which
+// ALL elements in the list have is used as the associative key for merging
+// that list. Currently only "name" is inferred as a merge key.
+var AssociativeSequenceKeys = []string{"name"}
+
+// IsAssociative returns true if the RNode contains an AssociativeSequenceKey as a field.
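+// Example (illustrative, not upstream text): the sequence
+//
+//	- name: foo
+//	  image: nginx
+//	- name: bar
+//	  image: redis
+//
+// is associative, because every element has the "name" field.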
+func (rn *RNode) IsAssociative() bool {
+	return rn.GetAssociativeKey() != ""
+}
+
+// GetAssociativeKey returns the AssociativeSequenceKey used to merge the elements in the
+// SequenceNode, or "" if the list is not associative.
+func (rn *RNode) GetAssociativeKey() string {
+	// look for any associative keys in the first element
+	for _, key := range AssociativeSequenceKeys {
+		if checkKey(key, rn.Content()) {
+			return key
+		}
+	}
+
+	// the elements don't have any of the associative keys
+	return ""
+}
+
+// MarshalJSON creates a byte slice from the RNode.
+func (rn *RNode) MarshalJSON() ([]byte, error) {
+	s, err := rn.String()
+	if err != nil {
+		return nil, err
+	}
+
+	if rn.YNode().Kind == SequenceNode {
+		var a []interface{}
+		if err := Unmarshal([]byte(s), &a); err != nil {
+			return nil, err
+		}
+		return json.Marshal(a)
+	}
+
+	m := map[string]interface{}{}
+	if err := Unmarshal([]byte(s), &m); err != nil {
+		return nil, err
+	}
+	return json.Marshal(m)
+}
+
+// UnmarshalJSON overwrites this RNode with data from []byte.
+func (rn *RNode) UnmarshalJSON(b []byte) error {
+	m := map[string]interface{}{}
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+	r, err := FromMap(m)
+	if err != nil {
+		return err
+	}
+	rn.value = r.value
+	return nil
+}
+
+// DeAnchor inflates all YAML aliases with their anchor values.
+// All YAML anchor data is permanently removed (feel free to call Copy first).
+func (rn *RNode) DeAnchor() (err error) {
+	rn.value, err = deAnchor(rn.value)
+	return
+}
+
+// deAnchor removes all AliasNodes from the yaml.Node's tree, replacing
+// them with what they point to. All Anchor fields (these are used to mark
+// anchor definitions) are cleared.
+func deAnchor(yn *yaml.Node) (res *yaml.Node, err error) {
+	if yn == nil {
+		return nil, nil
+	}
+	if yn.Anchor != "" {
+		// This node defines an anchor. Clear the field so that it
+		// doesn't show up when marshalling.
+		if yn.Kind == yaml.AliasNode {
+			// Maybe this is OK, but for now treating it as a bug.
+			return nil, fmt.Errorf(
+				"anchor %q defined using alias %v", yn.Anchor, yn.Alias)
+		}
+		yn.Anchor = ""
+	}
+	switch yn.Kind {
+	case yaml.ScalarNode:
+		return yn, nil
+	case yaml.AliasNode:
+		result, err := deAnchor(yn.Alias)
+		if err != nil {
+			return nil, err
+		}
+		return CopyYNode(result), nil
+	case yaml.MappingNode:
+		toMerge, err := removeMergeTags(yn)
+		if err != nil {
+			return nil, err
+		}
+		err = mergeAll(yn, toMerge)
+		if err != nil {
+			return nil, err
+		}
+		fallthrough
+	case yaml.DocumentNode, yaml.SequenceNode:
+		for i := range yn.Content {
+			yn.Content[i], err = deAnchor(yn.Content[i])
+			if err != nil {
+				return nil, err
+			}
+		}
+		return yn, nil
+	default:
+		return nil, fmt.Errorf("cannot deAnchor kind %q", yn.Kind)
+	}
+}
+
+// isMerge returns if the node is tagged with !!merge
+func isMerge(yn *yaml.Node) bool {
+	return yn.Tag == MergeTag
+}
+
+// findMergeValues receives either a MappingNode, an AliasNode, or a potentially
+// mixed list of MappingNodes and AliasNodes. It returns a list of MappingNodes.
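+// Example (illustrative, not upstream text): given
+//
+//	base: &base
+//	  a: "1"
+//	merged:
+//	  <<: *base
+//
+// the value at the "<<" key is an alias to a mapping, so findMergeValues
+// returns a single-element list holding that MappingNode.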
+func findMergeValues(yn *yaml.Node) ([]*yaml.Node, error) {
+	if yn == nil {
+		return []*yaml.Node{}, nil
+	}
+	switch yn.Kind {
+	case MappingNode:
+		return []*yaml.Node{yn}, nil
+	case AliasNode:
+		if yn.Alias != nil && yn.Alias.Kind != MappingNode {
+			return nil, errors.Errorf("invalid map merge: received alias for a non-map value")
+		}
+		return []*yaml.Node{yn.Alias}, nil
+	case SequenceNode:
+		mergeValues := []*yaml.Node{}
+		for i := 0; i < len(yn.Content); i++ {
+			if yn.Content[i].Kind == SequenceNode {
+				return nil, errors.Errorf("invalid map merge: received a nested sequence")
+			}
+			newMergeValues, err := findMergeValues(yn.Content[i])
+			if err != nil {
+				return nil, err
+			}
+			mergeValues = append(newMergeValues, mergeValues...)
+		}
+		return mergeValues, nil
+	default:
+		return nil, errors.Errorf("map merge requires map or sequence of maps as the value")
+	}
+}
+
+// getMergeTagValue receives a MappingNode yaml node, searches for a
+// merge-tagged key, and returns its value yaml node. It fails if the merge
+// key is duplicated.
+func getMergeTagValue(yn *yaml.Node) (*yaml.Node, error) {
+	var result *yaml.Node
+	var err error
+	visitFieldsWhileTrue(yn.Content, func(key, value *yaml.Node, _ int) bool {
+		if isMerge(key) {
+			if result != nil {
+				err = fmt.Errorf("duplicate merge key")
+				result = nil
+				return false
+			}
+			result = value
+		}
+		return true
+	})
+	return result, err
+}
+
+// removeMergeTags removes all merge tags and returns an ordered list of yaml
+// nodes to merge, and an error
+func removeMergeTags(yn *yaml.Node) ([]*yaml.Node, error) {
+	if yn == nil || yn.Content == nil {
+		return nil, nil
+	}
+	if yn.Kind != yaml.MappingNode {
+		return nil, nil
+	}
+	value, err := getMergeTagValue(yn)
+	if err != nil {
+		return nil, err
+	}
+	toMerge, err := findMergeValues(value)
+	if err != nil {
+		return nil, err
+	}
+	err = NewRNode(yn).PipeE(Clear("<<"))
+	if err != nil {
+		return nil, err
+	}
+	return toMerge, nil
+}
+
+func mergeAll(yn *yaml.Node, toMerge []*yaml.Node) error {
+	// We only need to start with a copy of the existing node because we need to
+	// maintain duplicated keys and style
+	rn := NewRNode(yn).Copy()
+	toMerge = append(toMerge, yn)
+	for i := range toMerge {
+		rnToMerge := NewRNode(toMerge[i]).Copy()
+		err := rnToMerge.VisitFields(func(node *MapNode) error {
+			return rn.PipeE(MapEntrySetter{Key: node.Key, Value: node.Value})
+		})
+		if err != nil {
+			return err
+		}
+	}
+	*yn = *rn.value
+	return nil
+}
+
+// GetValidatedMetadata returns metadata after subjecting it to some tests.
+func (rn *RNode) GetValidatedMetadata() (ResourceMeta, error) {
+	m, err := rn.GetMeta()
+	if err != nil {
+		return m, err
+	}
+	if m.Kind == "" {
+		return m, fmt.Errorf("missing kind in object %v", m)
+	}
+	if strings.HasSuffix(m.Kind, "List") {
+		// A list doesn't require a name.
+		return m, nil
+	}
+	if m.NameMeta.Name == "" {
+		return m, fmt.Errorf("missing metadata.name in object %v", m)
+	}
+	return m, nil
+}
+
+// MatchesAnnotationSelector returns true on a selector match to annotations.
+func (rn *RNode) MatchesAnnotationSelector(selector string) (bool, error) {
+	s, err := labels.Parse(selector)
+	if err != nil {
+		return false, err
+	}
+	return s.Matches(labels.Set(rn.GetAnnotations())), nil
+}
+
+// MatchesLabelSelector returns true on a selector match to labels.
+func (rn *RNode) MatchesLabelSelector(selector string) (bool, error) {
+	s, err := labels.Parse(selector)
+	if err != nil {
+		return false, err
+	}
+	return s.Matches(labels.Set(rn.GetLabels())), nil
+}
+
+// HasNilEntryInList returns true if the RNode contains a list which has
+// a nil item, along with the path to the missing item.
+// TODO(broken): This doesn't do what it claims to do.
+// (see TODO in unit test and pr 1513).
+func (rn *RNode) HasNilEntryInList() (bool, string) {
+	return hasNilEntryInList(rn.value)
+}
+
+func hasNilEntryInList(in interface{}) (bool, string) {
+	switch v := in.(type) {
+	case map[string]interface{}:
+		for key, s := range v {
+			if result, path := hasNilEntryInList(s); result {
+				return result, key + "/" + path
+			}
+		}
+	case []interface{}:
+		for index, s := range v {
+			if s == nil {
+				return true, ""
+			}
+			if result, path := hasNilEntryInList(s); result {
+				return result, "[" + strconv.Itoa(index) + "]/" + path
+			}
+		}
+	}
+	return false, ""
+}
+
+func FromMap(m map[string]interface{}) (*RNode, error) {
+	c, err := Marshal(m)
+	if err != nil {
+		return nil, err
+	}
+	return Parse(string(c))
+}
+
+func (rn *RNode) Map() (map[string]interface{}, error) {
+	if rn == nil || rn.value == nil {
+		return make(map[string]interface{}), nil
+	}
+	var result map[string]interface{}
+	if err := rn.value.Decode(&result); err != nil {
+		// Should not be able to create an RNode that cannot be decoded;
+		// this is an unrecoverable error.
+		str, _ := rn.String()
+		return nil, fmt.Errorf("received error %w for the following resource:\n%s", err, str)
+	}
+	return result, nil
+}
+
+// ConvertJSONToYamlNode parses an input json string and returns the equivalent yaml node
+func ConvertJSONToYamlNode(jsonStr string) (*RNode, error) {
+	var body map[string]interface{}
+	err := json.Unmarshal([]byte(jsonStr), &body)
+	if err != nil {
+		return nil, err
+	}
+	yml, err := yaml.Marshal(body)
+	if err != nil {
+		return nil, err
+	}
+	node, err := Parse(string(yml))
+	if err != nil {
+		return nil, err
+	}
+	return node, nil
+}
+
+// checkKey returns true if all elems have the key
+func checkKey(key string, elems []*Node) bool {
+	count := 0
+	for i := range elems {
+		elem := NewRNode(elems[i])
+		if elem.Field(key) != nil {
+			count++
+		}
+	}
+	return count == len(elems)
+}
+
+// GetSlice returns the contents of the slice field at the given path.
+func (rn *RNode) GetSlice(path string) ([]interface{}, error) {
+	value, err := rn.GetFieldValue(path)
+	if err != nil {
+		return nil, err
+	}
+	if sliceValue, ok := value.([]interface{}); ok {
+		return sliceValue, nil
+	}
+	return nil, fmt.Errorf("node %s is not a slice", path)
+}
+
+// GetString returns the contents of the string field at the given path.
+func (rn *RNode) GetString(path string) (string, error) {
+	value, err := rn.GetFieldValue(path)
+	if err != nil {
+		return "", err
+	}
+	if v, ok := value.(string); ok {
+		return v, nil
+	}
+	return "", fmt.Errorf("node %s is not a string: %v", path, value)
+}
+
+// GetFieldValue finds period delimited fields.
+// TODO: When doing kustomize var replacement, which is likely a
+// primary use of this function and the reason it returns interface{}
+// rather than string, we do conversion from Nodes to Go types and back
+// to nodes. We should figure out how to do replacement using raw nodes,
+// assuming we keep the var feature in kustomize.
+// The other end of this is: refvar.go:updateNodeValue.
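+// Example (illustrative, not upstream text; the paths are hypothetical):
+//
+//	replicas, err := rn.GetFieldValue("spec.replicas")          // int
+//	image, err := rn.GetFieldValue("spec.containers[0].image")  // string
+//
+// Slice indexes such as "[0]" are split out by convertSliceIndex below before
+// the Lookup is performed.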
+func (rn *RNode) GetFieldValue(path string) (interface{}, error) {
+	fields := convertSliceIndex(utils.SmarterPathSplitter(path, "."))
+	rn, err := rn.Pipe(Lookup(fields...))
+	if err != nil {
+		return nil, err
+	}
+	if rn == nil {
+		return nil, NoFieldError{path}
+	}
+	yn := rn.YNode()
+
+	// If this is an alias node, resolve it
+	if yn.Kind == yaml.AliasNode {
+		yn = yn.Alias
+	}
+
+	// Return value as map for DocumentNode and MappingNode kinds
+	if yn.Kind == yaml.DocumentNode || yn.Kind == yaml.MappingNode {
+		var result map[string]interface{}
+		if err := yn.Decode(&result); err != nil {
+			return nil, err
+		}
+		return result, err
+	}
+
+	// Return value as slice for SequenceNode kind
+	if yn.Kind == yaml.SequenceNode {
+		var result []interface{}
+		if err := yn.Decode(&result); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+	if yn.Kind != yaml.ScalarNode {
+		return nil, fmt.Errorf("expected ScalarNode, got Kind=%d", yn.Kind)
+	}
+
+	switch yn.Tag {
+	case NodeTagString:
+		return yn.Value, nil
+	case NodeTagInt:
+		return strconv.Atoi(yn.Value)
+	case NodeTagFloat:
+		return strconv.ParseFloat(yn.Value, 64)
+	case NodeTagBool:
+		return strconv.ParseBool(yn.Value)
+	default:
+		// Possibly this should be an error or log.
+		return yn.Value, nil
+	}
+}
+
+// convertSliceIndex traverses the items in `fields`, finds any slice index in
+// an item, and changes it to a valid Lookup field path. For example,
+// 'ports[0]' will be converted to 'ports' and '0'.
+func convertSliceIndex(fields []string) []string {
+	var res []string
+	for _, s := range fields {
+		if !strings.HasSuffix(s, "]") {
+			res = append(res, s)
+			continue
+		}
+		re := regexp.MustCompile(`^(.*)\[(\d+)\]$`)
+		groups := re.FindStringSubmatch(s)
+		if len(groups) == 0 {
+			// no match, add to result
+			res = append(res, s)
+			continue
+		}
+		if groups[1] != "" {
+			res = append(res, groups[1])
+		}
+		res = append(res, groups[2])
+	}
+	return res
+}
+
+type NoFieldError struct {
+	Field string
+}
+
+func (e NoFieldError) Error() string {
+	return fmt.Sprintf("no field named '%s'", e.Field)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/schema/schema.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/schema/schema.go
new file mode 100644
index 000000000..9ee592f84
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/schema/schema.go
@@ -0,0 +1,44 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package schema contains libraries for working with the yaml and openapi packages.
+package schema
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// IsAssociative returns true if all elements in the list contain an
+// AssociativeSequenceKey as a field.
+func IsAssociative(schema *openapi.ResourceSchema, nodes []*yaml.RNode, infer bool) bool {
+	if schema != nil {
+		return schemaHasMergeStrategy(schema)
+	}
+	if !infer {
+		return false
+	}
+	for i := range nodes {
+		node := nodes[i]
+		if yaml.IsMissingOrNull(node) {
+			continue
+		}
+		if node.IsAssociative() {
+			return true
+		}
+	}
+	return false
+}
+
+func schemaHasMergeStrategy(schema *openapi.ResourceSchema) bool {
+	tmp, _ := schema.PatchStrategyAndKey()
+	strategies := strings.Split(tmp, ",")
+	for _, s := range strategies {
+		if s == "merge" {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go
new file mode 100644
index 000000000..897e2edaa
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go
@@ -0,0 +1,299 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package yaml
+
+import (
+	"bytes"
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/errors"
+	"sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml"
+	"sigs.k8s.io/kustomize/kyaml/sets"
+)
+
+// CopyYNode returns a distinct copy of its argument.
+// Use https://github.com/jinzhu/copier instead?
+func CopyYNode(n *yaml.Node) *yaml.Node {
+	if n == nil {
+		return nil
+	}
+	c := *n
+	if len(n.Content) > 0 {
+		// Using Go 'copy' here doesn't yield independent slices.
+		c.Content = make([]*Node, len(n.Content))
+		for i, item := range n.Content {
+			c.Content[i] = CopyYNode(item)
+		}
+	}
+	return &c
+}
+
+// IsYNodeTaggedNull returns true if the node is explicitly tagged Null.
+func IsYNodeTaggedNull(n *yaml.Node) bool {
+	return n != nil && n.Tag == NodeTagNull
+}
+
+// IsYNodeEmptyMap is true if the Node is a non-nil empty map.
+func IsYNodeEmptyMap(n *yaml.Node) bool {
+	return n != nil && n.Kind == yaml.MappingNode && len(n.Content) == 0
+}
+
+// IsYNodeEmptySeq is true if the Node is a non-nil empty sequence.
+func IsYNodeEmptySeq(n *yaml.Node) bool {
+	return n != nil && n.Kind == yaml.SequenceNode && len(n.Content) == 0
+}
+
+// IsYNodeNilOrEmpty is true if the Node is nil or appears empty.
+func IsYNodeNilOrEmpty(n *yaml.Node) bool {
+	return n == nil ||
+		IsYNodeTaggedNull(n) ||
+		IsYNodeEmptyMap(n) ||
+		IsYNodeEmptySeq(n) ||
+		IsYNodeZero(n)
+}
+
+// IsYNodeEmptyDoc is true if the node is a Document with no content.
+// E.g.: "---\n---"
+func IsYNodeEmptyDoc(n *yaml.Node) bool {
+	return n.Kind == yaml.DocumentNode && n.Content[0].Tag == NodeTagNull
+}
+
+func IsYNodeString(n *yaml.Node) bool {
+	return n.Kind == yaml.ScalarNode &&
+		(n.Tag == NodeTagString || n.Tag == NodeTagEmpty)
+}
+
+// IsYNodeZero is true if all the public fields in the Node are empty,
+// which means it's not initialized and should be omitted when marshaling.
+// The Node itself has a method IsZero, but it is not released
+// in yaml.v3. https://pkg.go.dev/gopkg.in/yaml.v3#Node.IsZero
+func IsYNodeZero(n *yaml.Node) bool {
+	// TODO: Change this to use IsZero when it's available.
+	return n != nil && n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" &&
+		n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" &&
+		n.Line == 0 && n.Column == 0
+}
+
+// Parser parses values into configuration.
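+// Example (illustrative, not upstream text):
+//
+//	node, err := Parser{Value: "a: 1"}.Filter(nil)
+//
+// behaves like Parse("a: 1"); the RNode argument to Filter is ignored.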
+type Parser struct {
+	Kind  string `yaml:"kind,omitempty"`
+	Value string `yaml:"value,omitempty"`
+}
+
+func (p Parser) Filter(_ *RNode) (*RNode, error) {
+	d := yaml.NewDecoder(bytes.NewBuffer([]byte(p.Value)))
+	o := &RNode{value: &yaml.Node{}}
+	return o, d.Decode(o.value)
+}
+
+// TODO(pwittrock): test this
+func GetStyle(styles ...string) Style {
+	var style Style
+	for _, s := range styles {
+		switch s {
+		case "TaggedStyle":
+			style |= TaggedStyle
+		case "DoubleQuotedStyle":
+			style |= DoubleQuotedStyle
+		case "SingleQuotedStyle":
+			style |= SingleQuotedStyle
+		case "LiteralStyle":
+			style |= LiteralStyle
+		case "FoldedStyle":
+			style |= FoldedStyle
+		case "FlowStyle":
+			style |= FlowStyle
+		}
+	}
+	return style
+}
+
+// Filter defines a function to manipulate an individual RNode such as by changing
+// its values, or returning a field.
+//
+// When possible, Filters should be serializable to yaml so that they can be described
+// declaratively as data.
+//
+// Analogous to http://www.linfo.org/filters.html
+type Filter interface {
+	Filter(object *RNode) (*RNode, error)
+}
+
+type FilterFunc func(object *RNode) (*RNode, error)
+
+func (f FilterFunc) Filter(object *RNode) (*RNode, error) {
+	return f(object)
+}
+
+// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta.
+// No need for a direct dependence; the fields are stable.
+type TypeMeta struct {
+	// APIVersion is the apiVersion field of a Resource
+	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
+	// Kind is the kind field of a Resource
+	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
+}
+
+// NameMeta contains name information.
+type NameMeta struct {
+	// Name is the metadata.name field of a Resource
+	Name string `json:"name,omitempty" yaml:"name,omitempty"`
+	// Namespace is the metadata.namespace field of a Resource
+	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+}
+
+// ResourceMeta contains the metadata for both a Resource Type and a Resource.
+type ResourceMeta struct {
+	TypeMeta `json:",inline" yaml:",inline"`
+	// ObjectMeta is the metadata field of a Resource
+	ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
+}
+
+// ObjectMeta contains metadata about a Resource
+type ObjectMeta struct {
+	NameMeta `json:",inline" yaml:",inline"`
+	// Labels is the metadata.labels field of a Resource
+	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
+	// Annotations is the metadata.annotations field of a Resource.
+	Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
+}
+
+// GetIdentifier returns a ResourceIdentifier that includes
+// the information needed to uniquely identify a resource in a cluster.
+func (m *ResourceMeta) GetIdentifier() ResourceIdentifier {
+	return ResourceIdentifier{
+		TypeMeta: m.TypeMeta,
+		NameMeta: m.NameMeta,
+	}
+}
+
+// ResourceIdentifier contains the information needed to uniquely
+// identify a resource in a cluster.
+// ResourceIdentifier contains the information needed to uniquely
+// identify a resource in a cluster.
+type ResourceIdentifier struct {
+	TypeMeta `json:",inline" yaml:",inline"`
+	NameMeta `json:",inline" yaml:",inline"`
+}
+
+// Comments struct holds the three yaml comment types (line, head and foot).
+type Comments struct {
+	LineComment string `yaml:"lineComment,omitempty"`
+	HeadComment string `yaml:"headComment,omitempty"`
+	FootComment string `yaml:"footComment,omitempty"`
+}
+
+func (r *ResourceIdentifier) GetName() string {
+	return r.Name
+}
+
+func (r *ResourceIdentifier) GetNamespace() string {
+	return r.Namespace
+}
+
+func (r *ResourceIdentifier) GetAPIVersion() string {
+	return r.APIVersion
+}
+
+func (r *ResourceIdentifier) GetKind() string {
+	return r.Kind
+}
+
+const (
+	Trim = "Trim"
+	Flow = "Flow"
+)
+
+// String returns a string value for a Node, applying the supplied formatting options
+func String(node *yaml.Node, opts ...string) (string, error) {
+	if node == nil {
+		return "", nil
+	}
+	optsSet := sets.String{}
+	optsSet.Insert(opts...)
+	if optsSet.Has(Flow) {
+		oldStyle := node.Style
+		defer func() {
+			node.Style = oldStyle
+		}()
+		node.Style = yaml.FlowStyle
+	}
+
+	b := &bytes.Buffer{}
+	e := NewEncoder(b)
+	err := e.Encode(node)
+	errClose := e.Close()
+	if err == nil {
+		err = errClose
+	}
+	val := b.String()
+	if optsSet.Has(Trim) {
+		val = strings.TrimSpace(val)
+	}
+	return val, errors.Wrap(err)
+}
+
+// MergeOptionsListIncreaseDirection is the type of list growth in a merge
+type MergeOptionsListIncreaseDirection int
+
+const (
+	MergeOptionsListAppend MergeOptionsListIncreaseDirection = iota
+	MergeOptionsListPrepend
+)
+
+// MergeOptions is a struct which contains the options for a merge
+type MergeOptions struct {
+	// ListIncreaseDirection indicates whether the merge function should prepend
+	// items from the source list to the destination list, or append them.
+	ListIncreaseDirection MergeOptionsListIncreaseDirection
+}
+
+// Since ObjectMeta and TypeMeta are stable, we manually create DeepCopy funcs for ResourceMeta and ObjectMeta.
+// For TypeMeta and NameMeta no DeepCopy funcs are required, as they only contain basic types.
+
+// DeepCopyInto copies the receiver, writing into out. in must be non-nil.
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
+	*out = *in
+	out.NameMeta = in.NameMeta
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy copies the receiver, creating a new ObjectMeta.
+func (in *ObjectMeta) DeepCopy() *ObjectMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMeta)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto copies the receiver, writing into out. in must be non-nil.
+func (in *ResourceMeta) DeepCopyInto(out *ResourceMeta) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+}
+
+// DeepCopy copies the receiver, creating a new ResourceMeta.
+func (in *ResourceMeta) DeepCopy() *ResourceMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceMeta)
+	in.DeepCopyInto(out)
+	return out
+}
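How the String helper behaves with the Flow and Trim options, as a hedged sketch (editorial; it assumes kyaml's NewMapRNode constructor, and the exact output shapes are indicative rather than guaranteed): Flow temporarily switches the node to flow style for the encode, and Trim strips the surrounding whitespace.

package main

import (
	"fmt"

	kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	rn := kyaml.NewMapRNode(&map[string]string{"replicas": "3"})

	// default block style, e.g. "replicas: \"3\"\n"
	block, _ := kyaml.String(rn.YNode())
	// flow style with the trailing newline trimmed, e.g. "{replicas: \"3\"}"
	flow, _ := kyaml.String(rn.YNode(), kyaml.Flow, kyaml.Trim)

	fmt.Printf("%q %q\n", block, flow)
}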
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/util.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/util.go
new file mode 100644
index 000000000..8c9439342
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/util.go
@@ -0,0 +1,70 @@
+// Copyright 2021 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package yaml
+
+import (
+	"strings"
+)
+
+// DeriveSeqIndentStyle derives the sequence indentation annotation value for the resource.
+// originalYAML is the input yaml string.
+// The style is decided by deriving the existing sequence indentation of the first sequence node.
+func DeriveSeqIndentStyle(originalYAML string) string {
+	lines := strings.Split(originalYAML, "\n")
+	for i, line := range lines {
+		elems := strings.SplitN(line, "- ", 2)
+		if len(elems) != 2 {
+			continue
+		}
+		// the prefix of "- " must be a sequence of spaces
+		if strings.Trim(elems[0], " ") != "" {
+			continue
+		}
+		numSpacesBeforeSeqElem := len(elems[0])
+
+		// keyLine is the line before the first sequence element
+		keyLine := keyLineBeforeSeqElem(lines, i)
+		if keyLine == "" {
+			// there is no keyLine for this sequence node;
+			// all of the preceding lines are comments
+			continue
+		}
+		numSpacesBeforeKeyElem := len(keyLine) - len(strings.TrimLeft(keyLine, " "))
+		trimmedKeyLine := strings.Trim(keyLine, " ")
+		if strings.Count(trimmedKeyLine, ":") != 1 || !strings.HasSuffix(trimmedKeyLine, ":") {
+			// if the key line doesn't contain exactly one ":", located at the end,
+			// this is not a sequence node; it is a wrapped sequence node string,
+			// so ignore it
+			continue
+		}
+
+		if numSpacesBeforeSeqElem == numSpacesBeforeKeyElem {
+			return string(CompactSequenceStyle)
+		}
+
+		if numSpacesBeforeSeqElem-numSpacesBeforeKeyElem == 2 {
+			return string(WideSequenceStyle)
+		}
+	}
+
+	return string(CompactSequenceStyle)
+}
+
+// keyLineBeforeSeqElem iterates through the lines before the first seqElement
+// and tries to find the non-comment key line for the sequence node
+func keyLineBeforeSeqElem(lines []string, seqElemIndex int) string {
+	// start with the line preceding the sequence element
+	i := seqElemIndex - 1
+	for ; i >= 0; i-- {
+		line := lines[i]
+		trimmedLine := strings.Trim(line, " ")
+		if strings.HasPrefix(trimmedLine, "#") { // commented line
+			continue
+		}
+		// we have a non-commented line which can have a trailing comment
+		parts := strings.SplitN(line, "#", 2)
+		return parts[0] // throw away the trailing comment part
+	}
+	return ""
+}
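A quick sketch of DeriveSeqIndentStyle on the two indentation styles it distinguishes (editorial; the "compact"/"wide" string values of CompactSequenceStyle and WideSequenceStyle are an assumption based on this kyaml version):

package main

import (
	"fmt"

	kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	// sequence dash at the same indentation as its key -> compact
	fmt.Println(kyaml.DeriveSeqIndentStyle("spec:\n- a\n- b\n"))
	// sequence dash indented two spaces past its key -> wide
	fmt.Println(kyaml.DeriveSeqIndentStyle("spec:\n  - a\n  - b\n"))
}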
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/associative_sequence.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/associative_sequence.go
new file mode 100644
index 000000000..5cea8d92f
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/associative_sequence.go
@@ -0,0 +1,385 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package walk
+
+import (
+	"strings"
+
+	"github.com/go-errors/errors"
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/kustomize/kyaml/sets"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// appendListNode appends the nodes from src to dst and returns dst.
+// src and dst should both be sequence nodes. keys is used to call ElementSetter,
+// which uses the key-value pairs to find and set the element in the
+// sequence node.
+func appendListNode(dst, src *yaml.RNode, keys []string) (*yaml.RNode, error) {
+	var err error
+	for _, elem := range src.Content() {
+		// If key is empty, we know this is a scalar value and we can directly set the
+		// node
+		if keys[0] == "" {
+			_, err = dst.Pipe(yaml.ElementSetter{
+				Element: elem,
+				Keys:    []string{""},
+				Values:  []string{elem.Value},
+			})
+			if err != nil {
+				return nil, err
+			}
+			continue
+		}
+
+		// we need to get the value for the key so that we can find the element to
+		// set in the sequence.
+		v := []string{}
+		for _, key := range keys {
+			tmpNode := yaml.NewRNode(elem)
+			valueNode, err := tmpNode.Pipe(yaml.Get(key))
+			if err != nil {
+				return nil, err
+			}
+			if valueNode.IsNil() {
+				// no key found, directly append to dst
+				err = dst.PipeE(yaml.Append(elem))
+				if err != nil {
+					return nil, err
+				}
+				continue
+			}
+			v = append(v, valueNode.YNode().Value)
+		}
+
+		// When there are multiple keys, ElementSetter appends the node to dst
+		// even if the output is already in dst. We remove the node from dst to
+		// prevent duplicates.
+		if len(keys) > 1 {
+			_, err = dst.Pipe(yaml.ElementSetter{
+				Keys:   keys,
+				Values: v,
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// We use the key and value from elem to find the corresponding element in dst.
+		// Then we will use ElementSetter to replace the element with elem. If we cannot
+		// find the item, the element will be appended.
+		_, err = dst.Pipe(yaml.ElementSetter{
+			Element: elem,
+			Keys:    keys,
+			Values:  v,
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+	return dst, nil
+}
+
+// validateKeys returns a list of valid key-value pairs.
+// If secondary merge key values are missing, use only the available merge keys.
+func validateKeys(valuesList [][]string, values []string, keys []string) ([]string, []string) {
+	validKeys := make([]string, 0)
+	validValues := make([]string, 0)
+	validKeySet := sets.String{}
+	for _, values := range valuesList {
+		for i, v := range values {
+			if v != "" {
+				validKeySet.Insert(keys[i])
+			}
+		}
+	}
+	if validKeySet.Len() == 0 { // if values are missing, fall back to the primary keys
+		return keys, values
+	}
+	for _, k := range keys {
+		if validKeySet.Has(k) {
+			validKeys = append(validKeys, k)
+		}
+	}
+	for i, v := range values {
+		if v != "" || validKeySet.Has(keys[i]) {
+			validValues = append(validValues, v)
+		}
+	}
+	return validKeys, validValues
+}
+
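appendListNode leans entirely on yaml.ElementSetter, which replaces (or appends) a sequence element identified by merge-key/value pairs. A minimal editorial sketch of that primitive on its own, using only the fields shown above:

package main

import (
	"fmt"

	kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	list := kyaml.MustParse("- name: a\n  value: 1\n- name: b\n  value: 2\n")
	elem := kyaml.MustParse("{name: b, value: 20}")

	// replace the element whose "name" field equals "b"
	if _, err := list.Pipe(kyaml.ElementSetter{
		Element: elem.YNode(),
		Keys:    []string{"name"},
		Values:  []string{"b"},
	}); err != nil {
		panic(err)
	}
	s, _ := list.String()
	fmt.Print(s) // the second element now carries value: 20
}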
+// mergeValues merges values together - e.g. if two containerPorts
+// have the same port and targetPort but one has an empty protocol
+// and the other doesn't, they are treated as the same containerPort
+func mergeValues(valuesList [][]string) [][]string {
+	for i, values1 := range valuesList {
+		for j, values2 := range valuesList {
+			if matched, values := match(values1, values2); matched {
+				valuesList[i] = values
+				valuesList[j] = values
+			}
+		}
+	}
+	return valuesList
+}
+
+// two values match if they have at least one common element and
+// corresponding elements only differ if one is an empty string
+func match(values1 []string, values2 []string) (bool, []string) {
+	if len(values1) != len(values2) {
+		return false, nil
+	}
+	var commonElement bool
+	var res []string
+	for i := range values1 {
+		if values1[i] == values2[i] {
+			commonElement = true
+			res = append(res, values1[i])
+			continue
+		}
+		if values1[i] != "" && values2[i] != "" {
+			return false, nil
+		}
+		if values1[i] != "" {
+			res = append(res, values1[i])
+		} else {
+			res = append(res, values2[i])
+		}
+	}
+	return commonElement, res
+}
+
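The match rule is subtle, so here is a worked example (editorial; matchValues is a local transcription of the unexported match function above, for illustration only):

package main

import "fmt"

// matchValues mirrors walk.match: values match when they share at least one
// identical element and every differing position has an empty string on one side.
func matchValues(a, b []string) (bool, []string) {
	if len(a) != len(b) {
		return false, nil
	}
	var common bool
	var res []string
	for i := range a {
		switch {
		case a[i] == b[i]:
			common = true
			res = append(res, a[i])
		case a[i] != "" && b[i] != "":
			return false, nil
		case a[i] != "":
			res = append(res, a[i])
		default:
			res = append(res, b[i])
		}
	}
	return common, res
}

func main() {
	// same containerPort, one side missing the protocol -> the same element
	fmt.Println(matchValues([]string{"8080", ""}, []string{"8080", "TCP"})) // true [8080 TCP]
	// both protocols set and different -> distinct elements
	fmt.Println(matchValues([]string{"8080", "UDP"}, []string{"8080", "TCP"})) // false []
}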
+// setAssociativeSequenceElements recursively sets the elements in the list
+func (l *Walker) setAssociativeSequenceElements(valuesList [][]string, keys []string, dest *yaml.RNode) (*yaml.RNode, error) {
+	// itemsToBeAdded contains the items that will be added to dest
+	itemsToBeAdded := yaml.NewListRNode()
+	var schema *openapi.ResourceSchema
+	if l.Schema != nil {
+		schema = l.Schema.Elements()
+	}
+	if len(keys) > 1 {
+		valuesList = mergeValues(valuesList)
+	}
+
+	// each element in valuesList is a list of values corresponding to the keys
+	// for example, for the following yaml:
+	//   - containerPort: 8080
+	//     protocol: UDP
+	//   - containerPort: 8080
+	//     protocol: TCP
+	// `keys` would be [containerPort, protocol]
+	// and `valuesList` would be [ [8080, UDP], [8080, TCP] ]
+	var validKeys []string
+	var validValues []string
+	for _, values := range valuesList {
+		if len(values) == 0 {
+			continue
+		}
+
+		validKeys, validValues = validateKeys(valuesList, values, keys)
+		val, err := Walker{
+			VisitKeysAsScalars:    l.VisitKeysAsScalars,
+			InferAssociativeLists: l.InferAssociativeLists,
+			Visitor:               l,
+			Schema:                schema,
+			Sources:               l.elementValueList(validKeys, validValues),
+			MergeOptions:          l.MergeOptions,
+		}.Walk()
+		if err != nil {
+			return nil, err
+		}
+
+		exit := false
+		for i, key := range validKeys {
+			// delete the node from **dest** if it's null or empty
+			if yaml.IsMissingOrNull(val) || yaml.IsEmptyMap(val) {
+				_, err = dest.Pipe(yaml.ElementSetter{
+					Keys:   validKeys,
+					Values: validValues,
+				})
+				if err != nil {
+					return nil, err
+				}
+				exit = true
+			} else if val.Field(key) == nil && validValues[i] != "" {
+				// make sure the key is set on the field
+				_, err = val.Pipe(yaml.SetField(key, yaml.NewScalarRNode(validValues[i])))
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+		if exit {
+			continue
+		}
+
+		// Add the val to the sequence. val will replace the item in the sequence if
+		// there is an item that matches all key-value pairs. Otherwise val will be
+		// appended to the sequence.
+		_, err = itemsToBeAdded.Pipe(yaml.ElementSetter{
+			Element: val.YNode(),
+			Keys:    validKeys,
+			Values:  validValues,
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var err error
+	if len(valuesList) > 0 {
+		if l.MergeOptions.ListIncreaseDirection == yaml.MergeOptionsListPrepend {
+			// items from patches need to be prepended, so we append
+			// dest to itemsToBeAdded
+			dest, err = appendListNode(itemsToBeAdded, dest, validKeys)
+		} else {
+			// append the items
+			dest, err = appendListNode(dest, itemsToBeAdded, validKeys)
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+	// sequence is empty
+	if yaml.IsMissingOrNull(dest) {
+		return nil, nil
+	}
+	return dest, nil
+}
+
+func (l *Walker) walkAssociativeSequence() (*yaml.RNode, error) {
+	// may require initializing the dest node
+	dest, err := l.Sources.setDestNode(l.VisitList(l.Sources, l.Schema, AssociativeList))
+	if dest == nil || err != nil {
+		return nil, err
+	}
+
+	// get the merge key(s) from the schema
+	var strategy string
+	var keys []string
+	if l.Schema != nil {
+		strategy, keys = l.Schema.PatchStrategyAndKeyList()
+	}
+	if strategy == "" && len(keys) == 0 { // neither strategy nor keys are present in the schema -- infer the key
+		// find the list of elements we need to recursively walk
+		key, err := l.elementKey()
+		if err != nil {
+			return nil, err
+		}
+		if key != "" {
+			keys = append(keys, key)
+		}
+	}
+
+	// non-primitive associative list -- merge the elements
+	values := l.elementValues(keys)
+	if len(values) != 0 || len(keys) > 0 {
+		return l.setAssociativeSequenceElements(values, keys, dest)
+	}
+
+	// primitive associative list -- merge the values
+	return l.setAssociativeSequenceElements(l.elementPrimitiveValues(), []string{""}, dest)
+}
+
+// elementKey returns the merge key to use for the associative list
+func (l Walker) elementKey() (string, error) {
+	var key string
+	for i := range l.Sources {
+		if l.Sources[i] != nil && len(l.Sources[i].Content()) > 0 {
+			newKey := l.Sources[i].GetAssociativeKey()
+			if key != "" && key != newKey {
+				return "", errors.Errorf(
+					"conflicting merge keys [%s,%s] for field %s",
+					key, newKey, strings.Join(l.Path, "."))
+			}
+			key = newKey
+		}
+	}
+	if key == "" {
+		return "", errors.Errorf("no merge key found for field %s",
+			strings.Join(l.Path, "."))
+	}
+	return key, nil
+}
+
+// elementValues returns a slice containing all values for the field across all elements
+// from all sources.
+// The returned slice is ordered using the original ordering from the elements, where
+// elements missing from earlier sources appear later.
+func (l Walker) elementValues(keys []string) [][]string {
+	// use a slice to keep the elements in their original order
+	var returnValues [][]string
+	var seen sets.StringList
+
+	// if we are doing append, the dest node should be first;
+	// otherwise the dest node should be last.
+	beginIdx := 0
+	if l.MergeOptions.ListIncreaseDirection == yaml.MergeOptionsListPrepend {
+		beginIdx = 1
+	}
+	for i := range l.Sources {
+		src := l.Sources[(i+beginIdx)%len(l.Sources)]
+		if src == nil {
+			continue
+		}
+
+		// add the value of the field for each element;
+		// don't check the error, we know this is a list node
+		values, _ := src.ElementValuesList(keys)
+		for _, s := range values {
+			if len(s) == 0 || seen.Has(s) {
+				continue
+			}
+			returnValues = append(returnValues, s)
+			seen = seen.Insert(s)
+		}
+	}
+	return returnValues
+}
+
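End to end, this walker is what kyaml's merge2 package drives when it merges two lists by merge key. An editorial sketch of the observable behavior (the MergeStrings helper and its signature are assumptions based on recent kyaml releases; with infer=true the key containerPort is inferred from the list elements):

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/yaml"
	"sigs.k8s.io/kustomize/kyaml/yaml/merge2"
)

func main() {
	src := `
ports:
- containerPort: 8080
  protocol: TCP
  hostPort: 80
`
	dest := `
ports:
- containerPort: 8080
  protocol: TCP
- containerPort: 9090
  protocol: TCP
`
	// infer=true lets the walker treat lists as associative without a schema
	out, err := merge2.MergeStrings(src, dest, true, yaml.MergeOptions{
		ListIncreaseDirection: yaml.MergeOptionsListAppend,
	})
	if err != nil {
		panic(err)
	}
	// expected: the 8080 element gains hostPort: 80; the 9090 element is kept
	fmt.Print(out)
}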
+// elementPrimitiveValues returns the primitive values in an associative list -- e.g. finalizers
+func (l Walker) elementPrimitiveValues() [][]string {
+	// use a slice to keep the elements in their original order
+	var returnValues [][]string
+	seen := sets.String{}
+	// if we are doing append, the dest node should be first;
+	// otherwise the dest node should be last.
+	beginIdx := 0
+	if l.MergeOptions.ListIncreaseDirection == yaml.MergeOptionsListPrepend {
+		beginIdx = 1
+	}
+	for i := range l.Sources {
+		src := l.Sources[(i+beginIdx)%len(l.Sources)]
+		if src == nil {
+			continue
+		}
+
+		// add the value of the field for each element;
+		// don't check the error, we know this is a list node
+		for _, item := range src.YNode().Content {
+			if seen.Has(item.Value) {
+				continue
+			}
+			returnValues = append(returnValues, []string{item.Value})
+			seen.Insert(item.Value)
+		}
+	}
+	return returnValues
+}
+
+// elementValueList returns a slice containing each source's element matching
+// the given keys and values
+func (l Walker) elementValueList(keys []string, values []string) []*yaml.RNode {
+	keys, values = validateKeys([][]string{values}, values, keys)
+	var fields []*yaml.RNode
+	for i := range l.Sources {
+		if l.Sources[i] == nil {
+			fields = append(fields, nil)
+			continue
+		}
+		fields = append(fields, l.Sources[i].ElementList(keys, values))
+	}
+	return fields
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go
new file mode 100644
index 000000000..998af6d32
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go
@@ -0,0 +1,173 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package walk
+
+import (
+	"sort"
+
+	"sigs.k8s.io/kustomize/kyaml/fieldmeta"
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/kustomize/kyaml/sets"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// walkMap returns the value of VisitMap
+//
+// - call VisitMap
+// - set the return value on l.Dest
+// - walk each source field
+// - set each source field value on l.Dest
+func (l Walker) walkMap() (*yaml.RNode, error) {
+	// get the new map value
+	dest, err := l.Sources.setDestNode(l.VisitMap(l.Sources, l.Schema))
+	if dest == nil || err != nil {
+		return nil, err
+	}
+
+	// recursively set the field values on the map
+	for _, key := range l.fieldNames() {
+		var res *yaml.RNode
+		var keys []*yaml.RNode
+		if l.VisitKeysAsScalars {
+			// visit the map keys as if they were scalars;
+			// this is necessary if doing things such as copying
+			// comments
+			for i := range l.Sources {
+				// construct the sources from the keys
+				if l.Sources[i] == nil {
+					keys = append(keys, nil)
+					continue
+				}
+				field := l.Sources[i].Field(key)
+				if field == nil || yaml.IsMissingOrNull(field.Key) {
+					keys = append(keys, nil)
+					continue
+				}
+				keys = append(keys, field.Key)
+			}
+			// visit the sources as a scalar;
+			// keys don't have any schema -- pass in nil
+			res, err = l.Visitor.VisitScalar(keys, nil)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		var s *openapi.ResourceSchema
+		if l.Schema != nil {
+			s = l.Schema.Field(key)
+		}
+		fv, commentSch := l.fieldValue(key)
+		if commentSch != nil {
+			s = commentSch
+		}
+		val, err := Walker{
+			VisitKeysAsScalars:    l.VisitKeysAsScalars,
+			InferAssociativeLists: l.InferAssociativeLists,
+			Visitor:               l,
+			Schema:                s,
+			Sources:               fv,
+			MergeOptions:          l.MergeOptions,
+			Path:                  append(l.Path, key)}.Walk()
+		if err != nil {
+			return nil, err
+		}
+
+		// transfer the comments of res to the dest node
+		var comments yaml.Comments
+		if !yaml.IsMissingOrNull(res) {
+			comments = yaml.Comments{
+				LineComment: res.YNode().LineComment,
+				HeadComment: res.YNode().HeadComment,
+				FootComment: res.YNode().FootComment,
+			}
+			if len(keys) > 0 && !yaml.IsMissingOrNull(keys[DestIndex]) {
+				keys[DestIndex].YNode().HeadComment = res.YNode().HeadComment
+				keys[DestIndex].YNode().LineComment = res.YNode().LineComment
+				keys[DestIndex].YNode().FootComment = res.YNode().FootComment
+			}
+		}
+
+		// this handles empty and non-empty values
+		_, err = dest.Pipe(yaml.FieldSetter{Name: key, Comments: comments, Value: val})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return dest, nil
+}
+
+// valueIfPresent returns node.Value if node is non-nil, otherwise returns nil
+func (l Walker) valueIfPresent(node *yaml.MapNode) (*yaml.RNode, *openapi.ResourceSchema) {
+	if node == nil {
+		return nil, nil
+	}
+
+	// parse the schema for the field if present
+	var s *openapi.ResourceSchema
+	fm := fieldmeta.FieldMeta{}
+	var err error
+	// check the value for a schema
+	if err = fm.Read(node.Value); err == nil {
+		s = &openapi.ResourceSchema{Schema: &fm.Schema}
+		if fm.Schema.Ref.String() != "" {
+			r, err := openapi.Resolve(&fm.Schema.Ref, openapi.Schema())
+			if err == nil && r != nil {
+				s.Schema = r
+			}
+		}
+	}
+	// check the key for a schema -- this will be used
+	// when the value is a Sequence (comments are attached
+	// to the key)
+	if fm.IsEmpty() {
+		if err = fm.Read(node.Key); err == nil {
+			s = &openapi.ResourceSchema{Schema: &fm.Schema}
+		}
+		if fm.Schema.Ref.String() != "" {
+			r, err := openapi.Resolve(&fm.Schema.Ref, openapi.Schema())
+			if err == nil && r != nil {
+				s.Schema = r
+			}
+		}
+	}
+	return node.Value, s
+}
+
+// fieldNames returns a sorted slice containing the names of all fields that appear in any of
+// the sources
+func (l Walker) fieldNames() []string {
+	fields := sets.String{}
+	for _, s := range l.Sources {
+		if s == nil {
+			continue
+		}
+		// don't check the error, we know this is a mapping node
+		sFields, _ := s.Fields()
+		fields.Insert(sFields...)
+	}
+	result := fields.List()
+	sort.Strings(result)
+	return result
+}
+
+// fieldValue returns a slice containing each source's value for fieldName
+func (l Walker) fieldValue(fieldName string) ([]*yaml.RNode, *openapi.ResourceSchema) {
+	var fields []*yaml.RNode
+	var sch *openapi.ResourceSchema
+	for i := range l.Sources {
+		if l.Sources[i] == nil {
+			fields = append(fields, nil)
+			continue
+		}
+		field := l.Sources[i].Field(fieldName)
+		f, s := l.valueIfPresent(field)
+		fields = append(fields, f)
+		if sch == nil && !s.IsMissingOrNull() {
+			sch = s
+		}
+	}
+	return fields, sch
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/nonassociative_sequence.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/nonassociative_sequence.go
new file mode 100644
index 000000000..91b187e5b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/nonassociative_sequence.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package walk
+
+import (
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// walkNonAssociativeSequence returns the value of VisitList
+func (l Walker) walkNonAssociativeSequence() (*yaml.RNode, error) {
+	return l.VisitList(l.Sources, l.Schema, NonAssociateList)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/scalar.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/scalar.go
new file mode 100644
index 000000000..1a26f6dff
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/scalar.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package walk
+
+import "sigs.k8s.io/kustomize/kyaml/yaml"
+
+// walkScalar returns the value of VisitScalar
+func (l Walker) walkScalar() (*yaml.RNode, error) {
+	return l.VisitScalar(l.Sources, l.Schema)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/visitor.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/visitor.go
new file mode 100644
index 000000000..153ac2945
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/visitor.go
@@ -0,0 +1,28 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package walk
+
+import (
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+type ListKind int32
+
+const (
+	AssociativeList ListKind = 1 + iota
+	NonAssociateList
+)
+
+// Visitor is invoked by walk with source and destination node pairs
+type Visitor interface {
+	VisitMap(Sources, *openapi.ResourceSchema) (*yaml.RNode, error)
+
+	VisitScalar(Sources, *openapi.ResourceSchema) (*yaml.RNode, error)
+
+	VisitList(Sources, *openapi.ResourceSchema, ListKind) (*yaml.RNode, error)
+}
+
+// ClearNode is returned if GrepFilter should do nothing after calling Set
+var ClearNode *yaml.RNode
diff --git a/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/walk.go b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/walk.go
new file mode 100644
index 000000000..68de1324e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/walk.go
@@ -0,0 +1,186 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package walk
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"sigs.k8s.io/kustomize/kyaml/fieldmeta"
+	"sigs.k8s.io/kustomize/kyaml/openapi"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+	"sigs.k8s.io/kustomize/kyaml/yaml/schema"
+)
+
+// Walker walks the Source RNode and modifies the RNode provided to GrepFilter.
+type Walker struct {
+	// Visitor is invoked by GrepFilter
+	Visitor
+
+	Schema *openapi.ResourceSchema
+
+	// Sources are the RNodes to walk. All Source fields and associative list elements
+	// will be visited.
+	Sources Sources
+
+	// Path is the field path to the current Source Node.
+	Path []string
+
+	// InferAssociativeLists, if set to true, will infer merge strategies for
+	// fields for which it doesn't have the schema, based on the fields in the
+	// list elements.
+	InferAssociativeLists bool
+
+	// VisitKeysAsScalars if true will call VisitScalar on map entry keys,
+	// providing nil as the OpenAPI schema.
+	VisitKeysAsScalars bool
+
+	// MergeOptions is a struct to store options for merge
+	MergeOptions yaml.MergeOptions
+}
+
+// Kind returns the kind of the first non-null node in Sources.
+func (l Walker) Kind() yaml.Kind {
+	for _, s := range l.Sources {
+		if !yaml.IsMissingOrNull(s) {
+			return s.YNode().Kind
+		}
+	}
+	return 0
+}
+
+// Walk recursively traverses every item in the Sources and performs the
+// corresponding actions on them
+func (l Walker) Walk() (*yaml.RNode, error) {
+	l.Schema = l.GetSchema()
+
+	// invoke the handler for the corresponding node type
+	switch l.Kind() {
+	case yaml.MappingNode:
+		if err := yaml.ErrorIfAnyInvalidAndNonNull(yaml.MappingNode, l.Sources...); err != nil {
+			return nil, err
+		}
+		return l.walkMap()
+	case yaml.SequenceNode:
+		if err := yaml.ErrorIfAnyInvalidAndNonNull(yaml.SequenceNode, l.Sources...); err != nil {
+			return nil, err
+		}
+		// an associative sequence means the items in the sequence are associative;
+		// they can be merged according to the merge key.
+		if schema.IsAssociative(l.Schema, l.Sources, l.InferAssociativeLists) {
+			return l.walkAssociativeSequence()
+		}
+		return l.walkNonAssociativeSequence()
+	case yaml.ScalarNode:
+		if err := yaml.ErrorIfAnyInvalidAndNonNull(yaml.ScalarNode, l.Sources...); err != nil {
+			return nil, err
+		}
+		return l.walkScalar()
+	case 0:
+		// walk empty nodes as maps
+		return l.walkMap()
+	default:
+		return nil, nil
+	}
+}
+
+func (l Walker) GetSchema() *openapi.ResourceSchema {
+	for i := range l.Sources {
+		r := l.Sources[i]
+		if yaml.IsMissingOrNull(r) {
+			continue
+		}
+
+		fm := fieldmeta.FieldMeta{}
+		if err := fm.Read(r); err == nil && !fm.IsEmpty() {
+			// per-field schema, this is fine
+			if fm.Schema.Ref.String() != "" {
+				// resolve the reference
+				s, err := openapi.Resolve(&fm.Schema.Ref, openapi.Schema())
+				if err == nil && s != nil {
+					fm.Schema = *s
+				}
+			}
+			return &openapi.ResourceSchema{Schema: &fm.Schema}
+		}
+	}
+
+	if l.Schema != nil {
+		return l.Schema
+	}
+	for i := range l.Sources {
+		r := l.Sources[i]
+		if yaml.IsMissingOrNull(r) {
+			continue
+		}
+
+		m, _ := r.GetMeta()
+		if m.Kind == "" || m.APIVersion == "" {
+			continue
+		}
+
+		s := openapi.SchemaForResourceType(yaml.TypeMeta{Kind: m.Kind, APIVersion: m.APIVersion})
+		if s != nil {
+			return s
+		}
+	}
+	return nil
+}
+
+const (
+	DestIndex = iota
+	OriginIndex
+	UpdatedIndex
+)
+
+// Sources are a list of RNodes. The first item is the dest node, followed by
+// multiple source nodes.
+type Sources []*yaml.RNode
+
+// Dest returns the destination node
+func (s Sources) Dest() *yaml.RNode {
+	if len(s) <= DestIndex {
+		return nil
+	}
+	return s[DestIndex]
+}
+
+// Origin returns the origin node
+func (s Sources) Origin() *yaml.RNode {
+	if len(s) <= OriginIndex {
+		return nil
+	}
+	return s[OriginIndex]
+}
+
+// Updated returns the updated node
+func (s Sources) Updated() *yaml.RNode {
+	if len(s) <= UpdatedIndex {
+		return nil
+	}
+	return s[UpdatedIndex]
+}
+
+func (s Sources) String() string {
+	var values []string
+	for i := range s {
+		str, err := s[i].String()
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "%v\n", err)
+		}
+		values = append(values, str)
+	}
+	return strings.Join(values, "\n")
+}
+
+// setDestNode sets the destination source node
+func (s Sources) setDestNode(node *yaml.RNode, err error) (*yaml.RNode, error) {
+	if err != nil {
+		return nil, err
+	}
+	s[0] = node
+	return node, nil
+}
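The Dest/Origin/Updated accessors encode the calling convention used by kyaml's merge code: index 0 is the node being written, and the later entries are the inputs. A trivial editorial sketch of that convention, grounded only in the accessors above:

package main

import (
	"fmt"

	kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
	"sigs.k8s.io/kustomize/kyaml/yaml/walk"
)

func main() {
	dest := kyaml.MustParse("a: 1\n")
	origin := kyaml.MustParse("a: 1\nb: 2\n")
	updated := kyaml.MustParse("a: 1\nb: 3\n")

	// Sources is just []*yaml.RNode with positional meaning
	s := walk.Sources{dest, origin, updated}
	fmt.Println(s.Dest() == dest)       // true
	fmt.Println(s.Origin() == origin)   // true
	fmt.Println(s.Updated() == updated) // true
}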